author     Alexander Gololobov <davenger@yandex-team.com>  2022-02-10 16:47:38 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:47:38 +0300
commit     fccc62e9bfdce9be2fe7e0f23479da3a5512211a (patch)
tree       c0748b5dcbade83af788c0abfa89c0383d6b779c
parent     39608cdb86363c75ce55b2b9a69841c3b71f22cf (diff)
download   ydb-fccc62e9bfdce9be2fe7e0f23479da3a5512211a.tar.gz
Restoring authorship annotation for Alexander Gololobov <davenger@yandex-team.com>. Commit 2 of 2.
-rw-r--r--contrib/libs/grpc/src/cpp/common/channel_arguments.cc12
-rw-r--r--contrib/libs/pire/Makefile.am4
-rw-r--r--contrib/libs/pire/README12
-rw-r--r--contrib/libs/pire/configure.ac94
-rw-r--r--contrib/libs/pire/pire/Makefile.am228
-rw-r--r--contrib/libs/pire/pire/align.h198
-rw-r--r--contrib/libs/pire/pire/any.h246
-rw-r--r--contrib/libs/pire/pire/classes.cpp260
-rw-r--r--contrib/libs/pire/pire/defs.h224
-rw-r--r--contrib/libs/pire/pire/determine.h252
-rw-r--r--contrib/libs/pire/pire/easy.cpp60
-rw-r--r--contrib/libs/pire/pire/easy.h444
-rw-r--r--contrib/libs/pire/pire/encoding.cpp198
-rw-r--r--contrib/libs/pire/pire/encoding.h136
-rw-r--r--contrib/libs/pire/pire/extra.h58
-rw-r--r--contrib/libs/pire/pire/extra/capture.cpp166
-rw-r--r--contrib/libs/pire/pire/extra/capture.h264
-rw-r--r--contrib/libs/pire/pire/extra/count.cpp274
-rw-r--r--contrib/libs/pire/pire/extra/count.h218
-rw-r--r--contrib/libs/pire/pire/extra/glyphs.cpp244
-rw-r--r--contrib/libs/pire/pire/extra/glyphs.h78
-rw-r--r--contrib/libs/pire/pire/fsm.cpp1584
-rw-r--r--contrib/libs/pire/pire/fsm.h512
-rw-r--r--contrib/libs/pire/pire/fwd.h82
-rw-r--r--contrib/libs/pire/pire/glue.h306
-rw-r--r--contrib/libs/pire/pire/inline.l510
-rw-r--r--contrib/libs/pire/pire/partition.h324
-rw-r--r--contrib/libs/pire/pire/pire.h64
-rw-r--r--contrib/libs/pire/pire/platform.h316
-rw-r--r--contrib/libs/pire/pire/re_lexer.cpp198
-rw-r--r--contrib/libs/pire/pire/re_lexer.h442
-rw-r--r--contrib/libs/pire/pire/re_parser.y208
-rw-r--r--contrib/libs/pire/pire/run.h604
-rw-r--r--contrib/libs/pire/pire/scanner_io.cpp276
-rw-r--r--contrib/libs/pire/pire/scanners/common.h188
-rw-r--r--contrib/libs/pire/pire/scanners/loaded.h420
-rw-r--r--contrib/libs/pire/pire/scanners/multi.h1986
-rw-r--r--contrib/libs/pire/pire/scanners/null.cpp8
-rw-r--r--contrib/libs/pire/pire/scanners/pair.h196
-rw-r--r--contrib/libs/pire/pire/scanners/simple.h420
-rw-r--r--contrib/libs/pire/pire/scanners/slow.h620
-rw-r--r--contrib/libs/pire/pire/static_assert.h70
-rw-r--r--contrib/libs/pire/pire/stub/defaults.h6
-rw-r--r--contrib/libs/pire/pire/stub/hacks.h14
-rw-r--r--contrib/libs/pire/pire/stub/lexical_cast.h2
-rw-r--r--contrib/libs/pire/pire/stub/memstreams.h20
-rw-r--r--contrib/libs/pire/pire/stub/noncopyable.h10
-rw-r--r--contrib/libs/pire/pire/stub/saveload.h4
-rw-r--r--contrib/libs/pire/pire/stub/singleton.h16
-rw-r--r--contrib/libs/pire/pire/stub/stl.h68
-rw-r--r--contrib/libs/pire/pire/stub/utf8.h2
-rw-r--r--contrib/libs/pire/pire/vbitset.h232
-rw-r--r--library/cpp/actors/core/actor.h14
-rw-r--r--library/cpp/actors/core/actorsystem.h6
-rw-r--r--library/cpp/actors/core/defs.h12
-rw-r--r--library/cpp/actors/core/event.h22
-rw-r--r--library/cpp/actors/core/event_pb.h100
-rw-r--r--library/cpp/actors/core/event_pb_payload_ut.cpp128
-rw-r--r--library/cpp/actors/core/executelater.h8
-rw-r--r--library/cpp/actors/core/executor_pool_base.cpp26
-rw-r--r--library/cpp/actors/core/executor_pool_base.h4
-rw-r--r--library/cpp/actors/core/executor_pool_basic.cpp10
-rw-r--r--library/cpp/actors/core/executor_pool_basic.h4
-rw-r--r--library/cpp/actors/core/executor_pool_io.h2
-rw-r--r--library/cpp/actors/core/executor_pool_united.cpp2
-rw-r--r--library/cpp/actors/core/executor_thread.cpp42
-rw-r--r--library/cpp/actors/core/executor_thread.h6
-rw-r--r--library/cpp/actors/core/log.cpp76
-rw-r--r--library/cpp/actors/core/log.h4
-rw-r--r--library/cpp/actors/core/log_settings.cpp12
-rw-r--r--library/cpp/actors/core/log_settings.h6
-rw-r--r--library/cpp/actors/core/mailbox.cpp6
-rw-r--r--library/cpp/actors/core/mailbox.h2
-rw-r--r--library/cpp/actors/core/mon_stats.h36
-rw-r--r--library/cpp/actors/core/probes.cpp6
-rw-r--r--library/cpp/actors/core/probes.h10
-rw-r--r--library/cpp/actors/core/process_stats.cpp80
-rw-r--r--library/cpp/actors/core/process_stats.h14
-rw-r--r--library/cpp/actors/core/scheduler_actor.cpp2
-rw-r--r--library/cpp/actors/core/ya.make8
-rw-r--r--library/cpp/actors/helpers/selfping_actor.cpp4
-rw-r--r--library/cpp/actors/helpers/selfping_actor_ut.cpp4
-rw-r--r--library/cpp/actors/interconnect/interconnect_handshake.cpp8
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_session.h8
-rw-r--r--library/cpp/actors/interconnect/load.cpp12
-rw-r--r--library/cpp/actors/interconnect/poller_actor.cpp8
-rw-r--r--library/cpp/actors/interconnect/ut/lib/node.h6
-rw-r--r--library/cpp/actors/protos/unittests.proto6
-rw-r--r--library/cpp/actors/testlib/test_runtime.cpp34
-rw-r--r--library/cpp/actors/testlib/test_runtime.h8
-rw-r--r--library/cpp/execprofile/annotate_profile.pl720
-rw-r--r--library/cpp/execprofile/profile.cpp586
-rw-r--r--library/cpp/execprofile/profile.h32
-rw-r--r--library/cpp/execprofile/ya.make10
-rw-r--r--library/cpp/grpc/server/grpc_request.h70
-rw-r--r--library/cpp/grpc/server/grpc_request_base.h18
-rw-r--r--library/cpp/grpc/server/grpc_server.h18
-rw-r--r--library/cpp/lfalloc/alloc_profiler/profiler.cpp16
-rw-r--r--library/cpp/lfalloc/alloc_profiler/profiler.h12
-rw-r--r--library/cpp/lfalloc/alloc_profiler/profiler_ut.cpp150
-rw-r--r--library/cpp/lfalloc/alloc_profiler/stackcollect.cpp156
-rw-r--r--library/cpp/lfalloc/alloc_profiler/stackcollect.h162
-rw-r--r--library/cpp/lfalloc/alloc_profiler/ut/ya.make38
-rw-r--r--library/cpp/lfalloc/alloc_profiler/ya.make30
-rw-r--r--library/cpp/lfalloc/dbg_info/dbg_info.cpp14
-rw-r--r--library/cpp/lfalloc/dbg_info/dbg_info.h4
-rw-r--r--library/cpp/lfalloc/lf_allocX64.h60
-rw-r--r--library/cpp/lwtrace/mon/mon_lwtrace.cpp306
-rw-r--r--library/cpp/lwtrace/mon/mon_lwtrace.h4
-rw-r--r--library/cpp/lwtrace/protos/ya.make10
-rw-r--r--library/cpp/lwtrace/ya.make12
-rw-r--r--library/cpp/malloc/api/malloc.cpp8
-rw-r--r--library/cpp/malloc/api/malloc.h2
-rw-r--r--library/cpp/messagebus/actor/executor.cpp20
-rw-r--r--library/cpp/messagebus/actor/thread_extra.h22
-rw-r--r--library/cpp/messagebus/actor/what_thread_does.cpp8
-rw-r--r--library/cpp/messagebus/config/defs.h2
-rw-r--r--library/cpp/messagebus/latch.h4
-rw-r--r--library/cpp/messagebus/local_tasks.h12
-rw-r--r--library/cpp/messagebus/message.h2
-rw-r--r--library/cpp/messagebus/oldmodule/module.cpp8
-rw-r--r--library/cpp/messagebus/protobuf/ybusbuf.cpp10
-rw-r--r--library/cpp/messagebus/remote_connection.cpp6
-rw-r--r--library/cpp/messagebus/remote_connection_status.cpp2
-rw-r--r--library/cpp/messagebus/test/ut/messagebus_ut.cpp70
-rw-r--r--library/cpp/messagebus/test/ut/one_way_ut.cpp20
-rw-r--r--library/cpp/monlib/counters/counters.cpp20
-rw-r--r--library/cpp/monlib/dynamic_counters/counters.h12
-rw-r--r--library/cpp/monlib/dynamic_counters/counters_ut.cpp54
-rw-r--r--library/cpp/monlib/service/pages/templates.h4
-rw-r--r--library/cpp/monlib/service/pages/version_mon_page.cpp2
-rw-r--r--library/cpp/regex/pire/extraencodings.cpp40
-rw-r--r--library/cpp/regex/pire/inline/ya.make16
-rw-r--r--library/cpp/regex/pire/pire.h80
-rw-r--r--library/cpp/regex/pire/regexp.h20
-rw-r--r--library/cpp/regex/pire/ut/regexp_ut.cpp50
-rw-r--r--library/cpp/regex/pire/ut/ya.make28
-rw-r--r--library/cpp/regex/pire/ya.make38
-rw-r--r--util/draft/holder_vector.h20
-rw-r--r--util/generic/object_counter.h2
-rw-r--r--util/memory/benchmark/pool/main.cpp30
-rw-r--r--util/memory/benchmark/pool/metrics/main.py8
-rw-r--r--util/memory/benchmark/pool/metrics/ya.make20
-rw-r--r--util/memory/benchmark/pool/ya.make14
-rw-r--r--util/memory/benchmark/ya.make8
-rw-r--r--util/memory/pool.cpp18
-rw-r--r--util/memory/pool.h16
-rw-r--r--util/memory/pool_ut.cpp28
-rw-r--r--util/stream/aligned.h4
-rw-r--r--util/string/cast.cpp22
-rw-r--r--util/string/cast_ut.cpp6
-rw-r--r--util/system/align.h6
-rw-r--r--util/system/align_ut.cpp14
-rw-r--r--util/system/context.h2
-rw-r--r--util/system/dynlib.cpp4
-rw-r--r--util/system/dynlib.h12
-rw-r--r--util/system/sanitizers.h4
-rw-r--r--util/system/thread.cpp2
-rw-r--r--util/system/thread.h2
-rw-r--r--util/thread/lfstack.h6
-rw-r--r--util/thread/lfstack_ut.cpp92
-rw-r--r--ydb/core/actorlib_impl/async_destroyer.h60
-rw-r--r--ydb/core/actorlib_impl/load_network.cpp4
-rw-r--r--ydb/core/actorlib_impl/ya.make2
-rw-r--r--ydb/core/base/appdata.cpp6
-rw-r--r--ydb/core/base/appdata.h8
-rw-r--r--ydb/core/base/blobstorage.h20
-rw-r--r--ydb/core/base/board_lookup.cpp4
-rw-r--r--ydb/core/base/board_publish.cpp8
-rw-r--r--ydb/core/base/counters.cpp6
-rw-r--r--ydb/core/base/domain.h60
-rw-r--r--ydb/core/base/events.h6
-rw-r--r--ydb/core/base/localdb.cpp72
-rw-r--r--ydb/core/base/localdb.h22
-rw-r--r--ydb/core/base/pool_stats_collector.cpp66
-rw-r--r--ydb/core/base/tablet.h44
-rw-r--r--ydb/core/base/tablet_pipe.h14
-rw-r--r--ydb/core/base/ticket_parser.h4
-rw-r--r--ydb/core/base/ya.make4
-rw-r--r--ydb/core/blobstorage/base/transparent.h4
-rw-r--r--ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp6
-rw-r--r--ydb/core/blobstorage/incrhuge/incrhuge_keeper.h4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp2
-rw-r--r--ydb/core/blobstorage/testload/test_load_vdisk_write.cpp4
-rw-r--r--ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_impl.h2
-rw-r--r--ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcommit.h2
-rw-r--r--ydb/core/client/cancel_tx_ut.cpp288
-rw-r--r--ydb/core/client/client_ut.cpp338
-rw-r--r--ydb/core/client/flat_ut.cpp4128
-rw-r--r--ydb/core/client/flat_ut_client.h116
-rw-r--r--ydb/core/client/locks_ut.cpp2
-rw-r--r--ydb/core/client/minikql_compile/mkql_compile_service.cpp4
-rw-r--r--ydb/core/client/minikql_compile/yql_expr_minikql.cpp48
-rw-r--r--ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp60
-rw-r--r--ydb/core/client/query_stats_ut.cpp340
-rw-r--r--ydb/core/client/s3_listing_ut.cpp1314
-rw-r--r--ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp2
-rw-r--r--ydb/core/client/server/grpc_server.cpp72
-rw-r--r--ydb/core/client/server/grpc_server.h12
-rw-r--r--ydb/core/client/server/msgbus_server.cpp48
-rw-r--r--ydb/core/client/server/msgbus_server.h8
-rw-r--r--ydb/core/client/server/msgbus_server_cms.cpp4
-rw-r--r--ydb/core/client/server/msgbus_server_console.cpp4
-rw-r--r--ydb/core/client/server/msgbus_server_db.cpp10
-rw-r--r--ydb/core/client/server/msgbus_server_local_enumerate_tablets.cpp80
-rw-r--r--ydb/core/client/server/msgbus_server_local_scheme_tx.cpp102
-rw-r--r--ydb/core/client/server/msgbus_server_node_registration.cpp4
-rw-r--r--ydb/core/client/server/msgbus_server_persqueue.cpp38
-rw-r--r--ydb/core/client/server/msgbus_server_pq_metacache.cpp14
-rw-r--r--ydb/core/client/server/msgbus_server_pq_metacache.h10
-rw-r--r--ydb/core/client/server/msgbus_server_proxy.cpp16
-rw-r--r--ydb/core/client/server/msgbus_server_proxy.h4
-rw-r--r--ydb/core/client/server/msgbus_server_request.cpp28
-rw-r--r--ydb/core/client/server/msgbus_server_s3_listing.cpp1364
-rw-r--r--ydb/core/client/server/msgbus_server_scheme_initroot.cpp8
-rw-r--r--ydb/core/client/server/msgbus_server_scheme_request.cpp22
-rw-r--r--ydb/core/client/server/msgbus_server_tablet_counters.cpp86
-rw-r--r--ydb/core/client/server/msgbus_server_tx_request.cpp12
-rw-r--r--ydb/core/client/server/ya.make10
-rw-r--r--ydb/core/client/ut/ya.make8
-rw-r--r--ydb/core/cms/console/console_ut_configs.cpp152
-rw-r--r--ydb/core/cms/console/immediate_controls_configurator_ut.cpp4
-rw-r--r--ydb/core/cms/ui/datashard_hist.js6
-rw-r--r--ydb/core/cms/ui/datashard_info.js20
-rw-r--r--ydb/core/cms/walle_check_task_adapter.cpp4
-rw-r--r--ydb/core/cms/walle_create_task_adapter.cpp4
-rw-r--r--ydb/core/cms/walle_list_tasks_adapter.cpp4
-rw-r--r--ydb/core/cms/walle_remove_task_adapter.cpp4
-rw-r--r--ydb/core/control/immediate_control_board_actor.cpp4
-rw-r--r--ydb/core/control/immediate_control_board_actor_ut.cpp2
-rw-r--r--ydb/core/driver_lib/cli_base/cli_cmds_db.cpp306
-rw-r--r--ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp68
-rw-r--r--ydb/core/driver_lib/cli_utils/cli_minikql_compile_and_exec.cpp6
-rw-r--r--ydb/core/driver_lib/run/config.h2
-rw-r--r--ydb/core/driver_lib/run/kikimr_services_initializers.cpp64
-rw-r--r--ydb/core/driver_lib/run/kikimr_services_initializers.h18
-rw-r--r--ydb/core/driver_lib/run/main.cpp100
-rw-r--r--ydb/core/driver_lib/run/run.cpp98
-rw-r--r--ydb/core/driver_lib/run/run.h4
-rw-r--r--ydb/core/driver_lib/run/version.cpp2
-rw-r--r--ydb/core/engine/kikimr_program_builder.cpp4
-rw-r--r--ydb/core/engine/minikql/flat_local_minikql_host.h2
-rw-r--r--ydb/core/engine/minikql/flat_local_tx_factory.cpp10
-rw-r--r--ydb/core/engine/minikql/flat_local_tx_minikql.h84
-rw-r--r--ydb/core/engine/minikql/flat_local_tx_read_columns.h394
-rw-r--r--ydb/core/engine/minikql/flat_local_tx_scheme.h60
-rw-r--r--ydb/core/engine/minikql/minikql_engine_host.cpp242
-rw-r--r--ydb/core/engine/minikql/minikql_engine_host.h124
-rw-r--r--ydb/core/engine/mkql_engine_flat.cpp392
-rw-r--r--ydb/core/engine/mkql_engine_flat.h34
-rw-r--r--ydb/core/engine/mkql_engine_flat_extfunc.cpp8
-rw-r--r--ydb/core/engine/mkql_engine_flat_host.h12
-rw-r--r--ydb/core/engine/mkql_engine_flat_host_ut.cpp4
-rw-r--r--ydb/core/engine/mkql_engine_flat_impl.h30
-rw-r--r--ydb/core/engine/mkql_engine_flat_ut.cpp56
-rw-r--r--ydb/core/engine/mkql_keys.cpp8
-rw-r--r--ydb/core/engine/mkql_keys.h10
-rw-r--r--ydb/core/engine/mkql_proto.cpp422
-rw-r--r--ydb/core/engine/mkql_proto.h20
-rw-r--r--ydb/core/engine/mkql_proto_ut.cpp412
-rw-r--r--ydb/core/formats/arrow_batch_builder.cpp52
-rw-r--r--ydb/core/formats/arrow_batch_builder.h28
-rw-r--r--ydb/core/formats/arrow_helpers.cpp54
-rw-r--r--ydb/core/formats/arrow_helpers.h34
-rw-r--r--ydb/core/formats/clickhouse_block.cpp1392
-rw-r--r--ydb/core/formats/clickhouse_block.h18
-rw-r--r--ydb/core/formats/factory.h80
-rw-r--r--ydb/core/formats/sharding.h22
-rw-r--r--ydb/core/formats/ut_arrow.cpp2
-rw-r--r--ydb/core/formats/ya.make26
-rw-r--r--ydb/core/grpc_services/base/base.h26
-rw-r--r--ydb/core/grpc_services/grpc_helper.cpp64
-rw-r--r--ydb/core/grpc_services/grpc_helper.h58
-rw-r--r--ydb/core/grpc_services/grpc_mon.cpp196
-rw-r--r--ydb/core/grpc_services/grpc_mon.h24
-rw-r--r--ydb/core/grpc_services/grpc_request_proxy.cpp4
-rw-r--r--ydb/core/grpc_services/grpc_request_proxy.h10
-rw-r--r--ydb/core/grpc_services/resolve_local_db_table.cpp122
-rw-r--r--ydb/core/grpc_services/resolve_local_db_table.h40
-rw-r--r--ydb/core/grpc_services/rpc_deferrable.h28
-rw-r--r--ydb/core/grpc_services/rpc_discovery.cpp4
-rw-r--r--ydb/core/grpc_services/rpc_execute_data_query.cpp82
-rw-r--r--ydb/core/grpc_services/rpc_get_operation.cpp2
-rw-r--r--ydb/core/grpc_services/rpc_get_shard_locations.cpp354
-rw-r--r--ydb/core/grpc_services/rpc_kh_describe.cpp654
-rw-r--r--ydb/core/grpc_services/rpc_kh_snapshots.cpp20
-rw-r--r--ydb/core/grpc_services/rpc_load_rows.cpp458
-rw-r--r--ydb/core/grpc_services/rpc_log_store.cpp904
-rw-r--r--ydb/core/grpc_services/rpc_long_tx.cpp554
-rw-r--r--ydb/core/grpc_services/rpc_long_tx.h26
-rw-r--r--ydb/core/grpc_services/rpc_read_columns.cpp1382
-rw-r--r--ydb/core/grpc_services/rpc_read_table.cpp4
-rw-r--r--ydb/core/grpc_services/rpc_s3_listing.cpp28
-rw-r--r--ydb/core/grpc_services/ya.make16
-rw-r--r--ydb/core/grpc_streaming/grpc_streaming.h32
-rw-r--r--ydb/core/kesus/proxy/proxy.cpp4
-rw-r--r--ydb/core/keyvalue/keyvalue_flat_impl.h4
-rw-r--r--ydb/core/keyvalue/keyvalue_state.cpp6
-rw-r--r--ydb/core/keyvalue/keyvalue_ut.cpp30
-rw-r--r--ydb/core/kqp/common/kqp_resolve.h16
-rw-r--r--ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp2
-rw-r--r--ydb/core/kqp/counters/kqp_counters.h2
-rw-r--r--ydb/core/kqp/executer/kqp_executer_impl.h2
-rw-r--r--ydb/core/kqp/executer/kqp_partition_helper.cpp4
-rw-r--r--ydb/core/kqp/executer/kqp_partition_helper.h8
-rw-r--r--ydb/core/kqp/executer/kqp_planner.cpp16
-rw-r--r--ydb/core/kqp/executer/kqp_planner_strategy.cpp2
-rw-r--r--ydb/core/kqp/executer/kqp_scan_executer.cpp124
-rw-r--r--ydb/core/kqp/executer/kqp_table_resolver.cpp26
-rw-r--r--ydb/core/kqp/executer/kqp_tasks_graph.cpp2
-rw-r--r--ydb/core/kqp/executer/kqp_tasks_graph.h36
-rw-r--r--ydb/core/kqp/executer/kqp_tasks_validate.cpp14
-rw-r--r--ydb/core/kqp/host/kqp_run_prepared.cpp6
-rw-r--r--ydb/core/kqp/kqp_ic_gateway.cpp50
-rw-r--r--ydb/core/kqp/kqp_metadata_loader.cpp12
-rw-r--r--ydb/core/kqp/node/kqp_node.cpp26
-rw-r--r--ydb/core/kqp/prepare/kqp_query_exec.cpp40
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_exec.cpp4
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_results.cpp162
-rw-r--r--ydb/core/kqp/rm/kqp_snapshot_manager.cpp14
-rw-r--r--ydb/core/kqp/rm/kqp_snapshot_manager.h2
-rw-r--r--ydb/core/kqp/ut/common/kqp_ut_common.cpp100
-rw-r--r--ydb/core/kqp/ut/common/kqp_ut_common.h12
-rw-r--r--ydb/core/kqp/ut/kqp_explain_ut.cpp36
-rw-r--r--ydb/core/kqp/ut/kqp_flip_join_ut.cpp96
-rw-r--r--ydb/core/kqp/ut/kqp_join_ut.cpp6
-rw-r--r--ydb/core/kqp/ut/kqp_locks_ut.cpp8
-rw-r--r--ydb/core/kqp/ut/kqp_newengine_flowcontrol_ut.cpp10
-rw-r--r--ydb/core/kqp/ut/kqp_newengine_ut.cpp176
-rw-r--r--ydb/core/kqp/ut/kqp_olap_ut.cpp1852
-rw-r--r--ydb/core/kqp/ut/kqp_params_ut.cpp4
-rw-r--r--ydb/core/kqp/ut/kqp_pragma_ut.cpp6
-rw-r--r--ydb/core/kqp/ut/kqp_query_ut.cpp52
-rw-r--r--ydb/core/kqp/ut/kqp_scan_ut.cpp246
-rw-r--r--ydb/core/kqp/ut/kqp_scheme_ut.cpp104
-rw-r--r--ydb/core/kqp/ut/kqp_scripting_ut.cpp22
-rw-r--r--ydb/core/kqp/ut/kqp_service_ut.cpp22
-rw-r--r--ydb/core/kqp/ut/kqp_sort_ut.cpp42
-rw-r--r--ydb/core/kqp/ut/kqp_stats_ut.cpp4
-rw-r--r--ydb/core/kqp/ut/kqp_sys_col_ut.cpp72
-rw-r--r--ydb/core/kqp/ut/kqp_sys_view_ut.cpp46
-rw-r--r--ydb/core/kqp/ut/kqp_table_predicate_ut.cpp92
-rw-r--r--ydb/core/kqp/ut/kqp_tx_ut.cpp50
-rw-r--r--ydb/core/kqp/ut/kqp_write_ut.cpp44
-rw-r--r--ydb/core/kqp/ut/kqp_yql_ut.cpp44
-rw-r--r--ydb/core/kqp/ut/ya.make2
-rw-r--r--ydb/core/mind/hive/monitoring.cpp142
-rw-r--r--ydb/core/mind/lease_holder.cpp4
-rw-r--r--ydb/core/mon/mon.cpp32
-rw-r--r--ydb/core/mon_alloc/monitor.cpp30
-rw-r--r--ydb/core/persqueue/events/internal.h4
-rw-r--r--ydb/core/persqueue/partition.cpp40
-rw-r--r--ydb/core/persqueue/partition.h22
-rw-r--r--ydb/core/persqueue/pq_impl.cpp40
-rw-r--r--ydb/core/persqueue/pq_ut.cpp4
-rw-r--r--ydb/core/persqueue/read.h4
-rw-r--r--ydb/core/protos/config.proto32
-rw-r--r--ydb/core/protos/counters.proto2
-rw-r--r--ydb/core/protos/counters_columnshard.proto44
-rw-r--r--ydb/core/protos/counters_datashard.proto154
-rw-r--r--ydb/core/protos/counters_schemeshard.proto102
-rw-r--r--ydb/core/protos/flat_scheme_op.proto330
-rw-r--r--ydb/core/protos/flat_tx_scheme.proto120
-rw-r--r--ydb/core/protos/grpc.proto10
-rw-r--r--ydb/core/protos/kqp.proto2
-rw-r--r--ydb/core/protos/minikql_engine.proto2
-rw-r--r--ydb/core/protos/msgbus.proto88
-rw-r--r--ydb/core/protos/msgbus_pq.proto2
-rw-r--r--ydb/core/protos/query_stats.proto92
-rw-r--r--ydb/core/protos/scheme_log.proto4
-rw-r--r--ydb/core/protos/services.proto24
-rw-r--r--ydb/core/protos/table_stats.proto78
-rw-r--r--ydb/core/protos/tablet_database.proto30
-rw-r--r--ydb/core/protos/tablet_tx.proto50
-rw-r--r--ydb/core/protos/tx_columnshard.proto44
-rw-r--r--ydb/core/protos/tx_datashard.proto482
-rw-r--r--ydb/core/protos/tx_proxy.proto6
-rw-r--r--ydb/core/protos/ya.make10
-rw-r--r--ydb/core/scheme/scheme_tablecell.cpp46
-rw-r--r--ydb/core/scheme/scheme_tablecell.h430
-rw-r--r--ydb/core/scheme/scheme_tablecell_ut.cpp318
-rw-r--r--ydb/core/scheme/scheme_tabledefs.cpp4
-rw-r--r--ydb/core/scheme/scheme_tabledefs.h28
-rw-r--r--ydb/core/scheme/ut/ya.make30
-rw-r--r--ydb/core/scheme_types/scheme_raw_type_value.h8
-rw-r--r--ydb/core/scheme_types/scheme_type_registry.h2
-rw-r--r--ydb/core/sys_view/common/schema.cpp72
-rw-r--r--ydb/core/sys_view/common/schema.h62
-rw-r--r--ydb/core/tablet/bootstrapper.h2
-rw-r--r--ydb/core/tablet/tablet_counters_aggregator.h10
-rw-r--r--ydb/core/tablet/tablet_counters_protobuf.h4
-rw-r--r--ydb/core/tablet/tablet_exception.h12
-rw-r--r--ydb/core/tablet/tablet_metrics_ut.cpp2
-rw-r--r--ydb/core/tablet/tablet_monitoring_proxy.cpp8
-rw-r--r--ydb/core/tablet/tablet_pipe_client_cache.cpp18
-rw-r--r--ydb/core/tablet/tablet_pipe_client_cache.h2
-rw-r--r--ydb/core/tablet/tablet_pipe_server.cpp24
-rw-r--r--ydb/core/tablet/tablet_pipe_ut.cpp138
-rw-r--r--ydb/core/tablet/tablet_responsiveness_pinger.h4
-rw-r--r--ydb/core/tablet/tablet_sys.h4
-rw-r--r--ydb/core/tablet_flat/flat_comp_shard.cpp6
-rw-r--r--ydb/core/tablet_flat/flat_cxx_database.h306
-rw-r--r--ydb/core/tablet_flat/flat_cxx_database_ut.cpp68
-rw-r--r--ydb/core/tablet_flat/flat_database.cpp126
-rw-r--r--ydb/core/tablet_flat/flat_database.h24
-rw-r--r--ydb/core/tablet_flat/flat_dbase_naked.h2
-rw-r--r--ydb/core/tablet_flat/flat_dbase_scheme.cpp14
-rw-r--r--ydb/core/tablet_flat/flat_dbase_scheme.h24
-rw-r--r--ydb/core/tablet_flat/flat_executor.cpp128
-rw-r--r--ydb/core/tablet_flat/flat_executor.h16
-rw-r--r--ydb/core/tablet_flat/flat_executor_borrowlogic.cpp12
-rw-r--r--ydb/core/tablet_flat/flat_executor_compaction_logic.cpp66
-rw-r--r--ydb/core/tablet_flat/flat_executor_compaction_logic.h50
-rw-r--r--ydb/core/tablet_flat/flat_executor_counters.h8
-rw-r--r--ydb/core/tablet_flat/flat_executor_database_ut.cpp324
-rw-r--r--ydb/core/tablet_flat/flat_executor_db_mon.cpp194
-rw-r--r--ydb/core/tablet_flat/flat_executor_ut.cpp2
-rw-r--r--ydb/core/tablet_flat/flat_iterator.h140
-rw-r--r--ydb/core/tablet_flat/flat_mem_warm.cpp40
-rw-r--r--ydb/core/tablet_flat/flat_ops_compact.h2
-rw-r--r--ydb/core/tablet_flat/flat_part_store.h4
-rw-r--r--ydb/core/tablet_flat/flat_row_eggs.h8
-rw-r--r--ydb/core/tablet_flat/flat_sausagecache.cpp2
-rw-r--r--ydb/core/tablet_flat/flat_sausagecache.h2
-rw-r--r--ydb/core/tablet_flat/flat_scan_actor.h22
-rw-r--r--ydb/core/tablet_flat/flat_scan_iface.h8
-rw-r--r--ydb/core/tablet_flat/flat_stat_part.h224
-rw-r--r--ydb/core/tablet_flat/flat_stat_table.cpp104
-rw-r--r--ydb/core/tablet_flat/flat_stat_table.h350
-rw-r--r--ydb/core/tablet_flat/flat_table.cpp34
-rw-r--r--ydb/core/tablet_flat/flat_table.h42
-rw-r--r--ydb/core/tablet_flat/flat_table_part_ut.cpp398
-rw-r--r--ydb/core/tablet_flat/flat_table_subset.h6
-rw-r--r--ydb/core/tablet_flat/flat_update_op.h12
-rw-r--r--ydb/core/tablet_flat/shared_sausagecache.cpp8
-rw-r--r--ydb/core/tablet_flat/shared_sausagecache.h2
-rw-r--r--ydb/core/tablet_flat/tablet_flat_executed.cpp22
-rw-r--r--ydb/core/tablet_flat/tablet_flat_executed.h4
-rw-r--r--ydb/core/tablet_flat/test/libs/exec/runner.h2
-rw-r--r--ydb/core/tablet_flat/test/libs/rows/misc.h4
-rw-r--r--ydb/core/tablet_flat/test/libs/table/test_part.h6
-rw-r--r--ydb/core/tablet_flat/ut/flat_database_ut_common.h4
-rw-r--r--ydb/core/tablet_flat/ut/flat_test_db.cpp402
-rw-r--r--ydb/core/tablet_flat/ut/flat_test_db.h444
-rw-r--r--ydb/core/tablet_flat/ut/flat_test_db_helpers.h328
-rw-r--r--ydb/core/tablet_flat/ut/ut_db_scheme.cpp16
-rw-r--r--ydb/core/tablet_flat/ut/ya.make28
-rw-r--r--ydb/core/tablet_flat/ya.make12
-rw-r--r--ydb/core/testlib/actors/test_runtime.cpp6
-rw-r--r--ydb/core/testlib/actors/test_runtime_ut.cpp2
-rw-r--r--ydb/core/testlib/basics/appdata.h2
-rw-r--r--ydb/core/testlib/basics/services.cpp18
-rw-r--r--ydb/core/testlib/fake_coordinator.h56
-rw-r--r--ydb/core/testlib/fake_scheme_shard.h4
-rw-r--r--ydb/core/testlib/tablet_helpers.cpp474
-rw-r--r--ydb/core/testlib/tablet_helpers.h66
-rw-r--r--ydb/core/testlib/tenant_runtime.cpp8
-rw-r--r--ydb/core/testlib/test_client.cpp480
-rw-r--r--ydb/core/testlib/test_client.h124
-rw-r--r--ydb/core/tx/columnshard/blob.cpp238
-rw-r--r--ydb/core/tx/columnshard/blob.h536
-rw-r--r--ydb/core/tx/columnshard/blob_cache.cpp1180
-rw-r--r--ydb/core/tx/columnshard/blob_cache.h202
-rw-r--r--ydb/core/tx/columnshard/blob_manager.cpp986
-rw-r--r--ydb/core/tx/columnshard/blob_manager.h422
-rw-r--r--ydb/core/tx/columnshard/blob_manager_db.cpp214
-rw-r--r--ydb/core/tx/columnshard/blob_manager_db.h98
-rw-r--r--ydb/core/tx/columnshard/blob_manager_txs.cpp154
-rw-r--r--ydb/core/tx/columnshard/columnshard.cpp112
-rw-r--r--ydb/core/tx/columnshard/columnshard.h50
-rw-r--r--ydb/core/tx/columnshard/columnshard__index_scan.h254
-rw-r--r--ydb/core/tx/columnshard/columnshard__init.cpp98
-rw-r--r--ydb/core/tx/columnshard/columnshard__progress_tx.cpp20
-rw-r--r--ydb/core/tx/columnshard/columnshard__propose_transaction.cpp12
-rw-r--r--ydb/core/tx/columnshard/columnshard__read.cpp102
-rw-r--r--ydb/core/tx/columnshard/columnshard__read_blob_ranges.cpp204
-rw-r--r--ydb/core/tx/columnshard/columnshard__scan.cpp1138
-rw-r--r--ydb/core/tx/columnshard/columnshard__scan.h34
-rw-r--r--ydb/core/tx/columnshard/columnshard__stats_scan.h380
-rw-r--r--ydb/core/tx/columnshard/columnshard__write.cpp26
-rw-r--r--ydb/core/tx/columnshard/columnshard__write_index.cpp70
-rw-r--r--ydb/core/tx/columnshard/columnshard_common.cpp22
-rw-r--r--ydb/core/tx/columnshard/columnshard_common.h22
-rw-r--r--ydb/core/tx/columnshard/columnshard_impl.cpp154
-rw-r--r--ydb/core/tx/columnshard/columnshard_impl.h118
-rw-r--r--ydb/core/tx/columnshard/columnshard_schema.h116
-rw-r--r--ydb/core/tx/columnshard/columnshard_txs.h144
-rw-r--r--ydb/core/tx/columnshard/columnshard_ut_common.cpp16
-rw-r--r--ydb/core/tx/columnshard/compaction_actor.cpp54
-rw-r--r--ydb/core/tx/columnshard/defs.h42
-rw-r--r--ydb/core/tx/columnshard/engines/column_engine.h24
-rw-r--r--ydb/core/tx/columnshard/engines/column_engine_logs.cpp54
-rw-r--r--ydb/core/tx/columnshard/engines/column_engine_logs.h6
-rw-r--r--ydb/core/tx/columnshard/engines/columns_table.h10
-rw-r--r--ydb/core/tx/columnshard/engines/db_wrapper.cpp4
-rw-r--r--ydb/core/tx/columnshard/engines/db_wrapper.h10
-rw-r--r--ydb/core/tx/columnshard/engines/defs.h48
-rw-r--r--ydb/core/tx/columnshard/engines/index_info.cpp6
-rw-r--r--ydb/core/tx/columnshard/engines/index_info.h90
-rw-r--r--ydb/core/tx/columnshard/engines/indexed_read_data.cpp218
-rw-r--r--ydb/core/tx/columnshard/engines/indexed_read_data.h222
-rw-r--r--ydb/core/tx/columnshard/engines/insert_table.cpp32
-rw-r--r--ydb/core/tx/columnshard/engines/insert_table.h28
-rw-r--r--ydb/core/tx/columnshard/engines/portion_info.cpp16
-rw-r--r--ydb/core/tx/columnshard/engines/portion_info.h8
-rw-r--r--ydb/core/tx/columnshard/engines/predicate.h2
-rw-r--r--ydb/core/tx/columnshard/engines/ut_insert_table.cpp40
-rw-r--r--ydb/core/tx/columnshard/engines/ut_logs_engine.cpp60
-rw-r--r--ydb/core/tx/columnshard/indexing_actor.cpp30
-rw-r--r--ydb/core/tx/columnshard/inflight_request_tracker.h206
-rw-r--r--ydb/core/tx/columnshard/read_actor.cpp80
-rw-r--r--ydb/core/tx/columnshard/ut_columnshard_read_write.cpp646
-rw-r--r--ydb/core/tx/columnshard/ut_columnshard_schema.cpp8
-rw-r--r--ydb/core/tx/columnshard/write_actor.cpp244
-rw-r--r--ydb/core/tx/columnshard/ya.make14
-rw-r--r--ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp22
-rw-r--r--ydb/core/tx/coordinator/coordinator__restart_mediator.cpp10
-rw-r--r--ydb/core/tx/coordinator/coordinator__restore_transaction.cpp14
-rw-r--r--ydb/core/tx/coordinator/coordinator_impl.cpp14
-rw-r--r--ydb/core/tx/coordinator/coordinator_impl.h22
-rw-r--r--ydb/core/tx/datashard/check_data_tx_unit.cpp6
-rw-r--r--ydb/core/tx/datashard/complete_data_tx_unit.cpp4
-rw-r--r--ydb/core/tx/datashard/const.h8
-rw-r--r--ydb/core/tx/datashard/datashard.cpp516
-rw-r--r--ydb/core/tx/datashard/datashard.h508
-rw-r--r--ydb/core/tx/datashard/datashard__engine_host.cpp34
-rw-r--r--ydb/core/tx/datashard/datashard__engine_host.h2
-rw-r--r--ydb/core/tx/datashard/datashard__init.cpp330
-rw-r--r--ydb/core/tx/datashard/datashard__kqp_scan.cpp2
-rw-r--r--ydb/core/tx/datashard/datashard__monitoring.cpp24
-rw-r--r--ydb/core/tx/datashard/datashard__op_rows.cpp64
-rw-r--r--ydb/core/tx/datashard/datashard__plan_step.cpp54
-rw-r--r--ydb/core/tx/datashard/datashard__progress_tx.cpp2
-rw-r--r--ydb/core/tx/datashard/datashard__propose_tx_base.cpp20
-rw-r--r--ydb/core/tx/datashard/datashard__read_columns.cpp898
-rw-r--r--ydb/core/tx/datashard/datashard__s3.cpp392
-rw-r--r--ydb/core/tx/datashard/datashard__schema_changed.cpp12
-rw-r--r--ydb/core/tx/datashard/datashard__stats.cpp572
-rw-r--r--ydb/core/tx/datashard/datashard_active_transaction.cpp70
-rw-r--r--ydb/core/tx/datashard/datashard_active_transaction.h26
-rw-r--r--ydb/core/tx/datashard/datashard_common_upload.cpp60
-rw-r--r--ydb/core/tx/datashard/datashard_failpoints.cpp14
-rw-r--r--ydb/core/tx/datashard/datashard_failpoints.h138
-rw-r--r--ydb/core/tx/datashard/datashard_impl.h926
-rw-r--r--ydb/core/tx/datashard/datashard_loans.cpp358
-rw-r--r--ydb/core/tx/datashard/datashard_outreadset.cpp120
-rw-r--r--ydb/core/tx/datashard/datashard_outreadset.h10
-rw-r--r--ydb/core/tx/datashard/datashard_pipeline.cpp14
-rw-r--r--ydb/core/tx/datashard/datashard_pipeline.h2
-rw-r--r--ydb/core/tx/datashard/datashard_split_dst.cpp248
-rw-r--r--ydb/core/tx/datashard/datashard_split_src.cpp730
-rw-r--r--ydb/core/tx/datashard/datashard_trans_queue.cpp36
-rw-r--r--ydb/core/tx/datashard/datashard_trans_queue.h12
-rw-r--r--ydb/core/tx/datashard/datashard_txs.h22
-rw-r--r--ydb/core/tx/datashard/datashard_user_table.cpp12
-rw-r--r--ydb/core/tx/datashard/datashard_user_table.h62
-rw-r--r--ydb/core/tx/datashard/datashard_ut_common.cpp8
-rw-r--r--ydb/core/tx/datashard/datashard_ut_locks.cpp2
-rw-r--r--ydb/core/tx/datashard/datashard_ut_minikql.cpp62
-rw-r--r--ydb/core/tx/datashard/finish_propose_unit.cpp4
-rw-r--r--ydb/core/tx/datashard/ya.make16
-rw-r--r--ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp2
-rw-r--r--ydb/core/tx/long_tx_service/commit_impl.cpp2
-rw-r--r--ydb/core/tx/mediator/execute_queue.cpp4
-rw-r--r--ydb/core/tx/mediator/tablet_queue.cpp2
-rw-r--r--ydb/core/tx/message_seqno.h90
-rw-r--r--ydb/core/tx/scheme_board/cache.cpp104
-rw-r--r--ydb/core/tx/scheme_cache/scheme_cache.h12
-rw-r--r--ydb/core/tx/schemeshard/schemeshard.h324
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp32
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp30
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__init.cpp368
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__init_root.cpp38
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__monitoring.cpp172
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__notify.cpp52
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_alter_olap_table.cpp46
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp48
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_create_olap_store.cpp14
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_create_olap_table.cpp12
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp10
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_part.h2
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp30
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp36
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__table_stats.cpp144
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp538
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_impl.cpp446
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_impl.h158
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_info_types.cpp268
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_info_types.h234
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_path_describer.cpp38
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_utils.h6
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp138
-rw-r--r--ydb/core/tx/schemeshard/ut_base.cpp1926
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/helpers.cpp160
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/helpers.h20
-rw-r--r--ydb/core/tx/schemeshard/ut_olap.cpp34
-rw-r--r--ydb/core/tx/schemeshard/ut_olap_reboots.cpp2
-rw-r--r--ydb/core/tx/schemeshard/ut_reboots.cpp6
-rw-r--r--ydb/core/tx/schemeshard/ut_split_merge.cpp16
-rw-r--r--ydb/core/tx/schemeshard/ya.make4
-rw-r--r--ydb/core/tx/tx_proxy/datareq.cpp412
-rw-r--r--ydb/core/tx/tx_proxy/describe.cpp54
-rw-r--r--ydb/core/tx/tx_proxy/mon.cpp8
-rw-r--r--ydb/core/tx/tx_proxy/mon.h10
-rw-r--r--ydb/core/tx/tx_proxy/proxy.h80
-rw-r--r--ydb/core/tx/tx_proxy/proxy_impl.cpp8
-rw-r--r--ydb/core/tx/tx_proxy/schemereq.cpp20
-rw-r--r--ydb/core/tx/tx_proxy/snapshotreq.cpp148
-rw-r--r--ydb/core/tx/tx_proxy/upload_rows.cpp10
-rw-r--r--ydb/core/tx/tx_proxy/upload_rows_common_impl.h1160
-rw-r--r--ydb/core/tx/ya.make2
-rw-r--r--ydb/core/util/cache_cache.h38
-rw-r--r--ydb/core/util/pb.h10
-rw-r--r--ydb/core/viewer/browse_db.h20
-rw-r--r--ydb/core/viewer/content/viewer.js188
-rw-r--r--ydb/core/viewer/json_counters.h2
-rw-r--r--ydb/core/viewer/protos/viewer.proto24
-rw-r--r--ydb/core/ydb_convert/table_description.cpp132
-rw-r--r--ydb/core/ydb_convert/table_description.h2
-rw-r--r--ydb/core/ydb_convert/table_settings.cpp102
-rw-r--r--ydb/core/ydb_convert/table_settings.h92
-rw-r--r--ydb/core/ymq/http/http.cpp14
-rw-r--r--ydb/core/ymq/http/http.h4
-rw-r--r--ydb/core/yql_testlib/yql_testlib.cpp4
-rw-r--r--ydb/library/yql/core/type_ann/type_ann_core.cpp56
-rw-r--r--ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h2
-rw-r--r--ydb/library/yql/minikql/aligned_page_pool.cpp168
-rw-r--r--ydb/library/yql/minikql/aligned_page_pool.h84
-rw-r--r--ydb/library/yql/minikql/mkql_alloc.cpp2
-rw-r--r--ydb/library/yql/minikql/mkql_alloc.h2
-rw-r--r--ydb/library/yql/minikql/mkql_mem_info.h2
-rw-r--r--ydb/library/yql/minikql/mkql_node.cpp50
-rw-r--r--ydb/library/yql/minikql/mkql_node.h186
-rw-r--r--ydb/library/yql/minikql/mkql_node_builder.cpp12
-rw-r--r--ydb/library/yql/minikql/mkql_node_builder.h4
-rw-r--r--ydb/library/yql/minikql/mkql_node_cast_ut.cpp6
-rw-r--r--ydb/library/yql/minikql/mkql_node_serialization.cpp4
-rw-r--r--ydb/library/yql/minikql/mkql_node_ut.cpp4
-rw-r--r--ydb/library/yql/minikql/mkql_node_visitor.cpp2
-rw-r--r--ydb/library/yql/minikql/mkql_node_visitor.h2
-rw-r--r--ydb/library/yql/minikql/mkql_opt_literal.cpp4
-rw-r--r--ydb/library/yql/minikql/mkql_type_builder.cpp4
-rw-r--r--ydb/library/yql/minikql/mkql_type_builder.h2
-rw-r--r--ydb/library/yql/providers/common/gateway/yql_provider_gateway.cpp4
-rw-r--r--ydb/library/yql/providers/common/mkql/yql_provider_mkql.cpp4
-rw-r--r--ydb/library/yql/providers/common/mkql/yql_type_mkql.cpp2
-rw-r--r--ydb/library/yql/providers/common/schema/mkql/yql_mkql_schema.cpp2
-rw-r--r--ydb/library/yql/udfs/common/digest/digest_udf.cpp22
-rw-r--r--ydb/library/yql/utils/log/log_level.h2
-rw-r--r--ydb/public/api/grpc/draft/ya.make8
-rw-r--r--ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.proto24
-rw-r--r--ydb/public/api/grpc/draft/ydb_experimental_v1.proto16
-rw-r--r--ydb/public/api/grpc/draft/ydb_logstore_v1.proto32
-rw-r--r--ydb/public/api/grpc/draft/ydb_s3_internal_v1.proto18
-rw-r--r--ydb/public/api/grpc/ydb_table_v1.proto10
-rw-r--r--ydb/public/api/protos/draft/ydb_logstore.proto288
-rw-r--r--ydb/public/api/protos/ya.make10
-rw-r--r--ydb/public/api/protos/ydb_clickhouse_internal.proto156
-rw-r--r--ydb/public/api/protos/ydb_experimental.proto16
-rw-r--r--ydb/public/api/protos/ydb_query_stats.proto58
-rw-r--r--ydb/public/api/protos/ydb_s3_internal.proto44
-rw-r--r--ydb/public/api/protos/ydb_table.proto40
-rw-r--r--ydb/public/lib/base/defs.h2
-rw-r--r--ydb/public/lib/base/msgbus.h20
-rw-r--r--ydb/public/lib/deprecated/client/grpc_client.cpp2
-rw-r--r--ydb/public/lib/deprecated/client/grpc_client.h12
-rw-r--r--ydb/public/lib/deprecated/client/msgbus_client.cpp4
-rw-r--r--ydb/public/lib/deprecated/kicli/cpp_ut.cpp432
-rw-r--r--ydb/public/lib/deprecated/kicli/error.cpp2
-rw-r--r--ydb/public/lib/deprecated/kicli/kicli.h48
-rw-r--r--ydb/public/lib/deprecated/kicli/kikimr.cpp14
-rw-r--r--ydb/public/lib/deprecated/kicli/schema.cpp38
-rw-r--r--ydb/public/lib/experimental/ya.make30
-rw-r--r--ydb/public/lib/experimental/ydb_clickhouse_internal.cpp762
-rw-r--r--ydb/public/lib/experimental/ydb_clickhouse_internal.h298
-rw-r--r--ydb/public/lib/experimental/ydb_experimental.cpp16
-rw-r--r--ydb/public/lib/experimental/ydb_experimental.h16
-rw-r--r--ydb/public/lib/experimental/ydb_logstore.cpp662
-rw-r--r--ydb/public/lib/experimental/ydb_logstore.h418
-rw-r--r--ydb/public/lib/experimental/ydb_s3_internal.cpp204
-rw-r--r--ydb/public/lib/experimental/ydb_s3_internal.h86
-rw-r--r--ydb/public/lib/idx_test/idx_test_checker.cpp16
-rw-r--r--ydb/public/lib/value/value.cpp12
-rw-r--r--ydb/public/lib/ya.make2
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp18
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_import.h2
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp8
-rw-r--r--ydb/public/lib/ydb_cli/import/import.cpp66
-rw-r--r--ydb/public/lib/ydb_cli/import/import.h4
-rw-r--r--ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h4
-rw-r--r--ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp16
-rw-r--r--ydb/public/sdk/cpp/client/ydb_proto/accessor.h2
-rw-r--r--ydb/public/sdk/cpp/client/ydb_scheme/scheme.cpp10
-rw-r--r--ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.cpp50
-rw-r--r--ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.h60
-rw-r--r--ydb/public/sdk/cpp/client/ydb_table/table.cpp568
-rw-r--r--ydb/public/sdk/cpp/client/ydb_table/table.h194
-rw-r--r--ydb/public/sdk/cpp/client/ydb_value/value.cpp12
-rw-r--r--ydb/public/sdk/cpp/client/ydb_value/value.h2
-rw-r--r--ydb/public/sdk/cpp/client/ydb_value/value_ut.cpp10
-rw-r--r--ydb/services/cms/grpc_service.cpp6
-rw-r--r--ydb/services/discovery/grpc_service.cpp4
-rw-r--r--ydb/services/kesus/grpc_service.cpp6
-rw-r--r--ydb/services/ydb/ut/json_udf.cpp8
-rw-r--r--ydb/services/ydb/ut/re2_udf.cpp12
-rw-r--r--ydb/services/ydb/ut/udfs.h8
-rw-r--r--ydb/services/ydb/ut/ya.make16
-rw-r--r--ydb/services/ydb/ya.make8
-rw-r--r--ydb/services/ydb/ydb_bulk_upsert_ut.cpp2290
-rw-r--r--ydb/services/ydb/ydb_clickhouse_internal.cpp110
-rw-r--r--ydb/services/ydb/ydb_clickhouse_internal.h58
-rw-r--r--ydb/services/ydb/ydb_common_ut.h38
-rw-r--r--ydb/services/ydb/ydb_dummy.cpp6
-rw-r--r--ydb/services/ydb/ydb_experimental.cpp84
-rw-r--r--ydb/services/ydb/ydb_experimental.h50
-rw-r--r--ydb/services/ydb/ydb_index_table_ut.cpp468
-rw-r--r--ydb/services/ydb/ydb_logstore.cpp92
-rw-r--r--ydb/services/ydb/ydb_logstore.h62
-rw-r--r--ydb/services/ydb/ydb_logstore_ut.cpp892
-rw-r--r--ydb/services/ydb/ydb_long_tx_ut.cpp16
-rw-r--r--ydb/services/ydb/ydb_olapstore_ut.cpp1186
-rw-r--r--ydb/services/ydb/ydb_operation.cpp6
-rw-r--r--ydb/services/ydb/ydb_s3_internal.cpp90
-rw-r--r--ydb/services/ydb/ydb_s3_internal.h46
-rw-r--r--ydb/services/ydb/ydb_s3_internal_ut.cpp564
-rw-r--r--ydb/services/ydb/ydb_table.cpp18
-rw-r--r--ydb/services/ydb/ydb_table_split_ut.cpp732
-rw-r--r--ydb/services/ydb/ydb_table_ut.cpp476
-rw-r--r--ydb/services/ydb/ydb_ut.cpp14
-rw-r--r--ydb/tests/library/common/types.py6
-rw-r--r--ydb/tests/library/harness/util.py4
731 files changed, 42784 insertions, 42784 deletions
diff --git a/contrib/libs/grpc/src/cpp/common/channel_arguments.cc b/contrib/libs/grpc/src/cpp/common/channel_arguments.cc
index 998bd5699b0..5a5dd91b5ec 100644
--- a/contrib/libs/grpc/src/cpp/common/channel_arguments.cc
+++ b/contrib/libs/grpc/src/cpp/common/channel_arguments.cc
@@ -65,15 +65,15 @@ ChannelArguments::ChannelArguments(const ChannelArguments& other)
}
}
-ChannelArguments::~ChannelArguments() {
+ChannelArguments::~ChannelArguments() {
grpc_core::ExecCtx exec_ctx;
for (auto& arg : args_) {
if (arg.type == GRPC_ARG_POINTER) {
arg.value.pointer.vtable->destroy(arg.value.pointer.p);
- }
- }
-}
-
+ }
+ }
+}
+
void ChannelArguments::Swap(ChannelArguments& other) {
args_.swap(other.args_);
strings_.swap(other.strings_);
@@ -190,7 +190,7 @@ void ChannelArguments::SetPointerWithVtable(
arg.type = GRPC_ARG_POINTER;
strings_.push_back(key);
arg.key = const_cast<char*>(strings_.back().c_str());
- arg.value.pointer.p = vtable->copy(value);
+ arg.value.pointer.p = vtable->copy(value);
arg.value.pointer.vtable = vtable;
args_.push_back(arg);
}
diff --git a/contrib/libs/pire/Makefile.am b/contrib/libs/pire/Makefile.am
index 31eb7b3e7cb..a9e8908fb66 100644
--- a/contrib/libs/pire/Makefile.am
+++ b/contrib/libs/pire/Makefile.am
@@ -1,2 +1,2 @@
-ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = pire tests pkg samples
+ACLOCAL_AMFLAGS = -I m4
+SUBDIRS = pire tests pkg samples
diff --git a/contrib/libs/pire/README b/contrib/libs/pire/README
index 13ed2d6c7a0..1791486f8e8 100644
--- a/contrib/libs/pire/README
+++ b/contrib/libs/pire/README
@@ -1,6 +1,6 @@
-This is PIRE, Perl Incompatible Regular Expressions library.
-
-For detailed information about what it is, how to build and use it,
-see http://wiki.yandex-team.ru/DmitrijjProkopcev/pire .
-
-Please report bugs to dprokoptsev@yandex-team.ru or davenger@yandex-team.ru.
+This is PIRE, Perl Incompatible Regular Expressions library.
+
+For detailed information about what it is, how to build and use it,
+see http://wiki.yandex-team.ru/DmitrijjProkopcev/pire .
+
+Please report bugs to dprokoptsev@yandex-team.ru or davenger@yandex-team.ru.
diff --git a/contrib/libs/pire/configure.ac b/contrib/libs/pire/configure.ac
index 2068c63a7ec..49f235129cd 100644
--- a/contrib/libs/pire/configure.ac
+++ b/contrib/libs/pire/configure.ac
@@ -1,47 +1,47 @@
-AC_PREREQ([2.63])
-AC_INIT([pire], [0.0.2], [dprokoptsev@yandex-team.ru])
-AM_INIT_AUTOMAKE([foreign -Wall])
-AC_CONFIG_SRCDIR([pire/classes.cpp])
-AC_CONFIG_HEADERS([config.h])
-AC_CONFIG_MACRO_DIR([m4])
-
-AC_LANG_CPLUSPLUS
-
-# Require neccessary binaries to build ourselves
-AC_PROG_CXX
-AC_PROG_CC
-AC_PROG_LEX
-AC_PROG_YACC
-AC_PROG_LIBTOOL
-
-# Check for cppunit
-AM_PATH_CPPUNIT([0.0.0],[with_unittests=yes],[
- AC_WARN([cppunit not found. Unit tests will not compile and run.])
- with_unittests=no
-])
-AM_CONDITIONAL([WITH_UNITTESTS], [test x"$with_unittests" = xyes])
-
-# Just for conscience' sake
-AC_CHECK_HEADERS([stdlib.h string.h sys/time.h])
-AC_HEADER_STDBOOL
-AC_C_INLINE
-AC_TYPE_SIZE_T
-AC_CHECK_TYPES([ptrdiff_t])
-AC_FUNC_ERROR_AT_LINE
-AC_FUNC_MALLOC
-AC_CHECK_FUNCS([memset strchr])
-
-# Require little-endian platform
-AC_C_BIGENDIAN
-if test x"$ac_cv_c_bigendian" = xyes; then
- AC_ERROR([pire has not been ported to big-endian platforms yet.])
-fi
-
-# Optional features
-AC_ARG_ENABLE([extra], AS_HELP_STRING([--enable-extra], [Add extra functionality (capturing scanner, etc...)]))
-AC_ARG_ENABLE([debug], AS_HELP_STRING([--enable-debug], [Make Pire dump all constructed FSMs to std::clog (useless unless debugging Pire)]))
-AM_CONDITIONAL([ENABLE_EXTRA], [test x"$enable_extra" = xyes])
-AM_CONDITIONAL([ENABLE_DEBUG], [test x"$enable_debug" = xyes])
-
-AC_CONFIG_FILES([Makefile pire/Makefile tests/Makefile pkg/Makefile samples/Makefile samples/bench/Makefile])
-AC_OUTPUT
+AC_PREREQ([2.63])
+AC_INIT([pire], [0.0.2], [dprokoptsev@yandex-team.ru])
+AM_INIT_AUTOMAKE([foreign -Wall])
+AC_CONFIG_SRCDIR([pire/classes.cpp])
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_MACRO_DIR([m4])
+
+AC_LANG_CPLUSPLUS
+
+# Require neccessary binaries to build ourselves
+AC_PROG_CXX
+AC_PROG_CC
+AC_PROG_LEX
+AC_PROG_YACC
+AC_PROG_LIBTOOL
+
+# Check for cppunit
+AM_PATH_CPPUNIT([0.0.0],[with_unittests=yes],[
+ AC_WARN([cppunit not found. Unit tests will not compile and run.])
+ with_unittests=no
+])
+AM_CONDITIONAL([WITH_UNITTESTS], [test x"$with_unittests" = xyes])
+
+# Just for conscience' sake
+AC_CHECK_HEADERS([stdlib.h string.h sys/time.h])
+AC_HEADER_STDBOOL
+AC_C_INLINE
+AC_TYPE_SIZE_T
+AC_CHECK_TYPES([ptrdiff_t])
+AC_FUNC_ERROR_AT_LINE
+AC_FUNC_MALLOC
+AC_CHECK_FUNCS([memset strchr])
+
+# Require little-endian platform
+AC_C_BIGENDIAN
+if test x"$ac_cv_c_bigendian" = xyes; then
+ AC_ERROR([pire has not been ported to big-endian platforms yet.])
+fi
+
+# Optional features
+AC_ARG_ENABLE([extra], AS_HELP_STRING([--enable-extra], [Add extra functionality (capturing scanner, etc...)]))
+AC_ARG_ENABLE([debug], AS_HELP_STRING([--enable-debug], [Make Pire dump all constructed FSMs to std::clog (useless unless debugging Pire)]))
+AM_CONDITIONAL([ENABLE_EXTRA], [test x"$enable_extra" = xyes])
+AM_CONDITIONAL([ENABLE_DEBUG], [test x"$enable_debug" = xyes])
+
+AC_CONFIG_FILES([Makefile pire/Makefile tests/Makefile pkg/Makefile samples/Makefile samples/bench/Makefile])
+AC_OUTPUT
diff --git a/contrib/libs/pire/pire/Makefile.am b/contrib/libs/pire/pire/Makefile.am
index f2d09a2fb7f..09ef2117047 100644
--- a/contrib/libs/pire/pire/Makefile.am
+++ b/contrib/libs/pire/pire/Makefile.am
@@ -1,121 +1,121 @@
-
-AM_CXXFLAGS = -Wall
-if ENABLE_DEBUG
-AM_CXXFLAGS += -DPIRE_DEBUG
-endif
-if ENABLE_CHECKED
-AM_CXXFLAGS += -DPIRE_CHECKED
-endif
-
-lib_LTLIBRARIES = libpire.la
-libpire_la_SOURCES = \
- align.h \
- any.h \
- classes.cpp \
- defs.h \
- determine.h \
- encoding.cpp \
- encoding.h \
- extra.h \
- fsm.cpp \
- fsm.h \
- fwd.h \
- glue.cpp \
- glue.h \
+
+AM_CXXFLAGS = -Wall
+if ENABLE_DEBUG
+AM_CXXFLAGS += -DPIRE_DEBUG
+endif
+if ENABLE_CHECKED
+AM_CXXFLAGS += -DPIRE_CHECKED
+endif
+
+lib_LTLIBRARIES = libpire.la
+libpire_la_SOURCES = \
+ align.h \
+ any.h \
+ classes.cpp \
+ defs.h \
+ determine.h \
+ encoding.cpp \
+ encoding.h \
+ extra.h \
+ fsm.cpp \
+ fsm.h \
+ fwd.h \
+ glue.cpp \
+ glue.h \
minimize.h \
half_final_fsm.cpp \
half_final_fsm.h \
- partition.h \
- pire.h \
- re_lexer.cpp \
- re_lexer.h \
- run.h \
- scanner_io.cpp \
- vbitset.h \
- re_parser.ypp \
+ partition.h \
+ pire.h \
+ re_lexer.cpp \
+ re_lexer.h \
+ run.h \
+ scanner_io.cpp \
+ vbitset.h \
+ re_parser.ypp \
scanners/half_final.h \
- scanners/loaded.h \
- scanners/multi.h \
- scanners/slow.h \
- scanners/simple.h \
- scanners/common.h \
- scanners/pair.h \
- stub/stl.h \
- stub/lexical_cast.h \
- stub/saveload.h \
- stub/singleton.h \
- stub/utf8.cpp \
- stub/utf8.h \
- stub/noncopyable.h \
- stub/codepage_h.h \
- stub/doccodes_h.h \
- stub/unidata_h.h \
- stub/unidata_cpp.h
-
-if ENABLE_EXTRA
-libpire_la_SOURCES += \
- extra/capture.cpp \
- extra/capture.h \
- extra/count.cpp \
- extra/count.h \
- extra/glyphs.cpp \
- extra/glyphs.h
-endif
-
-pire_hdrdir = $(includedir)/pire
-pire_hdr_HEADERS = \
- align.h \
- any.h \
- defs.h \
- determine.h \
- encoding.h \
- extra.h \
- fsm.h \
- fwd.h \
- glue.h \
+ scanners/loaded.h \
+ scanners/multi.h \
+ scanners/slow.h \
+ scanners/simple.h \
+ scanners/common.h \
+ scanners/pair.h \
+ stub/stl.h \
+ stub/lexical_cast.h \
+ stub/saveload.h \
+ stub/singleton.h \
+ stub/utf8.cpp \
+ stub/utf8.h \
+ stub/noncopyable.h \
+ stub/codepage_h.h \
+ stub/doccodes_h.h \
+ stub/unidata_h.h \
+ stub/unidata_cpp.h
+
+if ENABLE_EXTRA
+libpire_la_SOURCES += \
+ extra/capture.cpp \
+ extra/capture.h \
+ extra/count.cpp \
+ extra/count.h \
+ extra/glyphs.cpp \
+ extra/glyphs.h
+endif
+
+pire_hdrdir = $(includedir)/pire
+pire_hdr_HEADERS = \
+ align.h \
+ any.h \
+ defs.h \
+ determine.h \
+ encoding.h \
+ extra.h \
+ fsm.h \
+ fwd.h \
+ glue.h \
minimize.h \
half_final_fsm.h \
- partition.h \
- pire.h \
- re_lexer.h \
- re_parser.h \
- run.h \
- static_assert.h \
- vbitset.h
-
-if ENABLE_EXTRA
-pire_extradir = $(includedir)/pire/extra
-pire_extra_HEADERS = \
- extra/capture.h \
- extra/count.h \
- extra/glyphs.h
-endif
-
-pire_scannersdir = $(includedir)/pire/scanners
-pire_scanners_HEADERS = \
- scanners/common.h \
+ partition.h \
+ pire.h \
+ re_lexer.h \
+ re_parser.h \
+ run.h \
+ static_assert.h \
+ vbitset.h
+
+if ENABLE_EXTRA
+pire_extradir = $(includedir)/pire/extra
+pire_extra_HEADERS = \
+ extra/capture.h \
+ extra/count.h \
+ extra/glyphs.h
+endif
+
+pire_scannersdir = $(includedir)/pire/scanners
+pire_scanners_HEADERS = \
+ scanners/common.h \
scanners/half_final.h \
- scanners/multi.h \
- scanners/slow.h \
- scanners/simple.h \
- scanners/loaded.h \
- scanners/pair.h
-
-pire_stubdir = $(includedir)/pire/stub
-pire_stub_HEADERS = \
- stub/stl.h \
- stub/defaults.h \
- stub/singleton.h \
- stub/saveload.h \
- stub/lexical_cast.h
-
-bin_PROGRAMS = pire_inline
-
-pire_inline_SOURCES = inline.lpp stub/hacks.h stub/memstreams.h
-pire_inline_LDADD = libpire.la
-
-BUILT_SOURCES = re_parser.h re_parser.cpp
-CLEANFILES = re_parser.h re_parser.cpp
-
-AM_YFLAGS = -d
-
+ scanners/multi.h \
+ scanners/slow.h \
+ scanners/simple.h \
+ scanners/loaded.h \
+ scanners/pair.h
+
+pire_stubdir = $(includedir)/pire/stub
+pire_stub_HEADERS = \
+ stub/stl.h \
+ stub/defaults.h \
+ stub/singleton.h \
+ stub/saveload.h \
+ stub/lexical_cast.h
+
+bin_PROGRAMS = pire_inline
+
+pire_inline_SOURCES = inline.lpp stub/hacks.h stub/memstreams.h
+pire_inline_LDADD = libpire.la
+
+BUILT_SOURCES = re_parser.h re_parser.cpp
+CLEANFILES = re_parser.h re_parser.cpp
+
+AM_YFLAGS = -d
+
diff --git a/contrib/libs/pire/pire/align.h b/contrib/libs/pire/pire/align.h
index c1941b7120f..fea084b598b 100644
--- a/contrib/libs/pire/pire/align.h
+++ b/contrib/libs/pire/pire/align.h
@@ -1,103 +1,103 @@
-/*
- * align.h -- functions for positioning streams and memory pointers
- * to word boundaries
+/*
+ * align.h -- functions for positioning streams and memory pointers
+ * to word boundaries
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_ALIGN_H
-#define PIRE_ALIGN_H
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_ALIGN_H
+#define PIRE_ALIGN_H
+
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/saveload.h>
-#include "platform.h"
-
-namespace Pire {
-
- namespace Impl {
-
- template<class T>
- inline T AlignUp(T t, size_t bound)
- {
- return (T) (((size_t) t + (bound-1)) & ~(bound-1));
- }
-
- template<class T>
- inline T AlignDown(T t, size_t bound)
- {
- return (T) ((size_t) t & ~(bound-1));
- }
-
- inline void AlignSave(yostream* s, size_t size)
- {
- size_t tail = AlignUp(size, sizeof(size_t)) - size;
- if (tail) {
- static const char buf[sizeof(MaxSizeWord)] = {0};
- SavePodArray(s, buf, tail);
- }
- }
-
- inline void AlignLoad(yistream* s, size_t size)
- {
- size_t tail = AlignUp(size, sizeof(size_t)) - size;
- if (tail) {
- char buf[sizeof(MaxSizeWord)];
- LoadPodArray(s, buf, tail);
- }
- }
-
- template<class T>
- inline void AlignedSaveArray(yostream* s, const T* array, size_t count)
- {
- SavePodArray(s, array, count);
- AlignSave(s, sizeof(*array) * count);
- }
-
- template<class T>
- inline void AlignedLoadArray(yistream* s, T* array, size_t count)
- {
- LoadPodArray(s, array, count);
- AlignLoad(s, sizeof(*array) * count);
- }
-
- template<class T>
- inline bool IsAligned(T t, size_t bound)
- {
- return ((size_t) t & (bound-1)) == 0;
- }
-
- inline const void* AlignPtr(const size_t*& p, size_t& size)
- {
- if (!IsAligned(p, sizeof(size_t))) {
- const size_t* next = AlignUp(p, sizeof(size_t));
- if (next > p+size)
- throw Error("EOF reached in NPire::Impl::align");
- size -= (next - p);
- p = next;
- }
- return (const void*) p;
- }
-
- }
-
-}
-
-#endif
+#include "platform.h"
+
+namespace Pire {
+
+ namespace Impl {
+
+ template<class T>
+ inline T AlignUp(T t, size_t bound)
+ {
+ return (T) (((size_t) t + (bound-1)) & ~(bound-1));
+ }
+
+ template<class T>
+ inline T AlignDown(T t, size_t bound)
+ {
+ return (T) ((size_t) t & ~(bound-1));
+ }
+
+ inline void AlignSave(yostream* s, size_t size)
+ {
+ size_t tail = AlignUp(size, sizeof(size_t)) - size;
+ if (tail) {
+ static const char buf[sizeof(MaxSizeWord)] = {0};
+ SavePodArray(s, buf, tail);
+ }
+ }
+
+ inline void AlignLoad(yistream* s, size_t size)
+ {
+ size_t tail = AlignUp(size, sizeof(size_t)) - size;
+ if (tail) {
+ char buf[sizeof(MaxSizeWord)];
+ LoadPodArray(s, buf, tail);
+ }
+ }
+
+ template<class T>
+ inline void AlignedSaveArray(yostream* s, const T* array, size_t count)
+ {
+ SavePodArray(s, array, count);
+ AlignSave(s, sizeof(*array) * count);
+ }
+
+ template<class T>
+ inline void AlignedLoadArray(yistream* s, T* array, size_t count)
+ {
+ LoadPodArray(s, array, count);
+ AlignLoad(s, sizeof(*array) * count);
+ }
+
+ template<class T>
+ inline bool IsAligned(T t, size_t bound)
+ {
+ return ((size_t) t & (bound-1)) == 0;
+ }
+
+ inline const void* AlignPtr(const size_t*& p, size_t& size)
+ {
+ if (!IsAligned(p, sizeof(size_t))) {
+ const size_t* next = AlignUp(p, sizeof(size_t));
+ if (next > p+size)
+ throw Error("EOF reached in NPire::Impl::align");
+ size -= (next - p);
+ p = next;
+ }
+ return (const void*) p;
+ }
+
+ }
+
+}
+
+#endif
diff --git a/contrib/libs/pire/pire/any.h b/contrib/libs/pire/pire/any.h
index f1c8ab9ab99..4646d25781b 100644
--- a/contrib/libs/pire/pire/any.h
+++ b/contrib/libs/pire/pire/any.h
@@ -1,131 +1,131 @@
-/*
- * any.h -- a wrapper capable of holding a value of arbitrary type.
+/*
+ * any.h -- a wrapper capable of holding a value of arbitrary type.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_ANY_H
-#define PIRE_ANY_H
-
-
-#include <typeinfo>
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_ANY_H
+#define PIRE_ANY_H
+
+
+#include <typeinfo>
+
#include <contrib/libs/pire/pire/stub/stl.h>
-
-namespace Pire {
-
-class Any {
-
-public:
+
+namespace Pire {
+
+class Any {
+
+public:
Any() = default;
-
- Any(const Any& any)
- {
- if (any.h)
- h = any.h->Duplicate();
- }
-
- Any& operator= (Any any)
- {
- any.Swap(*this);
- return *this;
- }
-
- template <class T>
- Any(const T& t)
- : h(new Holder<T>(t))
- {
- }
-
- bool Empty() const {
- return !h;
- }
- template <class T>
- bool IsA() const {
- return h && h->IsA(typeid(T));
- }
-
- template <class T>
- T& As()
- {
- if (h && IsA<T>())
- return *reinterpret_cast<T*>(h->Ptr());
- else
- throw Pire::Error("type mismatch");
- }
-
- template <class T>
- const T& As() const
- {
- if (h && IsA<T>())
- return *reinterpret_cast<const T*>(h->Ptr());
- else
- throw Pire::Error("type mismatch");
- }
-
+
+ Any(const Any& any)
+ {
+ if (any.h)
+ h = any.h->Duplicate();
+ }
+
+ Any& operator= (Any any)
+ {
+ any.Swap(*this);
+ return *this;
+ }
+
+ template <class T>
+ Any(const T& t)
+ : h(new Holder<T>(t))
+ {
+ }
+
+ bool Empty() const {
+ return !h;
+ }
+ template <class T>
+ bool IsA() const {
+ return h && h->IsA(typeid(T));
+ }
+
+ template <class T>
+ T& As()
+ {
+ if (h && IsA<T>())
+ return *reinterpret_cast<T*>(h->Ptr());
+ else
+ throw Pire::Error("type mismatch");
+ }
+
+ template <class T>
+ const T& As() const
+ {
+ if (h && IsA<T>())
+ return *reinterpret_cast<const T*>(h->Ptr());
+ else
+ throw Pire::Error("type mismatch");
+ }
+
void Swap(Any& a) noexcept {
- DoSwap(h, a.h);
- }
-
-private:
-
- struct AbstractHolder {
- virtual ~AbstractHolder() {
- }
+ DoSwap(h, a.h);
+ }
+
+private:
+
+ struct AbstractHolder {
+ virtual ~AbstractHolder() {
+ }
virtual THolder<AbstractHolder> Duplicate() const = 0;
- virtual bool IsA(const std::type_info& id) const = 0;
- virtual void* Ptr() = 0;
- virtual const void* Ptr() const = 0;
- };
-
- template <class T>
- struct Holder: public AbstractHolder {
- Holder(T t)
- : d(t)
- {
- }
+ virtual bool IsA(const std::type_info& id) const = 0;
+ virtual void* Ptr() = 0;
+ virtual const void* Ptr() const = 0;
+ };
+
+ template <class T>
+ struct Holder: public AbstractHolder {
+ Holder(T t)
+ : d(t)
+ {
+ }
THolder<AbstractHolder> Duplicate() const {
return THolder<AbstractHolder>(new Holder<T>(d));
- }
- bool IsA(const std::type_info& id) const {
- return id == typeid(T);
- }
- void* Ptr() {
- return &d;
- }
- const void* Ptr() const {
- return &d;
- }
- private:
- T d;
- };
-
+ }
+ bool IsA(const std::type_info& id) const {
+ return id == typeid(T);
+ }
+ void* Ptr() {
+ return &d;
+ }
+ const void* Ptr() const {
+ return &d;
+ }
+ private:
+ T d;
+ };
+
THolder<AbstractHolder> h;
-};
-
-}
-
-namespace std {
- inline void swap(Pire::Any& a, Pire::Any& b) {
- a.Swap(b);
- }
-}
-
-#endif
+};
+
+}
+
+namespace std {
+ inline void swap(Pire::Any& a, Pire::Any& b) {
+ a.Swap(b);
+ }
+}
+
+#endif
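
Pire::Any above is a small type-erasing holder: values are deep-copied through Holder&lt;T&gt;::Duplicate, queried with IsA&lt;T&gt;() and unwrapped with As&lt;T&gt;(), which throws on a type mismatch. A usage sketch based only on the members shown in this file; it assumes the ydb-vendored include path and that Pire::Error is visible through the stub headers any.h already pulls in:

    #include <iostream>
    #include <contrib/libs/pire/pire/any.h>

    int main() {
        Pire::Any a = 42;                       // wraps an int in a Holder<int>
        if (a.IsA<int>())
            std::cout << "int: " << a.As<int>() << "\n";

        Pire::Any b = a;                        // deep copy via Duplicate()
        try {
            b.As<double>();                     // wrong type
        } catch (const Pire::Error&) {
            std::cout << "type mismatch caught\n";
        }
        return 0;
    }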
diff --git a/contrib/libs/pire/pire/classes.cpp b/contrib/libs/pire/pire/classes.cpp
index d928d76866d..bbf021737df 100644
--- a/contrib/libs/pire/pire/classes.cpp
+++ b/contrib/libs/pire/pire/classes.cpp
@@ -1,152 +1,152 @@
-/*
- * classes.cpp -- implementation for Pire::CharClasses feature.
+/*
+ * classes.cpp -- implementation for Pire::CharClasses feature.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/singleton.h>
#include <contrib/libs/pire/pire/stub/noncopyable.h>
#include <contrib/libs/pire/pire/stub/utf8.h>
-#include "re_lexer.h"
-
-namespace Pire {
-
-namespace {
-
- class CharClassesTable: private NonCopyable {
- private:
- class CharClass {
- public:
- CharClass() {}
- explicit CharClass(wchar32 ch) { m_bounds.push_back(ymake_pair(ch, ch)); }
- CharClass(wchar32 lower, wchar32 upper) { m_bounds.push_back(ymake_pair(lower, upper)); }
-
- CharClass& operator |= (const CharClass& cc)
- {
- std::copy(cc.m_bounds.begin(), cc.m_bounds.end(), std::back_inserter(m_bounds));
- return *this;
- }
-
- CharClass operator | (const CharClass& cc) const
- {
- CharClass r(*this);
- r |= cc;
- return r;
- }
-
+#include "re_lexer.h"
+
+namespace Pire {
+
+namespace {
+
+ class CharClassesTable: private NonCopyable {
+ private:
+ class CharClass {
+ public:
+ CharClass() {}
+ explicit CharClass(wchar32 ch) { m_bounds.push_back(ymake_pair(ch, ch)); }
+ CharClass(wchar32 lower, wchar32 upper) { m_bounds.push_back(ymake_pair(lower, upper)); }
+
+ CharClass& operator |= (const CharClass& cc)
+ {
+ std::copy(cc.m_bounds.begin(), cc.m_bounds.end(), std::back_inserter(m_bounds));
+ return *this;
+ }
+
+ CharClass operator | (const CharClass& cc) const
+ {
+ CharClass r(*this);
+ r |= cc;
+ return r;
+ }
+
TSet<wchar32> ToSet() const
- {
+ {
TSet<wchar32> ret;
for (auto&& bound : m_bounds)
for (wchar32 c = bound.first; c <= bound.second; ++c)
- ret.insert(c);
- return ret;
- }
-
- private:
+ ret.insert(c);
+ return ret;
+ }
+
+ private:
TVector<ypair<wchar32, wchar32> > m_bounds;
- };
-
- public:
- bool Has(wchar32 wc) const
- {
- return (m_classes.find(to_lower(wc & ~ControlMask)) != m_classes.end());
- }
-
+ };
+
+ public:
+ bool Has(wchar32 wc) const
+ {
+ return (m_classes.find(to_lower(wc & ~ControlMask)) != m_classes.end());
+ }
+
TSet<wchar32> Get(wchar32 wc) const
- {
+ {
auto it = m_classes.find(to_lower(wc & ~ControlMask));
- if (it == m_classes.end())
- throw Error("Unknown character class");
- return it->second.ToSet();
- }
-
- CharClassesTable()
- {
- m_classes['l'] = CharClass('A', 'Z') | CharClass('a', 'z');
- m_classes['c']
- = CharClass(0x0410, 0x044F) // Russian capital A to Russan capital YA, Russian small A to Russian small YA
- | CharClass(0x0401) // Russian capital Yo
- | CharClass(0x0451) // Russian small Yo
- ;
-
- m_classes['w'] = m_classes['l'] | m_classes['c'];
- m_classes['d'] = CharClass('0', '9');
- m_classes['s']
- = CharClass(' ') | CharClass('\t') | CharClass('\r') | CharClass('\n')
- | CharClass(0x00A0) // Non-breaking space
- ;
-
- // A few special classes which do not have any negation
- m_classes['n'] = CharClass('\n');
- m_classes['r'] = CharClass('\r');
- m_classes['t'] = CharClass('\t');
- }
-
+ if (it == m_classes.end())
+ throw Error("Unknown character class");
+ return it->second.ToSet();
+ }
+
+ CharClassesTable()
+ {
+ m_classes['l'] = CharClass('A', 'Z') | CharClass('a', 'z');
+ m_classes['c']
+ = CharClass(0x0410, 0x044F) // Russian capital A to Russian capital YA, Russian small A to Russian small YA
+ | CharClass(0x0401) // Russian capital Yo
+ | CharClass(0x0451) // Russian small Yo
+ ;
+
+ m_classes['w'] = m_classes['l'] | m_classes['c'];
+ m_classes['d'] = CharClass('0', '9');
+ m_classes['s']
+ = CharClass(' ') | CharClass('\t') | CharClass('\r') | CharClass('\n')
+ | CharClass(0x00A0) // Non-breaking space
+ ;
+
+ // A few special classes which do not have any negation
+ m_classes['n'] = CharClass('\n');
+ m_classes['r'] = CharClass('\r');
+ m_classes['t'] = CharClass('\t');
+ }
+
TMap<wchar32, CharClass> m_classes;
- };
-
- class CharClassesImpl: public Feature {
- public:
- CharClassesImpl(): m_table(Singleton<CharClassesTable>()) {}
- int Priority() const { return 10; }
-
- void Alter(Term& t)
- {
- if (t.Value().IsA<Term::CharacterRange>()) {
- const Term::CharacterRange& range = t.Value().As<Term::CharacterRange>();
- typedef Term::CharacterRange::first_type CharSet;
- const CharSet& old = range.first;
- CharSet altered;
- bool pos = false;
- bool neg = false;
+ };
+
+ class CharClassesImpl: public Feature {
+ public:
+ CharClassesImpl(): m_table(Singleton<CharClassesTable>()) {}
+ int Priority() const { return 10; }
+
+ void Alter(Term& t)
+ {
+ if (t.Value().IsA<Term::CharacterRange>()) {
+ const Term::CharacterRange& range = t.Value().As<Term::CharacterRange>();
+ typedef Term::CharacterRange::first_type CharSet;
+ const CharSet& old = range.first;
+ CharSet altered;
+ bool pos = false;
+ bool neg = false;
for (auto&& i : old)
if (i.size() == 1 && (i[0] & ControlMask) == Control && m_table->Has(i[0])) {
if (is_upper(i[0] & ~ControlMask))
- neg = true;
- else
- pos = true;
-
+ neg = true;
+ else
+ pos = true;
+
TSet<wchar32> klass = m_table->Get(i[0]);
for (auto&& j : klass)
altered.insert(Term::String(1, j));
- } else
+ } else
altered.insert(i);
-
- if (neg && (pos || range.second))
- Error("Positive and negative character ranges mixed");
- t = Term(t.Type(), Term::CharacterRange(altered, neg || range.second));
- }
- }
-
- private:
- CharClassesTable* m_table;
- };
-
-}
-
-namespace Features {
+
+ if (neg && (pos || range.second))
+ Error("Positive and negative character ranges mixed");
+ t = Term(t.Type(), Term::CharacterRange(altered, neg || range.second));
+ }
+ }
+
+ private:
+ CharClassesTable* m_table;
+ };
+
+}
+
+namespace Features {
Feature::Ptr CharClasses() { return Feature::Ptr(new CharClassesImpl); }
-}
-
-}
-
+}
+
+}
+
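
CharClassesTable above defines what the escape classes expand to: \l is the Latin letters, \c the Russian letters (including Yo), \w their union, \d the digits, \s a few whitespace characters, and an uppercase escape selects the negated range. The table sits in an anonymous namespace, so it is not directly usable; the sketch below only replays the bound expansion done by CharClass::ToSet() with standard containers (ToSet and the ranges here are a standalone illustration, not Pire API):

    #include <iostream>
    #include <set>
    #include <utility>
    #include <vector>

    // Expand inclusive code-point ranges into an explicit set, the same walk
    // CharClass::ToSet() performs over its m_bounds.
    static std::set<unsigned> ToSet(const std::vector<std::pair<unsigned, unsigned>>& bounds) {
        std::set<unsigned> ret;
        for (const auto& b : bounds)
            for (unsigned c = b.first; c <= b.second; ++c)
                ret.insert(c);
        return ret;
    }

    int main() {
        auto digits  = ToSet({{'0', '9'}});                 // \d: one range
        auto letters = ToSet({{'A', 'Z'}, {'a', 'z'}});     // \l: two ranges joined by operator|
        std::cout << digits.size() << " digits, " << letters.size() << " letters\n";  // 10, 52
        return 0;
    }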
diff --git a/contrib/libs/pire/pire/defs.h b/contrib/libs/pire/pire/defs.h
index c1e7780ef98..19d785d7d78 100644
--- a/contrib/libs/pire/pire/defs.h
+++ b/contrib/libs/pire/pire/defs.h
@@ -1,112 +1,112 @@
-/*
- * defs.h -- common Pire definitions.
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_DEFS_H
-#define PIRE_DEFS_H
-
-#ifndef PIRE_NO_CONFIG
-#include <pire/config.h>
-#endif
-#include <stdlib.h>
-
-#if defined(_MSC_VER)
-#define PIRE_HAVE_DECLSPEC_ALIGN
-#else
-#define PIRE_HAVE_ALIGNAS
-#endif
-
-#define PIRE_HAVE_LAMBDAS
-
-namespace Pire {
-
-#ifdef PIRE_DEBUG
-# define PIRE_IFDEBUG(x) x
-#else
-# define PIRE_IFDEBUG(x)
-#endif
-
-#ifdef PIRE_CHECKED
-# define PIRE_IF_CHECKED(e) e
-#else
-# define PIRE_IF_CHECKED(e)
-#endif
-
-
- typedef unsigned short Char;
-
- namespace SpecialChar {
- enum {
- Epsilon = 257,
- BeginMark = 258,
- EndMark = 259,
-
- // Actual size of input alphabet
- MaxCharUnaligned = 260,
-
- // Size of letter transition tables, must be a multiple of the machine word size
- MaxChar = (MaxCharUnaligned + (sizeof(void*)-1)) & ~(sizeof(void*)-1)
- };
- }
-
- using namespace SpecialChar;
-
- namespace Impl {
-#ifndef PIRE_WORDS_BIGENDIAN
- inline size_t ToLittleEndian(size_t val) { return val; }
-#else
- template<unsigned N>
- inline size_t SwapBytes(size_t val)
- {
- static const size_t Mask = (1 << (N/2)) - 1;
- return ((SwapBytes<N/2>(val) & Mask) << (N/2)) | SwapBytes<N/2>(val >> (N/2));
- }
-
- template<>
- inline size_t SwapBytes<8>(size_t val) { return val & 0xFF; }
-
- inline size_t ToLittleEndian(size_t val) { return SwapBytes<sizeof(val)*8>(val); }
-#endif
-
- struct Struct { void* p; };
- }
-}
-
-#ifndef PIRE_ALIGNED_DECL
-# if defined(PIRE_HAVE_ALIGNAS)
-# define PIRE_ALIGNED_DECL(x) alignas(::Pire::Impl::Struct) static const char x[]
-# elif defined(PIRE_HAVE_ATTR_ALIGNED)
-# define PIRE_ALIGNED_DECL(x) static const char x[] __attribute__((aligned(sizeof(void*))))
-# elif defined(PIRE_HAVE_DECLSPEC_ALIGN)
-# define PIRE_ALIGNED_DECL(x) __declspec(align(8)) static const char x[]
-# endif
-#endif
-
-#ifndef PIRE_LITERAL
-# if defined(PIRE_HAVE_LAMBDAS)
-# define PIRE_LITERAL(data) ([]() -> const char* { PIRE_ALIGNED_DECL(__pire_regexp__) = data; return __pire_regexp__; })()
-# elif defined(PIRE_HAVE_SCOPED_EXPR)
-# define PIRE_LITERAL(data) ({ PIRE_ALIGNED_DECL(__pire_regexp__) = data; __pire_regexp__; })
-# endif
-#endif
-
-#endif
+/*
+ * defs.h -- common Pire definitions.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_DEFS_H
+#define PIRE_DEFS_H
+
+#ifndef PIRE_NO_CONFIG
+#include <pire/config.h>
+#endif
+#include <stdlib.h>
+
+#if defined(_MSC_VER)
+#define PIRE_HAVE_DECLSPEC_ALIGN
+#else
+#define PIRE_HAVE_ALIGNAS
+#endif
+
+#define PIRE_HAVE_LAMBDAS
+
+namespace Pire {
+
+#ifdef PIRE_DEBUG
+# define PIRE_IFDEBUG(x) x
+#else
+# define PIRE_IFDEBUG(x)
+#endif
+
+#ifdef PIRE_CHECKED
+# define PIRE_IF_CHECKED(e) e
+#else
+# define PIRE_IF_CHECKED(e)
+#endif
+
+
+ typedef unsigned short Char;
+
+ namespace SpecialChar {
+ enum {
+ Epsilon = 257,
+ BeginMark = 258,
+ EndMark = 259,
+
+ // Actual size of input alphabet
+ MaxCharUnaligned = 260,
+
+ // Size of letter transition tables, must be a multiple of the machine word size
+ MaxChar = (MaxCharUnaligned + (sizeof(void*)-1)) & ~(sizeof(void*)-1)
+ };
+ }
+
+ using namespace SpecialChar;
+
+ namespace Impl {
+#ifndef PIRE_WORDS_BIGENDIAN
+ inline size_t ToLittleEndian(size_t val) { return val; }
+#else
+ template<unsigned N>
+ inline size_t SwapBytes(size_t val)
+ {
+ static const size_t Mask = (1 << (N/2)) - 1;
+ return ((SwapBytes<N/2>(val) & Mask) << (N/2)) | SwapBytes<N/2>(val >> (N/2));
+ }
+
+ template<>
+ inline size_t SwapBytes<8>(size_t val) { return val & 0xFF; }
+
+ inline size_t ToLittleEndian(size_t val) { return SwapBytes<sizeof(val)*8>(val); }
+#endif
+
+ struct Struct { void* p; };
+ }
+}
+
+#ifndef PIRE_ALIGNED_DECL
+# if defined(PIRE_HAVE_ALIGNAS)
+# define PIRE_ALIGNED_DECL(x) alignas(::Pire::Impl::Struct) static const char x[]
+# elif defined(PIRE_HAVE_ATTR_ALIGNED)
+# define PIRE_ALIGNED_DECL(x) static const char x[] __attribute__((aligned(sizeof(void*))))
+# elif defined(PIRE_HAVE_DECLSPEC_ALIGN)
+# define PIRE_ALIGNED_DECL(x) __declspec(align(8)) static const char x[]
+# endif
+#endif
+
+#ifndef PIRE_LITERAL
+# if defined(PIRE_HAVE_LAMBDAS)
+# define PIRE_LITERAL(data) ([]() -> const char* { PIRE_ALIGNED_DECL(__pire_regexp__) = data; return __pire_regexp__; })()
+# elif defined(PIRE_HAVE_SCOPED_EXPR)
+# define PIRE_LITERAL(data) ({ PIRE_ALIGNED_DECL(__pire_regexp__) = data; __pire_regexp__; })
+# endif
+#endif
+
+#endif
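
On big-endian hosts Impl::ToLittleEndian reverses the bytes of a size_t with the recursive SwapBytes template: each instantiation swaps the two halves of an N-bit value until it bottoms out at single bytes. A standalone copy of that recursion (using a size_t-wide shift for the mask) that can be compiled on any host to observe the byte reversal:

    #include <cstddef>
    #include <cstdio>

    // Swap the two halves of an N-bit value recursively, as in Pire::Impl::SwapBytes.
    template<unsigned N>
    inline size_t SwapBytes(size_t val) {
        const size_t mask = (static_cast<size_t>(1) << (N / 2)) - 1;
        return ((SwapBytes<N / 2>(val) & mask) << (N / 2)) | SwapBytes<N / 2>(val >> (N / 2));
    }

    template<>
    inline size_t SwapBytes<8>(size_t val) { return val & 0xFF; }

    int main() {
        size_t v = 0x0102030405060708ULL;      // assumes a 64-bit size_t
        std::printf("%zx -> %zx\n", v, SwapBytes<sizeof(size_t) * 8>(v));  // 102030405060708 -> 807060504030201
        return 0;
    }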
diff --git a/contrib/libs/pire/pire/determine.h b/contrib/libs/pire/pire/determine.h
index ddadfa1c75a..fb48fdd0b35 100644
--- a/contrib/libs/pire/pire/determine.h
+++ b/contrib/libs/pire/pire/determine.h
@@ -1,145 +1,145 @@
-/*
- * determine.h -- the FSM determination routine.
+/*
+ * determine.h -- the FSM determination routine.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_DETERMINE_H
-#define PIRE_DETERMINE_H
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_DETERMINE_H
+#define PIRE_DETERMINE_H
+
#include <contrib/libs/pire/pire/stub/stl.h>
-#include "partition.h"
-
-namespace Pire {
- namespace Impl {
-
- /**
- * An interface of a determination task.
- * You don't have to derive from this class; it is just a start point template.
- */
- class DetermineTask {
- private:
- struct ImplementationSpecific1;
- struct ImplementationSpecific2;
-
- public:
- /// A type representing a new state (may be a set of old states, a pair of them, etc...)
- typedef ImplementationSpecific1 State;
-
- /// A type of letter equivalence classes table.
- typedef Partition<char, ImplementationSpecific2> LettersTbl;
-
- /// A container used for storing map of states to thier indices.
+#include "partition.h"
+
+namespace Pire {
+ namespace Impl {
+
+ /**
+ * An interface of a determination task.
+ * You don't have to derive from this class; it is just a starting-point template.
+ */
+ class DetermineTask {
+ private:
+ struct ImplementationSpecific1;
+ struct ImplementationSpecific2;
+
+ public:
+ /// A type representing a new state (may be a set of old states, a pair of them, etc...)
+ typedef ImplementationSpecific1 State;
+
+ /// A type of letter equivalence classes table.
+ typedef Partition<char, ImplementationSpecific2> LettersTbl;
+
+ /// A container used for storing the map of states to their indices.
typedef TMap<State, size_t> InvStates;
-
- /// Should return used letters' partition.
- const LettersTbl& Letters() const;
-
- /// Should return initial state (surprise!)
- State Initial() const;
-
- /// Should calculate next state, given the current state and a letter.
- State Next(State state, Char letter) const;
-
- /// Should return true iff the state need to be processed.
- bool IsRequired(const State& /*state*/) const { return true; }
-
- /// Called when the set of new states is closed.
+
+ /// Should return used letters' partition.
+ const LettersTbl& Letters() const;
+
+ /// Should return initial state (surprise!)
+ State Initial() const;
+
+ /// Should calculate next state, given the current state and a letter.
+ State Next(State state, Char letter) const;
+
+ /// Should return true iff the state need to be processed.
+ /// Should return true iff the state needs to be processed.
+
+ /// Called when the set of new states is closed.
void AcceptStates(const TVector<State>& newstates);
-
- /// Called for each transition from one new state to another.
- void Connect(size_t from, size_t to, Char letter);
-
- typedef bool Result;
- Result Success() { return true; }
- Result Failure() { return false; }
- };
-
- /**
- * A helper function for FSM determining and all determine-like algorithms
- * like scanners' agglutination.
- *
- * Given an indirectly specified automaton (through Task::Initial() and Task::Next()
- * functions, see above), performs a breadth-first traversal, finding and enumerating
- * all effectively reachable states. Then passes all found states and transitions
- * between them back to the task.
- *
- * Initial state is always placed at zero position.
- *
- * Please note that the function does not take care of any payload (including final flags);
- * it is the task's responsibility to agglutinate them properly.
- *
- * Returns task.Succeed() if everything was done; task.Failure() if maximum limit of state count was reached.
- */
- template<class Task>
- typename Task::Result Determine(Task& task, size_t maxSize)
- {
- typedef typename Task::State State;
- typedef typename Task::InvStates InvStates;
+
+ /// Called for each transition from one new state to another.
+ void Connect(size_t from, size_t to, Char letter);
+
+ typedef bool Result;
+ Result Success() { return true; }
+ Result Failure() { return false; }
+ };
+
+ /**
+ * A helper function for FSM determinization and all determine-like algorithms
+ * like scanners' agglutination.
+ *
+ * Given an indirectly specified automaton (through Task::Initial() and Task::Next()
+ * functions, see above), performs a breadth-first traversal, finding and enumerating
+ * all effectively reachable states. Then passes all found states and transitions
+ * between them back to the task.
+ *
+ * Initial state is always placed at zero position.
+ *
+ * Please note that the function does not take care of any payload (including final flags);
+ * it is the task's responsibility to agglutinate them properly.
+ *
+ * Returns task.Success() if everything was done; task.Failure() if the maximum state count was reached.
+ */
+ template<class Task>
+ typename Task::Result Determine(Task& task, size_t maxSize)
+ {
+ typedef typename Task::State State;
+ typedef typename Task::InvStates InvStates;
typedef TDeque< TVector<size_t> > TransitionTable;
-
+
TVector<State> states;
- InvStates invstates;
- TransitionTable transitions;
+ InvStates invstates;
+ TransitionTable transitions;
TVector<size_t> stateIndices;
-
- states.push_back(task.Initial());
- invstates.insert(typename InvStates::value_type(states[0], 0));
-
- for (size_t stateIdx = 0; stateIdx < states.size(); ++stateIdx) {
- if (!task.IsRequired(states[stateIdx]))
- continue;
- TransitionTable::value_type row(task.Letters().Size());
+
+ states.push_back(task.Initial());
+ invstates.insert(typename InvStates::value_type(states[0], 0));
+
+ for (size_t stateIdx = 0; stateIdx < states.size(); ++stateIdx) {
+ if (!task.IsRequired(states[stateIdx]))
+ continue;
+ TransitionTable::value_type row(task.Letters().Size());
for (auto&& letter : task.Letters()) {
State newState = task.Next(states[stateIdx], letter.first);
auto i = invstates.find(newState);
- if (i == invstates.end()) {
- if (!maxSize--)
- return task.Failure();
- i = invstates.insert(typename InvStates::value_type(newState, states.size())).first;
- states.push_back(newState);
- }
+ if (i == invstates.end()) {
+ if (!maxSize--)
+ return task.Failure();
+ i = invstates.insert(typename InvStates::value_type(newState, states.size())).first;
+ states.push_back(newState);
+ }
row[letter.second.first] = i->second;
- }
- transitions.push_back(row);
- stateIndices.push_back(stateIdx);
- }
-
+ }
+ transitions.push_back(row);
+ stateIndices.push_back(stateIdx);
+ }
+
TVector<Char> invletters(task.Letters().Size());
for (auto&& letter : task.Letters())
invletters[letter.second.first] = letter.first;
-
- task.AcceptStates(states);
- size_t from = 0;
- for (TransitionTable::iterator i = transitions.begin(), ie = transitions.end(); i != ie; ++i, ++from) {
+
+ task.AcceptStates(states);
+ size_t from = 0;
+ for (TransitionTable::iterator i = transitions.begin(), ie = transitions.end(); i != ie; ++i, ++from) {
TVector<Char>::iterator l = invletters.begin();
- for (TransitionTable::value_type::iterator j = i->begin(), je = i->end(); j != je; ++j, ++l)
- task.Connect(stateIndices[from], *j, *l);
- }
- return task.Success();
- }
+ for (TransitionTable::value_type::iterator j = i->begin(), je = i->end(); j != je; ++j, ++l)
+ task.Connect(stateIndices[from], *j, *l);
+ }
+ return task.Success();
+ }
// Faster transition table representation for determined FSM
typedef TVector<size_t> DeterminedTransitions;
- }
-}
-
-#endif
+ }
+}
+
+#endif
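
Determine() above discovers states breadth-first: it starts from Task::Initial(), asks Task::Next() for each letter class, numbers previously unseen states in order of discovery (the initial state is always index 0), and finally reports the states and transitions back through AcceptStates()/Connect(). The toy program below replays just that enumeration loop over a made-up transition function, ignoring Pire's Partition, IsRequired and size limit; the alphabet, `next` and the modulus are illustrative only:

    #include <cstdio>
    #include <map>
    #include <vector>

    int main() {
        const int letters[] = {0, 1};                    // toy alphabet
        auto next = [](int state, int letter) {          // toy transition function
            return (2 * state + letter) % 5;
        };

        std::vector<int> states{0};                      // discovered states, initial one first
        std::map<int, size_t> index{{0, 0}};             // state -> its position in `states`
        std::vector<std::vector<size_t>> transitions;    // one row per processed state

        for (size_t i = 0; i < states.size(); ++i) {     // states grows while we iterate (BFS)
            std::vector<size_t> row;
            for (int letter : letters) {
                int ns = next(states[i], letter);
                auto it = index.find(ns);
                if (it == index.end()) {                 // first time we see this state
                    it = index.insert({ns, states.size()}).first;
                    states.push_back(ns);
                }
                row.push_back(it->second);
            }
            transitions.push_back(row);
        }

        for (size_t from = 0; from != transitions.size(); ++from)
            for (size_t l = 0; l != transitions[from].size(); ++l)
                std::printf("%zu --%d--> %zu\n", from, letters[l], transitions[from][l]);
        return 0;
    }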
diff --git a/contrib/libs/pire/pire/easy.cpp b/contrib/libs/pire/pire/easy.cpp
index 61e4384fab7..bcb56c693bb 100644
--- a/contrib/libs/pire/pire/easy.cpp
+++ b/contrib/libs/pire/pire/easy.cpp
@@ -1,33 +1,33 @@
-/*
- * easy.cpp -- static variables for Pire Easy facilities.
+/*
+ * easy.cpp -- static variables for Pire Easy facilities.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#include "easy.h"
-
-namespace Pire {
-
-const Option<const Encoding&> UTF8(&Pire::Encodings::Utf8);
-const Option<const Encoding&> LATIN1(&Pire::Encodings::Latin1);
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#include "easy.h"
+
+namespace Pire {
+
+const Option<const Encoding&> UTF8(&Pire::Encodings::Utf8);
+const Option<const Encoding&> LATIN1(&Pire::Encodings::Latin1);
+
const Option<Feature::Ptr> I(&Pire::Features::CaseInsensitive);
const Option<Feature::Ptr> ANDNOT(&Pire::Features::AndNotSupport);
-
-}
+
+}
diff --git a/contrib/libs/pire/pire/easy.h b/contrib/libs/pire/pire/easy.h
index a784252c5f5..c70e965353b 100644
--- a/contrib/libs/pire/pire/easy.h
+++ b/contrib/libs/pire/pire/easy.h
@@ -1,249 +1,249 @@
-/*
- * easy.h -- Pire Easy facilities.
+/*
+ * easy.h -- Pire Easy facilities.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-/**
- * For those who never reads documentation, does not need any mysterious features
- * there is a fast and easy way to start using Pire.
- *
- * Just type:
- *
- * Pire::Regexp sc("pattern of (my regexp)*", Pire::UTF8 | Pire::I);
- * if (sc.Matches("pattern of my regexp"))
- * std::cout << "Hooray!" << std::endl;
- *
- * Or, to go more crazy:
- *
- * if ("pattern of my regexp" ==~ sc)
- * std::cout << "What a perversion..." << std::endl;
- *
- * Scanner's constructor takes a pattern and a "bitwise ORed" combination of "flags".
- * Available "flags" are:
- * I - case insensitivity;
- * ANDNOT - support for additional operations (& and ~) inside the pattern;
- * UTF8 - treat pattern input sequence as UTF-8 (surprise!)
- * LATIN1 - guess what?
- *
- * (In fact, those are not "flags" and not "bitwise ORed". See code for details.)
- */
-
-#ifndef PIRE_EASY_H_INCLUDED
-#define PIRE_EASY_H_INCLUDED
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+/**
+ * For those who never read the documentation and do not need any mysterious features,
+ * there is a fast and easy way to start using Pire.
+ *
+ * Just type:
+ *
+ * Pire::Regexp sc("pattern of (my regexp)*", Pire::UTF8 | Pire::I);
+ * if (sc.Matches("pattern of my regexp"))
+ * std::cout << "Hooray!" << std::endl;
+ *
+ * Or, to go more crazy:
+ *
+ * if ("pattern of my regexp" ==~ sc)
+ * std::cout << "What a perversion..." << std::endl;
+ *
+ * Scanner's constructor takes a pattern and a "bitwise ORed" combination of "flags".
+ * Available "flags" are:
+ * I - case insensitivity;
+ * ANDNOT - support for additional operations (& and ~) inside the pattern;
+ * UTF8 - treat pattern input sequence as UTF-8 (surprise!)
+ * LATIN1 - guess what?
+ *
+ * (In fact, those are not "flags" and not "bitwise ORed". See code for details.)
+ */
+
+#ifndef PIRE_EASY_H_INCLUDED
+#define PIRE_EASY_H_INCLUDED
+
#include <iterator>
#include <contrib/libs/pire/pire/stub/stl.h>
-#include "pire.h"
-#include "vbitset.h"
-
-namespace Pire {
-
-template<class Arg> class Option;
-
-class Options {
-public:
- Options(): m_encoding(&Pire::Encodings::Latin1()) {}
- ~Options() { Clear(); }
-
- void Add(const Pire::Encoding& encoding) { m_encoding = &encoding; }
+#include "pire.h"
+#include "vbitset.h"
+
+namespace Pire {
+
+template<class Arg> class Option;
+
+class Options {
+public:
+ Options(): m_encoding(&Pire::Encodings::Latin1()) {}
+ ~Options() { Clear(); }
+
+ void Add(const Pire::Encoding& encoding) { m_encoding = &encoding; }
void Add(Feature::Ptr&& feature) { m_features.push_back(std::move(feature)); }
-
- struct Proxy {
- Options* const o;
- /*implicit*/ Proxy(Options* opts): o(opts) {}
- };
- operator Proxy() { return Proxy(this); }
-
- Options(Options& o): m_encoding(o.m_encoding) { m_features.swap(o.m_features); }
+
+ struct Proxy {
+ Options* const o;
+ /*implicit*/ Proxy(Options* opts): o(opts) {}
+ };
+ operator Proxy() { return Proxy(this); }
+
+ Options(Options& o): m_encoding(o.m_encoding) { m_features.swap(o.m_features); }
Options& operator = (Options& o) { m_encoding = o.m_encoding; m_features = std::move(o.m_features); o.Clear(); return *this; }
-
- Options(Proxy p): m_encoding(p.o->m_encoding) { m_features.swap(p.o->m_features); }
+
+ Options(Proxy p): m_encoding(p.o->m_encoding) { m_features.swap(p.o->m_features); }
Options& operator = (Proxy p) { m_encoding = p.o->m_encoding; m_features = std::move(p.o->m_features); p.o->Clear(); return *this; }
-
- void Apply(Lexer& lexer)
- {
- lexer.SetEncoding(*m_encoding);
+
+ void Apply(Lexer& lexer)
+ {
+ lexer.SetEncoding(*m_encoding);
for (auto&& i : m_features) {
lexer.AddFeature(i);
i = 0;
- }
- m_features.clear();
- }
-
- template<class ArgT>
- /*implicit*/ Options(const Option<ArgT>& option);
-
- const Pire::Encoding& Encoding() const { return *m_encoding; }
-
-private:
- const Pire::Encoding* m_encoding;
+ }
+ m_features.clear();
+ }
+
+ template<class ArgT>
+ /*implicit*/ Options(const Option<ArgT>& option);
+
+ const Pire::Encoding& Encoding() const { return *m_encoding; }
+
+private:
+ const Pire::Encoding* m_encoding;
TVector<Feature::Ptr> m_features;
-
- void Clear()
- {
- m_features.clear();
- }
-};
-
-template<class Arg>
-class Option {
-public:
- typedef Arg (*Ctor)();
-
- Option(Ctor ctor): m_ctor(ctor) {}
-
- friend Options operator | (Options::Proxy options, const Option<Arg>& self)
- {
- Options ret(options);
- ret.Add((*self.m_ctor)());
- return ret;
- }
-
- template<class Arg2>
- friend Options operator | (const Option<Arg2>& a, const Option<Arg>& b)
- {
- return Options() | a | b;
- }
-
-private:
- Ctor m_ctor;
-};
-
-
-extern const Option<const Encoding&> UTF8;
-extern const Option<const Encoding&> LATIN1;
-
+
+ void Clear()
+ {
+ m_features.clear();
+ }
+};
+
+template<class Arg>
+class Option {
+public:
+ typedef Arg (*Ctor)();
+
+ Option(Ctor ctor): m_ctor(ctor) {}
+
+ friend Options operator | (Options::Proxy options, const Option<Arg>& self)
+ {
+ Options ret(options);
+ ret.Add((*self.m_ctor)());
+ return ret;
+ }
+
+ template<class Arg2>
+ friend Options operator | (const Option<Arg2>& a, const Option<Arg>& b)
+ {
+ return Options() | a | b;
+ }
+
+private:
+ Ctor m_ctor;
+};
+
+
+extern const Option<const Encoding&> UTF8;
+extern const Option<const Encoding&> LATIN1;
+
extern const Option<Feature::Ptr> I;
extern const Option<Feature::Ptr> ANDNOT;
-
-
-class Regexp {
-public:
- template<class Pattern>
- explicit Regexp(Pattern pattern, Options options = Options())
- {
- Init(PatternBounds(pattern), options);
- }
-
- template<class Pattern, class Arg>
- Regexp(Pattern pattern, Option<Arg> option)
- {
- Init(PatternBounds(pattern), Options() | option);
- }
-
- explicit Regexp(Scanner sc): m_scanner(sc) {}
- explicit Regexp(SlowScanner ssc): m_slow(ssc) {}
-
+
+
+class Regexp {
+public:
+ template<class Pattern>
+ explicit Regexp(Pattern pattern, Options options = Options())
+ {
+ Init(PatternBounds(pattern), options);
+ }
+
+ template<class Pattern, class Arg>
+ Regexp(Pattern pattern, Option<Arg> option)
+ {
+ Init(PatternBounds(pattern), Options() | option);
+ }
+
+ explicit Regexp(Scanner sc): m_scanner(sc) {}
+ explicit Regexp(SlowScanner ssc): m_slow(ssc) {}
+
bool Matches(TStringBuf buf) const
- {
- if (!m_scanner.Empty())
+ {
+ if (!m_scanner.Empty())
return Runner(m_scanner).Begin().Run(buf).End();
- else
+ else
return Runner(m_slow).Begin().Run(buf).End();
- }
+ }
bool Matches(const char* begin, const char* end) const
- {
+ {
return Matches(TStringBuf(begin, end));
- }
-
- /// A helper class allowing '==~' operator for regexps
- class MatchProxy {
- public:
- MatchProxy(const Regexp& re): m_re(&re) {}
- friend bool operator == (const char* str, const MatchProxy& re) { return re.m_re->Matches(str); }
- friend bool operator == (const ystring& str, const MatchProxy& re) { return re.m_re->Matches(str); }
-
- private:
- const Regexp* m_re;
- };
- MatchProxy operator ~() const { return MatchProxy(*this); }
-
-private:
- Scanner m_scanner;
- SlowScanner m_slow;
-
- ypair<const char*, const char*> PatternBounds(const ystring& pattern)
- {
- static const char c = 0;
- return pattern.empty() ? ymake_pair(&c, &c) : ymake_pair(pattern.c_str(), pattern.c_str() + pattern.size());
- }
-
- ypair<const char*, const char*> PatternBounds(const char* pattern)
- {
- return ymake_pair(pattern, pattern + strlen(pattern));
- }
-
- void Init(ypair<const char*, const char*> rawPattern, Options options)
- {
+ }
+
+ /// A helper class allowing '==~' operator for regexps
+ class MatchProxy {
+ public:
+ MatchProxy(const Regexp& re): m_re(&re) {}
+ friend bool operator == (const char* str, const MatchProxy& re) { return re.m_re->Matches(str); }
+ friend bool operator == (const ystring& str, const MatchProxy& re) { return re.m_re->Matches(str); }
+
+ private:
+ const Regexp* m_re;
+ };
+ MatchProxy operator ~() const { return MatchProxy(*this); }
+
+private:
+ Scanner m_scanner;
+ SlowScanner m_slow;
+
+ ypair<const char*, const char*> PatternBounds(const ystring& pattern)
+ {
+ static const char c = 0;
+ return pattern.empty() ? ymake_pair(&c, &c) : ymake_pair(pattern.c_str(), pattern.c_str() + pattern.size());
+ }
+
+ ypair<const char*, const char*> PatternBounds(const char* pattern)
+ {
+ return ymake_pair(pattern, pattern + strlen(pattern));
+ }
+
+ void Init(ypair<const char*, const char*> rawPattern, Options options)
+ {
TVector<wchar32> pattern;
- options.Encoding().FromLocal(rawPattern.first, rawPattern.second, std::back_inserter(pattern));
-
- Lexer lexer(pattern);
- options.Apply(lexer);
- Fsm fsm = lexer.Parse();
-
- if (!BeginsWithCircumflex(fsm))
- fsm.PrependAnything();
- fsm.AppendAnything();
-
- if (fsm.Determine())
- m_scanner = fsm.Compile<Scanner>();
- else
- m_slow = fsm.Compile<SlowScanner>();
- }
-
- static bool BeginsWithCircumflex(const Fsm& fsm)
- {
- typedef Fsm::StatesSet Set;
+ options.Encoding().FromLocal(rawPattern.first, rawPattern.second, std::back_inserter(pattern));
+
+ Lexer lexer(pattern);
+ options.Apply(lexer);
+ Fsm fsm = lexer.Parse();
+
+ if (!BeginsWithCircumflex(fsm))
+ fsm.PrependAnything();
+ fsm.AppendAnything();
+
+ if (fsm.Determine())
+ m_scanner = fsm.Compile<Scanner>();
+ else
+ m_slow = fsm.Compile<SlowScanner>();
+ }
+
+ static bool BeginsWithCircumflex(const Fsm& fsm)
+ {
+ typedef Fsm::StatesSet Set;
TDeque<size_t> queue;
- BitSet handled(fsm.Size());
-
- queue.push_back(fsm.Initial());
- handled.Set(fsm.Initial());
-
- while (!queue.empty()) {
- Set s = fsm.Destinations(queue.front(), SpecialChar::Epsilon);
+ BitSet handled(fsm.Size());
+
+ queue.push_back(fsm.Initial());
+ handled.Set(fsm.Initial());
+
+ while (!queue.empty()) {
+ Set s = fsm.Destinations(queue.front(), SpecialChar::Epsilon);
for (auto&& i : s) {
if (!handled.Test(i)) {
handled.Set(i);
queue.push_back(i);
- }
- }
-
+ }
+ }
+
TSet<Char> lets = fsm.OutgoingLetters(queue.front());
- lets.erase(SpecialChar::Epsilon);
- lets.erase(SpecialChar::BeginMark);
- if (!lets.empty())
- return false;
-
- queue.pop_front();
- }
-
- return true;
- }
-};
-
-};
-
-#endif
+ lets.erase(SpecialChar::Epsilon);
+ lets.erase(SpecialChar::BeginMark);
+ if (!lets.empty())
+ return false;
+
+ queue.pop_front();
+ }
+
+ return true;
+ }
+};
+
+};
+
+#endif
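
The header comment at the top of easy.h already shows the intended use of this front end; put together as a compilable example (the include path is the ydb-vendored one, the pattern and test strings are arbitrary):

    #include <iostream>
    #include <contrib/libs/pire/pire/easy.h>

    int main() {
        // Options combine with operator| exactly as described in the header comment.
        Pire::Regexp re("pattern of (my regexp)*", Pire::UTF8 | Pire::I);

        if (re.Matches("PATTERN OF MY REGEXP"))        // case-insensitive thanks to Pire::I
            std::cout << "matched\n";

        // MatchProxy makes the '==~' spelling work: it parses as str == ~re.
        if ("pattern of my regexp" ==~ re)
            std::cout << "matched via ==~\n";
        return 0;
    }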
diff --git a/contrib/libs/pire/pire/encoding.cpp b/contrib/libs/pire/pire/encoding.cpp
index 37ea1225bb8..842e2b534dd 100644
--- a/contrib/libs/pire/pire/encoding.cpp
+++ b/contrib/libs/pire/pire/encoding.cpp
@@ -1,134 +1,134 @@
-/*
- * encoding.cpp -- implementation of the encodings shipped with Pire.
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * encoding.cpp -- implementation of the encodings shipped with Pire.
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#include <stdexcept>
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#include <stdexcept>
#include <util/charset/utf8.h>
-#include <utility>
+#include <utility>
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/stub/utf8.h>
#include <contrib/libs/pire/pire/stub/singleton.h>
-#include "encoding.h"
-#include "fsm.h"
-
-
-namespace Pire {
-
-namespace {
-
- class Latin1: public Encoding {
- public:
+#include "encoding.h"
+#include "fsm.h"
+
+
+namespace Pire {
+
+namespace {
+
+ class Latin1: public Encoding {
+ public:
Latin1() : Encoding() {}
- wchar32 FromLocal(const char*& begin, const char* end) const
- {
- if (begin == end)
- throw Error("EOF reached in Pire::Latin1::fromLocal()");
- else if (static_cast<unsigned char>(*begin) >= 0x80)
- throw Error("Pire::Latin1::fromLocal(): wrong character encountered (>=0x80)");
- else
- return (wchar32) *begin++;
- }
-
- ystring ToLocal(wchar32 ch) const
- {
- if (ch < 0x80)
- return ystring(1, (char) ch);
- else
- return ystring();
- }
-
- void AppendDot(Fsm& fsm) const { fsm.AppendDot(); }
- };
-
- namespace UtfRanges {
-
- static const size_t MaxLen = 4;
+ wchar32 FromLocal(const char*& begin, const char* end) const
+ {
+ if (begin == end)
+ throw Error("EOF reached in Pire::Latin1::fromLocal()");
+ else if (static_cast<unsigned char>(*begin) >= 0x80)
+ throw Error("Pire::Latin1::fromLocal(): wrong character encountered (>=0x80)");
+ else
+ return (wchar32) *begin++;
+ }
+
+ ystring ToLocal(wchar32 ch) const
+ {
+ if (ch < 0x80)
+ return ystring(1, (char) ch);
+ else
+ return ystring();
+ }
+
+ void AppendDot(Fsm& fsm) const { fsm.AppendDot(); }
+ };
+
+ namespace UtfRanges {
+
+ static const size_t MaxLen = 4;
static const size_t First[MaxLen][2] = {
{0x00, 0x80},
{0xC0, 0xE0},
{0xE0, 0xF0},
{0xF0, 0xF8}
- };
+ };
static const size_t Next[2] = {0x80, 0xC0};
- }
-
+ }
- class Utf8: public Encoding {
- public:
+
+ class Utf8: public Encoding {
+ public:
Utf8() : Encoding() {}
- wchar32 FromLocal(const char*& begin, const char* end) const
- {
- wchar32 rune;
- size_t len;
+ wchar32 FromLocal(const char*& begin, const char* end) const
+ {
+ wchar32 rune;
+ size_t len;
if (SafeReadUTF8Char(rune, len, reinterpret_cast<const unsigned char*>(begin), reinterpret_cast<const unsigned char*>(end)) != RECODE_OK)
- throw Error("Error reading UTF8 sequence");
- begin += len;
- return rune;
- }
-
- ystring ToLocal(wchar32 c) const
- {
+ throw Error("Error reading UTF8 sequence");
+ begin += len;
+ return rune;
+ }
+
+ ystring ToLocal(wchar32 c) const
+ {
ystring ret(UTF8RuneLenByUCS(c), ' ');
- size_t len;
- unsigned char* p = (unsigned char*) &*ret.begin();
+ size_t len;
+ unsigned char* p = (unsigned char*) &*ret.begin();
if (SafeWriteUTF8Char(c, len, p, p + ret.size()) != RECODE_OK)
Y_ASSERT(!"Pire::UTF8::toLocal(): Internal error");
- return ret;
- }
-
- void AppendDot(Fsm& fsm) const
- {
- size_t last = fsm.Resize(fsm.Size() + UtfRanges::MaxLen);
- for (size_t i = 0; i < UtfRanges::MaxLen; ++i)
+ return ret;
+ }
+
+ void AppendDot(Fsm& fsm) const
+ {
+ size_t last = fsm.Resize(fsm.Size() + UtfRanges::MaxLen);
+ for (size_t i = 0; i < UtfRanges::MaxLen; ++i)
for (size_t letter = UtfRanges::First[i][0]; letter < UtfRanges::First[i][1]; ++letter)
- fsm.ConnectFinal(fsm.Size() - i - 1, letter);
- for (size_t i = 0; i < UtfRanges::MaxLen - 1; ++i)
+ fsm.ConnectFinal(fsm.Size() - i - 1, letter);
+ for (size_t i = 0; i < UtfRanges::MaxLen - 1; ++i)
for (size_t letter = UtfRanges::Next[0]; letter < UtfRanges::Next[1]; ++letter)
- fsm.Connect(last + i, last + i + 1, letter);
- fsm.ClearFinal();
- fsm.SetFinal(fsm.Size() - 1, true);
- fsm.SetIsDetermined(false);
- }
- };
-}
-
-namespace Encodings {
-
+ fsm.Connect(last + i, last + i + 1, letter);
+ fsm.ClearFinal();
+ fsm.SetFinal(fsm.Size() - 1, true);
+ fsm.SetIsDetermined(false);
+ }
+ };
+}
+
+namespace Encodings {
+
const Encoding& Utf8()
{
static const Pire::Utf8 utf8;
return utf8;
}
-
+
const Encoding& Latin1()
{
static const Pire::Latin1 latin1;
return latin1;
}
-}
-
-}
+}
+
+}
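
The UtfRanges table above is what AppendDot() uses to build a "match any UTF-8 character" fragment: First lists the lead-byte ranges for 1- to 4-byte sequences and Next is the continuation-byte range. The sketch below only classifies a lead byte against those same ranges (Utf8SeqLen is an illustrative helper, not part of Pire):

    #include <cstdio>

    // Sequence length implied by a UTF-8 lead byte, per UtfRanges::First:
    // [0x00,0x80) -> 1, [0xC0,0xE0) -> 2, [0xE0,0xF0) -> 3, [0xF0,0xF8) -> 4.
    // Continuation bytes [0x80,0xC0) and invalid lead bytes yield 0.
    static int Utf8SeqLen(unsigned char b) {
        static const unsigned first[4][2] = {
            {0x00, 0x80}, {0xC0, 0xE0}, {0xE0, 0xF0}, {0xF0, 0xF8}
        };
        for (int len = 0; len < 4; ++len)
            if (b >= first[len][0] && b < first[len][1])
                return len + 1;
        return 0;
    }

    int main() {
        const unsigned char probes[] = {0x41, 0xD0, 0xE2, 0xF0, 0x80};
        for (unsigned char b : probes)
            std::printf("0x%02X -> %d\n", b, Utf8SeqLen(b));   // 1 2 3 4 0
        return 0;
    }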
diff --git a/contrib/libs/pire/pire/encoding.h b/contrib/libs/pire/pire/encoding.h
index b4117afa45a..b2c8bb9b417 100644
--- a/contrib/libs/pire/pire/encoding.h
+++ b/contrib/libs/pire/pire/encoding.h
@@ -1,71 +1,71 @@
-/*
- * encoding.h -- the interface of Encoding.
+/*
+ * encoding.h -- the interface of Encoding.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_ENCODING_H
-#define PIRE_ENCODING_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_ENCODING_H
+#define PIRE_ENCODING_H
+
+
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/stub/stl.h>
-
-namespace Pire {
-
-class Fsm;
-
-class Encoding {
-public:
- virtual ~Encoding() {}
-
- /// Should read bytes from @p begin and return the corresponding Unicode
- /// character, advancing @p begin.
- virtual wchar32 FromLocal(const char*& begin, const char* end) const = 0;
-
- /// Opposite to FromLocal(), transforms given Unicode character into
- /// the string in the encoding.
- virtual ystring ToLocal(wchar32 c) const = 0;
-
- /// Given the FSM, should append the representation of a dot in the ecoding
- /// to that FSM.
- virtual void AppendDot(Fsm&) const = 0;
-
- template<class OutputIter>
- OutputIter FromLocal(const char* begin, const char* end, OutputIter iter) const
- {
- while (begin != end) {
- *iter = FromLocal(begin, end);
- ++iter;
- }
- return iter;
- }
-};
-
-namespace Encodings {
- const Encoding& Latin1();
- const Encoding& Utf8();
-
-};
-
-
-};
-
-#endif
+
+namespace Pire {
+
+class Fsm;
+
+class Encoding {
+public:
+ virtual ~Encoding() {}
+
+ /// Should read bytes from @p begin and return the corresponding Unicode
+ /// character, advancing @p begin.
+ virtual wchar32 FromLocal(const char*& begin, const char* end) const = 0;
+
+ /// Opposite to FromLocal(), transforms given Unicode character into
+ /// the string in the encoding.
+ virtual ystring ToLocal(wchar32 c) const = 0;
+
+ /// Given the FSM, should append the representation of a dot in the encoding
+ /// to that FSM.
+ virtual void AppendDot(Fsm&) const = 0;
+
+ template<class OutputIter>
+ OutputIter FromLocal(const char* begin, const char* end, OutputIter iter) const
+ {
+ while (begin != end) {
+ *iter = FromLocal(begin, end);
+ ++iter;
+ }
+ return iter;
+ }
+};
+
+namespace Encodings {
+ const Encoding& Latin1();
+ const Encoding& Utf8();
+
+};
+
+
+};
+
+#endif
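
The non-virtual FromLocal() template at the end of the class is how easy.h turns a raw byte pattern into a wchar32 sequence before lexing. A sketch of that conversion with the UTF-8 encoding, assuming the ydb-vendored include path and that wchar32 is visible at global scope as in the Arcadia stubs:

    #include <iostream>
    #include <iterator>
    #include <vector>
    #include <contrib/libs/pire/pire/encoding.h>

    int main() {
        const char pattern[] = "a\xD0\xB1z";              // 'a', Cyrillic 'be' (U+0431), 'z' in UTF-8
        std::vector<wchar32> chars;

        const Pire::Encoding& enc = Pire::Encodings::Utf8();
        enc.FromLocal(pattern, pattern + sizeof(pattern) - 1, std::back_inserter(chars));

        std::cout << chars.size() << " code points:" << std::hex;   // 3 code points
        for (wchar32 c : chars)
            std::cout << " U+" << c;                                 // 61 431 7a
        std::cout << "\n";
        return 0;
    }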
diff --git a/contrib/libs/pire/pire/extra.h b/contrib/libs/pire/pire/extra.h
index 373607838d6..2e4358acdde 100644
--- a/contrib/libs/pire/pire/extra.h
+++ b/contrib/libs/pire/pire/extra.h
@@ -1,33 +1,33 @@
-/*
- * extra.h -- a single include file, which enables additional features,
- * unnecessary for major part of users.
+/*
+ * extra.h -- a single include file, which enables additional features,
+ * unnecessary for the majority of users.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_EXTRA_H
-#define PIRE_EXTRA_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_EXTRA_H
+#define PIRE_EXTRA_H
+
+
#include <contrib/libs/pire/pire/extra/capture.h>
#include <contrib/libs/pire/pire/extra/count.h>
#include <contrib/libs/pire/pire/extra/glyphs.h>
-
-#endif
+
+#endif
diff --git a/contrib/libs/pire/pire/extra/capture.cpp b/contrib/libs/pire/pire/extra/capture.cpp
index ea9e287f00a..fb4cdf6d815 100644
--- a/contrib/libs/pire/pire/extra/capture.cpp
+++ b/contrib/libs/pire/pire/extra/capture.cpp
@@ -1,48 +1,48 @@
-/*
- * capture.cpp -- a helper for compiling CapturingScanner
+/*
+ * capture.cpp -- a helper for compiling CapturingScanner
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#include <stdexcept>
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#include <stdexcept>
+
+#include "capture.h"
-#include "capture.h"
-
-namespace Pire {
-
-namespace {
- class CaptureImpl: public Feature {
- public:
- CaptureImpl(size_t pos)
- : State(0)
- , Pos(pos)
- , Level(0)
+namespace Pire {
+
+namespace {
+ class CaptureImpl: public Feature {
+ public:
+ CaptureImpl(size_t pos)
+ : State(0)
+ , Pos(pos)
+ , Level(0)
, StateRepetition(NoRepetition)
- {}
-
+ {}
+
bool Accepts(wchar32 c) const { return c == '(' || c == '+' || c == '*' || c == '?' || c == '{'; }
- Term Lex()
- {
+ Term Lex()
+ {
wchar32 c = GetChar();
if (!Accepts(c))
- Error("How did we get here?!..");
+ Error("How did we get here?!..");
if (c != '(') {
wchar32 next = PeekChar();
if (next == '?') {
@@ -53,13 +53,13 @@ namespace {
StateRepetition = GreedyRepetition;
}
else if (State == 0 && Pos > 1)
- --Pos;
- else if (State == 0 && Pos == 1) {
- State = 1;
- Level = 0;
- } else if (State == 1) {
- ++Level;
- }
+ --Pos;
+ else if (State == 0 && Pos == 1) {
+ State = 1;
+ Level = 0;
+ } else if (State == 1) {
+ ++Level;
+ }
if (c == '(')
return Term(TokenTypes::Open);
else if (c == '+')
@@ -72,24 +72,24 @@ namespace {
UngetChar(c);
return Term(0);
}
- }
-
- void Parenthesized(Fsm& fsm)
- {
+ }
+
+ void Parenthesized(Fsm& fsm)
+ {
if (StateRepetition != NoRepetition) {
bool greedy = (StateRepetition == GreedyRepetition);
SetRepetitionMark(fsm, greedy);
StateRepetition = NoRepetition;
} else if (State == 1 && Level == 0) {
- SetCaptureMark(fsm);
- State = 2;
- } else if (State == 1 && Level > 0)
- --Level;
- }
- private:
- unsigned State;
- size_t Pos;
- size_t Level;
+ SetCaptureMark(fsm);
+ State = 2;
+ } else if (State == 1 && Level > 0)
+ --Level;
+ }
+ private:
+ unsigned State;
+ size_t Pos;
+ size_t Level;
RepetitionTypes StateRepetition;
void SetRepetitionMark(Fsm& fsm, bool greedy)
@@ -108,28 +108,28 @@ namespace {
fsm.SetIsDetermined(false);
}
- void SetCaptureMark(Fsm& fsm)
- {
- fsm.Resize(fsm.Size() + 2);
- fsm.Connect(fsm.Size() - 2, fsm.Initial());
- fsm.ConnectFinal(fsm.Size() - 1);
-
- fsm.SetOutput(fsm.Size() - 2, fsm.Initial(), CapturingScanner::BeginCapture);
- for (size_t state = 0; state < fsm.Size() - 2; ++state)
- if (fsm.IsFinal(state))
- fsm.SetOutput(state, fsm.Size() - 1, CapturingScanner::EndCapture);
-
- fsm.SetInitial(fsm.Size() - 2);
- fsm.ClearFinal();
- fsm.SetFinal(fsm.Size() - 1, true);
- fsm.SetIsDetermined(false);
- }
-
- void FinishBuild() {}
- };
-}
-
-namespace Features {
+ void SetCaptureMark(Fsm& fsm)
+ {
+ fsm.Resize(fsm.Size() + 2);
+ fsm.Connect(fsm.Size() - 2, fsm.Initial());
+ fsm.ConnectFinal(fsm.Size() - 1);
+
+ fsm.SetOutput(fsm.Size() - 2, fsm.Initial(), CapturingScanner::BeginCapture);
+ for (size_t state = 0; state < fsm.Size() - 2; ++state)
+ if (fsm.IsFinal(state))
+ fsm.SetOutput(state, fsm.Size() - 1, CapturingScanner::EndCapture);
+
+ fsm.SetInitial(fsm.Size() - 2);
+ fsm.ClearFinal();
+ fsm.SetFinal(fsm.Size() - 1, true);
+ fsm.SetIsDetermined(false);
+ }
+
+ void FinishBuild() {}
+ };
+}
+
+namespace Features {
Feature::Ptr Capture(size_t pos) { return Feature::Ptr(new CaptureImpl(pos)); }
-};
-}
+};
+}
diff --git a/contrib/libs/pire/pire/extra/capture.h b/contrib/libs/pire/pire/extra/capture.h
index 1c7ada9b56b..8399914a67f 100644
--- a/contrib/libs/pire/pire/extra/capture.h
+++ b/contrib/libs/pire/pire/extra/capture.h
@@ -1,30 +1,30 @@
-/*
- * capture.h -- definition of CapturingScanner
+/*
+ * capture.h -- definition of CapturingScanner
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_EXTRA_CAPTURE_H
-#define PIRE_EXTRA_CAPTURE_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_EXTRA_CAPTURE_H
+#define PIRE_EXTRA_CAPTURE_H
+
+
#include <contrib/libs/pire/pire/approx_matching.h>
#include <contrib/libs/pire/pire/scanners/loaded.h>
#include <contrib/libs/pire/pire/scanners/multi.h>
@@ -32,77 +32,77 @@
#include <contrib/libs/pire/pire/fsm.h>
#include <contrib/libs/pire/pire/re_lexer.h>
#include <contrib/libs/pire/pire/run.h>
-
+
#include <array>
-namespace Pire {
-
-/**
-* A capturing scanner.
-* Requires source FSM to be deterministic, matches input string
-* against a single regexp (taking O(strlen(str)) time) and
-* captures a substring between a single pair of parentheses.
-*
-* Requires regexp pattern to satisfy certain conditions
-* (I still do not know exactly what they are :) )
-*/
-class CapturingScanner: public LoadedScanner {
-public:
- enum {
- NoAction = 0,
- BeginCapture = 1,
- EndCapture = 2,
-
- FinalFlag = 1
- };
-
- class State {
- public:
- bool Captured() const { return (m_begin != npos) && (m_end != npos); }
- size_t Begin() const { return m_begin; }
- size_t End() const { return m_end; }
- private:
- static const size_t npos = static_cast<size_t>(-1);
- size_t m_state;
- size_t m_begin;
- size_t m_end;
- size_t m_counter;
- friend class CapturingScanner;
-
-#ifdef PIRE_DEBUG
- friend yostream& operator << (yostream& s, const State& state)
- {
- s << state.m_state;
- if (state.m_begin != State::npos || state.m_end != npos) {
- s << " [";
- if (state.m_begin != State::npos)
- s << 'b';
- if (state.m_end != State::npos)
- s << 'e';
- s << "]";
- }
- return s;
- }
-#endif
- };
-
- void Initialize(State& state) const
- {
- state.m_state = m.initial;
- state.m_begin = state.m_end = State::npos;
- state.m_counter = 0;
- }
-
- void TakeAction(State& s, Action a) const
- {
+namespace Pire {
+
+/**
+* A capturing scanner.
+* Requires source FSM to be deterministic, matches input string
+* against a single regexp (taking O(strlen(str)) time) and
+* captures a substring between a single pair of parentheses.
+*
+* Requires regexp pattern to satisfy certain conditions
+* (I still do not know exactly what they are :) )
+*/
+class CapturingScanner: public LoadedScanner {
+public:
+ enum {
+ NoAction = 0,
+ BeginCapture = 1,
+ EndCapture = 2,
+
+ FinalFlag = 1
+ };
+
+ class State {
+ public:
+ bool Captured() const { return (m_begin != npos) && (m_end != npos); }
+ size_t Begin() const { return m_begin; }
+ size_t End() const { return m_end; }
+ private:
+ static const size_t npos = static_cast<size_t>(-1);
+ size_t m_state;
+ size_t m_begin;
+ size_t m_end;
+ size_t m_counter;
+ friend class CapturingScanner;
+
+#ifdef PIRE_DEBUG
+ friend yostream& operator << (yostream& s, const State& state)
+ {
+ s << state.m_state;
+ if (state.m_begin != State::npos || state.m_end != npos) {
+ s << " [";
+ if (state.m_begin != State::npos)
+ s << 'b';
+ if (state.m_end != State::npos)
+ s << 'e';
+ s << "]";
+ }
+ return s;
+ }
+#endif
+ };
+
+ void Initialize(State& state) const
+ {
+ state.m_state = m.initial;
+ state.m_begin = state.m_end = State::npos;
+ state.m_counter = 0;
+ }
+
+ void TakeAction(State& s, Action a) const
+ {
if ((a & BeginCapture) && !s.Captured())
s.m_begin = s.m_counter - 1;
else if (a & EndCapture) {
if (s.m_end == State::npos)
s.m_end = s.m_counter - 1;
}
- }
-
+ }
+
Char Translate(Char ch) const
{
return m_letters[static_cast<size_t>(ch)];
@@ -117,47 +117,47 @@ public:
return x.action;
}
- Action Next(State& s, Char c) const
- {
+ Action Next(State& s, Char c) const
+ {
return NextTranslated(s, Translate(c));
- }
-
- Action Next(const State& current, State& n, Char c) const
- {
- n = current;
- return Next(n, c);
- }
-
- bool CanStop(const State& s) const
- {
- return Final(s);
- }
-
- bool Final(const State& s) const { return m_tags[(reinterpret_cast<Transition*>(s.m_state) - m_jumps) / m.lettersCount] & FinalFlag; }
-
- bool Dead(const State&) const { return false; }
-
- CapturingScanner() {}
- CapturingScanner(const CapturingScanner& s): LoadedScanner(s) {}
+ }
+
+ Action Next(const State& current, State& n, Char c) const
+ {
+ n = current;
+ return Next(n, c);
+ }
+
+ bool CanStop(const State& s) const
+ {
+ return Final(s);
+ }
+
+ bool Final(const State& s) const { return m_tags[(reinterpret_cast<Transition*>(s.m_state) - m_jumps) / m.lettersCount] & FinalFlag; }
+
+ bool Dead(const State&) const { return false; }
+
+ CapturingScanner() {}
+ CapturingScanner(const CapturingScanner& s): LoadedScanner(s) {}
explicit CapturingScanner(Fsm& fsm, size_t distance = 0)
- {
+ {
if (distance) {
fsm = CreateApproxFsm(fsm, distance);
}
- fsm.Canonize();
- Init(fsm.Size(), fsm.Letters(), fsm.Initial());
- BuildScanner(fsm, *this);
- }
-
- void Swap(CapturingScanner& s) { LoadedScanner::Swap(s); }
- CapturingScanner& operator = (const CapturingScanner& s) { CapturingScanner(s).Swap(*this); return *this; }
-
- size_t StateIndex(const State& s) const { return StateIdx(s.m_state); }
-
-private:
-
- friend void BuildScanner<CapturingScanner>(const Fsm&, CapturingScanner&);
-};
+ fsm.Canonize();
+ Init(fsm.Size(), fsm.Letters(), fsm.Initial());
+ BuildScanner(fsm, *this);
+ }
+
+ void Swap(CapturingScanner& s) { LoadedScanner::Swap(s); }
+ CapturingScanner& operator = (const CapturingScanner& s) { CapturingScanner(s).Swap(*this); return *this; }
+
+ size_t StateIndex(const State& s) const { return StateIdx(s.m_state); }
+
+private:
+
+ friend void BuildScanner<CapturingScanner>(const Fsm&, CapturingScanner&);
+};
enum RepetitionTypes { // They are sorted by their priorities
NonGreedyRepetition,
@@ -582,11 +582,11 @@ public:
}
};
-namespace Features {
+namespace Features {
Feature::Ptr Capture(size_t pos);
-}
-
-}
-
-
-#endif
+}
+
+}
+
+
+#endif
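
Editor's note: a minimal usage sketch for the CapturingScanner declared above (not part of the diff). It assumes the usual Pire Lexer/Step/Run API and that capture offsets are counted in the scanned stream, which starts with BeginMark, hence the -1 adjustment; the pattern and function name are illustrative only.

#include <contrib/libs/pire/pire/pire.h>
#include <contrib/libs/pire/pire/extra.h>
#include <string>

// Sketch: compile a pattern with one capturing group and extract the
// captured substring from `text`, or return an empty string on no match.
std::string CaptureNumber(const std::string& text)
{
    std::string pattern = "id=([0-9]+);";
    Pire::Lexer lexer(pattern.begin(), pattern.end());
    lexer.AddFeature(Pire::Features::Capture(1));   // capture group #1
    Pire::CapturingScanner scanner = lexer.Parse().Compile<Pire::CapturingScanner>();

    Pire::CapturingScanner::State state;
    scanner.Initialize(state);
    Pire::Step(scanner, state, Pire::BeginMark);
    Pire::Run(scanner, state, text.data(), text.data() + text.size());
    Pire::Step(scanner, state, Pire::EndMark);

    if (!state.Captured())
        return std::string();
    // Begin()/End() include the BeginMark step, so shift back by one
    // (an assumption worth checking against the capture unit tests).
    return text.substr(state.Begin() - 1, state.End() - state.Begin());
}
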
diff --git a/contrib/libs/pire/pire/extra/count.cpp b/contrib/libs/pire/pire/extra/count.cpp
index f79dba506c1..468ff61d924 100644
--- a/contrib/libs/pire/pire/extra/count.cpp
+++ b/contrib/libs/pire/pire/extra/count.cpp
@@ -1,26 +1,26 @@
-/*
- * count.cpp -- CountingScanner compiling routine
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * count.cpp -- CountingScanner compiling routine
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
#include "count.h"
#include <contrib/libs/pire/pire/fsm.h>
@@ -31,8 +31,8 @@
#include <contrib/libs/pire/pire/stub/stl.h>
#include <tuple>
-
-namespace Pire {
+
+namespace Pire {
namespace Impl {
@@ -740,103 +740,103 @@ void CountingFsm::SwapTaskOutputs(CountingFsmTask& task) {
}
-namespace {
- Pire::Fsm FsmForDot() { Pire::Fsm f; f.AppendDot(); return f; }
- Pire::Fsm FsmForChar(Pire::Char c) { Pire::Fsm f; f.AppendSpecial(c); return f; }
-}
-
-CountingScanner::CountingScanner(const Fsm& re, const Fsm& sep)
-{
- Fsm res = re;
- res.Surround();
- Fsm sep_re = ((sep & ~res) /* | Fsm()*/) + re;
- sep_re.Determine();
-
- Fsm dup = sep_re;
- for (size_t i = 0; i < dup.Size(); ++i)
- dup.SetTag(i, Matched);
- size_t oldsize = sep_re.Size();
- sep_re.Import(dup);
- for (Fsm::FinalTable::const_iterator i = sep_re.Finals().begin(), ie = sep_re.Finals().end(); i != ie; ++i)
- if (*i < oldsize)
- sep_re.Connect(*i, oldsize + *i);
-
- sep_re |= (FsmForDot() | FsmForChar(Pire::BeginMark) | FsmForChar(Pire::EndMark));
-
- // Make a full Cartesian product of two sep_res
- sep_re.Determine();
- sep_re.Unsparse();
+namespace {
+ Pire::Fsm FsmForDot() { Pire::Fsm f; f.AppendDot(); return f; }
+ Pire::Fsm FsmForChar(Pire::Char c) { Pire::Fsm f; f.AppendSpecial(c); return f; }
+}
+
+CountingScanner::CountingScanner(const Fsm& re, const Fsm& sep)
+{
+ Fsm res = re;
+ res.Surround();
+ Fsm sep_re = ((sep & ~res) /* | Fsm()*/) + re;
+ sep_re.Determine();
+
+ Fsm dup = sep_re;
+ for (size_t i = 0; i < dup.Size(); ++i)
+ dup.SetTag(i, Matched);
+ size_t oldsize = sep_re.Size();
+ sep_re.Import(dup);
+ for (Fsm::FinalTable::const_iterator i = sep_re.Finals().begin(), ie = sep_re.Finals().end(); i != ie; ++i)
+ if (*i < oldsize)
+ sep_re.Connect(*i, oldsize + *i);
+
+ sep_re |= (FsmForDot() | FsmForChar(Pire::BeginMark) | FsmForChar(Pire::EndMark));
+
+ // Make a full Cartesian product of two sep_res
+ sep_re.Determine();
+ sep_re.Unsparse();
TSet<size_t> dead = sep_re.DeadStates();
-
- PIRE_IFDEBUG(Cdbg << "=== Original FSM ===" << Endl << sep_re << ">>> " << sep_re.Size() << " states, dead: [" << Join(dead.begin(), dead.end(), ", ") << "]" << Endl);
-
- Fsm sq;
-
- typedef ypair<size_t, size_t> NewState;
+
+ PIRE_IFDEBUG(Cdbg << "=== Original FSM ===" << Endl << sep_re << ">>> " << sep_re.Size() << " states, dead: [" << Join(dead.begin(), dead.end(), ", ") << "]" << Endl);
+
+ Fsm sq;
+
+ typedef ypair<size_t, size_t> NewState;
TVector<NewState> states;
TMap<NewState, size_t> invstates;
-
- states.push_back(NewState(sep_re.Initial(), sep_re.Initial()));
- invstates.insert(ymake_pair(states.back(), states.size() - 1));
-
-	// TODO: this loop reminds me of a general determination task...
- for (size_t curstate = 0; curstate < states.size(); ++curstate) {
-
- unsigned long tag = sep_re.Tag(states[curstate].first);
- if (tag)
- sq.SetTag(curstate, tag);
- sq.SetFinal(curstate, sep_re.IsFinal(states[curstate].first));
-
- PIRE_IFDEBUG(Cdbg << "State " << curstate << " = (" << states[curstate].first << ", " << states[curstate].second << ")" << Endl);
- for (Fsm::LettersTbl::ConstIterator lit = sep_re.Letters().Begin(), lie = sep_re.Letters().End(); lit != lie; ++lit) {
-
- Char letter = lit->first;
-
- const Fsm::StatesSet& mr = sep_re.Destinations(states[curstate].first, letter);
- const Fsm::StatesSet& br = sep_re.Destinations(states[curstate].second, letter);
-
- if (mr.size() != 1)
+
+ states.push_back(NewState(sep_re.Initial(), sep_re.Initial()));
+ invstates.insert(ymake_pair(states.back(), states.size() - 1));
+
+	// TODO: this loop reminds me of a general determination task...
+ for (size_t curstate = 0; curstate < states.size(); ++curstate) {
+
+ unsigned long tag = sep_re.Tag(states[curstate].first);
+ if (tag)
+ sq.SetTag(curstate, tag);
+ sq.SetFinal(curstate, sep_re.IsFinal(states[curstate].first));
+
+ PIRE_IFDEBUG(Cdbg << "State " << curstate << " = (" << states[curstate].first << ", " << states[curstate].second << ")" << Endl);
+ for (Fsm::LettersTbl::ConstIterator lit = sep_re.Letters().Begin(), lie = sep_re.Letters().End(); lit != lie; ++lit) {
+
+ Char letter = lit->first;
+
+ const Fsm::StatesSet& mr = sep_re.Destinations(states[curstate].first, letter);
+ const Fsm::StatesSet& br = sep_re.Destinations(states[curstate].second, letter);
+
+ if (mr.size() != 1)
Y_ASSERT(!"Wrong transition size for main");
- if (br.size() != 1)
+ if (br.size() != 1)
Y_ASSERT(!"Wrong transition size for backup");
-
- NewState ns(*mr.begin(), *br.begin());
+
+ NewState ns(*mr.begin(), *br.begin());
PIRE_IFDEBUG(NewState savedNs = ns);
- unsigned long outputs = 0;
-
- PIRE_IFDEBUG(ystring dbgout);
- if (dead.find(ns.first) != dead.end()) {
- PIRE_IFDEBUG(dbgout = ((sep_re.Tag(ns.first) & Matched) ? ", ++cur" : ", max <- cur"));
- outputs = DeadFlag | (sep_re.Tag(ns.first) & Matched);
- ns.first = ns.second;
- }
- if (sep_re.IsFinal(ns.first) || (sep_re.IsFinal(ns.second) && !(sep_re.Tag(ns.first) & Matched)))
- ns.second = sep_re.Initial();
-
- PIRE_IFDEBUG(if (ns != savedNs) Cdbg << "Diverted transition to (" << savedNs.first << ", " << savedNs.second << ") on " << (char) letter << " to (" << ns.first << ", " << ns.second << ")" << dbgout << Endl);
-
+ unsigned long outputs = 0;
+
+ PIRE_IFDEBUG(ystring dbgout);
+ if (dead.find(ns.first) != dead.end()) {
+ PIRE_IFDEBUG(dbgout = ((sep_re.Tag(ns.first) & Matched) ? ", ++cur" : ", max <- cur"));
+ outputs = DeadFlag | (sep_re.Tag(ns.first) & Matched);
+ ns.first = ns.second;
+ }
+ if (sep_re.IsFinal(ns.first) || (sep_re.IsFinal(ns.second) && !(sep_re.Tag(ns.first) & Matched)))
+ ns.second = sep_re.Initial();
+
+ PIRE_IFDEBUG(if (ns != savedNs) Cdbg << "Diverted transition to (" << savedNs.first << ", " << savedNs.second << ") on " << (char) letter << " to (" << ns.first << ", " << ns.second << ")" << dbgout << Endl);
+
TMap<NewState, size_t>::iterator nsi = invstates.find(ns);
- if (nsi == invstates.end()) {
- PIRE_IFDEBUG(Cdbg << "New state " << states.size() << " = (" << ns.first << ", " << ns.second << ")" << Endl);
- states.push_back(ns);
- nsi = invstates.insert(ymake_pair(states.back(), states.size() - 1)).first;
- sq.Resize(states.size());
- }
-
+ if (nsi == invstates.end()) {
+ PIRE_IFDEBUG(Cdbg << "New state " << states.size() << " = (" << ns.first << ", " << ns.second << ")" << Endl);
+ states.push_back(ns);
+ nsi = invstates.insert(ymake_pair(states.back(), states.size() - 1)).first;
+ sq.Resize(states.size());
+ }
+
for (TVector<Char>::const_iterator li = lit->second.second.begin(), le = lit->second.second.end(); li != le; ++li)
- sq.Connect(curstate, nsi->second, *li);
- if (outputs)
- sq.SetOutput(curstate, nsi->second, outputs);
- }
- }
-
- sq.Determine();
-
- PIRE_IFDEBUG(Cdbg << "=== FSM ===" << Endl << sq << Endl);
- Init(sq.Size(), sq.Letters(), sq.Initial(), 1);
- BuildScanner(sq, *this);
-}
-
+ sq.Connect(curstate, nsi->second, *li);
+ if (outputs)
+ sq.SetOutput(curstate, nsi->second, outputs);
+ }
+ }
+
+ sq.Determine();
+
+ PIRE_IFDEBUG(Cdbg << "=== FSM ===" << Endl << sq << Endl);
+ Init(sq.Size(), sq.Letters(), sq.Initial(), 1);
+ BuildScanner(sq, *this);
+}
+
namespace Impl {
template <class AdvancedScanner>
AdvancedScanner MakeAdvancedCountingScanner(const Fsm& re, const Fsm& sep, bool* simple) {
@@ -848,7 +848,7 @@ AdvancedScanner MakeAdvancedCountingScanner(const Fsm& re, const Fsm& sep, bool*
if (simple) {
*simple = countingFsm.Simple();
}
-
+
const auto& determined = countingFsm.Determined();
const auto& letters = countingFsm.Letters();
@@ -877,11 +877,11 @@ NoGlueLimitCountingScanner::NoGlueLimitCountingScanner(const Fsm& re, const Fsm&
}
-namespace Impl {
-
+namespace Impl {
+
template<class Scanner>
class CountingScannerGlueTask: public ScannerGlueCommon<Scanner> {
-public:
+public:
using typename ScannerGlueCommon<Scanner>::State;
using TAction = typename Scanner::Action;
using InternalState = typename Scanner::InternalState;
@@ -889,36 +889,36 @@ public:
CountingScannerGlueTask(const Scanner& lhs, const Scanner& rhs)
: ScannerGlueCommon<Scanner>(lhs, rhs, LettersEquality<Scanner>(lhs.m_letters, rhs.m_letters))
- {
- }
+ {
+ }
void AcceptStates(const TVector<State>& states)
- {
- States = states;
+ {
+ States = states;
this->SetSc(THolder<Scanner>(new Scanner));
this->Sc().Init(states.size(), this->Letters(), 0, this->Lhs().RegexpsCount() + this->Rhs().RegexpsCount());
- for (size_t i = 0; i < states.size(); ++i)
+ for (size_t i = 0; i < states.size(); ++i)
this->Sc().SetTag(i, this->Lhs().m_tags[this->Lhs().StateIdx(states[i].first)] | (this->Rhs().m_tags[this->Rhs().StateIdx(states[i].second)] << 3));
- }
+ }
- void Connect(size_t from, size_t to, Char letter)
- {
+ void Connect(size_t from, size_t to, Char letter)
+ {
this->Sc().SetJump(from, letter, to,
Action(this->Lhs(), States[from].first, letter) | (Action(this->Rhs(), States[from].second, letter) << this->Lhs().RegexpsCount()));
- }
+ }
protected:
TVector<State> States;
TAction Action(const Scanner& sc, InternalState state, Char letter) const
- {
+ {
size_t state_index = sc.StateIdx(state);
size_t transition_index = sc.TransitionIndex(state_index, letter);
const auto& tr = sc.m_jumps[transition_index];
return tr.action;
- }
-};
-
+ }
+};
+
class NoGlueLimitCountingScannerGlueTask : public CountingScannerGlueTask<NoGlueLimitCountingScanner> {
public:
using ActionIndex = NoGlueLimitCountingScanner::ActionIndex;
@@ -980,18 +980,18 @@ private:
};
-}
+}
-CountingScanner CountingScanner::Glue(const CountingScanner& lhs, const CountingScanner& rhs, size_t maxSize /* = 0 */)
-{
+CountingScanner CountingScanner::Glue(const CountingScanner& lhs, const CountingScanner& rhs, size_t maxSize /* = 0 */)
+{
if (lhs.RegexpsCount() + rhs.RegexpsCount() > MAX_RE_COUNT) {
return CountingScanner();
}
static constexpr size_t DefMaxSize = 250000;
Impl::CountingScannerGlueTask<CountingScanner> task(lhs, rhs);
- return Impl::Determine(task, maxSize ? maxSize : DefMaxSize);
-}
-
+ return Impl::Determine(task, maxSize ? maxSize : DefMaxSize);
+}
+
AdvancedCountingScanner AdvancedCountingScanner::Glue(const AdvancedCountingScanner& lhs, const AdvancedCountingScanner& rhs, size_t maxSize /* = 0 */)
{
if (lhs.RegexpsCount() + rhs.RegexpsCount() > MAX_RE_COUNT) {
@@ -1000,7 +1000,7 @@ AdvancedCountingScanner AdvancedCountingScanner::Glue(const AdvancedCountingScan
static constexpr size_t DefMaxSize = 250000;
Impl::CountingScannerGlueTask<AdvancedCountingScanner> task(lhs, rhs);
return Impl::Determine(task, maxSize ? maxSize : DefMaxSize);
-}
+}
NoGlueLimitCountingScanner NoGlueLimitCountingScanner::Glue(const NoGlueLimitCountingScanner& lhs, const NoGlueLimitCountingScanner& rhs, size_t maxSize /* = 0 */)
{
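
Editor's note: the Glue() routines above merge two counting scanners so that both counters are updated in a single pass over the input. A sketch of the intended call pattern (not part of the diff), assuming the State::Result(i) accessor and the Step/Run helpers; the names are illustrative.

#include <contrib/libs/pire/pire/pire.h>
#include <contrib/libs/pire/pire/extra.h>
#include <string>
#include <utility>

// Sketch: glue two CountingScanners and read both per-regexp counters after
// one scan. Result index 0 belongs to `a`, index 1 to `b` (glue order).
std::pair<size_t, size_t> CountBoth(const Pire::CountingScanner& a,
                                    const Pire::CountingScanner& b,
                                    const std::string& text)
{
    // maxSize = 0 falls back to the default limit of 250000 states (see above).
    Pire::CountingScanner glued = Pire::CountingScanner::Glue(a, b);

    Pire::CountingScanner::State st;
    glued.Initialize(st);
    Pire::Step(glued, st, Pire::BeginMark);
    Pire::Run(glued, st, text.data(), text.data() + text.size());
    Pire::Step(glued, st, Pire::EndMark);
    return std::make_pair(st.Result(0), st.Result(1));
}
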
diff --git a/contrib/libs/pire/pire/extra/count.h b/contrib/libs/pire/pire/extra/count.h
index deaa4c23143..bd1526b98d0 100644
--- a/contrib/libs/pire/pire/extra/count.h
+++ b/contrib/libs/pire/pire/extra/count.h
@@ -1,38 +1,38 @@
-/*
- * count.h -- definition of the counting scanner
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * count.h -- definition of the counting scanner
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_EXTRA_COUNT_H
-#define PIRE_EXTRA_COUNT_H
-
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_EXTRA_COUNT_H
+#define PIRE_EXTRA_COUNT_H
+
#include <contrib/libs/pire/pire/scanners/loaded.h>
#include <contrib/libs/pire/pire/fsm.h>
-
+
#include <algorithm>
-namespace Pire {
-class Fsm;
+namespace Pire {
+class Fsm;
-namespace Impl {
+namespace Impl {
template<class T>
class ScannerGlueCommon;
@@ -43,8 +43,8 @@ namespace Impl {
template <class AdvancedScanner>
AdvancedScanner MakeAdvancedCountingScanner(const Fsm& re, const Fsm& sep, bool* simple);
-};
-
+};
+
template<size_t I>
class IncrementPerformer {
public:
@@ -110,38 +110,38 @@ public:
}
};
-/**
- * A scanner which counts occurrences of the
- * given regexp separated by another regexp
- * in input text.
- */
+/**
+ * A scanner which counts occurrences of the
+ * given regexp separated by another regexp
+ * in input text.
+ */
template<class DerivedScanner, class State>
class BaseCountingScanner: public LoadedScanner {
-public:
- enum {
- IncrementAction = 1,
- ResetAction = 2,
-
- FinalFlag = 0,
- DeadFlag = 1,
- };
-
- void Initialize(State& state) const
- {
- state.m_state = m.initial;
- memset(&state.m_current, 0, sizeof(state.m_current));
- memset(&state.m_total, 0, sizeof(state.m_total));
- state.m_updatedMask = 0;
- }
-
+public:
+ enum {
+ IncrementAction = 1,
+ ResetAction = 2,
+
+ FinalFlag = 0,
+ DeadFlag = 1,
+ };
+
+ void Initialize(State& state) const
+ {
+ state.m_state = m.initial;
+ memset(&state.m_current, 0, sizeof(state.m_current));
+ memset(&state.m_total, 0, sizeof(state.m_total));
+ state.m_updatedMask = 0;
+ }
+
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
void TakeAction(State& s, Action a) const
{
static_cast<const DerivedScanner*>(this)->template TakeActionImpl<MAX_RE_COUNT>(s, a);
}
- bool CanStop(const State&) const { return false; }
-
+ bool CanStop(const State&) const { return false; }
+
Char Translate(Char ch) const
{
return m_letters[static_cast<size_t>(ch)];
@@ -154,55 +154,55 @@ public:
return x.action;
}
- Action Next(State& s, Char c) const
- {
+ Action Next(State& s, Char c) const
+ {
return NextTranslated(s, Translate(c));
- }
-
- Action Next(const State& current, State& n, Char c) const
- {
- n = current;
- return Next(n, c);
- }
-
- bool Final(const State& /*state*/) const { return false; }
-
- bool Dead(const State&) const { return false; }
-
+ }
+
+ Action Next(const State& current, State& n, Char c) const
+ {
+ n = current;
+ return Next(n, c);
+ }
+
+ bool Final(const State& /*state*/) const { return false; }
+
+ bool Dead(const State&) const { return false; }
+
using LoadedScanner::Swap;
- size_t StateIndex(const State& s) const { return StateIdx(s.m_state); }
-
+ size_t StateIndex(const State& s) const { return StateIdx(s.m_state); }
+
protected:
- using LoadedScanner::Init;
+ using LoadedScanner::Init;
using LoadedScanner::InternalState;
-
+
template<size_t ActualReCount>
- void PerformIncrement(State& s, Action mask) const
- {
- if (mask) {
+ void PerformIncrement(State& s, Action mask) const
+ {
+ if (mask) {
IncrementPerformer<ActualReCount>::Do(s, mask);
- s.m_updatedMask |= ((size_t)mask) << MAX_RE_COUNT;
- }
- }
-
+ s.m_updatedMask |= ((size_t)mask) << MAX_RE_COUNT;
+ }
+ }
+
template<size_t ActualReCount>
- void PerformReset(State& s, Action mask) const
- {
- mask &= s.m_updatedMask;
- if (mask) {
+ void PerformReset(State& s, Action mask) const
+ {
+ mask &= s.m_updatedMask;
+ if (mask) {
ResetPerformer<ActualReCount>::Do(s, mask);
s.m_updatedMask &= (Action)~mask;
- }
- }
-
- void Next(InternalState& s, Char c) const
- {
+ }
+ }
+
+ void Next(InternalState& s, Char c) const
+ {
Transition x = reinterpret_cast<const Transition*>(s)[Translate(c)];
- s += SignExtend(x.shift);
- }
+ s += SignExtend(x.shift);
+ }
};
-
+
template <size_t MAX_RE_COUNT>
class CountingState {
public:
@@ -258,21 +258,21 @@ public:
}
private:
- Action RemapAction(Action action)
- {
- if (action == (Matched | DeadFlag))
- return 1;
- else if (action == DeadFlag)
- return 1 << MAX_RE_COUNT;
- else
- return 0;
- }
-
- friend void BuildScanner<CountingScanner>(const Fsm&, CountingScanner&);
- friend class Impl::ScannerGlueCommon<CountingScanner>;
+ Action RemapAction(Action action)
+ {
+ if (action == (Matched | DeadFlag))
+ return 1;
+ else if (action == DeadFlag)
+ return 1 << MAX_RE_COUNT;
+ else
+ return 0;
+ }
+
+ friend void BuildScanner<CountingScanner>(const Fsm&, CountingScanner&);
+ friend class Impl::ScannerGlueCommon<CountingScanner>;
friend class Impl::CountingScannerGlueTask<CountingScanner>;
-};
-
+};
+
class AdvancedCountingScanner : public BaseCountingScanner<AdvancedCountingScanner, CountingState<LoadedScanner::MAX_RE_COUNT>> {
public:
using State = CountingState<MAX_RE_COUNT>;
@@ -329,10 +329,10 @@ public:
++m_current[regexp_id];
m_total[regexp_id] = ymax(m_total[regexp_id], m_current[regexp_id]);
}
-
+
template<size_t I>
friend class IncrementPerformer;
-
+
template<size_t I>
friend class ResetPerformer;
@@ -352,7 +352,7 @@ private:
s << state.m_current[i] << '/' << state.m_total[i] << ' ';
return s << ')';
}
-#endif
+#endif
};
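
Editor's note: a short usage sketch for the counting scanner declared above (not part of the diff), assuming the State::Result(i) accessor; the word and separator patterns are illustrative.

#include <contrib/libs/pire/pire/pire.h>
#include <contrib/libs/pire/pire/extra.h>
#include <string>

// Sketch: count how many times `[a-z]+` occurs in `text`, with occurrences
// separated by runs of spaces, as the CountingScanner(re, sep) pair implies.
size_t CountWords(const std::string& text)
{
    std::string reStr  = "[a-z]+";
    std::string sepStr = "[ ]+";
    Pire::Fsm re  = Pire::Lexer(reStr.begin(),  reStr.end()).Parse();
    Pire::Fsm sep = Pire::Lexer(sepStr.begin(), sepStr.end()).Parse();

    Pire::CountingScanner scanner(re, sep);
    Pire::CountingScanner::State st;
    scanner.Initialize(st);
    Pire::Step(scanner, st, Pire::BeginMark);
    Pire::Run(scanner, st, text.data(), text.data() + text.size());
    Pire::Step(scanner, st, Pire::EndMark);
    return st.Result(0);        // the single counter of this scanner
}
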
diff --git a/contrib/libs/pire/pire/extra/glyphs.cpp b/contrib/libs/pire/pire/extra/glyphs.cpp
index 9bf7d1bd658..a14d2baa567 100644
--- a/contrib/libs/pire/pire/extra/glyphs.cpp
+++ b/contrib/libs/pire/pire/extra/glyphs.cpp
@@ -1,144 +1,144 @@
-/*
- * glyphs.cpp -- implementation for the GlueSimilarGlyphs feature.
+/*
+ * glyphs.cpp -- implementation for the GlueSimilarGlyphs feature.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#include <algorithm>
-#include <map>
-#include <list>
-#include <set>
-#include <vector>
-#include <utility>
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#include <algorithm>
+#include <map>
+#include <list>
+#include <set>
+#include <vector>
+#include <utility>
+
#include <contrib/libs/pire/pire/stub/singleton.h>
#include <contrib/libs/pire/pire/stub/noncopyable.h>
#include <contrib/libs/pire/pire/stub/utf8.h>
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/re_lexer.h>
-namespace Pire {
-
-namespace {
-
- /*
- * A class providing a function which returns a character
- * whose glyph resembles that of given char, if any;
- * otherwise returns given char itself.
- */
- class GlyphTable {
- private:
+namespace Pire {
+
+namespace {
+
+ /*
+ * A class providing a function which returns a character
+ * whose glyph resembles that of given char, if any;
+ * otherwise returns given char itself.
+ */
+ class GlyphTable {
+ private:
TList< TVector<wchar32> > m_classes;
TMap<wchar32, TVector<wchar32>*> m_map;
-
- struct GlyphClass {
+
+ struct GlyphClass {
TVector<wchar32>* m_class;
TMap<wchar32, TVector<wchar32>*> *m_map;
-
- GlyphClass& operator << (wchar32 c)
- {
- m_class->push_back(c);
- m_map->insert(ymake_pair(c, m_class));
- return *this;
- }
- };
-
- GlyphClass Class()
- {
- GlyphClass cl;
+
+ GlyphClass& operator << (wchar32 c)
+ {
+ m_class->push_back(c);
+ m_map->insert(ymake_pair(c, m_class));
+ return *this;
+ }
+ };
+
+ GlyphClass Class()
+ {
+ GlyphClass cl;
m_classes.push_back(TVector<wchar32>());
- cl.m_class = &m_classes.back();
- cl.m_map = &m_map;
- return cl;
- }
-
- public:
-
+ cl.m_class = &m_classes.back();
+ cl.m_map = &m_map;
+ return cl;
+ }
+
+ public:
+
const TVector<wchar32>& Klass(wchar32 x) const
- {
+ {
TMap<wchar32, TVector<wchar32>*>::const_iterator i = m_map.find(x);
- if (i != m_map.end())
- return *i->second;
- else
+ if (i != m_map.end())
+ return *i->second;
+ else
return DefaultValue< TVector<wchar32> >();
- }
-
- GlyphTable()
- {
- Class() << 'A' << 0x0410;
- Class() << 'B' << 0x0412;
- Class() << 'C' << 0x0421;
- Class() << 'E' << 0x0415 << 0x0401;
- Class() << 'H' << 0x041D;
- Class() << 'K' << 0x041A;
- Class() << 'M' << 0x041C;
- Class() << 'O' << 0x041E;
- Class() << 'P' << 0x0420;
- Class() << 'T' << 0x0422;
- Class() << 'X' << 0x0425;
-
- Class() << 'a' << 0x0430;
- Class() << 'c' << 0x0441;
- Class() << 'e' << 0x0435 << 0x0451;
- Class() << 'm' << 0x0442;
- Class() << 'o' << 0x043E;
- Class() << 'p' << 0x0440;
- Class() << 'u' << 0x0438;
- Class() << 'x' << 0x0445;
- Class() << 'y' << 0x0443;
- }
- };
-
- class GlueSimilarGlyphsImpl: public Feature {
- public:
- GlueSimilarGlyphsImpl(): m_table(Singleton<GlyphTable>()) {}
- int Priority() const { return 9; }
-
- void Alter(Term& t)
- {
- if (t.Value().IsA<Term::CharacterRange>()) {
- const Term::CharacterRange& range = t.Value().As<Term::CharacterRange>();
- typedef Term::CharacterRange::first_type CharSet;
- const CharSet& old = range.first;
- CharSet altered;
+ }
+
+ GlyphTable()
+ {
+ Class() << 'A' << 0x0410;
+ Class() << 'B' << 0x0412;
+ Class() << 'C' << 0x0421;
+ Class() << 'E' << 0x0415 << 0x0401;
+ Class() << 'H' << 0x041D;
+ Class() << 'K' << 0x041A;
+ Class() << 'M' << 0x041C;
+ Class() << 'O' << 0x041E;
+ Class() << 'P' << 0x0420;
+ Class() << 'T' << 0x0422;
+ Class() << 'X' << 0x0425;
+
+ Class() << 'a' << 0x0430;
+ Class() << 'c' << 0x0441;
+ Class() << 'e' << 0x0435 << 0x0451;
+ Class() << 'm' << 0x0442;
+ Class() << 'o' << 0x043E;
+ Class() << 'p' << 0x0440;
+ Class() << 'u' << 0x0438;
+ Class() << 'x' << 0x0445;
+ Class() << 'y' << 0x0443;
+ }
+ };
+
+ class GlueSimilarGlyphsImpl: public Feature {
+ public:
+ GlueSimilarGlyphsImpl(): m_table(Singleton<GlyphTable>()) {}
+ int Priority() const { return 9; }
+
+ void Alter(Term& t)
+ {
+ if (t.Value().IsA<Term::CharacterRange>()) {
+ const Term::CharacterRange& range = t.Value().As<Term::CharacterRange>();
+ typedef Term::CharacterRange::first_type CharSet;
+ const CharSet& old = range.first;
+ CharSet altered;
for (auto&& i : old) {
const TVector<wchar32>* klass = 0;
if (i.size() == 1 && !(klass = &m_table->Klass(i[0]))->empty())
for (auto&& j : *klass)
altered.insert(Term::String(1, j));
- else
+ else
altered.insert(i);
- }
-
- t = Term(t.Type(), Term::CharacterRange(altered, range.second));
- }
- }
-
- private:
- GlyphTable* m_table;
- };
-}
-
-namespace Features {
+ }
+
+ t = Term(t.Type(), Term::CharacterRange(altered, range.second));
+ }
+ }
+
+ private:
+ GlyphTable* m_table;
+ };
+}
+
+namespace Features {
Feature::Ptr GlueSimilarGlyphs() { return Feature::Ptr(new GlueSimilarGlyphsImpl); }
-}
-
-}
-
+}
+
+}
+
diff --git a/contrib/libs/pire/pire/extra/glyphs.h b/contrib/libs/pire/pire/extra/glyphs.h
index 07c42769512..678b9e15c4a 100644
--- a/contrib/libs/pire/pire/extra/glyphs.h
+++ b/contrib/libs/pire/pire/extra/glyphs.h
@@ -1,41 +1,41 @@
-/*
- * glyphs.h -- declaration of the GlueSimilarGlyphs feature.
+/*
+ * glyphs.h -- declaration of the GlueSimilarGlyphs feature.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_EXTRA_GLYPHS_H
-#define PIRE_EXTRA_GLYPHS_H
-
-
-namespace Pire {
-class Feature;
-namespace Features {
-
- /**
- * A feature which tells Pire not to distinguish latin
- * and cyrillic letters having identical shapes
- * (e.g. latin A and cyrillic A).
- */
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_EXTRA_GLYPHS_H
+#define PIRE_EXTRA_GLYPHS_H
+
+
+namespace Pire {
+class Feature;
+namespace Features {
+
+ /**
+ * A feature which tells Pire not to distinguish latin
+ * and cyrillic letters having identical shapes
+ * (e.g. latin A and cyrillic A).
+ */
Feature::Ptr GlueSimilarGlyphs();
-}
-}
-
-#endif
+}
+}
+
+#endif
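
Editor's note: a usage sketch for the feature declared above (not part of the diff). It assumes the chained Lexer API with SetEncoding(Pire::Encodings::Utf8()), since the glyph classes are Unicode code points; the function name is illustrative.

#include <contrib/libs/pire/pire/pire.h>
#include <contrib/libs/pire/pire/extra.h>
#include <string>

// Sketch: build a scanner that does not distinguish look-alike Latin and
// Cyrillic glyphs, so "copy" also matches the same word typed with
// Cyrillic 'с', 'о', 'р', 'у'.
Pire::NonrelocScanner MakeGlyphTolerantScanner(const std::string& pattern)
{
    return Pire::Lexer(pattern.begin(), pattern.end())
        .SetEncoding(Pire::Encodings::Utf8())
        .AddFeature(Pire::Features::GlueSimilarGlyphs())
        .Parse()
        .Compile<Pire::NonrelocScanner>();
}
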
diff --git a/contrib/libs/pire/pire/fsm.cpp b/contrib/libs/pire/pire/fsm.cpp
index f2216b3abad..984d708dfab 100644
--- a/contrib/libs/pire/pire/fsm.cpp
+++ b/contrib/libs/pire/pire/fsm.cpp
@@ -1,114 +1,114 @@
-/*
- * fsm.cpp -- the implementation of the FSM class.
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * fsm.cpp -- the implementation of the FSM class.
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#include <algorithm>
-#include <functional>
-#include <stdexcept>
-#include <iostream>
-#include <iterator>
-#include <numeric>
-#include <queue>
-#include <utility>
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#include <algorithm>
+#include <functional>
+#include <stdexcept>
+#include <iostream>
+#include <iterator>
+#include <numeric>
+#include <queue>
+#include <utility>
#include <iostream>
#include <stdio.h>
#include <contrib/libs/pire/pire/stub/lexical_cast.h>
-#include "fsm.h"
-#include "vbitset.h"
-#include "partition.h"
-#include "determine.h"
+#include "fsm.h"
+#include "vbitset.h"
+#include "partition.h"
+#include "determine.h"
#include "minimize.h"
-#include "platform.h"
-
-namespace Pire {
+#include "platform.h"
-ystring CharDump(Char c)
-{
- char buf[8];
+namespace Pire {
+
+ystring CharDump(Char c)
+{
+ char buf[8];
if (c == '"')
return ystring("\\\"");
else if (c == '[' || c == ']' || c == '-' || c == '^') {
snprintf(buf, sizeof(buf)-1, "\\\\%c", c);
return ystring(buf);
} else if (c >= 32 && c < 127)
- return ystring(1, static_cast<char>(c));
- else if (c == '\n')
+ return ystring(1, static_cast<char>(c));
+ else if (c == '\n')
return ystring("\\\\n");
- else if (c == '\t')
+ else if (c == '\t')
return ystring("\\\\t");
- else if (c == '\r')
+ else if (c == '\r')
return ystring("\\\\r");
- else if (c < 256) {
+ else if (c < 256) {
snprintf(buf, sizeof(buf)-1, "\\\\%03o", static_cast<int>(c));
- return ystring(buf);
- } else if (c == Epsilon)
- return ystring("<Epsilon>");
- else if (c == BeginMark)
- return ystring("<Begin>");
- else if (c == EndMark)
- return ystring("<End>");
- else
- return ystring("<?" "?" "?>");
-}
-
-void Fsm::DumpState(yostream& s, size_t state) const
-{
- // Fill in a 'row': Q -> exp(V) (for current state)
+ return ystring(buf);
+ } else if (c == Epsilon)
+ return ystring("<Epsilon>");
+ else if (c == BeginMark)
+ return ystring("<Begin>");
+ else if (c == EndMark)
+ return ystring("<End>");
+ else
+ return ystring("<?" "?" "?>");
+}
+
+void Fsm::DumpState(yostream& s, size_t state) const
+{
+ // Fill in a 'row': Q -> exp(V) (for current state)
TVector< ybitset<MaxChar> > row(Size());
for (auto&& transition : m_transitions[state])
for (auto&& transitionState : transition.second) {
if (transitionState >= Size()) {
std::cerr << "WTF?! Transition from " << state << " on letter " << transition.first << " leads to non-existing state " << transitionState << "\n";
Y_ASSERT(false);
- }
+ }
if (Letters().Contains(transition.first)) {
const TVector<Char>& letters = Letters().Klass(Letters().Representative(transition.first));
for (auto&& letter : letters)
row[transitionState].set(letter);
- } else
+ } else
row[transitionState].set(transition.first);
- }
-
+ }
+
bool statePrinted = false;
- // Display each destination state
+ // Display each destination state
for (auto rit = row.begin(), rie = row.end(); rit != rie; ++rit) {
- unsigned begin = 0, end = 0;
-
+ unsigned begin = 0, end = 0;
+
ystring delimiter;
ystring label;
- if (rit->test(Epsilon)) {
+ if (rit->test(Epsilon)) {
label += delimiter + CharDump(Epsilon);
delimiter = " ";
- }
- if (rit->test(BeginMark)) {
+ }
+ if (rit->test(BeginMark)) {
label += delimiter + CharDump(BeginMark);
delimiter = " ";
- }
- if (rit->test(EndMark)) {
+ }
+ if (rit->test(EndMark)) {
label += delimiter + CharDump(EndMark);
delimiter = " ";
- }
+ }
unsigned count = 0;
for (unsigned i = 0; i < 256; ++i)
if (rit->test(i))
@@ -130,13 +130,13 @@ void Fsm::DumpState(yostream& s, size_t state) const
label += CharDump(begin) + "-" + (CharDump(end-1));
delimiter = " ";
}
- }
+ }
label += "]";
delimiter = " ";
} else if (count == 256) {
label += delimiter + ".";
delimiter = " ";
- }
+ }
if (!label.empty()) {
if (!statePrinted) {
s << " " << state << "[shape=\"" << (IsFinal(state) ? "double" : "") << "circle\",label=\"" << state;
@@ -149,490 +149,490 @@ void Fsm::DumpState(yostream& s, size_t state) const
statePrinted = true;
}
s << " " << state << " -> " << std::distance(row.begin(), rit) << "[label=\"" << label;
-
- // Display outputs
+
+ // Display outputs
auto oit = outputs.find(state);
- if (oit != outputs.end()) {
+ if (oit != outputs.end()) {
auto oit2 = oit->second.find(std::distance(row.begin(), rit));
- if (oit2 == oit->second.end())
- ;
- else {
+ if (oit2 == oit->second.end())
+ ;
+ else {
TVector<int> payload;
- for (unsigned i = 0; i < sizeof(oit2->second) * 8; ++i)
- if (oit2->second & (1ul << i))
- payload.push_back(i);
- if (!payload.empty())
+ for (unsigned i = 0; i < sizeof(oit2->second) * 8; ++i)
+ if (oit2->second & (1ul << i))
+ payload.push_back(i);
+ if (!payload.empty())
s << " (outputs: " << Join(payload.begin(), payload.end(), ", ") << ")";
- }
- }
+ }
+ }
s << "\"]\n";
- }
- }
+ }
+ }
if (statePrinted)
s << '\n';
-}
-
+}
+
void Fsm::DumpTo(yostream& s, const ystring& name) const
-{
+{
s << "digraph {\n \"initial\"[shape=\"plaintext\",label=\"" << name << "\"]\n\n";
- for (size_t state = 0; state < Size(); ++state) {
- DumpState(s, state);
- }
+ for (size_t state = 0; state < Size(); ++state) {
+ DumpState(s, state);
+ }
s << "}\n\n";
-}
-
-yostream& operator << (yostream& s, const Fsm& fsm) { fsm.DumpTo(s); return s; }
-
-
-namespace {
- template<class Vector> void resizeVector(Vector& v, size_t s) { v.resize(s); }
-}
-
-Fsm::Fsm():
- m_transitions(1),
- initial(0),
- letters(m_transitions),
- m_sparsed(false),
- determined(false),
- isAlternative(false)
-{
- m_final.insert(0);
-}
-
-Fsm Fsm::MakeFalse()
-{
- Fsm f;
- f.SetFinal(0, false);
- return f;
-}
-
-Char Fsm::Translate(Char c) const
-{
- if (!m_sparsed || c == Epsilon)
- return c;
- else
- return Letters().Representative(c);
-}
-
-bool Fsm::Connected(size_t from, size_t to, Char c) const
-{
+}
+
+yostream& operator << (yostream& s, const Fsm& fsm) { fsm.DumpTo(s); return s; }
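
Editor's note: DumpTo() above emits Graphviz DOT. A small sketch of dumping an FSM for inspection (not part of the diff); yostream is the stub typedef (std::ostream in upstream Pire, IOutputStream in this tree), and the pattern is illustrative.

#include <contrib/libs/pire/pire/pire.h>
#include <string>

// Sketch: dump the parsed, still nondeterministic FSM of a pattern as DOT;
// render the output with `dot -Tsvg fsm.dot -o fsm.svg`.
void DumpPattern(yostream& out, const std::string& pattern)
{
    Pire::Fsm fsm = Pire::Lexer(pattern.begin(), pattern.end()).Parse();
    fsm.DumpTo(out, pattern);   // or simply: out << fsm;
}
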
+
+
+namespace {
+ template<class Vector> void resizeVector(Vector& v, size_t s) { v.resize(s); }
+}
+
+Fsm::Fsm():
+ m_transitions(1),
+ initial(0),
+ letters(m_transitions),
+ m_sparsed(false),
+ determined(false),
+ isAlternative(false)
+{
+ m_final.insert(0);
+}
+
+Fsm Fsm::MakeFalse()
+{
+ Fsm f;
+ f.SetFinal(0, false);
+ return f;
+}
+
+Char Fsm::Translate(Char c) const
+{
+ if (!m_sparsed || c == Epsilon)
+ return c;
+ else
+ return Letters().Representative(c);
+}
+
+bool Fsm::Connected(size_t from, size_t to, Char c) const
+{
auto it = m_transitions[from].find(Translate(c));
- return (it != m_transitions[from].end() && it->second.find(to) != it->second.end());
-}
-
-bool Fsm::Connected(size_t from, size_t to) const
-{
+ return (it != m_transitions[from].end() && it->second.find(to) != it->second.end());
+}
+
+bool Fsm::Connected(size_t from, size_t to) const
+{
for (auto i = m_transitions[from].begin(), ie = m_transitions[from].end(); i != ie; ++i)
- if (i->second.find(to) != i->second.end())
- return true;
- return false;
-}
-
-const Fsm::StatesSet& Fsm::Destinations(size_t from, Char c) const
-{
+ if (i->second.find(to) != i->second.end())
+ return true;
+ return false;
+}
+
+const Fsm::StatesSet& Fsm::Destinations(size_t from, Char c) const
+{
auto i = m_transitions[from].find(Translate(c));
- return (i != m_transitions[from].end()) ? i->second : DefaultValue<StatesSet>();
-}
-
+ return (i != m_transitions[from].end()) ? i->second : DefaultValue<StatesSet>();
+}
+
TSet<Char> Fsm::OutgoingLetters(size_t state) const
-{
+{
TSet<Char> ret;
for (auto&& i : m_transitions[state])
ret.insert(i.first);
- return ret;
-}
-
-size_t Fsm::Resize(size_t newSize)
-{
- size_t ret = Size();
- m_transitions.resize(newSize);
- return ret;
-}
-
-void Fsm::Swap(Fsm& fsm)
-{
- DoSwap(m_transitions, fsm.m_transitions);
- DoSwap(initial, fsm.initial);
- DoSwap(m_final, fsm.m_final);
- DoSwap(letters, fsm.letters);
- DoSwap(determined, fsm.determined);
- DoSwap(outputs, fsm.outputs);
- DoSwap(tags, fsm.tags);
- DoSwap(isAlternative, fsm.isAlternative);
-}
-
-void Fsm::SetFinal(size_t state, bool final)
-{
- if (final)
- m_final.insert(state);
- else
- m_final.erase(state);
-}
-
-Fsm& Fsm::AppendDot()
-{
- Resize(Size() + 1);
- for (size_t letter = 0; letter != (1 << (sizeof(char)*8)); ++letter)
- ConnectFinal(Size() - 1, letter);
- ClearFinal();
- SetFinal(Size() - 1, true);
- determined = false;
- return *this;
-}
-
-Fsm& Fsm::Append(char c)
-{
- Resize(Size() + 1);
- ConnectFinal(Size() - 1, static_cast<unsigned char>(c));
- ClearFinal();
- SetFinal(Size() - 1, true);
- determined = false;
- return *this;
-}
-
-Fsm& Fsm::Append(const ystring& str)
-{
+ return ret;
+}
+
+size_t Fsm::Resize(size_t newSize)
+{
+ size_t ret = Size();
+ m_transitions.resize(newSize);
+ return ret;
+}
+
+void Fsm::Swap(Fsm& fsm)
+{
+ DoSwap(m_transitions, fsm.m_transitions);
+ DoSwap(initial, fsm.initial);
+ DoSwap(m_final, fsm.m_final);
+ DoSwap(letters, fsm.letters);
+ DoSwap(determined, fsm.determined);
+ DoSwap(outputs, fsm.outputs);
+ DoSwap(tags, fsm.tags);
+ DoSwap(isAlternative, fsm.isAlternative);
+}
+
+void Fsm::SetFinal(size_t state, bool final)
+{
+ if (final)
+ m_final.insert(state);
+ else
+ m_final.erase(state);
+}
+
+Fsm& Fsm::AppendDot()
+{
+ Resize(Size() + 1);
+ for (size_t letter = 0; letter != (1 << (sizeof(char)*8)); ++letter)
+ ConnectFinal(Size() - 1, letter);
+ ClearFinal();
+ SetFinal(Size() - 1, true);
+ determined = false;
+ return *this;
+}
+
+Fsm& Fsm::Append(char c)
+{
+ Resize(Size() + 1);
+ ConnectFinal(Size() - 1, static_cast<unsigned char>(c));
+ ClearFinal();
+ SetFinal(Size() - 1, true);
+ determined = false;
+ return *this;
+}
+
+Fsm& Fsm::Append(const ystring& str)
+{
for (auto&& i : str)
Append(i);
- return *this;
-}
-
-Fsm& Fsm::AppendSpecial(Char c)
-{
- Resize(Size() + 1);
- ConnectFinal(Size() - 1, c);
- ClearFinal();
- SetFinal(Size() - 1, true);
- determined = false;
- return *this;
-}
-
+ return *this;
+}
+
+Fsm& Fsm::AppendSpecial(Char c)
+{
+ Resize(Size() + 1);
+ ConnectFinal(Size() - 1, c);
+ ClearFinal();
+ SetFinal(Size() - 1, true);
+ determined = false;
+ return *this;
+}
+
Fsm& Fsm::AppendStrings(const TVector<ystring>& strings)
-{
+{
for (auto&& i : strings)
if (i.empty())
- throw Error("None of strings passed to appendStrings() can be empty");
-
- Resize(Size() + 1);
- size_t end = Size() - 1;
-
- // A local transitions table: (oldstate, char) -> newstate.
- // Valid for all letters in given strings except final ones,
- // which are always connected to the end state.
-
- // NB: since each FSM contains at least one state,
- // state #0 cannot appear in LTRs. Thus we can use this
- // criteria to test whether a transition has been created or not.
- typedef ypair<size_t, char> Transition;
+ throw Error("None of strings passed to appendStrings() can be empty");
+
+ Resize(Size() + 1);
+ size_t end = Size() - 1;
+
+ // A local transitions table: (oldstate, char) -> newstate.
+ // Valid for all letters in given strings except final ones,
+ // which are always connected to the end state.
+
+ // NB: since each FSM contains at least one state,
+ // state #0 cannot appear in LTRs. Thus we can use this
+ // criteria to test whether a transition has been created or not.
+ typedef ypair<size_t, char> Transition;
TMap<char, size_t> startLtr;
TMap<Transition, size_t> ltr;
-
-	// A presence of a transition in this set indicates that
-	// the transition already points somewhere (either to end
-	// or somewhere else). Another attempt to create such a transition
-	// will clear the `determined` flag.
+
+	// A presence of a transition in this set indicates that
+	// the transition already points somewhere (either to end
+	// or somewhere else). Another attempt to create such a transition
+	// will clear the `determined` flag.
TSet<Transition> usedTransitions;
TSet<char> usedFirsts;
-
+
for (const auto& str : strings) {
- if (str.size() > 1) {
-
- // First letter: all previously final states are connected to the new state
- size_t& firstJump = startLtr[str[0]];
- if (!firstJump) {
- firstJump = Resize(Size() + 1);
- ConnectFinal(firstJump, static_cast<unsigned char>(str[0]));
- determined = determined && (usedFirsts.find(str[0]) != usedFirsts.end());
- }
-
- // All other letters except last one
- size_t state = firstJump;
+ if (str.size() > 1) {
+
+ // First letter: all previously final states are connected to the new state
+ size_t& firstJump = startLtr[str[0]];
+ if (!firstJump) {
+ firstJump = Resize(Size() + 1);
+ ConnectFinal(firstJump, static_cast<unsigned char>(str[0]));
+ determined = determined && (usedFirsts.find(str[0]) != usedFirsts.end());
+ }
+
+ // All other letters except last one
+ size_t state = firstJump;
for (auto cit = str.begin() + 1, cie = str.end() - 1; cit != cie; ++cit) {
- size_t& newState = ltr[ymake_pair(state, *cit)];
- if (!newState) {
- newState = Resize(Size() + 1);
- Connect(state, newState, static_cast<unsigned char>(*cit));
- determined = determined && (usedTransitions.find(ymake_pair(state, *cit)) != usedTransitions.end());
- }
- state = newState;
- }
-
- // The last letter: connect the current state to end
- unsigned char last = static_cast<unsigned char>(*(str.end() - 1));
- Connect(state, end, last);
- determined = determined && (usedTransitions.find(ymake_pair(state, last)) != usedTransitions.end());
-
- } else {
- // The single letter: connect all the previously final states to end
- ConnectFinal(end, static_cast<unsigned char>(str[0]));
- determined = determined && (usedFirsts.find(str[0]) != usedFirsts.end());
- }
- }
-
- ClearFinal();
- SetFinal(end, true);
- return *this;
-}
-
-void Fsm::Import(const Fsm& rhs)
-{
-// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "Importing");
-// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "=== Left-hand side ===\n" << *this);
-// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "=== Right-hand side ===\n" << rhs);
-
- size_t oldsize = Resize(Size() + rhs.Size());
-
+ size_t& newState = ltr[ymake_pair(state, *cit)];
+ if (!newState) {
+ newState = Resize(Size() + 1);
+ Connect(state, newState, static_cast<unsigned char>(*cit));
+ determined = determined && (usedTransitions.find(ymake_pair(state, *cit)) != usedTransitions.end());
+ }
+ state = newState;
+ }
+
+ // The last letter: connect the current state to end
+ unsigned char last = static_cast<unsigned char>(*(str.end() - 1));
+ Connect(state, end, last);
+ determined = determined && (usedTransitions.find(ymake_pair(state, last)) != usedTransitions.end());
+
+ } else {
+ // The single letter: connect all the previously final states to end
+ ConnectFinal(end, static_cast<unsigned char>(str[0]));
+ determined = determined && (usedFirsts.find(str[0]) != usedFirsts.end());
+ }
+ }
+
+ ClearFinal();
+ SetFinal(end, true);
+ return *this;
+}
+
+void Fsm::Import(const Fsm& rhs)
+{
+// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "Importing");
+// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "=== Left-hand side ===\n" << *this);
+// PIRE_IFDEBUG(LOG_DEBUG("fsm") << "=== Right-hand side ===\n" << rhs);
+
+ size_t oldsize = Resize(Size() + rhs.Size());
+
for (auto&& outer : m_transitions) {
for (auto&& letter : letters) {
auto targets = outer.find(letter.first);
if (targets == outer.end())
- continue;
+ continue;
for (auto&& character : letter.second.second)
if (character != letter.first)
outer.insert(ymake_pair(character, targets->second));
- }
- }
-
+ }
+ }
+
auto dest = m_transitions.begin() + oldsize;
for (auto outer = rhs.m_transitions.begin(), outerEnd = rhs.m_transitions.end(); outer != outerEnd; ++outer, ++dest) {
for (auto&& inner : *outer) {
TSet<size_t> targets;
std::transform(inner.second.begin(), inner.second.end(), std::inserter(targets, targets.begin()),
- std::bind2nd(std::plus<size_t>(), oldsize));
+ std::bind2nd(std::plus<size_t>(), oldsize));
dest->insert(ymake_pair(inner.first, targets));
- }
-
+ }
+
for (auto&& letter : rhs.letters) {
auto targets = dest->find(letter.first);
if (targets == dest->end())
- continue;
+ continue;
for (auto&& character : letter.second.second)
if (character != letter.first)
dest->insert(ymake_pair(character, targets->second));
- }
- }
-
- // Import outputs
+ }
+ }
+
+ // Import outputs
for (auto&& output : rhs.outputs) {
auto& dest = outputs[output.first + oldsize];
for (auto&& element : output.second)
dest.insert(ymake_pair(element.first + oldsize, element.second));
- }
-
- // Import tags
+ }
+
+ // Import tags
for (auto&& tag : rhs.tags)
tags.insert(ymake_pair(tag.first + oldsize, tag.second));
-
- letters = LettersTbl(LettersEquality(m_transitions));
-}
-
-void Fsm::Connect(size_t from, size_t to, Char c /* = Epsilon */)
-{
- m_transitions[from][c].insert(to);
- ClearHints();
-}
-
-void Fsm::ConnectFinal(size_t to, Char c /* = Epsilon */)
-{
+
+ letters = LettersTbl(LettersEquality(m_transitions));
+}
+
+void Fsm::Connect(size_t from, size_t to, Char c /* = Epsilon */)
+{
+ m_transitions[from][c].insert(to);
+ ClearHints();
+}
+
+void Fsm::ConnectFinal(size_t to, Char c /* = Epsilon */)
+{
for (auto&& final : m_final)
Connect(final, to, c);
- ClearHints();
-}
-
-void Fsm::Disconnect(size_t from, size_t to, Char c)
-{
+ ClearHints();
+}
+
+void Fsm::Disconnect(size_t from, size_t to, Char c)
+{
auto i = m_transitions[from].find(c);
- if (i != m_transitions[from].end())
- i->second.erase(to);
- ClearHints();
-}
-
-void Fsm::Disconnect(size_t from, size_t to)
-{
+ if (i != m_transitions[from].end())
+ i->second.erase(to);
+ ClearHints();
+}
+
+void Fsm::Disconnect(size_t from, size_t to)
+{
for (auto&& i : m_transitions[from])
i.second.erase(to);
- ClearHints();
-}
-
-unsigned long Fsm::Output(size_t from, size_t to) const
-{
+ ClearHints();
+}
+
+unsigned long Fsm::Output(size_t from, size_t to) const
+{
auto i = outputs.find(from);
- if (i == outputs.end())
- return 0;
+ if (i == outputs.end())
+ return 0;
auto j = i->second.find(to);
- if (j == i->second.end())
- return 0;
- else
- return j->second;
-}
-
-Fsm& Fsm::operator += (const Fsm& rhs)
-{
- size_t lhsSize = Size();
- Import(rhs);
-
- const TransitionRow& row = m_transitions[lhsSize + rhs.initial];
-
+ if (j == i->second.end())
+ return 0;
+ else
+ return j->second;
+}
+
+Fsm& Fsm::operator += (const Fsm& rhs)
+{
+ size_t lhsSize = Size();
+ Import(rhs);
+
+ const TransitionRow& row = m_transitions[lhsSize + rhs.initial];
+
for (auto&& outer : row)
for (auto&& inner : outer.second)
ConnectFinal(inner, outer.first);
-
+
auto out = rhs.outputs.find(rhs.initial);
- if (out != rhs.outputs.end())
+ if (out != rhs.outputs.end())
for (auto&& toAndOutput : out->second) {
for (auto&& final : m_final)
outputs[final].insert(ymake_pair(toAndOutput.first + lhsSize, toAndOutput.second));
- }
-
- ClearFinal();
+ }
+
+ ClearFinal();
for (auto&& letter : rhs.m_final)
SetFinal(letter + lhsSize, true);
- determined = false;
-
- ClearHints();
- PIRE_IFDEBUG(Cdbg << "=== After addition ===" << Endl << *this << Endl);
-
- return *this;
-}
-
-Fsm& Fsm::operator |= (const Fsm& rhs)
-{
- size_t lhsSize = Size();
-
- Import(rhs);
+ determined = false;
+
+ ClearHints();
+ PIRE_IFDEBUG(Cdbg << "=== After addition ===" << Endl << *this << Endl);
+
+ return *this;
+}
+
+Fsm& Fsm::operator |= (const Fsm& rhs)
+{
+ size_t lhsSize = Size();
+
+ Import(rhs);
for (auto&& final : rhs.m_final)
m_final.insert(final + lhsSize);
- if (!isAlternative && !rhs.isAlternative) {
- Resize(Size() + 1);
- Connect(Size() - 1, initial);
- Connect(Size() - 1, lhsSize + rhs.initial);
- initial = Size() - 1;
- } else if (isAlternative && !rhs.isAlternative) {
- Connect(initial, lhsSize + rhs.initial, Epsilon);
- } else if (!isAlternative && rhs.isAlternative) {
- Connect(lhsSize + rhs.initial, initial, Epsilon);
- initial = rhs.initial + lhsSize;
- } else if (isAlternative && rhs.isAlternative) {
- const StatesSet& tos = rhs.Destinations(rhs.initial, Epsilon);
+ if (!isAlternative && !rhs.isAlternative) {
+ Resize(Size() + 1);
+ Connect(Size() - 1, initial);
+ Connect(Size() - 1, lhsSize + rhs.initial);
+ initial = Size() - 1;
+ } else if (isAlternative && !rhs.isAlternative) {
+ Connect(initial, lhsSize + rhs.initial, Epsilon);
+ } else if (!isAlternative && rhs.isAlternative) {
+ Connect(lhsSize + rhs.initial, initial, Epsilon);
+ initial = rhs.initial + lhsSize;
+ } else if (isAlternative && rhs.isAlternative) {
+ const StatesSet& tos = rhs.Destinations(rhs.initial, Epsilon);
for (auto&& to : tos) {
Connect(initial, to + lhsSize, Epsilon);
Disconnect(rhs.initial + lhsSize, to + lhsSize, Epsilon);
- }
- }
-
- determined = false;
- isAlternative = true;
- return *this;
-}
-
-Fsm& Fsm::operator &= (const Fsm& rhs)
-{
- Fsm rhs2(rhs);
- Complement();
- rhs2.Complement();
- *this |= rhs2;
- Complement();
- return *this;
-}
-
-Fsm& Fsm::Iterate()
-{
- PIRE_IFDEBUG(Cdbg << "Iterating:" << Endl << *this << Endl);
- Resize(Size() + 2);
-
- Connect(Size() - 2, Size() - 1);
- Connect(Size() - 2, initial);
- ConnectFinal(initial);
- ConnectFinal(Size() - 1);
-
- ClearFinal();
- SetFinal(Size() - 1, true);
- initial = Size() - 2;
-
- determined = false;
-
- PIRE_IFDEBUG(Cdbg << "Iterated:" << Endl << *this << Endl);
- return *this;
-}
-
-Fsm& Fsm::Complement()
-{
- if (!Determine())
- throw Error("Regexp pattern too complicated");
- Minimize();
- Resize(Size() + 1);
- for (size_t i = 0; i < Size(); ++i)
- if (!IsFinal(i))
- Connect(i, Size() - 1);
- ClearFinal();
- SetFinal(Size() - 1, true);
- determined = false;
-
- return *this;
-}
-
+ }
+ }
+
+ determined = false;
+ isAlternative = true;
+ return *this;
+}
+
+Fsm& Fsm::operator &= (const Fsm& rhs)
+{
+ Fsm rhs2(rhs);
+ Complement();
+ rhs2.Complement();
+ *this |= rhs2;
+ Complement();
+ return *this;
+}
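
The conjunction above is the textbook De Morgan construction: complement both operands, take their union, and complement the result; note that each Complement() call determinizes, so it can throw for overly complex patterns. A minimal sketch of the equivalence, assuming only the operators declared in fsm.h:

#include <contrib/libs/pire/pire/fsm.h>

// Both helpers accept the same language by construction:
// L(a & b) == complement( complement(L(a)) union complement(L(b)) ).
Pire::Fsm IntersectViaOperator(const Pire::Fsm& a, const Pire::Fsm& b) { return a & b; }
Pire::Fsm IntersectDeMorgan(const Pire::Fsm& a, const Pire::Fsm& b) { return ~(~a | ~b); }
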
+
+Fsm& Fsm::Iterate()
+{
+ PIRE_IFDEBUG(Cdbg << "Iterating:" << Endl << *this << Endl);
+ Resize(Size() + 2);
+
+ Connect(Size() - 2, Size() - 1);
+ Connect(Size() - 2, initial);
+ ConnectFinal(initial);
+ ConnectFinal(Size() - 1);
+
+ ClearFinal();
+ SetFinal(Size() - 1, true);
+ initial = Size() - 2;
+
+ determined = false;
+
+ PIRE_IFDEBUG(Cdbg << "Iterated:" << Endl << *this << Endl);
+ return *this;
+}
+
+Fsm& Fsm::Complement()
+{
+ if (!Determine())
+ throw Error("Regexp pattern too complicated");
+ Minimize();
+ Resize(Size() + 1);
+ for (size_t i = 0; i < Size(); ++i)
+ if (!IsFinal(i))
+ Connect(i, Size() - 1);
+ ClearFinal();
+ SetFinal(Size() - 1, true);
+ determined = false;
+
+ return *this;
+}
+
Fsm Fsm::operator *(size_t count) const
-{
- Fsm ret;
- while (count--)
- ret += *this;
- return ret;
-}
-
-void Fsm::MakePrefix()
-{
- RemoveDeadEnds();
- for (size_t i = 0; i < Size(); ++i)
- if (!m_transitions[i].empty())
- m_final.insert(i);
- ClearHints();
-}
-
-void Fsm::MakeSuffix()
-{
- for (size_t i = 0; i < Size(); ++i)
- if (i != initial)
- Connect(initial, i);
- ClearHints();
-}
-
-Fsm& Fsm::Reverse()
{
- Fsm out;
- out.Resize(Size() + 1);
- out.letters = Letters();
+ Fsm ret;
+ while (count--)
+ ret += *this;
+ return ret;
+}
- // Invert transitions
- for (size_t from = 0; from < Size(); ++from)
+void Fsm::MakePrefix()
+{
+ RemoveDeadEnds();
+ for (size_t i = 0; i < Size(); ++i)
+ if (!m_transitions[i].empty())
+ m_final.insert(i);
+ ClearHints();
+}
+
+void Fsm::MakeSuffix()
+{
+ for (size_t i = 0; i < Size(); ++i)
+ if (i != initial)
+ Connect(initial, i);
+ ClearHints();
+}
+
+Fsm& Fsm::Reverse()
+{
+ Fsm out;
+ out.Resize(Size() + 1);
+ out.letters = Letters();
+
+ // Invert transitions
+ for (size_t from = 0; from < Size(); ++from)
for (auto&& i : m_transitions[from])
for (auto&& j : i.second)
out.Connect(j, from, i.first);
- // Invert initial and final states
+ // Invert initial and final states
out.m_final.clear();
- out.SetFinal(initial, true);
+ out.SetFinal(initial, true);
for (auto i : m_final)
out.Connect(Size(), i, Epsilon);
- out.SetInitial(Size());
+ out.SetInitial(Size());
- // Invert outputs
+ // Invert outputs
for (auto&& i : outputs)
for (auto&& j : i.second)
out.SetOutput(j.first, i.first, j.second);
- // Preserve tags (although thier semantics are usually heavily broken at this point)
- out.tags = tags;
+	// Preserve tags (although their semantics are usually heavily broken at this point)
+ out.tags = tags;
+
+ // Apply
+ Swap(out);
+ return *this;
+}
- // Apply
- Swap(out);
- return *this;
-}
-
TSet<size_t> Fsm::DeadStates() const
-{
+{
TSet<size_t> res;
for (int invert = 0; invert <= 1; ++invert) {
@@ -649,26 +649,26 @@ TSet<size_t> Fsm::DeadStates() const
digraph.Connect(j - m_transitions.begin(), *toSt, 0);
}
}
- }
- }
-
+ }
+ }
+
TVector<bool> unchecked(Size(), true);
TVector<bool> useless(Size(), true);
TDeque<size_t> queue;
-
+
// Put all final (or initial) states into queue, marking them useful
for (size_t i = 0; i < Size(); ++i)
if ((invert && IsFinal(i)) || (!invert && Initial() == i)) {
useless[i] = false;
queue.push_back(i);
}
-
+
// Do the breadth-first search, marking all states
// from which already marked states are reachable
while (!queue.empty()) {
size_t to = queue.front();
queue.pop_front();
-
+
// All the states that are connected to this state in the transition matrix are useful
const StatesSet& connections = (digraph.m_transitions[to])[0];
for (auto&& fr : connections) {
@@ -677,310 +677,310 @@ TSet<size_t> Fsm::DeadStates() const
useless[fr] = false;
queue.push_back(fr);
}
- }
+ }
// Now we consider this state checked
unchecked[to] = false;
- }
-
+ }
+
for (size_t i = 0; i < Size(); ++i) {
if (useless[i]) {
res.insert(i);
}
- }
- }
-
- return res;
-}
-
-void Fsm::RemoveDeadEnds()
-{
- PIRE_IFDEBUG(Cdbg << "Removing dead ends on:" << Endl << *this << Endl);
-
+ }
+ }
+
+ return res;
+}
+
+void Fsm::RemoveDeadEnds()
+{
+ PIRE_IFDEBUG(Cdbg << "Removing dead ends on:" << Endl << *this << Endl);
+
TSet<size_t> dead = DeadStates();
- // Erase all useless states
+ // Erase all useless states
for (auto&& i : dead) {
PIRE_IFDEBUG(Cdbg << "Removing useless state " << i << Endl);
m_transitions[i].clear();
for (auto&& j : m_transitions)
for (auto&& k : j)
k.second.erase(i);
- }
- ClearHints();
-
- PIRE_IFDEBUG(Cdbg << "Result:" << Endl << *this << Endl);
-}
-
-// This method is one step of Epsilon-connection removal algorithm.
-// It merges transitions, tags, and outputs of 'to' state into 'from' state
-void Fsm::MergeEpsilonConnection(size_t from, size_t to)
-{
- unsigned long frEpsOutput = 0;
- bool fsEpsOutputExists = false;
-
- // Is there an output for 'from'->'to' transition?
- if (outputs.find(from) != outputs.end() && outputs[from].find(to) != outputs[from].end()) {
- frEpsOutput = outputs[from][to];
- fsEpsOutputExists = true;
- }
-
- // Merge transitions from 'to' state into transitions from 'from' state
+ }
+ ClearHints();
+
+ PIRE_IFDEBUG(Cdbg << "Result:" << Endl << *this << Endl);
+}
+
+// This method is one step of the Epsilon-connection removal algorithm.
+// It merges the transitions, tags, and outputs of the 'to' state into the 'from' state.
+void Fsm::MergeEpsilonConnection(size_t from, size_t to)
+{
+ unsigned long frEpsOutput = 0;
+ bool fsEpsOutputExists = false;
+
+ // Is there an output for 'from'->'to' transition?
+ if (outputs.find(from) != outputs.end() && outputs[from].find(to) != outputs[from].end()) {
+ frEpsOutput = outputs[from][to];
+ fsEpsOutputExists = true;
+ }
+
+ // Merge transitions from 'to' state into transitions from 'from' state
for (auto&& transition : m_transitions[to]) {
TSet<size_t> connStates;
std::copy(transition.second.begin(), transition.second.end(),
std::inserter(m_transitions[from][transition.first], m_transitions[from][transition.first].end()));
-
- // If there is an output of the 'from'->'to' connection it has to be set to all
- // new connections that were merged from 'to' state
- if (fsEpsOutputExists) {
- // Compute the set of states that are reachable from 'to' state
+
+		// If the 'from'->'to' connection has an output, it has to be set on all
+		// new connections that were merged from the 'to' state
+ if (fsEpsOutputExists) {
+ // Compute the set of states that are reachable from 'to' state
std::copy(transition.second.begin(), transition.second.end(), std::inserter(connStates, connStates.end()));
-
- // For each of these states add an output equal to the Epsilon-connection output
+
+ // For each of these states add an output equal to the Epsilon-connection output
for (auto&& newConnSt : connStates) {
outputs[from][newConnSt] |= frEpsOutput;
- }
- }
- }
-
- // Mark 'from' state final if 'to' state is final
- if (IsFinal(to))
- SetFinal(from, true);
-
- // Combine tags
+ }
+ }
+ }
+
+ // Mark 'from' state final if 'to' state is final
+ if (IsFinal(to))
+ SetFinal(from, true);
+
+ // Combine tags
auto ti = tags.find(to);
- if (ti != tags.end())
- tags[from] |= ti->second;
-
- // Merge all 'to' into 'from' outputs:
- // outputs[from][i] |= (outputs[from][to] | outputs[to][i])
+ if (ti != tags.end())
+ tags[from] |= ti->second;
+
+ // Merge all 'to' into 'from' outputs:
+ // outputs[from][i] |= (outputs[from][to] | outputs[to][i])
auto toOit = outputs.find(to);
- if (toOit != outputs.end()) {
+ if (toOit != outputs.end()) {
for (auto&& output : toOit->second) {
outputs[from][output.first] |= (frEpsOutput | output.second);
- }
- }
-}
-
-// Assuming the epsilon transitions is possible from 'from' to 'thru',
-// finds all states which are Epsilon-reachable from 'thru' and connects
-// them directly to 'from' with Epsilon transition having proper output.
-// Updates inverse map of epsilon transitions as well.
+ }
+ }
+}
+
+// Assuming an epsilon transition is possible from 'from' to 'thru',
+// finds all states which are Epsilon-reachable from 'thru' and connects
+// them directly to 'from' with an Epsilon transition carrying the proper output.
+// Updates the inverse map of epsilon transitions as well.
void Fsm::ShortCutEpsilon(size_t from, size_t thru, TVector< TSet<size_t> >& inveps)
-{
- PIRE_IFDEBUG(Cdbg << "In Fsm::ShortCutEpsilon(" << from << ", " << thru << ")\n");
- const StatesSet& to = Destinations(thru, Epsilon);
- Outputs::iterator outIt = outputs.find(from);
- unsigned long fromThruOut = Output(from, thru);
+{
+ PIRE_IFDEBUG(Cdbg << "In Fsm::ShortCutEpsilon(" << from << ", " << thru << ")\n");
+ const StatesSet& to = Destinations(thru, Epsilon);
+ Outputs::iterator outIt = outputs.find(from);
+ unsigned long fromThruOut = Output(from, thru);
for (auto&& toElement : to) {
PIRE_IFDEBUG(Cdbg << "Epsilon connecting " << from << " --> " << thru << " --> " << toElement << "\n");
Connect(from, toElement, Epsilon);
inveps[toElement].insert(from);
- if (outIt != outputs.end())
+ if (outIt != outputs.end())
outIt->second[toElement] |= (fromThruOut | Output(thru, toElement));
}
-}
-
-// Removes all Epsilon-connections by iterating though states and merging each Epsilon-connection
-// effects from 'to' state into 'from' state
-void Fsm::RemoveEpsilons()
-{
- Unsparse();
-
- // Build inverse map of epsilon transitions
+}
+
+// Removes all Epsilon-connections by iterating through the states and merging each Epsilon-connection's
+// effects from the 'to' state into the 'from' state
+void Fsm::RemoveEpsilons()
+{
+ Unsparse();
+
+ // Build inverse map of epsilon transitions
TVector< TSet<size_t> > inveps(Size()); // We have to use TSet<> here since we want it sorted
- for (size_t from = 0; from != Size(); ++from) {
- const StatesSet& tos = Destinations(from, Epsilon);
+ for (size_t from = 0; from != Size(); ++from) {
+ const StatesSet& tos = Destinations(from, Epsilon);
for (auto&& to : tos)
inveps[to].insert(from);
- }
+ }
- // Make a transitive closure of all epsilon transitions (Floyd-Warshall algorithm)
- // (if there exists an epsilon-path between two states, epsilon-connect them directly)
- for (size_t thru = 0; thru != Size(); ++thru)
+ // Make a transitive closure of all epsilon transitions (Floyd-Warshall algorithm)
+ // (if there exists an epsilon-path between two states, epsilon-connect them directly)
+ for (size_t thru = 0; thru != Size(); ++thru)
for (auto&& from : inveps[thru])
- // inveps[thru] may alter during loop body, hence we cannot cache ivneps[thru].end()
+			// inveps[thru] may change during the loop body, hence we cannot cache inveps[thru].end()
if (from != thru)
ShortCutEpsilon(from, thru, inveps);
- PIRE_IFDEBUG(Cdbg << "=== After epsilons shortcut\n" << *this << Endl);
+ PIRE_IFDEBUG(Cdbg << "=== After epsilons shortcut\n" << *this << Endl);
- // Iterate through all epsilon-connected state pairs, merging states together
- for (size_t from = 0; from != Size(); ++from) {
- const StatesSet& to = Destinations(from, Epsilon);
+ // Iterate through all epsilon-connected state pairs, merging states together
+ for (size_t from = 0; from != Size(); ++from) {
+ const StatesSet& to = Destinations(from, Epsilon);
for (auto&& toElement : to)
if (toElement != from)
MergeEpsilonConnection(from, toElement); // it's a NOP if to == from, so don't waste time
- }
+ }
- PIRE_IFDEBUG(Cdbg << "=== After epsilons merged\n" << *this << Endl);
+ PIRE_IFDEBUG(Cdbg << "=== After epsilons merged\n" << *this << Endl);
- // Drop all epsilon transitions
+ // Drop all epsilon transitions
for (auto&& i : m_transitions)
i.erase(Epsilon);
- Sparse();
- ClearHints();
-}
-
-bool Fsm::LettersEquality::operator()(Char a, Char b) const
-{
+ Sparse();
+ ClearHints();
+}
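
The closure pass above is ordinary Warshall-style reachability over epsilon edges. A self-contained sketch of the same step on a bare adjacency structure (standard C++ only, no Pire types; names are illustrative):

#include <cstddef>
#include <set>
#include <vector>

// eps[i] holds the states reachable from i by a single epsilon transition.
// After the sweep, eps[i] holds every state reachable from i by any epsilon path.
std::vector<std::set<size_t>> EpsilonClosure(std::vector<std::set<size_t>> eps)
{
	for (size_t thru = 0; thru != eps.size(); ++thru)        // intermediate state
		for (size_t from = 0; from != eps.size(); ++from)    // source state
			if (from != thru && eps[from].count(thru))       // from --eps--> thru exists
				eps[from].insert(eps[thru].begin(), eps[thru].end());
	return eps;
}
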
+
+bool Fsm::LettersEquality::operator()(Char a, Char b) const
+{
for (auto&& outer : *m_tbl) {
auto ia = outer.find(a);
auto ib = outer.find(b);
if (ia == outer.end() && ib == outer.end())
- continue;
+ continue;
else if (ia == outer.end() || ib == outer.end() || ia->second != ib->second) {
- return false;
- }
- }
- return true;
-}
-
+ return false;
+ }
+ }
+ return true;
+}
+
void Fsm::Sparse(bool needEpsilons /* = false */)
-{
- letters = LettersTbl(LettersEquality(m_transitions));
- for (unsigned letter = 0; letter < MaxChar; ++letter)
+{
+ letters = LettersTbl(LettersEquality(m_transitions));
+ for (unsigned letter = 0; letter < MaxChar; ++letter)
if (letter != Epsilon || needEpsilons)
- letters.Append(letter);
-
- m_sparsed = true;
- PIRE_IFDEBUG(Cdbg << "Letter classes = " << letters << Endl);
-}
-
-void Fsm::Unsparse()
-{
+ letters.Append(letter);
+
+ m_sparsed = true;
+ PIRE_IFDEBUG(Cdbg << "Letter classes = " << letters << Endl);
+}
+
+void Fsm::Unsparse()
+{
for (auto&& letter : letters)
for (auto&& i : m_transitions)
for (auto&& j : letter.second.second)
i[j] = i[letter.first];
- m_sparsed = false;
-}
-
-// Returns a set of 'terminal states', which are those of the final states,
-// from which a transition to themselves on any letter is possible.
+ m_sparsed = false;
+}
+
+// Returns the set of 'terminal states': the final states
+// from which a transition to themselves exists on every letter.
TSet<size_t> Fsm::TerminalStates() const
-{
+{
TSet<size_t> terminals;
for (auto&& final : m_final) {
- bool ok = true;
+ bool ok = true;
for (auto&& letter : letters) {
auto dests = m_transitions[final].find(letter.first);
ok = ok && (dests != m_transitions[final].end() && dests->second.find(final) != dests->second.end());
- }
- if (ok)
+ }
+ if (ok)
terminals.insert(final);
- }
- return terminals;
-}
-
-namespace Impl {
-class FsmDetermineTask {
-public:
+ }
+ return terminals;
+}
+
+namespace Impl {
+class FsmDetermineTask {
+public:
typedef TVector<size_t> State;
- typedef Fsm::LettersTbl LettersTbl;
+ typedef Fsm::LettersTbl LettersTbl;
typedef TMap<State, size_t> InvStates;
- FsmDetermineTask(const Fsm& fsm)
- : mFsm(fsm)
- , mTerminals(fsm.TerminalStates())
- {
- PIRE_IFDEBUG(Cdbg << "Terminal states: [" << Join(mTerminals.begin(), mTerminals.end(), ", ") << "]" << Endl);
- }
- const LettersTbl& Letters() const { return mFsm.letters; }
-
- State Initial() const { return State(1, mFsm.initial); }
- bool IsRequired(const State& state) const
- {
+ FsmDetermineTask(const Fsm& fsm)
+ : mFsm(fsm)
+ , mTerminals(fsm.TerminalStates())
+ {
+ PIRE_IFDEBUG(Cdbg << "Terminal states: [" << Join(mTerminals.begin(), mTerminals.end(), ", ") << "]" << Endl);
+ }
+ const LettersTbl& Letters() const { return mFsm.letters; }
+
+ State Initial() const { return State(1, mFsm.initial); }
+ bool IsRequired(const State& state) const
+ {
for (auto&& i : state)
if (mTerminals.find(i) != mTerminals.end())
- return false;
- return true;
- }
-
- State Next(const State& state, Char letter) const
- {
- State next;
- next.reserve(20);
+ return false;
+ return true;
+ }
+
+ State Next(const State& state, Char letter) const
+ {
+ State next;
+ next.reserve(20);
for (auto&& from : state) {
const auto& part = mFsm.Destinations(from, letter);
- std::copy(part.begin(), part.end(), std::back_inserter(next));
- }
-
- std::sort(next.begin(), next.end());
- next.erase(std::unique(next.begin(), next.end()), next.end());
- PIRE_IFDEBUG(Cdbg << "Returning transition [" << Join(state.begin(), state.end(), ", ") << "] --" << letter
- << "--> [" << Join(next.begin(), next.end(), ", ") << "]" << Endl);
- return next;
- }
+ std::copy(part.begin(), part.end(), std::back_inserter(next));
+ }
+
+ std::sort(next.begin(), next.end());
+ next.erase(std::unique(next.begin(), next.end()), next.end());
+ PIRE_IFDEBUG(Cdbg << "Returning transition [" << Join(state.begin(), state.end(), ", ") << "] --" << letter
+ << "--> [" << Join(next.begin(), next.end(), ", ") << "]" << Endl);
+ return next;
+ }
void AcceptStates(const TVector<State>& states)
- {
- mNewFsm.Resize(states.size());
- mNewFsm.initial = 0;
- mNewFsm.determined = true;
- mNewFsm.letters = Letters();
- mNewFsm.m_final.clear();
- for (size_t ns = 0; ns < states.size(); ++ns) {
- PIRE_IFDEBUG(Cdbg << "State " << ns << " = [" << Join(states[ns].begin(), states[ns].end(), ", ") << "]" << Endl);
+ {
+ mNewFsm.Resize(states.size());
+ mNewFsm.initial = 0;
+ mNewFsm.determined = true;
+ mNewFsm.letters = Letters();
+ mNewFsm.m_final.clear();
+ for (size_t ns = 0; ns < states.size(); ++ns) {
+ PIRE_IFDEBUG(Cdbg << "State " << ns << " = [" << Join(states[ns].begin(), states[ns].end(), ", ") << "]" << Endl);
for (auto&& j : states[ns]) {
- // If it was a terminal state, connect it to itself
+ // If it was a terminal state, connect it to itself
if (mTerminals.find(j) != mTerminals.end()) {
for (auto&& letter : Letters())
mNewFsm.Connect(ns, ns, letter.first);
- mNewTerminals.insert(ns);
+ mNewTerminals.insert(ns);
PIRE_IFDEBUG(Cdbg << "State " << ns << " becomes terminal because of old state " << j << Endl);
- }
- }
+ }
+ }
for (auto&& j : states[ns]) {
- // If any state containing in our one is marked final, mark the new state final as well
+				// If any state contained in this one is marked final, mark the new state final as well
if (mFsm.IsFinal(j)) {
PIRE_IFDEBUG(Cdbg << "State " << ns << " becomes final because of old state " << j << Endl);
- mNewFsm.SetFinal(ns, true);
- if (mFsm.tags.empty())
- // Weve got no tags and already know that the state is final,
- // hence weve done with this state and got nothing more to do.
- break;
- }
-
- // Bitwise OR all tags in states
+ mNewFsm.SetFinal(ns, true);
+ if (mFsm.tags.empty())
+						// We've got no tags and already know that the state is final,
+						// hence we're done with this state and have nothing more to do.
+ break;
+ }
+
+ // Bitwise OR all tags in states
auto ti = mFsm.tags.find(j);
- if (ti != mFsm.tags.end()) {
+ if (ti != mFsm.tags.end()) {
PIRE_IFDEBUG(Cdbg << "State " << ns << " carries tag " << ti->second << " because of old state " << j << Endl);
- mNewFsm.tags[ns] |= ti->second;
- }
- }
- }
- // For each old state, prepare a list of new state it is contained in
+ mNewFsm.tags[ns] |= ti->second;
+ }
+ }
+ }
+			// For each old state, prepare a list of the new states it is contained in
typedef TMap< size_t, TVector<size_t> > Old2New;
- Old2New old2new;
- for (size_t ns = 0; ns < states.size(); ++ns)
+ Old2New old2new;
+ for (size_t ns = 0; ns < states.size(); ++ns)
for (auto&& j : states[ns])
old2new[j].push_back(ns);
- // Copy all outputs
+ // Copy all outputs
for (auto&& i : mFsm.outputs) {
for (auto&& j : i.second) {
auto from = old2new.find(i.first);
auto to = old2new.find(j.first);
- if (from != old2new.end() && to != old2new.end()) {
+ if (from != old2new.end() && to != old2new.end()) {
for (auto&& k : from->second)
for (auto&& l : to->second)
mNewFsm.outputs[k][l] |= j.second;
- }
- }
- }
- PIRE_IFDEBUG(Cdbg << "New terminals = [" << Join(mNewTerminals.begin(), mNewTerminals.end(), ",") << "]" << Endl);
- }
-
- void Connect(size_t from, size_t to, Char letter)
- {
- PIRE_IFDEBUG(Cdbg << "Connecting " << from << " --" << letter << "--> " << to << Endl);
+ }
+ }
+ }
+ PIRE_IFDEBUG(Cdbg << "New terminals = [" << Join(mNewTerminals.begin(), mNewTerminals.end(), ",") << "]" << Endl);
+ }
+
+ void Connect(size_t from, size_t to, Char letter)
+ {
+ PIRE_IFDEBUG(Cdbg << "Connecting " << from << " --" << letter << "--> " << to << Endl);
Y_ASSERT(mNewTerminals.find(from) == mNewTerminals.end());
- mNewFsm.Connect(from, to, letter);
- }
- typedef bool Result;
+ mNewFsm.Connect(from, to, letter);
+ }
+ typedef bool Result;
Result Success() {
Fsm::Outputs oldOutputs;
@@ -1003,40 +1003,40 @@ public:
return true;
}
- Result Failure() { return false; }
+ Result Failure() { return false; }
- Fsm& Output() { return mNewFsm; }
-private:
- const Fsm& mFsm;
- Fsm mNewFsm;
+ Fsm& Output() { return mNewFsm; }
+private:
+ const Fsm& mFsm;
+ Fsm mNewFsm;
TSet<size_t> mTerminals;
TSet<size_t> mNewTerminals;
-};
-}
-
-bool Fsm::Determine(size_t maxsize /* = 0 */)
-{
- static const unsigned MaxSize = 200000;
- if (determined)
- return true;
-
- PIRE_IFDEBUG(Cdbg << "=== Initial ===" << Endl << *this << Endl);
-
- RemoveEpsilons();
- PIRE_IFDEBUG(Cdbg << "=== After all epsilons removed" << Endl << *this << Endl);
-
- Impl::FsmDetermineTask task(*this);
- if (Pire::Impl::Determine(task, maxsize ? maxsize : MaxSize)) {
- task.Output().Swap(*this);
- PIRE_IFDEBUG(Cdbg << "=== Determined ===" << Endl << *this << Endl);
- return true;
- } else
- return false;
-}
-
+};
+}
+
+bool Fsm::Determine(size_t maxsize /* = 0 */)
+{
+ static const unsigned MaxSize = 200000;
+ if (determined)
+ return true;
+
+ PIRE_IFDEBUG(Cdbg << "=== Initial ===" << Endl << *this << Endl);
+
+ RemoveEpsilons();
+ PIRE_IFDEBUG(Cdbg << "=== After all epsilons removed" << Endl << *this << Endl);
+
+ Impl::FsmDetermineTask task(*this);
+ if (Pire::Impl::Determine(task, maxsize ? maxsize : MaxSize)) {
+ task.Output().Swap(*this);
+ PIRE_IFDEBUG(Cdbg << "=== Determined ===" << Endl << *this << Endl);
+ return true;
+ } else
+ return false;
+}
+
namespace Impl {
class FsmMinimizeTask {
-public:
+public:
explicit FsmMinimizeTask(const Fsm& fsm)
: mFsm(fsm)
, reversedTransitions(fsm.Size())
@@ -1044,7 +1044,7 @@ public:
, Classes(0)
{
Y_ASSERT(mFsm.IsDetermined());
-
+
TMap<bool, size_t> FinalStateClassMap;
for (size_t state = 0; state < mFsm.Size(); ++state) {
@@ -1068,7 +1068,7 @@ public:
}
}
}
-
+
TVector<size_t>& GetStateClass() { return StateClass; }
size_t& GetClassesNumber() { return Classes; }
@@ -1080,22 +1080,22 @@ public:
bool IsDetermined() const {
return mFsm.IsDetermined();
}
-
+
size_t Size() const {
return mFsm.Size();
- }
-
+ }
+
const TVector<size_t>& Previous(size_t state, size_t letter) const {
return reversedTransitions[state][letter];
- }
-
+ }
+
void AcceptStates() {
mNewFsm.Resize(Classes);
mNewFsm.letters = mFsm.letters;
mNewFsm.determined = mFsm.determined;
mNewFsm.m_sparsed = mFsm.m_sparsed;
mNewFsm.SetFinal(0, false);
-
+
// Unite equality classes into new states
size_t fromIdx = 0;
for (auto from = mFsm.m_transitions.begin(), fromEnd = mFsm.m_transitions.end(); from != fromEnd; ++from, ++fromIdx) {
@@ -1109,36 +1109,36 @@ public:
mNewFsm.SetFinal(dest, true);
PIRE_IFDEBUG(Cdbg << "[min] New state " << dest << " becomes final because of old state " << fromIdx << Endl);
}
-
+
// Append tags
auto ti = mFsm.tags.find(fromIdx);
if (ti != mFsm.tags.end()) {
mNewFsm.tags[dest] |= ti->second;
PIRE_IFDEBUG(Cdbg << "[min] New state " << dest << " carries tag " << ti->second << " because of old state " << fromIdx << Endl);
}
- }
+ }
mNewFsm.initial = StateClass[mFsm.initial];
// Restore outputs
for (auto&& output : mFsm.outputs)
for (auto&& output2 : output.second)
mNewFsm.outputs[StateClass[output.first]].insert(ymake_pair(StateClass[output2.first], output2.second));
- }
-
+ }
+
typedef bool Result;
-
+
Result Success() {
return true;
}
-
+
Result Failure() {
return false;
}
-
+
Fsm& Output() {
return mNewFsm;
- }
-
+ }
+
private:
const Fsm& mFsm;
Fsm mNewFsm;
@@ -1147,89 +1147,89 @@ private:
size_t Classes;
};
}
-
+
void Fsm::Minimize()
{
// Minimization algorithm is only applicable to a determined FSM.
Y_ASSERT(determined);
-
+
Impl::FsmMinimizeTask task{*this};
if (Pire::Impl::Minimize(task)) {
task.Output().Swap(*this);
- }
-}
-
-Fsm& Fsm::Canonize(size_t maxSize /* = 0 */)
-{
- if (!IsDetermined()) {
+ }
+}
+
+Fsm& Fsm::Canonize(size_t maxSize /* = 0 */)
+{
+ if (!IsDetermined()) {
if (!Determine(maxSize))
- throw Error("regexp pattern too complicated");
- }
- Minimize();
- return *this;
-}
-
-void Fsm::PrependAnything()
-{
- size_t newstate = Size();
- Resize(Size() + 1);
- for (size_t letter = 0; letter < MaxChar; ++letter)
- Connect(newstate, newstate, letter);
-
- Connect(newstate, initial);
- initial = newstate;
-
- determined = false;
-}
-
-void Fsm::AppendAnything()
-{
- size_t newstate = Size();
- Resize(Size() + 1);
- for (size_t letter = 0; letter < MaxChar; ++letter)
- Connect(newstate, newstate, letter);
-
- ConnectFinal(newstate);
- ClearFinal();
- SetFinal(newstate, 1);
-
- determined = false;
-}
-
-Fsm& Fsm::Surround()
-{
- PrependAnything();
- AppendAnything();
- return *this;
-}
-
-void Fsm::Divert(size_t from, size_t to, size_t dest)
-{
- if (to == dest)
- return;
-
- // Assign the output
+ throw Error("regexp pattern too complicated");
+ }
+ Minimize();
+ return *this;
+}
+
+void Fsm::PrependAnything()
+{
+ size_t newstate = Size();
+ Resize(Size() + 1);
+ for (size_t letter = 0; letter < MaxChar; ++letter)
+ Connect(newstate, newstate, letter);
+
+ Connect(newstate, initial);
+ initial = newstate;
+
+ determined = false;
+}
+
+void Fsm::AppendAnything()
+{
+ size_t newstate = Size();
+ Resize(Size() + 1);
+ for (size_t letter = 0; letter < MaxChar; ++letter)
+ Connect(newstate, newstate, letter);
+
+ ConnectFinal(newstate);
+ ClearFinal();
+ SetFinal(newstate, 1);
+
+ determined = false;
+}
+
+Fsm& Fsm::Surround()
+{
+ PrependAnything();
+ AppendAnything();
+ return *this;
+}
+
+void Fsm::Divert(size_t from, size_t to, size_t dest)
+{
+ if (to == dest)
+ return;
+
+ // Assign the output
auto oi = outputs.find(from);
- if (oi != outputs.end()) {
+ if (oi != outputs.end()) {
auto oi2 = oi->second.find(to);
- if (oi2 != oi->second.end()) {
- unsigned long output = oi2->second;
- oi->second.erase(oi2);
- oi->second.insert(ymake_pair(dest, output));
- }
- }
-
- // Assign the transition
+ if (oi2 != oi->second.end()) {
+ unsigned long output = oi2->second;
+ oi->second.erase(oi2);
+ oi->second.insert(ymake_pair(dest, output));
+ }
+ }
+
+ // Assign the transition
for (auto&& i : m_transitions[from]) {
auto di = i.second.find(to);
if (di != i.second.end()) {
i.second.erase(di);
i.second.insert(dest);
- }
- }
-
- ClearHints();
-}
-
-
-}
+ }
+ }
+
+ ClearHints();
+}
+
+
+}
diff --git a/contrib/libs/pire/pire/fsm.h b/contrib/libs/pire/pire/fsm.h
index d25d1764e31..4dad06ca065 100644
--- a/contrib/libs/pire/pire/fsm.h
+++ b/contrib/libs/pire/pire/fsm.h
@@ -1,283 +1,283 @@
-/*
- * fsm.h -- the definition of the FSM class.
+/*
+ * fsm.h -- the definition of the FSM class.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_FSM_H
-#define PIRE_FSM_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_FSM_H
+#define PIRE_FSM_H
+
+
#include <contrib/libs/pire/pire/stub/stl.h>
-#include "partition.h"
-#include "defs.h"
-
-namespace Pire {
-
- namespace Impl {
- class FsmDetermineTask;
+#include "partition.h"
+#include "defs.h"
+
+namespace Pire {
+
+ namespace Impl {
+ class FsmDetermineTask;
class FsmMinimizeTask;
class HalfFinalDetermineTask;
- }
-
- /// A Flying Spaghetti Monster... no, just a Finite State Machine.
- class Fsm {
- public:
- typedef ybitset<MaxChar> Charset;
-
- Fsm();
- void Swap(Fsm& fsm);
-
- static Fsm MakeFalse();
-
- /// Current number of states
- size_t Size() const { return m_transitions.size(); }
-
- Fsm& Append(char c);
- Fsm& Append(const ystring& str);
- Fsm& AppendSpecial(Char c);
-
- /// Efficiently appends a union of passed strings to FSM.
- /// Used for ranges (e.g. [a-z]), character classes (e.g. \w, \d)
- /// and case-insensitive comparison of multibyte characters,
- /// when one string represents a lowercase variant of a character,
- /// while another string represents its uppercase variant.
+ }
+
+ /// A Flying Spaghetti Monster... no, just a Finite State Machine.
+ class Fsm {
+ public:
+ typedef ybitset<MaxChar> Charset;
+
+ Fsm();
+ void Swap(Fsm& fsm);
+
+ static Fsm MakeFalse();
+
+ /// Current number of states
+ size_t Size() const { return m_transitions.size(); }
+
+ Fsm& Append(char c);
+ Fsm& Append(const ystring& str);
+ Fsm& AppendSpecial(Char c);
+
+		/// Efficiently appends a union of the passed strings to the FSM.
+ /// Used for ranges (e.g. [a-z]), character classes (e.g. \w, \d)
+ /// and case-insensitive comparison of multibyte characters,
+ /// when one string represents a lowercase variant of a character,
+ /// while another string represents its uppercase variant.
Fsm& AppendStrings(const TVector<ystring>& strings);
-
- /// Appends a part matching a single byte (any).
- Fsm& AppendDot();
-
- /// Appends and prepends the FSM with the iterated dot (see above).
- Fsm& Surround(); // returns *this
- Fsm Surrounded() const { Fsm copy(*this); copy.Surround(); return copy; }
-
- Fsm& operator += (const Fsm& rhs); ///< Concatenation
- Fsm& operator |= (const Fsm& rhs); ///< Alternation
- Fsm& operator &= (const Fsm& rhs); ///< Conjunction
- Fsm& Iterate(); ///< Klene star
- Fsm& Complement(); ///< Complementation
- Fsm& operator *= (size_t count) { *this = *this * count; return *this; }
-
- Fsm operator + (const Fsm& rhs) const { Fsm a(*this); return a += rhs; }
- Fsm operator | (const Fsm& rhs) const { Fsm a(*this); return a |= rhs; }
- Fsm operator & (const Fsm& rhs) const { Fsm a(*this); return a &= rhs; }
- Fsm operator * () const { Fsm a(*this); return a.Iterate(); }
- Fsm operator ~ () const { Fsm a(*this); return a.Complement(); }
+
+ /// Appends a part matching a single byte (any).
+ Fsm& AppendDot();
+
+ /// Appends and prepends the FSM with the iterated dot (see above).
+ Fsm& Surround(); // returns *this
+ Fsm Surrounded() const { Fsm copy(*this); copy.Surround(); return copy; }
+
+ Fsm& operator += (const Fsm& rhs); ///< Concatenation
+ Fsm& operator |= (const Fsm& rhs); ///< Alternation
+ Fsm& operator &= (const Fsm& rhs); ///< Conjunction
+		Fsm& Iterate();                      ///< Kleene star
+ Fsm& Complement(); ///< Complementation
+ Fsm& operator *= (size_t count) { *this = *this * count; return *this; }
+
+ Fsm operator + (const Fsm& rhs) const { Fsm a(*this); return a += rhs; }
+ Fsm operator | (const Fsm& rhs) const { Fsm a(*this); return a |= rhs; }
+ Fsm operator & (const Fsm& rhs) const { Fsm a(*this); return a &= rhs; }
+ Fsm operator * () const { Fsm a(*this); return a.Iterate(); }
+ Fsm operator ~ () const { Fsm a(*this); return a.Complement(); }
Fsm operator * (size_t count) const;
-
- // === Raw FSM construction ===
-
- /// Connects two states with given transition
- void Connect(size_t from, size_t to, Char c = Epsilon);
-
- /// Removes given character from the specified transition.
- void Disconnect(size_t from, size_t to, Char c);
-
- /// Completely removes given transition
- void Disconnect(size_t from, size_t to);
-
+
+ // === Raw FSM construction ===
+
+ /// Connects two states with given transition
+ void Connect(size_t from, size_t to, Char c = Epsilon);
+
+ /// Removes given character from the specified transition.
+ void Disconnect(size_t from, size_t to, Char c);
+
+ /// Completely removes given transition
+ void Disconnect(size_t from, size_t to);
+
/// Creates an FSM which matches any prefix of any word current FSM matches.
- void MakePrefix();
-
- /// Creates an FSM which matches any suffix of any word current FSM matches.
- void MakeSuffix();
-
- /// Does the one way part of Surround().
- void PrependAnything();
- void AppendAnything();
-
- /// Creates an FSM which matches reversed strings matched by current FSM.
- Fsm& Reverse();
-
- /// Returns a set of states from which no final states are reachable
+ void MakePrefix();
+
+ /// Creates an FSM which matches any suffix of any word current FSM matches.
+ void MakeSuffix();
+
+ /// Does the one way part of Surround().
+ void PrependAnything();
+ void AppendAnything();
+
+ /// Creates an FSM which matches reversed strings matched by current FSM.
+ Fsm& Reverse();
+
+ /// Returns a set of states from which no final states are reachable
TSet<size_t> DeadStates() const;
-
- /// Removes all dead end paths from FSM
- void RemoveDeadEnds();
-
- /// Determines and minimizes the FSM if neccessary. Returns *this.
- Fsm& Canonize(size_t maxSize = 0);
-
- template<class Scanner>
+
+ /// Removes all dead end paths from FSM
+ void RemoveDeadEnds();
+
+		/// Determines and minimizes the FSM if necessary. Returns *this.
+ Fsm& Canonize(size_t maxSize = 0);
+
+ template<class Scanner>
Scanner Compile(size_t distance = 0);
-
- void DumpState(yostream& s, size_t state) const;
+
+ void DumpState(yostream& s, size_t state) const;
void DumpTo(yostream& s, const ystring& name = "") const;
-
+
typedef TSet<size_t> StatesSet;
typedef TMap<size_t, StatesSet> TransitionRow;
typedef TVector<TransitionRow> TransitionTable;
-
- struct LettersEquality {
- LettersEquality(const Fsm::TransitionTable& tbl): m_tbl(&tbl) {}
- bool operator()(Char a, Char b) const;
- private:
- const Fsm::TransitionTable* m_tbl;
- };
-
+
+ struct LettersEquality {
+ LettersEquality(const Fsm::TransitionTable& tbl): m_tbl(&tbl) {}
+ bool operator()(Char a, Char b) const;
+ private:
+ const Fsm::TransitionTable* m_tbl;
+ };
+
typedef TSet<size_t> FinalTable;
- typedef Partition<Char, LettersEquality> LettersTbl;
-
-
- /*
- * A very low level FSM building interface.
- *
- * It is generally unwise to call any of these functions unless you are building
- * your own scanner, your own ecoding or exaclty know what you are doing.
- */
- unsigned long Tag(size_t state) const { Tags::const_iterator i = tags.find(state); return (i == tags.end()) ? 0 : i->second; }
- void SetTag(size_t state, unsigned long tag) { tags[state] = tag; }
-
- unsigned long Output(size_t from, size_t to) const;
- void SetOutput(size_t from, size_t to, unsigned long output) { outputs[from][to] = output; }
- void ClearOutputs() { outputs.clear(); }
-
- const FinalTable& Finals() const { return m_final; }
- bool IsFinal(size_t state) const { return m_final.find(state) != m_final.end(); }
- void SetFinal(size_t size, bool final);
- void ClearFinal() { m_final.clear(); }
-
- /// Removes all espilon transitions from the FSM. Does not change the FSMs language.
- void RemoveEpsilons();
-
- /// Resize FSM to newSize states. Returns old size.
- size_t Resize(size_t newSize);
-
- /// Imports foreign transition table
- void Import(const Fsm& rhs);
-
- /// Connects all final state with given state
- void ConnectFinal(size_t to, Char c = Epsilon);
-
- /// Diverts all transition between two given states to @p dest, preserving outputs
- void Divert(size_t from, size_t to, size_t dest);
-
- /// Checks whether two states are connected using given letter.
- bool Connected(size_t from, size_t to, Char c) const;
-
- /// Returns a set of letters on which a transition from the specified state exists
+ typedef Partition<Char, LettersEquality> LettersTbl;
+
+
+ /*
+ * A very low level FSM building interface.
+ *
+ * It is generally unwise to call any of these functions unless you are building
+	 * your own scanner, your own encoding, or exactly know what you are doing.
+ */
+ unsigned long Tag(size_t state) const { Tags::const_iterator i = tags.find(state); return (i == tags.end()) ? 0 : i->second; }
+ void SetTag(size_t state, unsigned long tag) { tags[state] = tag; }
+
+ unsigned long Output(size_t from, size_t to) const;
+ void SetOutput(size_t from, size_t to, unsigned long output) { outputs[from][to] = output; }
+ void ClearOutputs() { outputs.clear(); }
+
+ const FinalTable& Finals() const { return m_final; }
+ bool IsFinal(size_t state) const { return m_final.find(state) != m_final.end(); }
+ void SetFinal(size_t size, bool final);
+ void ClearFinal() { m_final.clear(); }
+
+		/// Removes all epsilon transitions from the FSM. Does not change the FSM's language.
+ void RemoveEpsilons();
+
+ /// Resize FSM to newSize states. Returns old size.
+ size_t Resize(size_t newSize);
+
+ /// Imports foreign transition table
+ void Import(const Fsm& rhs);
+
+		/// Connects all final states with the given state
+ void ConnectFinal(size_t to, Char c = Epsilon);
+
+		/// Diverts all transitions between the two given states to @p dest, preserving outputs
+ void Divert(size_t from, size_t to, size_t dest);
+
+ /// Checks whether two states are connected using given letter.
+ bool Connected(size_t from, size_t to, Char c) const;
+
+ /// Returns a set of letters on which a transition from the specified state exists
TSet<Char> OutgoingLetters(size_t state) const;
-
- /// Returns a set of states where a transition from the given state using the given letter is possible
- const StatesSet& Destinations(size_t from, Char letter) const;
-
- /// Checks whether two states are connected using any letter.
- bool Connected(size_t from, size_t to) const;
- size_t Initial() const { return initial; }
- void SetInitial(size_t init) { initial = init; }
-
- const LettersTbl& Letters() const { return letters; }
-
- /// Determines the FSM.
- /// Breaks FSM invariant of having a single final state, so high-level FSM building
- /// functions (i.e. Append(), operator+(), etc...) no longer can be applied to the FSM
- /// until the invariants have been manually restored.
- /// return value: successful?
- bool Determine(size_t maxsize = 0);
- bool IsDetermined() const { return determined; }
- void SetIsDetermined(bool det) { determined = det; }
-
- /// Minimizes amount of states in the regexp.
- /// Requires a determined FSM.
- void Minimize();
-
-
- /// Builds letters equivalence classes
+
+ /// Returns a set of states where a transition from the given state using the given letter is possible
+ const StatesSet& Destinations(size_t from, Char letter) const;
+
+ /// Checks whether two states are connected using any letter.
+ bool Connected(size_t from, size_t to) const;
+ size_t Initial() const { return initial; }
+ void SetInitial(size_t init) { initial = init; }
+
+ const LettersTbl& Letters() const { return letters; }
+
+ /// Determines the FSM.
+		/// Breaks the FSM invariant of having a single final state, so high-level FSM building
+		/// functions (i.e. Append(), operator+(), etc.) can no longer be applied to the FSM
+ /// until the invariants have been manually restored.
+ /// return value: successful?
+ bool Determine(size_t maxsize = 0);
+ bool IsDetermined() const { return determined; }
+ void SetIsDetermined(bool det) { determined = det; }
+
+		/// Minimizes the number of states in the regexp.
+ /// Requires a determined FSM.
+ void Minimize();
+
+
+ /// Builds letters equivalence classes
void Sparse(bool needEpsilons = false);
-
- /// Unpacks all letters equivalence classs back into transitions table
- void Unsparse();
-
- private:
-
- /// Transitions table :: Q x V -> exp(Q)
- TransitionTable m_transitions;
-
- /// Initial state
- size_t initial;
-
- /// Final states.
- FinalTable m_final;
-
- LettersTbl letters;
-
- /// Does 'letters' make sense?
- bool m_sparsed;
-
- /// Is the FSM already determined?
- bool determined;
-
- /// Output
+
+		/// Unpacks all letters equivalence classes back into the transitions table
+ void Unsparse();
+
+ private:
+
+ /// Transitions table :: Q x V -> exp(Q)
+ TransitionTable m_transitions;
+
+ /// Initial state
+ size_t initial;
+
+ /// Final states.
+ FinalTable m_final;
+
+ LettersTbl letters;
+
+ /// Does 'letters' make sense?
+ bool m_sparsed;
+
+ /// Is the FSM already determined?
+ bool determined;
+
+ /// Output
typedef TMap< size_t, TMap<size_t, unsigned long> > Outputs;
- Outputs outputs;
-
+ Outputs outputs;
+
typedef TMap<size_t, unsigned long> Tags;
- Tags tags;
-
- /// Heuristics hit: true iff this FSM is a union of two other FSMs
- bool isAlternative;
-
+ Tags tags;
+
+ /// Heuristics hit: true iff this FSM is a union of two other FSMs
+ bool isAlternative;
+
void ShortCutEpsilon(size_t from, size_t thru, TVector< TSet<size_t> >& inveps); ///< internal
- void MergeEpsilonConnection(size_t from, size_t to); ///< internal
-
+ void MergeEpsilonConnection(size_t from, size_t to); ///< internal
+
TSet<size_t> TerminalStates() const;
-
- Char Translate(Char c) const;
-
- void ClearHints() { isAlternative = false; }
-
- friend class Impl::FsmDetermineTask;
+
+ Char Translate(Char c) const;
+
+ void ClearHints() { isAlternative = false; }
+
+ friend class Impl::FsmDetermineTask;
friend class Impl::FsmMinimizeTask;
friend class Impl::HalfFinalDetermineTask;
- };
-
- template<class Scanner>
+ };
+
+ template<class Scanner>
void BuildScanner(const Fsm& fsm, Scanner& r)
- {
+ {
TSet<size_t> dead;
- if (Scanner::DeadFlag)
- dead = fsm.DeadStates();
-
- for (size_t state = 0; state < fsm.Size(); ++state)
- r.SetTag(state, typename Scanner::Tag(fsm.Tag(state)
- | (fsm.IsFinal(state) ? Scanner::FinalFlag : 0)
- | ((dead.find(state) != dead.end()) ? Scanner::DeadFlag : 0)));
-
- for (size_t from = 0; from != fsm.Size(); ++from)
- for (Fsm::LettersTbl::ConstIterator lit = fsm.Letters().Begin(), lie = fsm.Letters().End(); lit != lie; ++lit) {
- const Fsm::StatesSet& tos = fsm.Destinations(from, lit->first);
- for (Fsm::StatesSet::const_iterator to = tos.begin(), toEnd = tos.end(); to != toEnd; ++to)
- r.SetJump(from, lit->first, *to, r.RemapAction(fsm.Output(from, *to)));
- }
-
- r.FinishBuild();
- }
-
- template<class Scanner>
+ if (Scanner::DeadFlag)
+ dead = fsm.DeadStates();
+
+ for (size_t state = 0; state < fsm.Size(); ++state)
+ r.SetTag(state, typename Scanner::Tag(fsm.Tag(state)
+ | (fsm.IsFinal(state) ? Scanner::FinalFlag : 0)
+ | ((dead.find(state) != dead.end()) ? Scanner::DeadFlag : 0)));
+
+ for (size_t from = 0; from != fsm.Size(); ++from)
+ for (Fsm::LettersTbl::ConstIterator lit = fsm.Letters().Begin(), lie = fsm.Letters().End(); lit != lie; ++lit) {
+ const Fsm::StatesSet& tos = fsm.Destinations(from, lit->first);
+ for (Fsm::StatesSet::const_iterator to = tos.begin(), toEnd = tos.end(); to != toEnd; ++to)
+ r.SetJump(from, lit->first, *to, r.RemapAction(fsm.Output(from, *to)));
+ }
+
+ r.FinishBuild();
+ }
+
+ template<class Scanner>
inline Scanner Fsm::Compile(size_t distance)
- {
+ {
return Scanner(*this, distance);
- }
-
- yostream& operator << (yostream&, const Fsm&);
-}
-
-#endif
+ }
+
+ yostream& operator << (yostream&, const Fsm&);
+}
+
+#endif
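
A rough sketch of how the interface declared above composes, based only on these declarations; the Pire::Scanner header location and its (Fsm, distance) constructor expected by Compile() are assumptions here:

#include <contrib/libs/pire/pire/fsm.h>
#include <contrib/libs/pire/pire/scanners/multi.h>  // assumed home of Pire::Scanner

Pire::Scanner MakeScanner()
{
	Pire::Fsm foo, bar;
	foo.Append(ystring("foo"));
	bar.Append(ystring("bar"));

	Pire::Fsm both = (foo | bar).Surrounded();  // "foo" or "bar" anywhere in the input
	both.Canonize();                            // Determine() + Minimize()
	return both.Compile<Pire::Scanner>();       // relies on Scanner(Fsm&, size_t)
}
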
diff --git a/contrib/libs/pire/pire/fwd.h b/contrib/libs/pire/pire/fwd.h
index aa6eb6b051d..c2b5870b05a 100644
--- a/contrib/libs/pire/pire/fwd.h
+++ b/contrib/libs/pire/pire/fwd.h
@@ -1,42 +1,42 @@
-/*
- * fwd.h -- forward declarations of Pire classes
+/*
+ * fwd.h -- forward declarations of Pire classes
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_FWD_H
-#define PIRE_FWD_H
-
-
-namespace Pire {
-
- class Scanner;
- class MultiScanner;
- class SlowScanner;
- class CapturingScanner;
- class CountingScanner;
-
- class Fsm;
-
- class Lexer;
- class Encoding;
-}
-
-#endif
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_FWD_H
+#define PIRE_FWD_H
+
+
+namespace Pire {
+
+ class Scanner;
+ class MultiScanner;
+ class SlowScanner;
+ class CapturingScanner;
+ class CountingScanner;
+
+ class Fsm;
+
+ class Lexer;
+ class Encoding;
+}
+
+#endif
diff --git a/contrib/libs/pire/pire/glue.h b/contrib/libs/pire/pire/glue.h
index fb34c6cfa80..bac086f2f00 100644
--- a/contrib/libs/pire/pire/glue.h
+++ b/contrib/libs/pire/pire/glue.h
@@ -1,166 +1,166 @@
-/*
- * glue.h -- scanner agglutination task, which can be used as
- * a parameter to Determine().
+/*
+ * glue.h -- scanner agglutination task, which can be used as
+ * a parameter to Determine().
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_GLUE_H
-#define PIRE_GLUE_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_GLUE_H
+#define PIRE_GLUE_H
+
+
#include <contrib/libs/pire/pire/stub/stl.h>
-#include "partition.h"
-
-namespace Pire {
-namespace Impl {
-
-template <class Scanner>
-class LettersEquality: public ybinary_function<Char, Char, bool> {
-public:
- LettersEquality(typename Scanner::Letter* lhs, typename Scanner::Letter* rhs): m_lhs(lhs), m_rhs(rhs) {}
-
- bool operator()(Char a, Char b) const
- {
- return m_lhs[a] == m_lhs[b] && m_rhs[a] == m_rhs[b];
- }
-
-private:
- typename Scanner::Letter* m_lhs;
- typename Scanner::Letter* m_rhs;
-};
-
-// This lookup table is used instead of std::map.
-// The key idea is to specify size which is a power of 2 in order to use >> and | instead of
-// divisions and remainders.
-// NB: it mimics limited std::map<> behaviour, hence stl-like method names and typedefs.
-template <size_t N, class State>
-class GluedStateLookupTable {
-public:
- static const size_t MaxSize = N;
- typedef ypair<State, State> key_type;
- typedef size_t mapped_type;
- typedef ypair<key_type, mapped_type> value_type;
- typedef value_type* iterator;
- typedef const value_type* const_iterator;
-
- GluedStateLookupTable()
- : mMap(new value_type[N])
- , mFilled(N, false)
- {}
-
+#include "partition.h"
+
+namespace Pire {
+namespace Impl {
+
+template <class Scanner>
+class LettersEquality: public ybinary_function<Char, Char, bool> {
+public:
+ LettersEquality(typename Scanner::Letter* lhs, typename Scanner::Letter* rhs): m_lhs(lhs), m_rhs(rhs) {}
+
+ bool operator()(Char a, Char b) const
+ {
+ return m_lhs[a] == m_lhs[b] && m_rhs[a] == m_rhs[b];
+ }
+
+private:
+ typename Scanner::Letter* m_lhs;
+ typename Scanner::Letter* m_rhs;
+};
+
+// This lookup table is used instead of std::map.
+// The key idea is to specify a size which is a power of 2 in order to use >> and | instead of
+// divisions and remainders.
+// NB: it mimics limited std::map<> behaviour, hence stl-like method names and typedefs.
+template <size_t N, class State>
+class GluedStateLookupTable {
+public:
+ static const size_t MaxSize = N;
+ typedef ypair<State, State> key_type;
+ typedef size_t mapped_type;
+ typedef ypair<key_type, mapped_type> value_type;
+ typedef value_type* iterator;
+ typedef const value_type* const_iterator;
+
+ GluedStateLookupTable()
+ : mMap(new value_type[N])
+ , mFilled(N, false)
+ {}
+
~GluedStateLookupTable() = default;
-
- const_iterator end() const {
+
+ const_iterator end() const {
return mMap.Get() + MaxSize;
- }
- // Note that in fact mMap is sparsed and traditional [begin,end)
- // traversal is unavailable; hence no begin() method here.
- // end() is only valid for comparing with find() result.
- const_iterator find(const key_type& st) const {
- size_t ind = Search(st);
+ }
+	// Note that in fact mMap is sparse and traditional [begin,end)
+ // traversal is unavailable; hence no begin() method here.
+ // end() is only valid for comparing with find() result.
+ const_iterator find(const key_type& st) const {
+ size_t ind = Search(st);
return mFilled[ind] ? (mMap.Get() + ind) : end();
- }
-
- ypair<iterator, bool> insert(const value_type& v) {
- size_t ind = Search(v.first);
- if (!mFilled[ind]) {
+ }
+
+ ypair<iterator, bool> insert(const value_type& v) {
+ size_t ind = Search(v.first);
+ if (!mFilled[ind]) {
mMap[ind] = v;
- mFilled[ind] = true;
+ mFilled[ind] = true;
return ymake_pair(mMap.Get() + ind, true);
- } else
+ } else
return ymake_pair(mMap.Get() + ind, false);
- }
-
-private:
- size_t Search(const key_type& st) const {
- size_t startInd = (Hash(st) % N);
- for (size_t ind = startInd; ind != (startInd + N - 1) % N; ind = (ind + 1) % N) {
- if (!mFilled[ind] || mMap[ind].first == st) {
- return ind;
- }
- }
+ }
+
+private:
+ size_t Search(const key_type& st) const {
+ size_t startInd = (Hash(st) % N);
+ for (size_t ind = startInd; ind != (startInd + N - 1) % N; ind = (ind + 1) % N) {
+ if (!mFilled[ind] || mMap[ind].first == st) {
+ return ind;
+ }
+ }
return (size_t)-1;
- }
-
- static size_t Hash(const key_type& st) {
- return size_t((st.first >> 2) ^ (st.second >> 4) ^ (st.second << 10));
- }
-
+ }
+
+ static size_t Hash(const key_type& st) {
+ return size_t((st.first >> 2) ^ (st.second >> 4) ^ (st.second << 10));
+ }
+
TArrayHolder<value_type> mMap;
TVector<bool> mFilled;
-
- // Noncopyable
- GluedStateLookupTable(const GluedStateLookupTable&);
- GluedStateLookupTable& operator = (const GluedStateLookupTable&);
-};
-
-template<class Scanner>
-class ScannerGlueCommon {
-public:
- typedef Partition< Char, Impl::LettersEquality<Scanner> > LettersTbl;
-
- typedef ypair<typename Scanner::InternalState, typename Scanner::InternalState> State;
- ScannerGlueCommon(const Scanner& lhs, const Scanner& rhs, const LettersTbl& letters)
- : m_lhs(lhs)
- , m_rhs(rhs)
- , m_letters(letters)
- {
- // Form a new letters partition
- for (unsigned ch = 0; ch < MaxChar; ++ch)
- if (ch != Epsilon)
- m_letters.Append(ch);
- }
-
- const LettersTbl& Letters() const { return m_letters; }
-
- const Scanner& Lhs() const { return m_lhs; }
- const Scanner& Rhs() const { return m_rhs; }
-
- State Initial() const { return State(Lhs().m.initial, Rhs().m.initial); }
-
- State Next(State state, Char letter) const
- {
- Lhs().Next(state.first, letter);
- Rhs().Next(state.second, letter);
- return state;
- }
-
- bool IsRequired(const State& /*state*/) const { return true; }
-
- typedef Scanner Result;
- const Scanner& Success() const { return *m_result; }
- Scanner Failure() const { return Scanner(); }
-
-protected:
- Scanner& Sc() { return *m_result; }
+
+ // Noncopyable
+ GluedStateLookupTable(const GluedStateLookupTable&);
+ GluedStateLookupTable& operator = (const GluedStateLookupTable&);
+};
+
+template<class Scanner>
+class ScannerGlueCommon {
+public:
+ typedef Partition< Char, Impl::LettersEquality<Scanner> > LettersTbl;
+
+ typedef ypair<typename Scanner::InternalState, typename Scanner::InternalState> State;
+ ScannerGlueCommon(const Scanner& lhs, const Scanner& rhs, const LettersTbl& letters)
+ : m_lhs(lhs)
+ , m_rhs(rhs)
+ , m_letters(letters)
+ {
+ // Form a new letters partition
+ for (unsigned ch = 0; ch < MaxChar; ++ch)
+ if (ch != Epsilon)
+ m_letters.Append(ch);
+ }
+
+ const LettersTbl& Letters() const { return m_letters; }
+
+ const Scanner& Lhs() const { return m_lhs; }
+ const Scanner& Rhs() const { return m_rhs; }
+
+ State Initial() const { return State(Lhs().m.initial, Rhs().m.initial); }
+
+ State Next(State state, Char letter) const
+ {
+ Lhs().Next(state.first, letter);
+ Rhs().Next(state.second, letter);
+ return state;
+ }
+
+ bool IsRequired(const State& /*state*/) const { return true; }
+
+ typedef Scanner Result;
+ const Scanner& Success() const { return *m_result; }
+ Scanner Failure() const { return Scanner(); }
+
+protected:
+ Scanner& Sc() { return *m_result; }
void SetSc(THolder<Scanner>&& sc) { m_result = std::move(sc); }
-
-private:
- const Scanner& m_lhs;
- const Scanner& m_rhs;
- LettersTbl m_letters;
+
+private:
+ const Scanner& m_lhs;
+ const Scanner& m_rhs;
+ LettersTbl m_letters;
THolder<Scanner> m_result;
-};
-
-}
-}
-
-#endif
+};
+
+}
+}
+
+#endif
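
A standalone sketch of the probing scheme GluedStateLookupTable implements above: a fixed-capacity, open-addressed table keyed by a pair of scanner states, whose find()/insert() mimic std::map and whose end() is only meaningful as a "not found" sentinel. The names here (StateGlueMap, Key, State) are illustrative stand-ins for the ypair/TArrayHolder-based class, not part of the library.

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

using State = size_t;
using Key = std::pair<State, State>;

template <size_t N>
class StateGlueMap {
public:
    using value_type = std::pair<Key, size_t>;

    const value_type* end() const { return mMap.data() + N; }

    const value_type* find(const Key& k) const {
        size_t ind = Search(k);
        return mFilled[ind] ? mMap.data() + ind : end();        // end() means "not found"
    }

    std::pair<value_type*, bool> insert(const value_type& v) {
        size_t ind = Search(v.first);
        if (!mFilled[ind]) {
            mMap[ind] = v;
            mFilled[ind] = true;
            return {mMap.data() + ind, true};
        }
        return {mMap.data() + ind, false};                      // key already present
    }

private:
    size_t Search(const Key& k) const {
        size_t start = Hash(k) % N;                             // linear probing from the hashed slot
        for (size_t ind = start; ind != (start + N - 1) % N; ind = (ind + 1) % N)
            if (!mFilled[ind] || mMap[ind].first == k)
                return ind;
        return size_t(-1);                                      // table full; never expected in practice
    }
    static size_t Hash(const Key& k) { return (k.first >> 2) ^ (k.second >> 4) ^ (k.second << 10); }

    std::vector<value_type> mMap = std::vector<value_type>(N);
    std::vector<bool> mFilled = std::vector<bool>(N, false);
};

int main() {
    StateGlueMap<256> m;                                        // power-of-two capacity, as the comment above suggests
    m.insert({{4, 8}, 0});
    m.insert({{4, 8}, 1});                                      // duplicate key: insert() reports false
    std::printf("found: %d\n", m.find({4, 8}) != m.end());      // prints "found: 1"
}
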
diff --git a/contrib/libs/pire/pire/inline.l b/contrib/libs/pire/pire/inline.l
index 67f6d805848..a4d2e1a836f 100644
--- a/contrib/libs/pire/pire/inline.l
+++ b/contrib/libs/pire/pire/inline.l
@@ -1,31 +1,31 @@
-%{ // -*- mode: c++ -*-
-
-/*
- * inline.lpp -- a tool for inlining Pire regexps into your C++ code
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#include <stdio.h>
-#include <vector>
-#include <string>
-#include <stdexcept>
+%{ // -*- mode: c++ -*-
+
+/*
+ * inline.lpp -- a tool for inlining Pire regexps into your C++ code
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#include <stdio.h>
+#include <vector>
+#include <string>
+#include <stdexcept>
#include <contrib/libs/pire/pire/stub/hacks.h>
#include <contrib/libs/pire/pire/stub/lexical_cast.h>
@@ -35,238 +35,238 @@
#include "pire.h"
-ystring filename = "";
-int line = 1;
+ystring filename = "";
+int line = 1;
TVector<ystring> args;
-
-#ifdef _WIN32
-#if _MSC_VER >= 1600
-static int isatty(int) { return 0; }
-#endif
-#endif
-
-class Die {
-public:
- Die() {
- Msg = filename.empty() ? "pire_inline" : (filename + ":" + ToString(line) + ":");
- }
-
-
- template<class T>
- Die& operator << (const T& t) {
- Msg += ToString(t);
- return *this;
- }
-
-
- ~Die() {
- fprintf(stderr, "%s\n", Msg.c_str());
- exit(1);
- }
-private:
- ystring Msg;
-};
-Die DieHelper() {
- return Die();
-}
-
-void putChar(char c) { putc(c, yyout); }
-void suppressChar(char) {}
-void eatComment(void (*action)(char));
-
-#define YY_FATAL_ERROR(msg) DieHelper() << msg
-%}
-%x Regexp
-%%
-
-
-<INITIAL>"/*" { eatComment(putChar); }
-<Regexp>"/*" { eatComment(suppressChar); }
-<INITIAL>"//".*\n { ++line; fprintf(yyout, "%s", yytext); }
-<Regexp>"//".*\n { ++line; }
-"\""([^\"]|\\.)*"\"" { fprintf(yyout, "%s", yytext); }
-\n { ++line; putc('\n', yyout); }
-
-
-<INITIAL>"PIRE_REGEXP"[:space:]*"(" { BEGIN(Regexp); args.clear(); args.push_back(ystring()); }
-<Regexp>"\""([^\"]|\\.)*"\"" {
- ystring& s = args.back();
- const char* p;
- for (p = yytext + 1; *p && p[1]; ++p) {
- if (*p == '\\') {
- ++p;
- if (!*p)
- Die() << "string ends with a backslash";
- else if (*p == '\'' || *p == '\"' || *p == '\\')
- s.push_back(*p);
- else if (*p == 'n')
- s.push_back('\n');
- else if (*p == 't')
- s.push_back('\t');
- else if (isdigit(*p)) {
- const char* beg = p;
- while (isdigit(*p))
- ++p;
- s.push_back(strtol(ystring(beg, p).c_str(), 0, 8));
- } else if (*p == 'x') {
- const char* beg = p;
- while (isdigit(*p) || (*p > 'a' && *p <= 'f') || (*p > 'A' && *p < 'F'))
- ++p;
- s.push_back(strtol(ystring(beg, p).c_str(), 0, 16));
- } else
- Die() << "unknown escape sequence (\\" << *p << ")";
- } else
- s.push_back(*p);
- }
- if (!*p)
- Die() << "string ends with a backslash";
-}
-<Regexp>[ \t] {}
-<Regexp>\n { ++line; }
-<Regexp>"," { args.push_back(ystring()); }
-<Regexp>")" {
-
- if (args.size() & 1 || args.empty())
- Die() << "Usage: PIRE_REGEXP(\"regexp1\", \"flags1\" [, \"regexp2\", \"flags2\" [,...] ])";
-
- bool first = true;
- Pire::Scanner sc;
- ystring pattern;
+
+#ifdef _WIN32
+#if _MSC_VER >= 1600
+static int isatty(int) { return 0; }
+#endif
+#endif
+
+class Die {
+public:
+ Die() {
+ Msg = filename.empty() ? "pire_inline" : (filename + ":" + ToString(line) + ":");
+ }
+
+
+ template<class T>
+ Die& operator << (const T& t) {
+ Msg += ToString(t);
+ return *this;
+ }
+
+
+ ~Die() {
+ fprintf(stderr, "%s\n", Msg.c_str());
+ exit(1);
+ }
+private:
+ ystring Msg;
+};
+Die DieHelper() {
+ return Die();
+}
+
+void putChar(char c) { putc(c, yyout); }
+void suppressChar(char) {}
+void eatComment(void (*action)(char));
+
+#define YY_FATAL_ERROR(msg) DieHelper() << msg
+%}
+%x Regexp
+%%
+
+
+<INITIAL>"/*" { eatComment(putChar); }
+<Regexp>"/*" { eatComment(suppressChar); }
+<INITIAL>"//".*\n { ++line; fprintf(yyout, "%s", yytext); }
+<Regexp>"//".*\n { ++line; }
+"\""([^\"]|\\.)*"\"" { fprintf(yyout, "%s", yytext); }
+\n { ++line; putc('\n', yyout); }
+
+
+<INITIAL>"PIRE_REGEXP"[:space:]*"(" { BEGIN(Regexp); args.clear(); args.push_back(ystring()); }
+<Regexp>"\""([^\"]|\\.)*"\"" {
+ ystring& s = args.back();
+ const char* p;
+ for (p = yytext + 1; *p && p[1]; ++p) {
+ if (*p == '\\') {
+ ++p;
+ if (!*p)
+ Die() << "string ends with a backslash";
+ else if (*p == '\'' || *p == '\"' || *p == '\\')
+ s.push_back(*p);
+ else if (*p == 'n')
+ s.push_back('\n');
+ else if (*p == 't')
+ s.push_back('\t');
+ else if (isdigit(*p)) {
+ const char* beg = p;
+ while (isdigit(*p))
+ ++p;
+ s.push_back(strtol(ystring(beg, p).c_str(), 0, 8));
+ } else if (*p == 'x') {
+ const char* beg = p;
+ while (isdigit(*p) || (*p > 'a' && *p <= 'f') || (*p > 'A' && *p < 'F'))
+ ++p;
+ s.push_back(strtol(ystring(beg, p).c_str(), 0, 16));
+ } else
+ Die() << "unknown escape sequence (\\" << *p << ")";
+ } else
+ s.push_back(*p);
+ }
+ if (!*p)
+ Die() << "string ends with a backslash";
+}
+<Regexp>[ \t] {}
+<Regexp>\n { ++line; }
+<Regexp>"," { args.push_back(ystring()); }
+<Regexp>")" {
+
+ if (args.size() & 1 || args.empty())
+ Die() << "Usage: PIRE_REGEXP(\"regexp1\", \"flags1\" [, \"regexp2\", \"flags2\" [,...] ])";
+
+ bool first = true;
+ Pire::Scanner sc;
+ ystring pattern;
for (auto i = args.begin(), ie = args.end(); i != ie; i += 2) {
-
- Pire::Lexer lexer(i->c_str(), i->c_str() + i->size());
- bool surround = false;
- bool greedy = false;
+
+ Pire::Lexer lexer(i->c_str(), i->c_str() + i->size());
+ bool surround = false;
+ bool greedy = false;
bool reverse = false;
- for (const char* option = (i+1)->c_str(); *option; ++option) {
- if (*option == 'i')
- lexer.AddFeature(Pire::Features::CaseInsensitive());
- else if (*option == 'u')
- lexer.SetEncoding(Pire::Encodings::Utf8());
- else if (*option == 's')
- surround = true;
- else if (*option == 'a')
- lexer.AddFeature(Pire::Features::AndNotSupport());
- else if (*option == 'g')
- greedy = true;
+ for (const char* option = (i+1)->c_str(); *option; ++option) {
+ if (*option == 'i')
+ lexer.AddFeature(Pire::Features::CaseInsensitive());
+ else if (*option == 'u')
+ lexer.SetEncoding(Pire::Encodings::Utf8());
+ else if (*option == 's')
+ surround = true;
+ else if (*option == 'a')
+ lexer.AddFeature(Pire::Features::AndNotSupport());
+ else if (*option == 'g')
+ greedy = true;
else if (*option == 'r')
reverse = true;
- else
- Die() << "unknown option " << *option << "";
- }
-
- Pire::Fsm fsm;
- try {
- fsm = lexer.Parse();
- }
- catch (std::exception& e) {
- Die() << "" << filename << ":" << line << ": " << e.what() << "";
- }
+ else
+ Die() << "unknown option " << *option << "";
+ }
+
+ Pire::Fsm fsm;
+ try {
+ fsm = lexer.Parse();
+ }
+ catch (std::exception& e) {
+ Die() << "" << filename << ":" << line << ": " << e.what() << "";
+ }
if (reverse)
fsm.Reverse();
- if (greedy && surround)
- Die() << "greedy and surround options are incompatible";
- if (greedy)
- fsm = ~fsm.Surrounded() + fsm;
- else if (surround)
- fsm.Surround();
-
- Pire::Scanner tsc(fsm);
- if (first) {
- pattern = *i;
- first = false;
- tsc.Swap(sc);
- } else {
- sc = Pire::Scanner::Glue(sc, tsc);
- pattern += " | ";
- pattern += *i;
- }
- }
-
- BufferOutput buf;
- AlignedOutput stream(&buf);
- Save(&stream, sc);
-
- fprintf(yyout, "Pire::MmappedScanner<Pire::Scanner>(PIRE_LITERAL( // %s \n \"", pattern.c_str());
- size_t pos = 5;
+ if (greedy && surround)
+ Die() << "greedy and surround options are incompatible";
+ if (greedy)
+ fsm = ~fsm.Surrounded() + fsm;
+ else if (surround)
+ fsm.Surround();
+
+ Pire::Scanner tsc(fsm);
+ if (first) {
+ pattern = *i;
+ first = false;
+ tsc.Swap(sc);
+ } else {
+ sc = Pire::Scanner::Glue(sc, tsc);
+ pattern += " | ";
+ pattern += *i;
+ }
+ }
+
+ BufferOutput buf;
+ AlignedOutput stream(&buf);
+ Save(&stream, sc);
+
+ fprintf(yyout, "Pire::MmappedScanner<Pire::Scanner>(PIRE_LITERAL( // %s \n \"", pattern.c_str());
+ size_t pos = 5;
for (auto i = buf.Buffer().Begin(), ie = buf.Buffer().End(); i != ie; ++i) {
- pos += fprintf(yyout, "\\x%02X", static_cast<unsigned char>(*i));
- if (pos >= 78) {
- fprintf(yyout, "\"\n \"");
- pos = 5;
- }
- }
- fprintf(yyout, "\"), %u)\n#line %d \"%s\"\n",
- (unsigned int) buf.Buffer().Size(), line, filename.c_str());
- BEGIN(INITIAL);
-}
-<INITIAL>. { putc(*yytext, yyout); }
-
-
-
-
-%%
-
-void eatComment(void (*action)(char))
-{
- int c;
- action('/'); action('*');
- for (;;) {
- while ((c = yyinput()) != EOF && c != '*') {
- if (c == '\n')
- ++line;
- action(c);
- }
- if (c == '*') {
- action(c);
- while ((c = yyinput()) == '*')
- action(c);
- if (c == '/') {
- action(c);
- break;
- }
- }
- if (c == EOF)
- Die() << "EOF in comment";
- }
-}
-
-int yywrap() { return 1; }
-
-
-int main(int argc, char** argv)
-{
- // Suppress warnings
- static_cast<void>(&yy_fatal_error);
- static_cast<void>(&yyunput);
-
-
- try {
- const char* outfile = 0;
- if (argc >= 3 && !strcmp(argv[1], "-o")) {
- outfile = argv[2];
- argv += 2, argc -= 2;
- }
- if (argc == 2)
- filename = ystring(argv[1]);
- else if (argc > 2)
- Die() << "usage: pire_inline [-o outfile] [infile]";
-
- yyin = stdin, yyout = stdout;
- if (outfile && (yyout = fopen(outfile, "w")) == NULL)
- Die() << "cannot open file " << outfile << " for writing";
- if (!filename.empty()) {
- if ((yyin = fopen(filename.c_str(), "r")) == NULL)
- Die() << "cannot open file " << filename.c_str() << "\n";
- } else
- filename = "(stdin)";
-
-
- yylex();
- return 0;
- }
- catch (std::exception& e) {
- fprintf(stderr, "%s\n", e.what());
- return 1;
- }
-}
+ pos += fprintf(yyout, "\\x%02X", static_cast<unsigned char>(*i));
+ if (pos >= 78) {
+ fprintf(yyout, "\"\n \"");
+ pos = 5;
+ }
+ }
+ fprintf(yyout, "\"), %u)\n#line %d \"%s\"\n",
+ (unsigned int) buf.Buffer().Size(), line, filename.c_str());
+ BEGIN(INITIAL);
+}
+<INITIAL>. { putc(*yytext, yyout); }
+
+
+
+
+%%
+
+void eatComment(void (*action)(char))
+{
+ int c;
+ action('/'); action('*');
+ for (;;) {
+ while ((c = yyinput()) != EOF && c != '*') {
+ if (c == '\n')
+ ++line;
+ action(c);
+ }
+ if (c == '*') {
+ action(c);
+ while ((c = yyinput()) == '*')
+ action(c);
+ if (c == '/') {
+ action(c);
+ break;
+ }
+ }
+ if (c == EOF)
+ Die() << "EOF in comment";
+ }
+}
+
+int yywrap() { return 1; }
+
+
+int main(int argc, char** argv)
+{
+ // Suppress warnings
+ static_cast<void>(&yy_fatal_error);
+ static_cast<void>(&yyunput);
+
+
+ try {
+ const char* outfile = 0;
+ if (argc >= 3 && !strcmp(argv[1], "-o")) {
+ outfile = argv[2];
+ argv += 2, argc -= 2;
+ }
+ if (argc == 2)
+ filename = ystring(argv[1]);
+ else if (argc > 2)
+ Die() << "usage: pire_inline [-o outfile] [infile]";
+
+ yyin = stdin, yyout = stdout;
+ if (outfile && (yyout = fopen(outfile, "w")) == NULL)
+ Die() << "cannot open file " << outfile << " for writing";
+ if (!filename.empty()) {
+ if ((yyin = fopen(filename.c_str(), "r")) == NULL)
+ Die() << "cannot open file " << filename.c_str() << "\n";
+ } else
+ filename = "(stdin)";
+
+
+ yylex();
+ return 0;
+ }
+ catch (std::exception& e) {
+ fprintf(stderr, "%s\n", e.what());
+ return 1;
+ }
+}
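
A sketch of the input this tool consumes, based on the usage string and the option letters handled above ("i" case-insensitive, "u" UTF-8, "s" surround, "a" and-not operators, "g" greedy, "r" reverse). Running "pire_inline -o urls.inl.cpp urls.cpp" rewrites the PIRE_REGEXP(...) call into a Pire::MmappedScanner<Pire::Scanner>(...) expression wrapping the serialized scanner bytes; the function name, the pattern, and the assumption that the rewritten expression can be fed straight to Pire::Runner are illustrative only.

// urls.cpp -- compile the rewritten output, not this file.
#include <contrib/libs/pire/pire/pire.h>

bool ContainsUrl(const char* begin, const char* end) {
    // "i": fold case; "s": surround, so the pattern may match anywhere in the text.
    // pire_inline compiles the regexp at build time; real code would typically cache
    // the scanner instead of rebuilding it from the inlined bytes on every call.
    return Pire::Runner(PIRE_REGEXP("https?://[a-z0-9./_]+", "is"))
        .Begin()
        .Run(begin, end - begin)
        .End();
}
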
diff --git a/contrib/libs/pire/pire/partition.h b/contrib/libs/pire/pire/partition.h
index c41cf5c335a..85a9af88635 100644
--- a/contrib/libs/pire/pire/partition.h
+++ b/contrib/libs/pire/pire/partition.h
@@ -1,193 +1,193 @@
-/*
- * partition.h -- a disjoint set of pairwise equivalent items
+/*
+ * partition.h -- a disjoint set of pairwise equivalent items
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_PARTITION_H
-#define PIRE_PARTITION_H
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_PARTITION_H
+#define PIRE_PARTITION_H
+
+
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/singleton.h>
-
-namespace Pire {
-
-/*
-* A class which forms a disjoint set of pairwise equivalent items,
-* depending on given equivalence relation.
-*/
-template<class T, class Eq>
-class Partition {
-private:
+
+namespace Pire {
+
+/*
+* A class which forms a disjoint set of pairwise equivalent items,
+* depending on given equivalence relation.
+*/
+template<class T, class Eq>
+class Partition {
+private:
typedef TMap< T, ypair< size_t, TVector<T> > > Set;
-
-public:
- Partition(const Eq& eq)
- : m_eq(eq)
- , m_maxidx(0)
- {
- }
-
- /// Appends a new item into the partition, creating a new equivalence class if necessary.
- void Append(const T& t) {
- DoAppend(m_set, t);
- }
-
- typedef typename Set::const_iterator ConstIterator;
-
- ConstIterator Begin() const {
- return m_set.begin();
- }
+
+public:
+ Partition(const Eq& eq)
+ : m_eq(eq)
+ , m_maxidx(0)
+ {
+ }
+
+ /// Appends a new item into the partition, creating a new equivalence class if necessary.
+ void Append(const T& t) {
+ DoAppend(m_set, t);
+ }
+
+ typedef typename Set::const_iterator ConstIterator;
+
+ ConstIterator Begin() const {
+ return m_set.begin();
+ }
ConstIterator begin() const {
return m_set.begin();
}
- ConstIterator End() const {
- return m_set.end();
- }
+ ConstIterator End() const {
+ return m_set.end();
+ }
ConstIterator end() const {
return m_set.end();
}
- size_t Size() const {
- return m_set.size();
- }
- bool Empty() const {
- return m_set.empty();
- }
-
- /// Returns an item equal to @p t. It is guaranteed that:
- /// - representative(a) equals representative(b) iff a is equivalent to b;
- /// - representative(a) is equivalent to a.
- const T& Representative(const T& t) const
- {
+ size_t Size() const {
+ return m_set.size();
+ }
+ bool Empty() const {
+ return m_set.empty();
+ }
+
+ /// Returns an item equal to @p t. It is guaranteed that:
+ /// - representative(a) equals representative(b) iff a is equivalent to b;
+ /// - representative(a) is equivalent to a.
+ const T& Representative(const T& t) const
+ {
auto it = m_inv.find(t);
- if (it != m_inv.end())
- return it->second;
- else
- return DefaultValue<T>();
- }
-
- bool Contains(const T& t) const
- {
- return m_inv.find(t) != m_inv.end();
- }
-
- /// Returns an index of set containing @p t. It is guaranteed that:
- /// - index(a) equals index(b) iff a is equivalent to b;
- /// - 0 <= index(a) < size().
- size_t Index(const T& t) const
- {
+ if (it != m_inv.end())
+ return it->second;
+ else
+ return DefaultValue<T>();
+ }
+
+ bool Contains(const T& t) const
+ {
+ return m_inv.find(t) != m_inv.end();
+ }
+
+ /// Returns an index of set containing @p t. It is guaranteed that:
+ /// - index(a) equals index(b) iff a is equivalent to b;
+ /// - 0 <= index(a) < size().
+ size_t Index(const T& t) const
+ {
auto it = m_inv.find(t);
- if (it == m_inv.end())
- throw Error("Partition::index(): attempted to obtain an index of nonexistent item");
+ if (it == m_inv.end())
+ throw Error("Partition::index(): attempted to obtain an index of nonexistent item");
auto it2 = m_set.find(it->second);
Y_ASSERT(it2 != m_set.end());
- return it2->second.first;
- }
- /// Returns the whole equivalence class of @p t (i.e. item @p i
- /// is returned iff representative(i) == representative(t)).
+ return it2->second.first;
+ }
+ /// Returns the whole equivalence class of @p t (i.e. item @p i
+ /// is returned iff representative(i) == representative(t)).
const TVector<T>& Klass(const T& t) const
- {
+ {
auto it = m_inv.find(t);
- if (it == m_inv.end())
- throw Error("Partition::index(): attempted to obtain an index of nonexistent item");
+ if (it == m_inv.end())
+ throw Error("Partition::index(): attempted to obtain an index of nonexistent item");
auto it2 = m_set.find(it->second);
Y_ASSERT(it2 != m_set.end());
- return it2->second.second;
- }
-
- bool operator == (const Partition& rhs) const { return m_set == rhs.m_set; }
- bool operator != (const Partition& rhs) const { return !(*this == rhs); }
-
- /// Splits the current sets into smaller ones, using given equivalence relation.
- /// Requires that the given relation imply the previous one (set either in the ctor or
- /// in preceding calls to split()), but performs faster.
- /// Replaces previous relation with given one.
- void Split(const Eq& eq)
- {
- m_eq = eq;
-
+ return it2->second.second;
+ }
+
+ bool operator == (const Partition& rhs) const { return m_set == rhs.m_set; }
+ bool operator != (const Partition& rhs) const { return !(*this == rhs); }
+
+ /// Splits the current sets into smaller ones, using given equivalence relation.
+ /// Requires that the given relation imply the previous one (set either in the ctor or
+ /// in preceding calls to split()), but performs faster.
+ /// Replaces previous relation with given one.
+ void Split(const Eq& eq)
+ {
+ m_eq = eq;
+
for (auto&& element : m_set)
if (element.second.second.size() > 1) {
TVector<T>& v = element.second.second;
auto bound = std::partition(v.begin(), v.end(), std::bind2nd(m_eq, v[0]));
- if (bound == v.end())
- continue;
-
- Set delta;
+ if (bound == v.end())
+ continue;
+
+ Set delta;
for (auto it = bound, ie = v.end(); it != ie; ++it)
- DoAppend(delta, *it);
-
- v.erase(bound, v.end());
- m_set.insert(delta.begin(), delta.end());
- }
- }
-
-private:
- Eq m_eq;
- Set m_set;
+ DoAppend(delta, *it);
+
+ v.erase(bound, v.end());
+ m_set.insert(delta.begin(), delta.end());
+ }
+ }
+
+private:
+ Eq m_eq;
+ Set m_set;
TMap<T, T> m_inv;
- size_t m_maxidx;
-
- void DoAppend(Set& set, const T& t)
- {
+ size_t m_maxidx;
+
+ void DoAppend(Set& set, const T& t)
+ {
auto it = set.begin();
auto end = set.end();
- for (; it != end; ++it)
- if (m_eq(it->first, t)) {
- it->second.second.push_back(t);
- m_inv[t] = it->first;
- break;
- }
-
- if (it == end) {
- // Begin new set
+ for (; it != end; ++it)
+ if (m_eq(it->first, t)) {
+ it->second.second.push_back(t);
+ m_inv[t] = it->first;
+ break;
+ }
+
+ if (it == end) {
+ // Begin new set
TVector<T> v(1, t);
- set.insert(ymake_pair(t, ymake_pair(m_maxidx++, v)));
- m_inv[t] = t;
- }
- }
-};
-
-// Mainly for debugging
-template<class T, class Eq>
-yostream& operator << (yostream& stream, const Partition<T, Eq>& partition)
-{
- stream << "Partition {\n";
+ set.insert(ymake_pair(t, ymake_pair(m_maxidx++, v)));
+ m_inv[t] = t;
+ }
+ }
+};
+
+// Mainly for debugging
+template<class T, class Eq>
+yostream& operator << (yostream& stream, const Partition<T, Eq>& partition)
+{
+ stream << "Partition {\n";
for (auto&& partitionElement : partition) {
stream << " Class " << partitionElement.second.first << " \"" << partitionElement.first << "\" { ";
- bool first = false;
+ bool first = false;
for (auto&& element : partitionElement.second.second) {
- if (first)
- stream << ", ";
- else
- first = true;
+ if (first)
+ stream << ", ";
+ else
+ first = true;
stream << element;
- }
- stream << " }\n";
- }
- stream << "}";
- return stream;
-}
-
-}
-
-
-#endif
+ }
+ stream << " }\n";
+ }
+ stream << "}";
+ return stream;
+}
+
+}
+
+
+#endif
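
A small behavioural sketch of Partition<T, Eq> as defined above: items that the relation declares equivalent share one class, one representative (the first item appended to that class) and one index. SameParity is an invented relation for illustration, and pire.h is included only because it pulls in partition.h together with the stub headers it relies on.

#include <contrib/libs/pire/pire/pire.h>
#include <cstdio>

struct SameParity {
    bool operator()(int a, int b) const { return (a & 1) == (b & 1); }  // equivalence: equal parity
};

int main() {
    SameParity eq;
    Pire::Partition<int, SameParity> p(eq);
    for (int i = 0; i < 6; ++i)
        p.Append(i);                                  // 0,2,4 form one class; 1,3,5 form another

    std::printf("classes: %u\n", unsigned(p.Size()));                         // 2
    std::printf("rep(4) = %d, index(4) = %u\n",
                p.Representative(4), unsigned(p.Index(4)));                   // 0 and 0: the class of the evens
    std::printf("1 and 5 equivalent: %d\n", p.Index(1) == p.Index(5));        // 1
}
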
diff --git a/contrib/libs/pire/pire/pire.h b/contrib/libs/pire/pire/pire.h
index d4d3acd92dd..12eb84ccb6d 100644
--- a/contrib/libs/pire/pire/pire.h
+++ b/contrib/libs/pire/pire/pire.h
@@ -1,38 +1,38 @@
-/*
- * pire.h -- a single include file for end-users
+/*
+ * pire.h -- a single include file for end-users
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_PIRE_H
-#define PIRE_PIRE_H
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_PIRE_H
+#define PIRE_PIRE_H
+
#include <contrib/libs/pire/pire/scanners/multi.h>
#include <contrib/libs/pire/pire/scanners/half_final.h>
#include <contrib/libs/pire/pire/scanners/simple.h>
#include <contrib/libs/pire/pire/scanners/slow.h>
#include <contrib/libs/pire/pire/scanners/pair.h>
-
-#include "re_lexer.h"
-#include "fsm.h"
-#include "encoding.h"
-#include "run.h"
-
-#endif
+
+#include "re_lexer.h"
+#include "fsm.h"
+#include "encoding.h"
+#include "run.h"
+
+#endif
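
A minimal end-user sketch of what this single include provides: lex a pattern, build an Fsm, compile a Scanner and run it over a buffer. The Begin()/Run()/End() chain follows the library's runner idiom from run.h; the pointer-plus-length Run() overload and the choice of Pire::Scanner here are assumptions rather than the only option.

#include <contrib/libs/pire/pire/pire.h>
#include <cstring>

static bool Matches(const Pire::Scanner& scanner, const char* text, size_t len) {
    return Pire::Runner(scanner)
        .Begin()          // feed the implicit beginning-of-text mark
        .Run(text, len)   // scan the buffer itself
        .End();           // feed the end-of-text mark; converts to bool
}

int main() {
    Pire::Lexer lexer("(foo|bar)+");
    lexer.AddFeature(Pire::Features::CaseInsensitive());
    Pire::Fsm fsm = lexer.Parse();
    fsm.Surround();                       // allow the match anywhere in the text
    Pire::Scanner scanner(fsm);

    const char text[] = "...FooBar...";
    return Matches(scanner, text, std::strlen(text)) ? 0 : 1;   // exit code 0: it matches
}
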
diff --git a/contrib/libs/pire/pire/platform.h b/contrib/libs/pire/pire/platform.h
index c0504b7ce3d..54ded6b3879 100644
--- a/contrib/libs/pire/pire/platform.h
+++ b/contrib/libs/pire/pire/platform.h
@@ -1,47 +1,47 @@
-/*
- * platform.h -- hardware and OS specific stuff
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#ifndef PIRE_PLATFORM_H_INCLUDED
-#define PIRE_PLATFORM_H_INCLUDED
-
+/*
+ * platform.h -- hardware and OS specific stuff
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#ifndef PIRE_PLATFORM_H_INCLUDED
+#define PIRE_PLATFORM_H_INCLUDED
+
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/static_assert.h>
-
+
#ifndef PIRE_FORCED_INLINE
-#ifdef __GNUC__
+#ifdef __GNUC__
#define PIRE_FORCED_INLINE inline __attribute__((__always_inline__))
-#elif _MSC_VER
+#elif _MSC_VER
#define PIRE_FORCED_INLINE __forceinline
-#else
+#else
#define PIRE_FORCED_INLINE inline
-#endif
-#endif
-
-#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
-#define PIRE_HOT_FUNCTION __attribute__ ((hot))
-#else
-#define PIRE_HOT_FUNCTION
-#endif
-
+#endif
+#endif
+
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+#define PIRE_HOT_FUNCTION __attribute__ ((hot))
+#else
+#define PIRE_HOT_FUNCTION
+#endif
+
#ifndef PIRE_LIKELY
#ifdef __GNUC__
#define PIRE_LIKELY(x) (__builtin_expect((x), 1))
@@ -58,27 +58,27 @@
#endif
#endif
-#ifdef _MSC_VER
-#include <stdio.h>
-#include <stdarg.h>
-
-namespace Pire {
-
-#if _MSC_VER >= 1600
-#ifdef _WIN64
-typedef i64 ssize_t;
-#else
-typedef i32 ssize_t;
-#endif
-#endif
-
-inline int snprintf(char *str, size_t size, const char *format, ...)
-{
+#ifdef _MSC_VER
+#include <stdio.h>
+#include <stdarg.h>
+
+namespace Pire {
+
+#if _MSC_VER >= 1600
+#ifdef _WIN64
+typedef i64 ssize_t;
+#else
+typedef i32 ssize_t;
+#endif
+#endif
+
+inline int snprintf(char *str, size_t size, const char *format, ...)
+{
va_list argptr;
va_start(argptr, format);
int i = _vsnprintf(str, size-1, format, argptr);
va_end(argptr);
-
+
// A workaround for some bug
if (i < 0) {
str[size - 1] = '\x00';
@@ -87,14 +87,14 @@ inline int snprintf(char *str, size_t size, const char *format, ...)
str[i] = '\x00';
}
return i;
-}
-
-}
-#endif
-
-namespace Pire {
-namespace Impl {
-
+}
+
+}
+#endif
+
+namespace Pire {
+namespace Impl {
+
// A portable way to define a constant like `(size_t)0101010101010101ull' without any warnings.
template<unsigned Pos, unsigned char Byte>
struct DoGenerateConst {
@@ -112,11 +112,11 @@ struct GenerateConst {
};
-// Common implementation of mask comparison logic suitable for
-// any instruction set
-struct BasicInstructionSet {
+// Common implementation of mask comparison logic suitable for
+// any instruction set
+struct BasicInstructionSet {
typedef size_t Vector;
-
+
// Check bytes in the chunk against bytes in the mask
static inline Vector CheckBytes(Vector mask, Vector chunk)
{
@@ -125,66 +125,66 @@ struct BasicInstructionSet {
size_t mc = chunk ^ mask;
return ((mc - mask0x01) & ~mc & mask0x80);
}
-
+
static inline Vector Or(Vector mask1, Vector mask2) { return (mask1 | mask2); }
-
+
static inline bool IsAnySet(Vector mask) { return (mask != 0); }
-};
-
-}}
-
-#if defined(__SSE2__)
-#include <emmintrin.h>
-
-namespace Pire {
-namespace Impl {
-
-// SSE2-optimized mask comparison logic
-struct AvailSSE2 {
+};
+
+}}
+
+#if defined(__SSE2__)
+#include <emmintrin.h>
+
+namespace Pire {
+namespace Impl {
+
+// SSE2-optimized mask comparison logic
+struct AvailSSE2 {
typedef __m128i Vector;
-
+
static inline Vector CheckBytes(Vector mask, Vector chunk)
{
return _mm_cmpeq_epi8(mask, chunk);
}
-
+
static inline Vector Or(Vector mask1, Vector mask2)
{
return _mm_or_si128(mask1, mask2);
}
-
+
static inline bool IsAnySet(Vector mask)
{
return _mm_movemask_epi8(mask);
}
-};
-
-typedef AvailSSE2 AvailInstructionSet;
-
-inline AvailSSE2::Vector ToLittleEndian(AvailSSE2::Vector x) { return x; }
-
-}}
-
-#elif defined(__MMX__)
-#include <mmintrin.h>
-
-namespace Pire {
-namespace Impl {
-
-// MMX-optimized mask comparison logic
-struct AvailMMX {
+};
+
+typedef AvailSSE2 AvailInstructionSet;
+
+inline AvailSSE2::Vector ToLittleEndian(AvailSSE2::Vector x) { return x; }
+
+}}
+
+#elif defined(__MMX__)
+#include <mmintrin.h>
+
+namespace Pire {
+namespace Impl {
+
+// MMX-optimized mask comparison logic
+struct AvailMMX {
typedef __m64 Vector;
-
+
static inline Vector CheckBytes(Vector mask, Vector chunk)
{
return _mm_cmpeq_pi8(mask, chunk);
}
-
+
static inline Vector Or(Vector mask1, Vector mask2)
{
return _mm_or_si64(mask1, mask2);
}
-
+
static inline bool IsAnySet(Vector mask)
{
union {
@@ -194,68 +194,68 @@ struct AvailMMX {
mmxMask = mask;
return ui64Mask;
}
-};
-
-typedef AvailMMX AvailInstructionSet;
-
-inline AvailMMX::Vector ToLittleEndian(AvailMMX::Vector x) { return x; }
-
-}}
-
-#else // no SSE and MMX
-
-namespace Pire {
-namespace Impl {
-
-typedef BasicInstructionSet AvailInstructionSet;
-
-}}
-
-#endif
-
-namespace Pire {
-namespace Impl {
-
-typedef AvailInstructionSet::Vector Word;
-
-inline Word CheckBytes(Word mask, Word chunk) { return AvailInstructionSet::CheckBytes(mask, chunk); }
-
-inline Word Or(Word mask1, Word mask2) { return AvailInstructionSet::Or(mask1, mask2); }
-
-inline bool IsAnySet(Word mask) { return AvailInstructionSet::IsAnySet(mask); }
-
-// MaxSizeWord is the largest integer type supported by the platform, including
-// all possible SSE extensions that are known for this platform (even if these
-// extensions are not available at compile time).
-// It is used for alignments and save/load data structures to produce data format
-// compatible between all platforms with the same endianness and pointer size
-template <size_t Size> struct MaxWordSizeHelper;
-
-// Maximum size of SSE register is 128 bit on x86 and x86_64
-template <>
-struct MaxWordSizeHelper<16> {
+};
+
+typedef AvailMMX AvailInstructionSet;
+
+inline AvailMMX::Vector ToLittleEndian(AvailMMX::Vector x) { return x; }
+
+}}
+
+#else // no SSE and MMX
+
+namespace Pire {
+namespace Impl {
+
+typedef BasicInstructionSet AvailInstructionSet;
+
+}}
+
+#endif
+
+namespace Pire {
+namespace Impl {
+
+typedef AvailInstructionSet::Vector Word;
+
+inline Word CheckBytes(Word mask, Word chunk) { return AvailInstructionSet::CheckBytes(mask, chunk); }
+
+inline Word Or(Word mask1, Word mask2) { return AvailInstructionSet::Or(mask1, mask2); }
+
+inline bool IsAnySet(Word mask) { return AvailInstructionSet::IsAnySet(mask); }
+
+// MaxSizeWord is the largest integer type supported by the platform, including
+// all possible SSE extensions that are known for this platform (even if these
+// extensions are not available at compile time).
+// It is used for alignments and save/load data structures to produce data format
+// compatible between all platforms with the same endianness and pointer size
+template <size_t Size> struct MaxWordSizeHelper;
+
+// Maximum size of SSE register is 128 bit on x86 and x86_64
+template <>
+struct MaxWordSizeHelper<16> {
struct MaxSizeWord {
char val[16];
};
-};
-
-typedef MaxWordSizeHelper<16>::MaxSizeWord MaxSizeWord;
-
-// MaxSizeWord size should be a multiple of size_t size and a multiple of Word size
-PIRE_STATIC_ASSERT(
+};
+
+typedef MaxWordSizeHelper<16>::MaxSizeWord MaxSizeWord;
+
+// MaxSizeWord size should be a multiple of size_t size and a multiple of Word size
+PIRE_STATIC_ASSERT(
(sizeof(MaxSizeWord) % sizeof(size_t) == 0) &&
(sizeof(MaxSizeWord) % sizeof(Word) == 0));
-
-inline size_t FillSizeT(char c)
-{
+
+inline size_t FillSizeT(char c)
+{
size_t w = c;
w &= 0x0ff;
for (size_t i = 8; i != sizeof(size_t)*8; i <<= 1)
w = (w << i) | w;
return w;
-}
-
-}}
-
-#endif
-
+}
+
+}}
+
+#endif
+
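
A standalone check of the BasicInstructionSet::CheckBytes() expression above. XOR-ing a chunk with a word of repeated pattern bytes turns every matching byte into 0x00, and the classic (x - 0x01..01) & ~x & 0x80..80 trick then yields a nonzero result if and only if some byte matched (a borrow can spill an extra flag into the lane above a real match, which is why the scanners only rely on the IsAnySet()-style answer). uint64_t stands in for the size_t/Word vector type used above.

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t Fill(uint8_t c) {                  // like FillSizeT: repeat one byte across the word
    uint64_t w = c;
    for (unsigned i = 8; i != 64; i <<= 1)
        w |= w << i;
    return w;
}

static uint64_t CheckBytes(uint64_t mask, uint64_t chunk) {
    const uint64_t ones  = Fill(0x01);
    const uint64_t highs = Fill(0x80);
    uint64_t mc = chunk ^ mask;                    // bytes equal to the mask byte become 0x00
    return (mc - ones) & ~mc & highs;              // high bit raised in (at least) the zero-byte lanes
}

int main() {
    uint64_t chunk;
    std::memcpy(&chunk, "scanning", 8);            // look for 'n' inside one 8-byte chunk
    uint64_t hit = CheckBytes(Fill('n'), chunk);
    std::printf("%016llx\n", (unsigned long long)hit);   // 0x80 bytes mark the three 'n' lanes
    return hit != 0 ? 0 : 1;                       // IsAnySet(): the chunk does contain 'n'
}
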
diff --git a/contrib/libs/pire/pire/re_lexer.cpp b/contrib/libs/pire/pire/re_lexer.cpp
index c2258dd7593..132fbeb0399 100644
--- a/contrib/libs/pire/pire/re_lexer.cpp
+++ b/contrib/libs/pire/pire/re_lexer.cpp
@@ -1,28 +1,28 @@
-/*
- * re_lexer.cpp -- implementation of Lexer class
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * re_lexer.cpp -- implementation of Lexer class
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#include <ctype.h>
-#include <stdexcept>
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#include <ctype.h>
+#include <stdexcept>
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/utf8.h>
@@ -32,24 +32,24 @@
#include "re_lexer.h"
#include "re_parser.h"
#include "read_unicode.h"
-
-namespace Pire {
-
-namespace Impl {
+
+namespace Pire {
+
+namespace Impl {
int yre_parse(Pire::Lexer& lexer);
-}
-
-Term Term::Character(wchar32 c) { Term::CharacterRange cr; cr.first.insert(Term::String(1, c)); cr.second = false; return Term(TokenTypes::Letters, cr); }
-Term Term::Repetition(int lower, int upper) { return Term(TokenTypes::Count, RepetitionCount(lower, upper)); }
-Term Term::Dot() { return Term(TokenTypes::Dot, DotTag()); }
-Term Term::BeginMark() { return Term(TokenTypes::BeginMark, BeginTag()); }
-Term Term::EndMark() { return Term(TokenTypes::EndMark, EndTag()); }
-
+}
+
+Term Term::Character(wchar32 c) { Term::CharacterRange cr; cr.first.insert(Term::String(1, c)); cr.second = false; return Term(TokenTypes::Letters, cr); }
+Term Term::Repetition(int lower, int upper) { return Term(TokenTypes::Count, RepetitionCount(lower, upper)); }
+Term Term::Dot() { return Term(TokenTypes::Dot, DotTag()); }
+Term Term::BeginMark() { return Term(TokenTypes::BeginMark, BeginTag()); }
+Term Term::EndMark() { return Term(TokenTypes::EndMark, EndTag()); }
+
Lexer::~Lexer() = default;
-
-wchar32 Lexer::GetChar()
-{
+
+wchar32 Lexer::GetChar()
+{
if (m_input.empty())
return End;
else if (m_input.front() == '\\') {
@@ -64,23 +64,23 @@ wchar32 Lexer::GetChar()
m_input.pop_front();
return ch;
}
-}
-
-wchar32 Lexer::PeekChar()
-{
+}
+
+wchar32 Lexer::PeekChar()
+{
if (m_input.empty())
return End;
else
return m_input.front();
-}
-
-void Lexer::UngetChar(wchar32 c)
-{
+}
+
+void Lexer::UngetChar(wchar32 c)
+{
if (c != End)
m_input.push_front(c);
-}
-
-namespace {
+}
+
+namespace {
class CompareFeaturesByPriority: public ybinary_function<const Feature::Ptr&, const Feature::Ptr&, bool> {
public:
bool operator()(const Feature::Ptr& a, const Feature::Ptr& b) const
@@ -88,15 +88,15 @@ namespace {
return a->Priority() < b->Priority();
}
};
-}
-
+}
+
Lexer& Lexer::AddFeature(Feature::Ptr& feature)
-{
+{
feature->m_lexer = this;
m_features.insert(LowerBound(m_features.begin(), m_features.end(), feature, CompareFeaturesByPriority()), std::move(feature));
return *this;
-}
-
+}
+
Lexer& Lexer::AddFeature(Feature::Ptr&& feature)
{
feature->m_lexer = this;
@@ -104,8 +104,8 @@ Lexer& Lexer::AddFeature(Feature::Ptr&& feature)
return *this;
}
-Term Lexer::DoLex()
-{
+Term Lexer::DoLex()
+{
static const char* controls = "|().*+?^$\\";
for (;;) {
UngetChar(GetChar());
@@ -120,7 +120,7 @@ Term Lexer::DoLex()
}
}
ch = GetChar();
-
+
if (ch == '|')
return Term(TokenTypes::Or);
else if (ch == '(') {
@@ -144,15 +144,15 @@ Term Lexer::DoLex()
else
return Term::Character(ch);
}
-}
-
-Term Lexer::Lex()
-{
+}
+
+Term Lexer::Lex()
+{
Term t = DoLex();
-
+
for (auto i = m_features.rbegin(), ie = m_features.rend(); i != ie; ++i)
(*i)->Alter(t);
-
+
if (t.Value().IsA<Term::CharacterRange>()) {
const auto& chars = t.Value().As<Term::CharacterRange>();
//std::cerr << "lex: type " << t.type() << "; chars = { " << join(chars.first.begin(), chars.first.end(), ", ") << " }" << std::endl;
@@ -186,25 +186,25 @@ Term Lexer::Lex()
else if (type == TokenTypes::End)
type = 0;
return Term(type, t.Value());
-}
+}
-void Lexer::Parenthesized(Fsm& fsm)
-{
+void Lexer::Parenthesized(Fsm& fsm)
+{
for (auto i = m_features.rbegin(), ie = m_features.rend(); i != ie; ++i)
(*i)->Parenthesized(fsm);
-}
-
-wchar32 Feature::CorrectChar(wchar32 c, const char* controls)
-{
+}
+
+wchar32 Feature::CorrectChar(wchar32 c, const char* controls)
+{
bool ctrl = (strchr(controls, c & 0xFF) != 0);
if ((c & ControlMask) == Control && ctrl)
return c & ~ControlMask;
if (c <= 0xFF && ctrl)
return c | Control;
return c;
-}
-
-namespace {
+}
+
+namespace {
class EnableUnicodeSequencesImpl : public UnicodeReader {
public:
bool Accepts(wchar32 c) const {
@@ -219,7 +219,7 @@ namespace {
class CharacterRangeReader: public UnicodeReader {
public:
bool Accepts(wchar32 c) const { return c == '[' || c == (Control | '[') || c == (Control | ']'); }
-
+
Term Lex()
{
static const char* controls = "^[]-\\";
@@ -227,14 +227,14 @@ namespace {
wchar32 ch = CorrectChar(GetChar(), controls);
if (ch == '[' || ch == ']')
return Term::Character(ch);
-
+
Term::CharacterRange cs;
ch = CorrectChar(GetChar(), controls);
if (ch == (Control | '^')) {
cs.second = true;
ch = CorrectChar(GetChar(), controls);
}
-
+
bool firstUnicode;
wchar32 unicodeSymbol = 0;
@@ -281,15 +281,15 @@ namespace {
}
if (ch == End)
Error("Unexpected end of pattern");
-
+
return Term(TokenTypes::Letters, cs);
}
};
-
+
class RepetitionCountReader: public Feature {
public:
bool Accepts(wchar32 c) const { return c == '{' || c == (Control | '{') || c == (Control | '}'); }
-
+
Term Lex()
{
wchar32 ch = GetChar();
@@ -297,17 +297,17 @@ namespace {
return Term::Character(ch & ~ControlMask);
ch = GetChar();
int lower = 0, upper = 0;
-
+
if (!is_digit(ch))
Error("Wrong repetition count");
-
+
for (; is_digit(ch); ch = GetChar())
lower = lower * 10 + (ch - '0');
if (ch == '}')
return Term::Repetition(lower, lower);
else if (ch != ',')
Error("Wrong repetition count");
-
+
ch = GetChar();
if (ch == '}')
return Term::Repetition(lower, Inf);
@@ -315,13 +315,13 @@ namespace {
Error("Wrong repetition count");
for (; is_digit(ch); ch = GetChar())
upper = upper * 10 + (ch - '0');
-
+
if (ch != '}')
Error("Wrong repetition count");
return Term::Repetition(lower, upper);
}
};
-
+
class CaseInsensitiveImpl: public Feature {
public:
void Alter(Term& t)
@@ -363,30 +363,30 @@ namespace {
}
}
};
-}
-
-namespace Features {
+}
+
+namespace Features {
Feature::Ptr CaseInsensitive() { return Feature::Ptr(new CaseInsensitiveImpl); }
Feature::Ptr CharClasses();
Feature::Ptr AndNotSupport() { return Feature::Ptr(new AndNotSupportImpl); }
-};
-
-void Lexer::InstallDefaultFeatures()
-{
+};
+
+void Lexer::InstallDefaultFeatures()
+{
AddFeature(Feature::Ptr(new CharacterRangeReader));
AddFeature(Feature::Ptr(new RepetitionCountReader));
AddFeature(Features::CharClasses());
AddFeature(Feature::Ptr(new EnableUnicodeSequencesImpl));
-}
-
-Fsm Lexer::Parse()
-{
+}
+
+Fsm Lexer::Parse()
+{
if (!Impl::yre_parse(*this))
return m_retval.As<Fsm>();
else {
Error("Syntax error in regexp");
return Fsm(); // Make compiler happy
}
-}
-
-}
+}
+
+}
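
A short sketch of driving the lexer defined here together with the feature factories from the Features namespace above: AndNotSupport() enables the extra (p1)&(p2) and ~(p) operators, CaseInsensitive() folds case, and Parse() either returns an Fsm or throws Pire::Error on a syntax error. The pattern and the final Runner check are illustrative.

#include <contrib/libs/pire/pire/pire.h>
#include <cstring>

int main() {
    Pire::Lexer lexer("([a-z]+)&(~(foo))");        // any word except the literal "foo"
    lexer.AddFeature(Pire::Features::AndNotSupport());
    lexer.AddFeature(Pire::Features::CaseInsensitive());

    Pire::Fsm fsm = lexer.Parse();                 // throws Pire::Error if the pattern is malformed
    Pire::Scanner scanner(fsm);

    const char text[] = "Bar";                     // a word, and not "foo", so it should match
    bool ok = Pire::Runner(scanner).Begin().Run(text, std::strlen(text)).End();
    return ok ? 0 : 1;
}
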
diff --git a/contrib/libs/pire/pire/re_lexer.h b/contrib/libs/pire/pire/re_lexer.h
index e397a38d5c5..5591c16d34e 100644
--- a/contrib/libs/pire/pire/re_lexer.h
+++ b/contrib/libs/pire/pire/re_lexer.h
@@ -1,244 +1,244 @@
-/*
- * re_lexer.h -- definition required for parsing regexps
+/*
+ * re_lexer.h -- definition required for parsing regexps
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_RE_LEXER_H
-#define PIRE_RE_LEXER_H
-
-
-#include <vector>
-#include <stack>
-#include <set>
-#include <utility>
-#include <stdexcept>
-#include <utility>
-#include <string.h>
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_RE_LEXER_H
+#define PIRE_RE_LEXER_H
+
+
+#include <vector>
+#include <stack>
+#include <set>
+#include <utility>
+#include <stdexcept>
+#include <utility>
+#include <string.h>
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/stub/stl.h>
-
+
#include "encoding.h"
#include "any.h"
-namespace Pire {
-
-namespace Consts {
-enum { Inf = -1 };
-
-static const wchar32 Control = 0xF000;
-static const wchar32 ControlMask = 0xFF00;
-static const wchar32 End = Control | 0xFF;
-};
-
-using namespace Consts;
-
-namespace TokenTypes {
-enum {
- None = 0,
- Letters,
- Count,
- Dot,
- Open,
- Close,
- Or,
- And,
- Not,
- BeginMark,
- EndMark,
- End
-};
-}
-
-/**
-* A single terminal character in regexp pattern.
-* Consists of a type (a character, a repetition count, an opening parenthesis, etc...)
-* and optional value.
-*/
-class Term {
-public:
+namespace Pire {
+
+namespace Consts {
+enum { Inf = -1 };
+
+static const wchar32 Control = 0xF000;
+static const wchar32 ControlMask = 0xFF00;
+static const wchar32 End = Control | 0xFF;
+};
+
+using namespace Consts;
+
+namespace TokenTypes {
+enum {
+ None = 0,
+ Letters,
+ Count,
+ Dot,
+ Open,
+ Close,
+ Or,
+ And,
+ Not,
+ BeginMark,
+ EndMark,
+ End
+};
+}
+
+/**
+* A single terminal character in regexp pattern.
+* Consists of a type (a character, a repetition count, an opening parenthesis, etc...)
+* and optional value.
+*/
+class Term {
+public:
typedef TVector<wchar32> String;
typedef TSet<String> Strings;
-
- typedef ypair<int, int> RepetitionCount;
- typedef ypair<Strings, bool> CharacterRange;
-
- struct DotTag {};
- struct BeginTag {};
- struct EndTag {};
-
- Term(int type): m_type(type) {}
- template<class T> Term(int type, T t): m_type(type), m_value(t) {}
- Term(int type, const Any& value): m_type(type), m_value(value) {}
-
- static Term Character(wchar32 c);
- static Term Repetition(int lower, int upper);
- static Term Dot();
- static Term BeginMark();
- static Term EndMark();
-
- int Type() const { return m_type; }
- const Any& Value() const { return m_value; }
-private:
- int m_type;
- Any m_value;
-};
-
-class Feature;
-
-/**
-* A class performing regexp pattern parsing.
-*/
-class Lexer {
-public:
- // One-size-fits-all constructor set.
- Lexer()
- : m_encoding(&Encodings::Latin1())
- { InstallDefaultFeatures(); }
-
- explicit Lexer(const char* str)
- : m_encoding(&Encodings::Latin1())
- {
- InstallDefaultFeatures();
- Assign(str, str + strlen(str));
- }
- template<class T> explicit Lexer(const T& t)
- : m_encoding(&Encodings::Latin1())
- {
- InstallDefaultFeatures();
- Assign(t.begin(), t.end());
- }
-
- template<class Iter> Lexer(Iter begin, Iter end)
- : m_encoding(&Encodings::Latin1())
- {
- InstallDefaultFeatures();
- Assign(begin, end);
- }
- ~Lexer();
-
- template<class Iter> void Assign(Iter begin, Iter end)
- {
- m_input.clear();
- std::copy(begin, end, std::back_inserter(m_input));
- }
-
- /// The main lexer function. Extracts and returns the next term in input sequence.
- Term Lex();
- /// Installs an additional lexer feature.
+
+ typedef ypair<int, int> RepetitionCount;
+ typedef ypair<Strings, bool> CharacterRange;
+
+ struct DotTag {};
+ struct BeginTag {};
+ struct EndTag {};
+
+ Term(int type): m_type(type) {}
+ template<class T> Term(int type, T t): m_type(type), m_value(t) {}
+ Term(int type, const Any& value): m_type(type), m_value(value) {}
+
+ static Term Character(wchar32 c);
+ static Term Repetition(int lower, int upper);
+ static Term Dot();
+ static Term BeginMark();
+ static Term EndMark();
+
+ int Type() const { return m_type; }
+ const Any& Value() const { return m_value; }
+private:
+ int m_type;
+ Any m_value;
+};
+
+class Feature;
+
+/**
+* A class performing regexp pattern parsing.
+*/
+class Lexer {
+public:
+ // One-size-fits-all constructor set.
+ Lexer()
+ : m_encoding(&Encodings::Latin1())
+ { InstallDefaultFeatures(); }
+
+ explicit Lexer(const char* str)
+ : m_encoding(&Encodings::Latin1())
+ {
+ InstallDefaultFeatures();
+ Assign(str, str + strlen(str));
+ }
+ template<class T> explicit Lexer(const T& t)
+ : m_encoding(&Encodings::Latin1())
+ {
+ InstallDefaultFeatures();
+ Assign(t.begin(), t.end());
+ }
+
+ template<class Iter> Lexer(Iter begin, Iter end)
+ : m_encoding(&Encodings::Latin1())
+ {
+ InstallDefaultFeatures();
+ Assign(begin, end);
+ }
+ ~Lexer();
+
+ template<class Iter> void Assign(Iter begin, Iter end)
+ {
+ m_input.clear();
+ std::copy(begin, end, std::back_inserter(m_input));
+ }
+
+ /// The main lexer function. Extracts and returns the next term in input sequence.
+ Term Lex();
+ /// Installs an additional lexer feature.
/// We declare both lvalue and rvalue reference types to fix some linker errors.
Lexer& AddFeature(THolder<Feature>& a);
Lexer& AddFeature(THolder<Feature>&& a);
-
- const Pire::Encoding& Encoding() const { return *m_encoding; }
- Lexer& SetEncoding(const Pire::Encoding& encoding) { m_encoding = &encoding; return *this; }
+
+ const Pire::Encoding& Encoding() const { return *m_encoding; }
+ Lexer& SetEncoding(const Pire::Encoding& encoding) { m_encoding = &encoding; return *this; }
void SetError(const char* msg) { errmsg = msg; }
void SetError(ystring msg) { errmsg = msg; }
ystring& GetError() { return errmsg; }
-
- Any& Retval() { return m_retval; }
-
- Fsm Parse();
-
- void Parenthesized(Fsm& fsm);
-
-private:
- Term DoLex();
-
- wchar32 GetChar();
- wchar32 PeekChar();
- void UngetChar(wchar32 c);
-
- void Error(const char* msg) { throw Pire::Error(msg); }
-
- void InstallDefaultFeatures();
-
+
+ Any& Retval() { return m_retval; }
+
+ Fsm Parse();
+
+ void Parenthesized(Fsm& fsm);
+
+private:
+ Term DoLex();
+
+ wchar32 GetChar();
+ wchar32 PeekChar();
+ void UngetChar(wchar32 c);
+
+ void Error(const char* msg) { throw Pire::Error(msg); }
+
+ void InstallDefaultFeatures();
+
TDeque<wchar32> m_input;
- const Pire::Encoding* m_encoding;
+ const Pire::Encoding* m_encoding;
TVector<THolder<Feature>> m_features;
- Any m_retval;
+ Any m_retval;
ystring errmsg;
-
- friend class Feature;
-
- Lexer(const Lexer&);
- Lexer& operator = (const Lexer&);
-};
-
-/**
-* A basic class for Pire customization.
-* Features can be installed in the lexer and alter its behaviour.
-*/
-class Feature {
-public:
+
+ friend class Feature;
+
+ Lexer(const Lexer&);
+ Lexer& operator = (const Lexer&);
+};
+
+/**
+* A basic class for Pire customization.
+* Features can be installed in the lexer and alter its behaviour.
+*/
+class Feature {
+public:
/// Precedence of features. The lower the priority, the earlier
- /// will Lex() be called, and the later will Alter() and Parenthesized() be called.
- virtual int Priority() const { return 50; }
-
- /// Lexer will call this function to check whether the feature
- /// wants to handle the next part of the input sequence in its
- /// specific way. If it does not, the feature's Lex() will not be called.
- virtual bool Accepts(wchar32 /*c*/) const { return false; }
- /// Should eat up some part of the input sequence, handle it
- /// somehow and produce a terminal.
- virtual Term Lex() { return Term(0); }
-
- /// This function receives a shiny new terminal, and the feature
- /// has a chance to hack it somehow if it wants.
- virtual void Alter(Term&) {}
- /// This function receives a parenthesized part of a pattern, and the feature
- /// has a chance to hack it somehow if it wants (it's the way to implement
- /// those perl-style (?@#$%:..) clauses).
- virtual void Parenthesized(Fsm&) {}
-
+ /// will Lex() be called, and the later will Alter() and Parenthesized() be called.
+ virtual int Priority() const { return 50; }
+
+ /// Lexer will call this function to check whether the feature
+ /// wants to handle the next part of the input sequence in its
+ /// specific way. If it does not, the feature's Lex() will not be called.
+ virtual bool Accepts(wchar32 /*c*/) const { return false; }
+ /// Should eat up some part of the input sequence, handle it
+ /// somehow and produce a terminal.
+ virtual Term Lex() { return Term(0); }
+
+ /// This function receives a shiny new terminal, and the feature
+ /// has a chance to hack it somehow if it wants.
+ virtual void Alter(Term&) {}
+ /// This function receives a parenthesized part of a pattern, and the feature
+ /// has a chance to hack it somehow if it wants (it's the way to implement
+ /// those perl-style (?@#$%:..) clauses).
+ virtual void Parenthesized(Fsm&) {}
+
using Ptr = THolder<Feature>;
-
+
virtual ~Feature() = default;
-protected:
-
- // These functions are exposed versions of the corresponding lexer functions.
- const Pire::Encoding& Encoding() const { return m_lexer->Encoding(); }
- wchar32 GetChar() { return m_lexer->GetChar(); }
- wchar32 PeekChar() { return m_lexer->PeekChar(); }
- void UngetChar(wchar32 c) { m_lexer->UngetChar(c); }
- wchar32 CorrectChar(wchar32 c, const char* controls);
- void Error(const char* msg) { m_lexer->Error(msg); }
-
-private:
- friend class Lexer;
- Lexer* m_lexer;
-};
-
-namespace Features {
- /// Disables case sensitivity
+protected:
+
+ // These functions are exposed versions of the corresponding lexer functions.
+ const Pire::Encoding& Encoding() const { return m_lexer->Encoding(); }
+ wchar32 GetChar() { return m_lexer->GetChar(); }
+ wchar32 PeekChar() { return m_lexer->PeekChar(); }
+ void UngetChar(wchar32 c) { m_lexer->UngetChar(c); }
+ wchar32 CorrectChar(wchar32 c, const char* controls);
+ void Error(const char* msg) { m_lexer->Error(msg); }
+
+private:
+ friend class Lexer;
+ Lexer* m_lexer;
+};
+
+namespace Features {
+ /// Disables case sensitivity
Feature::Ptr CaseInsensitive();
-
- /**
- * Adds two more operations:
- * (pattern1)&(pattern2) -- matches those strings which match both /pattern1/ and /pattern2/;
- * ~(pattern) -- matches those strings which do not match /pattern/.
- */
+
+ /**
+ * Adds two more operations:
+ * (pattern1)&(pattern2) -- matches those strings which match both /pattern1/ and /pattern2/;
+ * ~(pattern) -- matches those strings which do not match /pattern/.
+ */
Feature::Ptr AndNotSupport();
-}
-
-}
-
-#endif
+}
+
+}
+
+#endif
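
A minimal custom Feature sketch against the interface above: it overrides Priority() and Alter() only, turning every Dot term produced by the lexer into a literal '.' character. The LiteralDot class and the chosen priority value are invented for illustration; stock features such as CaseInsensitive and AndNotSupport are obtained through the Features namespace instead.

#include <contrib/libs/pire/pire/pire.h>

class LiteralDot: public Pire::Feature {
public:
    int Priority() const override { return 60; }   // the base class default is 50; see the precedence note above
    void Alter(Pire::Term& t) override {
        if (t.Type() == Pire::TokenTypes::Dot)
            t = Pire::Term::Character('.');        // "." now matches only a real dot
    }
};

int main() {
    Pire::Lexer lexer("a.b");
    lexer.AddFeature(Pire::Feature::Ptr(new LiteralDot));

    Pire::Fsm fsm = lexer.Parse();
    Pire::Scanner sc(fsm);

    bool dot = Pire::Runner(sc).Begin().Run("a.b", 3).End();   // matches
    bool any = Pire::Runner(sc).Begin().Run("axb", 3).End();   // no longer matches
    return (dot && !any) ? 0 : 1;
}
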
diff --git a/contrib/libs/pire/pire/re_parser.y b/contrib/libs/pire/pire/re_parser.y
index 292c275ebd7..dbad88e2872 100644
--- a/contrib/libs/pire/pire/re_parser.y
+++ b/contrib/libs/pire/pire/re_parser.y
@@ -1,80 +1,80 @@
-%{ // -*- mode: c++ -*-
-
-/*
- * re_parser.ypp -- the main regexp parsing routine
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
+%{ // -*- mode: c++ -*-
+
+/*
+ * re_parser.ypp -- the main regexp parsing routine
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifdef _MSC_VER
-// Disable yacc warnings
-#pragma warning(disable: 4060) // switch contains no 'case' or 'default' statements
-#pragma warning(disable: 4065) // switch contains 'default' but no 'case' statements
-#pragma warning(disable: 4102) // unreferenced label 'yyerrlabl'
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifdef _MSC_VER
+// Disable yacc warnings
+#pragma warning(disable: 4060) // switch contains no 'case' or 'default' statements
+#pragma warning(disable: 4065) // switch contains 'default' but no 'case' statements
+#pragma warning(disable: 4102) // unreferenced label 'yyerrlabl'
#pragma warning(disable: 4702) // unreachable code
-#endif
-
-#ifdef __GNUC__
-#pragma GCC diagnostic ignored "-Wuninitialized" // 'yylval' may be used uninitialized
-#endif
-
-#include <stdexcept>
-
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wuninitialized" // 'yylval' may be used uninitialized
+#endif
+
+#include <stdexcept>
+
#include <contrib/libs/pire/pire/fsm.h>
#include <contrib/libs/pire/pire/re_lexer.h>
#include <contrib/libs/pire/pire/any.h>
#include <contrib/libs/pire/pire/stub/stl.h>
-
-#define YYSTYPE Any*
-#define YYSTYPE_IS_TRIVIAL 0
-
-namespace {
-
-using namespace Pire;
-using Pire::Fsm;
-using Pire::Encoding;
-
-int yylex(YYSTYPE*, Lexer&);
+
+#define YYSTYPE Any*
+#define YYSTYPE_IS_TRIVIAL 0
+
+namespace {
+
+using namespace Pire;
+using Pire::Fsm;
+using Pire::Encoding;
+
+int yylex(YYSTYPE*, Lexer&);
void yyerror(Pire::Lexer&, const char*);
-
-Fsm& ConvertToFSM(const Encoding& encoding, Any* any);
-void AppendRange(const Encoding& encoding, Fsm& a, const Term::CharacterRange& cr);
-
-%}
-
+
+Fsm& ConvertToFSM(const Encoding& encoding, Any* any);
+void AppendRange(const Encoding& encoding, Fsm& a, const Term::CharacterRange& cr);
+
+%}
+
%parse-param { Pire::Lexer& rlex }
%lex-param { Pire::Lexer& rlex }
%pure-parser
-
-// Terminal declarations
-%term YRE_LETTERS
-%term YRE_COUNT
-%term YRE_DOT
-%term YRE_AND
-%term YRE_NOT
-
+
+// Terminal declarations
+%term YRE_LETTERS
+%term YRE_COUNT
+%term YRE_DOT
+%term YRE_AND
+%term YRE_NOT
+
%destructor { delete $$; } <>
-%%
-
-regexp
+%%
+
+regexp
: alternative
{
ConvertToFSM(rlex.Encoding(), $1);
@@ -83,23 +83,23 @@ regexp
$$ = nullptr;
}
;
-
-alternative
+
+alternative
: conjunction
| alternative '|' conjunction { ConvertToFSM(rlex.Encoding(), ($$ = $1)) |= ConvertToFSM(rlex.Encoding(), $3); delete $2; delete $3; }
;
-
-conjunction
+
+conjunction
: negation
| conjunction YRE_AND negation { ConvertToFSM(rlex.Encoding(), ($$ = $1)) &= ConvertToFSM(rlex.Encoding(), $3); delete $2; delete $3; }
;
-
-negation
+
+negation
: concatenation
| YRE_NOT concatenation { ConvertToFSM(rlex.Encoding(), ($$ = $2)).Complement(); delete $1; }
;
-
-concatenation
+
+concatenation
: { $$ = new Any(Fsm()); }
| concatenation iteration
{
@@ -113,8 +113,8 @@ concatenation
delete $2;
}
;
-
-iteration
+
+iteration
: term
| term YRE_COUNT
{
@@ -122,8 +122,8 @@ iteration
$$ = new Any(orig);
Fsm& cur = $$->As<Fsm>();
const Term::RepetitionCount& repc = $2->As<Term::RepetitionCount>();
-
-
+
+
if (repc.first == 0 && repc.second == 1) {
Fsm empty;
cur |= empty;
@@ -144,19 +144,19 @@ iteration
delete $2;
}
;
-
-term
+
+term
: YRE_LETTERS
| YRE_DOT
| '^'
| '$'
| '(' alternative ')' { $$ = $2; rlex.Parenthesized($$->As<Fsm>()); delete $1; delete $3; }
;
-
-%%
-
-int yylex(YYSTYPE* lval, Pire::Lexer& rlex)
-{
+
+%%
+
+int yylex(YYSTYPE* lval, Pire::Lexer& rlex)
+{
try {
Pire::Term term = rlex.Lex();
if (!term.Value().Empty())
@@ -168,18 +168,18 @@ int yylex(YYSTYPE* lval, Pire::Lexer& rlex)
rlex.SetError(e.what());
return 0;
}
-}
-
+}
+
void yyerror(Pire::Lexer& rlex, const char* str)
-{
+{
if (rlex.GetError().length() == 0)
rlex.SetError(ystring("Regexp parse error: ").append(str));
-}
-
-void AppendRange(const Encoding& encoding, Fsm& a, const Term::CharacterRange& cr)
-{
+}
+
+void AppendRange(const Encoding& encoding, Fsm& a, const Term::CharacterRange& cr)
+{
TVector<ystring> strings;
-
+
for (auto&& i : cr.first) {
ystring s;
for (auto&& j : i) {
@@ -199,16 +199,16 @@ void AppendRange(const Encoding& encoding, Fsm& a, const Term::CharacterRange& c
a = Fsm::MakeFalse();
else
a.AppendStrings(strings);
-}
-
-Fsm& ConvertToFSM(const Encoding& encoding, Any* any)
-{
+}
+
+Fsm& ConvertToFSM(const Encoding& encoding, Any* any)
+{
if (any->IsA<Fsm>())
return any->As<Fsm>();
-
+
Any ret = Fsm();
Fsm& a = ret.As<Fsm>();
-
+
if (any->IsA<Term::DotTag>()) {
encoding.AppendDot(a);
} else if (any->IsA<Term::BeginTag>()) {
@@ -229,11 +229,11 @@ Fsm& ConvertToFSM(const Encoding& encoding, Any* any)
}
any->Swap(ret);
return a;
-}
-
-}
-
-namespace Pire {
+}
+
+}
+
+namespace Pire {
namespace Impl {
int yre_parse(Pire::Lexer& rlex)
{
@@ -244,4 +244,4 @@ namespace Pire {
return rc;
}
}
-}
+}
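The conjunction and negation rules of this grammar are what back the (pattern1)&(pattern2) and ~(pattern) operators documented for Features::AndNotSupport() in re_lexer.h. A hedged usage sketch follows; the pattern, helper name and the AddFeature()/Parse()/Compile<>() calls are assumptions drawn from the rest of the library, not from this file.

#include <contrib/libs/pire/pire/pire.h>

// Sketch: "contains foo but not bar", using the YRE_AND / YRE_NOT rules above.
bool HasFooButNotBar(TStringBuf text) {
    ystring pattern = "(.*foo.*)&~(.*bar.*)";
    Pire::Lexer lexer(pattern.begin(), pattern.end());
    lexer.AddFeature(Pire::Features::AndNotSupport()); // enables the & and ~ terminals
    auto scanner = lexer.Parse().Compile<Pire::Scanner>();
    return Pire::Runner(scanner).Begin().Run(text).End();
}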
diff --git a/contrib/libs/pire/pire/run.h b/contrib/libs/pire/pire/run.h
index a2f3a2fc8b1..f6e1ff734d4 100644
--- a/contrib/libs/pire/pire/run.h
+++ b/contrib/libs/pire/pire/run.h
@@ -1,113 +1,113 @@
-/*
- * run.h -- routines for running scanners on strings.
+/*
+ * run.h -- routines for running scanners on strings.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_RE_SCANNER_H
-#define PIRE_RE_SCANNER_H
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_RE_SCANNER_H
+#define PIRE_RE_SCANNER_H
+
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/memstreams.h>
#include <contrib/libs/pire/pire/scanners/pair.h>
#include "platform.h"
-#include "defs.h"
-
+#include "defs.h"
+
#include <string>
-namespace Pire {
-
- template<class Scanner>
- struct StDumper {
- StDumper(const Scanner& sc, typename Scanner::State st): m_sc(&sc), m_st(st) {}
- void Dump(yostream& stream) const { stream << m_sc->StateIndex(m_st) << (m_sc->Final(m_st) ? " [final]" : ""); }
- private:
- const Scanner* m_sc;
- typename Scanner::State m_st;
- };
-
- template<class Scanner> StDumper<Scanner> StDump(const Scanner& sc, typename Scanner::State st) { return StDumper<Scanner>(sc, st); }
- template<class Scanner> yostream& operator << (yostream& stream, const StDumper<Scanner>& stdump) { stdump.Dump(stream); return stream; }
-}
-
-namespace Pire {
-
-template<class Scanner>
+namespace Pire {
+
+ template<class Scanner>
+ struct StDumper {
+ StDumper(const Scanner& sc, typename Scanner::State st): m_sc(&sc), m_st(st) {}
+ void Dump(yostream& stream) const { stream << m_sc->StateIndex(m_st) << (m_sc->Final(m_st) ? " [final]" : ""); }
+ private:
+ const Scanner* m_sc;
+ typename Scanner::State m_st;
+ };
+
+ template<class Scanner> StDumper<Scanner> StDump(const Scanner& sc, typename Scanner::State st) { return StDumper<Scanner>(sc, st); }
+ template<class Scanner> yostream& operator << (yostream& stream, const StDumper<Scanner>& stdump) { stdump.Dump(stream); return stream; }
+}
+
+namespace Pire {
+
+template<class Scanner>
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
-void Step(const Scanner& scanner, typename Scanner::State& state, Char ch)
-{
+void Step(const Scanner& scanner, typename Scanner::State& state, Char ch)
+{
Y_ASSERT(ch < MaxCharUnaligned);
- typename Scanner::Action a = scanner.Next(state, ch);
- scanner.TakeAction(state, a);
-}
-
-namespace Impl {
-
- enum Action { Continue, Stop };
-
- template<class Scanner>
- struct RunPred {
+ typename Scanner::Action a = scanner.Next(state, ch);
+ scanner.TakeAction(state, a);
+}
+
+namespace Impl {
+
+ enum Action { Continue, Stop };
+
+ template<class Scanner>
+ struct RunPred {
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action operator()(const Scanner&, const typename Scanner::State&, const char*) const { return Continue; }
- };
+ Action operator()(const Scanner&, const typename Scanner::State&, const char*) const { return Continue; }
+ };
- template<class Scanner>
- struct ShortestPrefixPred {
- explicit ShortestPrefixPred(const char*& pos): m_pos(&pos) {}
-
+ template<class Scanner>
+ struct ShortestPrefixPred {
+ explicit ShortestPrefixPred(const char*& pos): m_pos(&pos) {}
+
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action operator()(const Scanner& sc, const typename Scanner::State& st, const char* pos) const
- {
- if (sc.Final(st)) {
- *m_pos = pos;
- return Stop;
- } else {
+ Action operator()(const Scanner& sc, const typename Scanner::State& st, const char* pos) const
+ {
+ if (sc.Final(st)) {
+ *m_pos = pos;
+ return Stop;
+ } else {
return (sc.Dead(st) ? Stop : Continue);
- }
- }
- private:
- const char** m_pos;
- };
+ }
+ }
+ private:
+ const char** m_pos;
+ };
- template<class Scanner>
- struct LongestPrefixPred {
- explicit LongestPrefixPred(const char*& pos): m_pos(&pos) {}
+ template<class Scanner>
+ struct LongestPrefixPred {
+ explicit LongestPrefixPred(const char*& pos): m_pos(&pos) {}
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action operator()(const Scanner& sc, const typename Scanner::State& st, const char* pos) const
- {
- if (sc.Final(st))
- *m_pos = pos;
- return (sc.Dead(st) ? Stop : Continue);
- }
- private:
- const char** m_pos;
- };
-
-}
-
-#ifndef PIRE_DEBUG
-
-namespace Impl {
-
+ Action operator()(const Scanner& sc, const typename Scanner::State& st, const char* pos) const
+ {
+ if (sc.Final(st))
+ *m_pos = pos;
+ return (sc.Dead(st) ? Stop : Continue);
+ }
+ private:
+ const char** m_pos;
+ };
+
+}
+
+#ifndef PIRE_DEBUG
+
+namespace Impl {
+
template<class Scanner, class Pred>
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
Action SafeRunChunk(const Scanner& scanner, typename Scanner::State& state, const size_t* p, size_t pos, size_t size, Pred pred)
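Step() above is the single-character primitive on which Run() and the prefix helpers are built. As a point of reference, a scanner can also be driven manually; this sketch uses only Initialize()/Step()/Dead()/Final(), all of which appear in this header, and is an illustration rather than part of the change.

#include <contrib/libs/pire/pire/pire.h>

// Sketch: incremental matching with Step(), bailing out once the state is dead.
bool MatchesIncrementally(const Pire::Scanner& sc, TStringBuf text) {
    Pire::Scanner::State st;
    sc.Initialize(st);
    for (char c : text) {
        Pire::Step(sc, st, static_cast<unsigned char>(c));
        if (sc.Dead(st))
            return false; // no further input can ever lead to a match
    }
    return sc.Final(st);
}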
@@ -128,168 +128,168 @@ namespace Impl {
return Continue;
}
- /// Effectively runs a scanner on a short data chunk, fit completely into one machine word.
- template<class Scanner, class Pred>
+ /// Efficiently runs a scanner on a short data chunk that fits completely into one machine word.
+ template<class Scanner, class Pred>
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action RunChunk(const Scanner& scanner, typename Scanner::State& state, const size_t* p, size_t pos, size_t size, Pred pred)
- {
+ Action RunChunk(const Scanner& scanner, typename Scanner::State& state, const size_t* p, size_t pos, size_t size, Pred pred)
+ {
Y_ASSERT(pos <= sizeof(size_t));
Y_ASSERT(size <= sizeof(size_t));
Y_ASSERT(pos + size <= sizeof(size_t));
-
+
if (PIRE_UNLIKELY(size == 0))
return Continue;
- size_t chunk = Impl::ToLittleEndian(*p) >> 8*pos;
- const char* ptr = (const char*) p + pos + size + 1;
-
- for (size_t i = size; i != 0; --i) {
- Step(scanner, state, chunk & 0xFF);
- if (pred(scanner, state, ptr - i) == Stop)
- return Stop;
- chunk >>= 8;
- }
- return Continue;
- }
-
- template<class Scanner>
- struct AlignedRunner {
-
- // Generic version for LongestPrefix()/ShortestPrefix() impelementations
- template<class Pred>
- static inline PIRE_HOT_FUNCTION
- Action RunAligned(const Scanner& scanner, typename Scanner::State& state, const size_t* begin, const size_t* end, Pred stop)
- {
- typename Scanner::State st = state;
- Action ret = Continue;
- for (; begin != end && (ret = RunChunk(scanner, st, begin, 0, sizeof(void*), stop)) == Continue; ++begin)
- ;
- state = st;
- return ret;
- }
-
- // A special version for Run() impelementation that skips predicate checks
- static inline PIRE_HOT_FUNCTION
- Action RunAligned(const Scanner& scanner, typename Scanner::State& state, const size_t* begin, const size_t* end, RunPred<Scanner>)
- {
- typename Scanner::State st = state;
- for (; begin != end; ++begin) {
- size_t chunk = *begin;
- for (size_t i = sizeof(chunk); i != 0; --i) {
- Step(scanner, st, chunk & 0xFF);
- chunk >>= 8;
- }
- }
- state = st;
- return Continue;
- }
- };
-
- /// The main function: runs a scanner through given memory range.
- template<class Scanner, class Pred>
+ size_t chunk = Impl::ToLittleEndian(*p) >> 8*pos;
+ const char* ptr = (const char*) p + pos + size + 1;
+
+ for (size_t i = size; i != 0; --i) {
+ Step(scanner, state, chunk & 0xFF);
+ if (pred(scanner, state, ptr - i) == Stop)
+ return Stop;
+ chunk >>= 8;
+ }
+ return Continue;
+ }
+
+ template<class Scanner>
+ struct AlignedRunner {
+
+ // Generic version for LongestPrefix()/ShortestPrefix() implementations
+ template<class Pred>
+ static inline PIRE_HOT_FUNCTION
+ Action RunAligned(const Scanner& scanner, typename Scanner::State& state, const size_t* begin, const size_t* end, Pred stop)
+ {
+ typename Scanner::State st = state;
+ Action ret = Continue;
+ for (; begin != end && (ret = RunChunk(scanner, st, begin, 0, sizeof(void*), stop)) == Continue; ++begin)
+ ;
+ state = st;
+ return ret;
+ }
+
+ // A special version for Run() implementation that skips predicate checks
+ static inline PIRE_HOT_FUNCTION
+ Action RunAligned(const Scanner& scanner, typename Scanner::State& state, const size_t* begin, const size_t* end, RunPred<Scanner>)
+ {
+ typename Scanner::State st = state;
+ for (; begin != end; ++begin) {
+ size_t chunk = *begin;
+ for (size_t i = sizeof(chunk); i != 0; --i) {
+ Step(scanner, st, chunk & 0xFF);
+ chunk >>= 8;
+ }
+ }
+ state = st;
+ return Continue;
+ }
+ };
+
+ /// The main function: runs a scanner through given memory range.
+ template<class Scanner, class Pred>
inline void DoRun(const Scanner& scanner, typename Scanner::State& st, TStringBuf str, Pred pred)
- {
-
+ {
+
const size_t* head = reinterpret_cast<const size_t*>((reinterpret_cast<uintptr_t>(str.begin())) & ~(sizeof(size_t)-1));
const size_t* tail = reinterpret_cast<const size_t*>((reinterpret_cast<uintptr_t>(str.end())) & ~(sizeof(size_t)-1));
-
+
size_t headSize = (sizeof(size_t) - (str.begin() - (const char*)head)); // The distance from @p begin to the end of the word containing @p begin
size_t tailSize = str.end() - (const char*) tail; // The distance from the beginning of the word containing @p end to the @p end
-
+
Y_ASSERT(headSize >= 1 && headSize <= sizeof(size_t));
Y_ASSERT(tailSize < sizeof(size_t));
-
- if (head == tail) {
+
+ if (head == tail) {
Impl::SafeRunChunk(scanner, st, head, sizeof(size_t) - headSize, str.end() - str.begin(), pred);
- return;
- }
-
- // st is passed by reference to this function. If we use it directly on each step the compiler will have to
- // update it in memory because of pointer aliasing assumptions. Copying it into a local var allows the
- // compiler to store it in a register. This saves some instructions and cycles
- typename Scanner::State state = st;
-
+ return;
+ }
+
+ // st is passed by reference to this function. If we use it directly on each step the compiler will have to
+ // update it in memory because of pointer aliasing assumptions. Copying it into a local var allows the
+ // compiler to store it in a register. This saves some instructions and cycles
+ typename Scanner::State state = st;
+
if (str.begin() != (const char*) head) {
if (Impl::RunChunk(scanner, state, head, sizeof(size_t) - headSize, headSize, pred) == Stop) {
- st = state;
- return;
- }
- ++head;
- }
-
- if (Impl::AlignedRunner<Scanner>::RunAligned(scanner, state, head, tail, pred) == Stop) {
- st = state;
- return;
- }
-
- if (tailSize)
+ st = state;
+ return;
+ }
+ ++head;
+ }
+
+ if (Impl::AlignedRunner<Scanner>::RunAligned(scanner, state, head, tail, pred) == Stop) {
+ st = state;
+ return;
+ }
+
+ if (tailSize)
Impl::SafeRunChunk(scanner, state, tail, 0, tailSize, pred);
-
- st = state;
- }
-
-}
-
-/// Runs two scanners through given memory range simultaneously.
-/// This is several percent faster than running them independently.
-template<class Scanner1, class Scanner2>
+
+ st = state;
+ }
+
+}
+
+/// Runs two scanners through given memory range simultaneously.
+/// This is several percent faster than running them independently.
+template<class Scanner1, class Scanner2>
inline void Run(const Scanner1& scanner1, const Scanner2& scanner2, typename Scanner1::State& state1, typename Scanner2::State& state2, TStringBuf str)
-{
- typedef ScannerPair<Scanner1, Scanner2> Scanners;
- Scanners pair(scanner1, scanner2);
- typename Scanners::State states(state1, state2);
+{
+ typedef ScannerPair<Scanner1, Scanner2> Scanners;
+ Scanners pair(scanner1, scanner2);
+ typename Scanners::State states(state1, state2);
Run(pair, states, str);
- state1 = states.first;
- state2 = states.second;
-}
-
-#else
-
-namespace Impl {
- /// A debug version of all Run() methods.
- template<class Scanner, class Pred>
- inline void DoRun(const Scanner& scanner, typename Scanner::State& state, const char* begin, const char* end, Pred pred)
- {
- Cdbg << "Running regexp on string " << ystring(begin, ymin(end - begin, static_cast<ptrdiff_t>(100u))) << Endl;
- Cdbg << "Initial state " << StDump(scanner, state) << Endl;
-
- if (pred(scanner, state, begin) == Stop) {
- Cdbg << " exiting" << Endl;
- return;
- }
-
- for (; begin != end; ++begin) {
- Step(scanner, state, (unsigned char)*begin);
- Cdbg << *begin << " => state " << StDump(scanner, state) << Endl;
- if (pred(scanner, state, begin + 1) == Stop) {
- Cdbg << " exiting" << Endl;
- return;
- }
- }
- }
-}
-
-#endif
+ state1 = states.first;
+ state2 = states.second;
+}
+
+#else
+
+namespace Impl {
+ /// A debug version of all Run() methods.
+ template<class Scanner, class Pred>
+ inline void DoRun(const Scanner& scanner, typename Scanner::State& state, const char* begin, const char* end, Pred pred)
+ {
+ Cdbg << "Running regexp on string " << ystring(begin, ymin(end - begin, static_cast<ptrdiff_t>(100u))) << Endl;
+ Cdbg << "Initial state " << StDump(scanner, state) << Endl;
+
+ if (pred(scanner, state, begin) == Stop) {
+ Cdbg << " exiting" << Endl;
+ return;
+ }
+
+ for (; begin != end; ++begin) {
+ Step(scanner, state, (unsigned char)*begin);
+ Cdbg << *begin << " => state " << StDump(scanner, state) << Endl;
+ if (pred(scanner, state, begin + 1) == Stop) {
+ Cdbg << " exiting" << Endl;
+ return;
+ }
+ }
+ }
+}
+
+#endif
-template<class Scanner>
+template<class Scanner>
void Run(const Scanner& sc, typename Scanner::State& st, TStringBuf str)
{
Impl::DoRun(sc, st, str, Impl::RunPred<Scanner>());
}
template<class Scanner>
-void Run(const Scanner& sc, typename Scanner::State& st, const char* begin, const char* end)
-{
+void Run(const Scanner& sc, typename Scanner::State& st, const char* begin, const char* end)
+{
Run(sc, st, TStringBuf(begin, end));
-}
-
+}
+
/// Returns default constructed string_view{} if there is no matching prefix
/// Returns str.substr(0, 0) if matching prefix is empty
-template<class Scanner>
+template<class Scanner>
std::string_view LongestPrefix(const Scanner& sc, std::string_view str, bool throughBeginMark = false, bool throughEndMark = false)
-{
- typename Scanner::State st;
- sc.Initialize(st);
+{
+ typename Scanner::State st;
+ sc.Initialize(st);
if (throughBeginMark)
Pire::Step(sc, st, BeginMark);
const char* pos = (sc.Final(st) ? str.data() : nullptr);
@@ -300,11 +300,11 @@ std::string_view LongestPrefix(const Scanner& sc, std::string_view str, bool thr
pos = str.data() + str.size();
}
return pos ? str.substr(0, pos - str.data()) : std::string_view{};
-}
-
-template<class Scanner>
+}
+
+template<class Scanner>
const char* LongestPrefix(const Scanner& sc, const char* begin, const char* end, bool throughBeginMark = false, bool throughEndMark = false)
-{
+{
auto prefix = LongestPrefix(sc, std::string_view(begin, end - begin), throughBeginMark, throughEndMark);
return prefix.data() + prefix.size();
}
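A hedged usage sketch for LongestPrefix(); the pattern and the expected result are illustrative, while the return conventions are the ones documented above (a default-constructed view means no matching prefix at all).

#include <string_view>
#include <contrib/libs/pire/pire/pire.h>

// Sketch: extract the longest prefix of `text` matched by the pattern "a+b".
void LongestPrefixExample() {
    ystring pattern = "a+b";
    auto sc = Pire::Lexer(pattern.begin(), pattern.end()).Parse().Compile<Pire::Scanner>();
    std::string_view text = "aaabzzz";
    std::string_view prefix = Pire::LongestPrefix(sc, text);
    // Expected here: prefix == "aaab"; prefix.data() == nullptr would mean no match at all.
    (void)prefix;
}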
@@ -314,11 +314,11 @@ const char* LongestPrefix(const Scanner& sc, const char* begin, const char* end,
template<class Scanner>
std::string_view ShortestPrefix(const Scanner& sc, std::string_view str, bool throughBeginMark = false, bool throughEndMark = false)
{
- typename Scanner::State st;
- sc.Initialize(st);
+ typename Scanner::State st;
+ sc.Initialize(st);
if (throughBeginMark)
Pire::Step(sc, st, BeginMark);
- if (sc.Final(st))
+ if (sc.Final(st))
return str.substr(0, 0);
const char* pos = nullptr;
Impl::DoRun(sc, st, str, Impl::ShortestPrefixPred<Scanner>(pos));
@@ -328,8 +328,8 @@ std::string_view ShortestPrefix(const Scanner& sc, std::string_view str, bool th
pos = str.data() + str.size();
}
return pos ? str.substr(0, pos - str.data()) : std::string_view{};
-}
-
+}
+
template<class Scanner>
const char* ShortestPrefix(const Scanner& sc, const char* begin, const char* end, bool throughBeginMark = false, bool throughEndMark = false)
{
@@ -338,30 +338,30 @@ const char* ShortestPrefix(const Scanner& sc, const char* begin, const char* end
}
-/// The same as above, but scans string in reverse direction
-/// (consider using Fsm::Reverse() for using in this function).
+/// The same as above, but scans string in reverse direction
+/// (consider using Fsm::Reverse() for using in this function).
/// Returns default constructed string_view{} if there is no matching suffix
/// Returns str.substr(str.size(), 0) if matching suffix is empty
-template<class Scanner>
+template<class Scanner>
inline std::string_view LongestSuffix(const Scanner& scanner, std::string_view str, bool throughEndMark = false, bool throughBeginMark = false)
-{
- typename Scanner::State state;
- scanner.Initialize(state);
+{
+ typename Scanner::State state;
+ scanner.Initialize(state);
if (throughEndMark)
Step(scanner, state, EndMark);
PIRE_IFDEBUG(Cdbg << "Running LongestSuffix on string " << ystring(str) << Endl);
- PIRE_IFDEBUG(Cdbg << "Initial state " << StDump(scanner, state) << Endl);
-
+ PIRE_IFDEBUG(Cdbg << "Initial state " << StDump(scanner, state) << Endl);
+
std::string_view suffix{};
auto begin = str.data() + str.size();
while (begin != str.data() && !scanner.Dead(state)) {
- if (scanner.Final(state))
+ if (scanner.Final(state))
suffix = str.substr(begin - str.data());
--begin;
Step(scanner, state, (unsigned char)*begin);
PIRE_IFDEBUG(Cdbg << *begin << " => state " << StDump(scanner, state) << Endl);
- }
- if (scanner.Final(state))
+ }
+ if (scanner.Final(state))
suffix = str.substr(begin - str.data());
if (throughBeginMark) {
Step(scanner, state, BeginMark);
@@ -369,97 +369,97 @@ inline std::string_view LongestSuffix(const Scanner& scanner, std::string_view s
suffix = str.substr(begin - str.data());
}
return suffix;
-}
-
+}
+
template<class Scanner>
inline const char* LongestSuffix(const Scanner& scanner, const char* rbegin, const char* rend, bool throughEndMark = false, bool throughBeginMark = false) {
auto suffix = LongestSuffix(scanner, std::string_view(rend + 1, rbegin - rend), throughEndMark, throughBeginMark);
return suffix.data() ? suffix.data() - 1 : nullptr;
}
-/// The same as above, but scans string in reverse direction
+/// The same as above, but scans string in reverse direction
/// Returns default constructed string_view{} if there is no matching suffix
/// Returns str.substr(str.size(), 0) if matching suffix is empty
-template<class Scanner>
+template<class Scanner>
inline std::string_view ShortestSuffix(const Scanner& scanner, std::string_view str, bool throughEndMark = false, bool throughBeginMark = false)
-{
+{
auto begin = str.data() + str.size();
- typename Scanner::State state;
- scanner.Initialize(state);
+ typename Scanner::State state;
+ scanner.Initialize(state);
if (throughEndMark)
Step(scanner, state, EndMark);
PIRE_IFDEBUG(Cdbg << "Running ShortestSuffix on string " << ystring(str) << Endl);
PIRE_IFDEBUG(Cdbg << "Initial state " << StDump(scanner, state) << Endl);
-
+
while (begin != str.data() && !scanner.Final(state) && !scanner.Dead(state)) {
--begin;
scanner.Next(state, (unsigned char)*begin);
PIRE_IFDEBUG(Cdbg << *begin << " => state " << StDump(scanner, state) << Endl);
- }
+ }
if (throughBeginMark)
Step(scanner, state, BeginMark);
return scanner.Final(state) ? str.substr(begin - str.data()) : std::string_view{};
-}
-
+}
+
template<class Scanner>
inline const char* ShortestSuffix(const Scanner& scanner, const char* rbegin, const char* rend, bool throughEndMark = false, bool throughBeginMark = false) {
auto suffix = ShortestSuffix(scanner, std::string_view(rend + 1, rbegin - rend), throughEndMark, throughBeginMark);
return suffix.data() ? suffix.data() - 1 : nullptr;
}
-
-
-template<class Scanner>
-class RunHelper {
-public:
- RunHelper(const Scanner& sc, typename Scanner::State st): Sc(&sc), St(st) {}
- explicit RunHelper(const Scanner& sc): Sc(&sc) { Sc->Initialize(St); }
-
- RunHelper<Scanner>& Step(Char letter) { Pire::Step(*Sc, St, letter); return *this; }
+
+
+template<class Scanner>
+class RunHelper {
+public:
+ RunHelper(const Scanner& sc, typename Scanner::State st): Sc(&sc), St(st) {}
+ explicit RunHelper(const Scanner& sc): Sc(&sc) { Sc->Initialize(St); }
+
+ RunHelper<Scanner>& Step(Char letter) { Pire::Step(*Sc, St, letter); return *this; }
RunHelper<Scanner>& Run(TStringBuf str) { Pire::Run(*Sc, St, str); return *this; }
RunHelper<Scanner>& Run(const char* begin, const char* end) { return Run(TStringBuf(begin, end)); }
RunHelper<Scanner>& Run(const char* begin, size_t size) { return Run(TStringBuf(begin, begin + size)); }
- RunHelper<Scanner>& Begin() { return Step(BeginMark); }
- RunHelper<Scanner>& End() { return Step(EndMark); }
-
- const typename Scanner::State& State() const { return St; }
- struct Tag {};
- operator const Tag*() const { return Sc->Final(St) ? (const Tag*) this : 0; }
- bool operator ! () const { return !Sc->Final(St); }
-
-private:
- const Scanner* Sc;
- typename Scanner::State St;
-};
-
-template<class Scanner>
-RunHelper<Scanner> Runner(const Scanner& sc) { return RunHelper<Scanner>(sc); }
-
-template<class Scanner>
-RunHelper<Scanner> Runner(const Scanner& sc, typename Scanner::State st) { return RunHelper<Scanner>(sc, st); }
-
-
-/// Provided for testing purposes and convinience
-template<class Scanner>
+ RunHelper<Scanner>& Begin() { return Step(BeginMark); }
+ RunHelper<Scanner>& End() { return Step(EndMark); }
+
+ const typename Scanner::State& State() const { return St; }
+ struct Tag {};
+ operator const Tag*() const { return Sc->Final(St) ? (const Tag*) this : 0; }
+ bool operator ! () const { return !Sc->Final(St); }
+
+private:
+ const Scanner* Sc;
+ typename Scanner::State St;
+};
+
+template<class Scanner>
+RunHelper<Scanner> Runner(const Scanner& sc) { return RunHelper<Scanner>(sc); }
+
+template<class Scanner>
+RunHelper<Scanner> Runner(const Scanner& sc, typename Scanner::State st) { return RunHelper<Scanner>(sc, st); }
+
+
+/// Provided for testing purposes and convenience
+template<class Scanner>
bool Matches(const Scanner& scanner, TStringBuf str)
{
return Runner(scanner).Run(str);
}
template<class Scanner>
-bool Matches(const Scanner& scanner, const char* begin, const char* end)
-{
+bool Matches(const Scanner& scanner, const char* begin, const char* end)
+{
return Runner(scanner).Run(TStringBuf(begin, end));
-}
-
-/// Constructs an inline scanner in one statement
-template<class Scanner>
-Scanner MmappedScanner(const char* ptr, size_t size)
-{
- Scanner s;
- s.Mmap(ptr, size);
- return s;
-}
-
-}
-
-#endif
+}
+
+/// Constructs an inline scanner in one statement
+template<class Scanner>
+Scanner MmappedScanner(const char* ptr, size_t size)
+{
+ Scanner s;
+ s.Mmap(ptr, size);
+ return s;
+}
+
+}
+
+#endif
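To round off run.h, a hedged sketch of the convenience entry points defined above: RunHelper/Runner() chaining with the begin/end marks, and the paired Run() overload that drives two scanners over the same buffer in one pass. Everything used here is declared in this header; the helper name is illustrative.

#include <contrib/libs/pire/pire/pire.h>

// Sketch: typical ways to drive scanners with the helpers from this header.
void RunExamples(const Pire::Scanner& sc1, const Pire::Scanner& sc2, TStringBuf text) {
    // Whole-string check with explicit begin/end marks; Matches() above is the
    // shorthand without the marks.
    bool matches = Pire::Runner(sc1).Begin().Run(text).End();

    // Two scanners over the same text in a single pass (see the paired Run() above).
    Pire::Scanner::State st1, st2;
    sc1.Initialize(st1);
    sc2.Initialize(st2);
    Pire::Run(sc1, sc2, st1, st2, text);
    bool both = sc1.Final(st1) && sc2.Final(st2);

    (void)matches;
    (void)both;
}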
diff --git a/contrib/libs/pire/pire/scanner_io.cpp b/contrib/libs/pire/pire/scanner_io.cpp
index 22fcccf665f..3956e3c6edb 100644
--- a/contrib/libs/pire/pire/scanner_io.cpp
+++ b/contrib/libs/pire/pire/scanner_io.cpp
@@ -1,26 +1,26 @@
-/*
- * scanner_io.cpp -- scanner serialization and deserialization
+/*
+ * scanner_io.cpp -- scanner serialization and deserialization
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/saveload.h>
#include <contrib/libs/pire/pire/scanners/common.h>
@@ -28,75 +28,75 @@
#include <contrib/libs/pire/pire/scanners/simple.h>
#include <contrib/libs/pire/pire/scanners/loaded.h>
-#include "align.h"
-
-namespace Pire {
-
-void SimpleScanner::Save(yostream* s) const
-{
+#include "align.h"
+
+namespace Pire {
+
+void SimpleScanner::Save(yostream* s) const
+{
SavePodType(s, Header(ScannerIOTypes::SimpleScanner, sizeof(m)));
- Impl::AlignSave(s, sizeof(Header));
- Locals mc = m;
- mc.initial -= reinterpret_cast<size_t>(m_transitions);
- SavePodType(s, mc);
- Impl::AlignSave(s, sizeof(mc));
- SavePodType(s, Empty());
- Impl::AlignSave(s, sizeof(Empty()));
- if (!Empty()) {
+ Impl::AlignSave(s, sizeof(Header));
+ Locals mc = m;
+ mc.initial -= reinterpret_cast<size_t>(m_transitions);
+ SavePodType(s, mc);
+ Impl::AlignSave(s, sizeof(mc));
+ SavePodType(s, Empty());
+ Impl::AlignSave(s, sizeof(Empty()));
+ if (!Empty()) {
Y_ASSERT(m_buffer);
Impl::AlignedSaveArray(s, m_buffer.Get(), BufSize());
- }
-}
-
-void SimpleScanner::Load(yistream* s)
-{
- SimpleScanner sc;
+ }
+}
+
+void SimpleScanner::Load(yistream* s)
+{
+ SimpleScanner sc;
Impl::ValidateHeader(s, ScannerIOTypes::SimpleScanner, sizeof(sc.m));
- LoadPodType(s, sc.m);
- Impl::AlignLoad(s, sizeof(sc.m));
- bool empty;
- LoadPodType(s, empty);
- Impl::AlignLoad(s, sizeof(empty));
- if (empty) {
- sc.Alias(Null());
- } else {
+ LoadPodType(s, sc.m);
+ Impl::AlignLoad(s, sizeof(sc.m));
+ bool empty;
+ LoadPodType(s, empty);
+ Impl::AlignLoad(s, sizeof(empty));
+ if (empty) {
+ sc.Alias(Null());
+ } else {
sc.m_buffer = BufferType(new char[sc.BufSize()]);
Impl::AlignedLoadArray(s, sc.m_buffer.Get(), sc.BufSize());
sc.Markup(sc.m_buffer.Get());
- sc.m.initial += reinterpret_cast<size_t>(sc.m_transitions);
- }
- Swap(sc);
-}
-
-void SlowScanner::Save(yostream* s) const
-{
+ sc.m.initial += reinterpret_cast<size_t>(sc.m_transitions);
+ }
+ Swap(sc);
+}
+
+void SlowScanner::Save(yostream* s) const
+{
SavePodType(s, Header(ScannerIOTypes::SlowScanner, sizeof(m)));
- Impl::AlignSave(s, sizeof(Header));
- SavePodType(s, m);
- Impl::AlignSave(s, sizeof(m));
- SavePodType(s, Empty());
- Impl::AlignSave(s, sizeof(Empty()));
- if (!Empty()) {
+ Impl::AlignSave(s, sizeof(Header));
+ SavePodType(s, m);
+ Impl::AlignSave(s, sizeof(m));
+ SavePodType(s, Empty());
+ Impl::AlignSave(s, sizeof(Empty()));
+ if (!Empty()) {
Y_ASSERT(!m_vec.empty());
- Impl::AlignedSaveArray(s, m_letters, MaxChar);
- Impl::AlignedSaveArray(s, m_finals, m.statesCount);
-
- size_t c = 0;
- SavePodType<size_t>(s, 0);
+ Impl::AlignedSaveArray(s, m_letters, MaxChar);
+ Impl::AlignedSaveArray(s, m_finals, m.statesCount);
+
+ size_t c = 0;
+ SavePodType<size_t>(s, 0);
for (auto&& i : m_vec) {
size_t n = c + i.size();
- SavePodType(s, n);
- c = n;
- }
- Impl::AlignSave(s, (m_vec.size() + 1) * sizeof(size_t));
-
- size_t size = 0;
+ SavePodType(s, n);
+ c = n;
+ }
+ Impl::AlignSave(s, (m_vec.size() + 1) * sizeof(size_t));
+
+ size_t size = 0;
for (auto&& i : m_vec)
if (!i.empty()) {
SavePodArray(s, &(i)[0], i.size());
size += sizeof(unsigned) * i.size();
- }
- Impl::AlignSave(s, size);
+ }
+ Impl::AlignSave(s, size);
if (need_actions) {
size_t pos = 0;
for (TVector< TVector< Action > >::const_iterator i = m_actionsvec.begin(), ie = m_actionsvec.end(); i != ie; ++i)
@@ -106,55 +106,55 @@ void SlowScanner::Save(yostream* s) const
}
Impl::AlignSave(s, pos);
}
- }
-}
-
-void SlowScanner::Load(yistream* s)
-{
- SlowScanner sc;
+ }
+}
+
+void SlowScanner::Load(yistream* s)
+{
+ SlowScanner sc;
Impl::ValidateHeader(s, ScannerIOTypes::SlowScanner, sizeof(sc.m));
- LoadPodType(s, sc.m);
- Impl::AlignLoad(s, sizeof(sc.m));
- bool empty;
- LoadPodType(s, empty);
- Impl::AlignLoad(s, sizeof(empty));
+ LoadPodType(s, sc.m);
+ Impl::AlignLoad(s, sizeof(sc.m));
+ bool empty;
+ LoadPodType(s, empty);
+ Impl::AlignLoad(s, sizeof(empty));
sc.need_actions = need_actions;
- if (empty) {
- sc.Alias(Null());
- } else {
- sc.m_vec.resize(sc.m.lettersCount * sc.m.statesCount);
+ if (empty) {
+ sc.Alias(Null());
+ } else {
+ sc.m_vec.resize(sc.m.lettersCount * sc.m.statesCount);
if (sc.need_actions)
sc.m_actionsvec.resize(sc.m.lettersCount * sc.m.statesCount);
- sc.m_vecptr = &sc.m_vec;
-
- sc.alloc(sc.m_letters, MaxChar);
- Impl::AlignedLoadArray(s, sc.m_letters, MaxChar);
-
- sc.alloc(sc.m_finals, sc.m.statesCount);
- Impl::AlignedLoadArray(s, sc.m_finals, sc.m.statesCount);
-
- size_t c;
- LoadPodType(s, c);
+ sc.m_vecptr = &sc.m_vec;
+
+ sc.alloc(sc.m_letters, MaxChar);
+ Impl::AlignedLoadArray(s, sc.m_letters, MaxChar);
+
+ sc.alloc(sc.m_finals, sc.m.statesCount);
+ Impl::AlignedLoadArray(s, sc.m_finals, sc.m.statesCount);
+
+ size_t c;
+ LoadPodType(s, c);
auto act = sc.m_actionsvec.begin();
for (auto&& i : sc.m_vec) {
- size_t n;
- LoadPodType(s, n);
+ size_t n;
+ LoadPodType(s, n);
i.resize(n - c);
if (sc.need_actions) {
act->resize(n - c);
++act;
}
- c = n;
- }
- Impl::AlignLoad(s, (m_vec.size() + 1) * sizeof(size_t));
-
- size_t size = 0;
+ c = n;
+ }
+ Impl::AlignLoad(s, (m_vec.size() + 1) * sizeof(size_t));
+
+ size_t size = 0;
for (auto&& i : sc.m_vec)
if (!i.empty()) {
LoadPodArray(s, &(i)[0], i.size());
size += sizeof(unsigned) * i.size();
- }
- Impl::AlignLoad(s, size);
+ }
+ Impl::AlignLoad(s, size);
size_t actSize = 0;
if (sc.need_actions) {
for (auto&& i : sc.m_actionsvec) {
@@ -165,53 +165,53 @@ void SlowScanner::Load(yistream* s)
}
Impl::AlignLoad(s, actSize);
}
- }
- Swap(sc);
-}
-
+ }
+ Swap(sc);
+}
+
void LoadedScanner::Save(yostream* s) const {
Save(s, ScannerIOTypes::LoadedScanner);
}
void LoadedScanner::Save(yostream* s, ui32 type) const
-{
+{
Y_ASSERT(type == ScannerIOTypes::LoadedScanner || type == ScannerIOTypes::NoGlueLimitCountingScanner);
SavePodType(s, Header(type, sizeof(m)));
- Impl::AlignSave(s, sizeof(Header));
- Locals mc = m;
- mc.initial -= reinterpret_cast<size_t>(m_jumps);
- SavePodType(s, mc);
- Impl::AlignSave(s, sizeof(mc));
-
- Impl::AlignedSaveArray(s, m_letters, MaxChar);
- Impl::AlignedSaveArray(s, m_jumps, m.statesCount * m.lettersCount);
- Impl::AlignedSaveArray(s, m_tags, m.statesCount);
-}
-
+ Impl::AlignSave(s, sizeof(Header));
+ Locals mc = m;
+ mc.initial -= reinterpret_cast<size_t>(m_jumps);
+ SavePodType(s, mc);
+ Impl::AlignSave(s, sizeof(mc));
+
+ Impl::AlignedSaveArray(s, m_letters, MaxChar);
+ Impl::AlignedSaveArray(s, m_jumps, m.statesCount * m.lettersCount);
+ Impl::AlignedSaveArray(s, m_tags, m.statesCount);
+}
+
void LoadedScanner::Load(yistream* s) {
Load(s, nullptr);
}
void LoadedScanner::Load(yistream* s, ui32* type)
-{
- LoadedScanner sc;
+{
+ LoadedScanner sc;
Header header = Impl::ValidateHeader(s, ScannerIOTypes::LoadedScanner, sizeof(sc.m));
if (type) {
*type = header.Type;
}
- LoadPodType(s, sc.m);
- Impl::AlignLoad(s, sizeof(sc.m));
+ LoadPodType(s, sc.m);
+ Impl::AlignLoad(s, sizeof(sc.m));
sc.m_buffer = BufferType(new char[sc.BufSize()]);
sc.Markup(sc.m_buffer.Get());
- Impl::AlignedLoadArray(s, sc.m_letters, MaxChar);
- Impl::AlignedLoadArray(s, sc.m_jumps, sc.m.statesCount * sc.m.lettersCount);
+ Impl::AlignedLoadArray(s, sc.m_letters, MaxChar);
+ Impl::AlignedLoadArray(s, sc.m_jumps, sc.m.statesCount * sc.m.lettersCount);
if (header.Version == Header::RE_VERSION_WITH_MACTIONS) {
TVector<Action> actions(sc.m.statesCount * sc.m.lettersCount);
Impl::AlignedLoadArray(s, actions.data(), actions.size());
}
- Impl::AlignedLoadArray(s, sc.m_tags, sc.m.statesCount);
- sc.m.initial += reinterpret_cast<size_t>(sc.m_jumps);
- Swap(sc);
-}
-
-}
+ Impl::AlignedLoadArray(s, sc.m_tags, sc.m.statesCount);
+ sc.m.initial += reinterpret_cast<size_t>(sc.m_jumps);
+ Swap(sc);
+}
+
+}
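A hedged round-trip sketch for the Save()/Load() pairs defined in this file. It assumes yostream/yistream resolve to std::ostream/std::istream-compatible types (as in upstream Pire's stub/stl.h); if this build maps them to other stream types, substitute the matching stream class.

#include <sstream>
#include <contrib/libs/pire/pire/pire.h>

// Sketch: serialize a scanner and read it back through the routines above.
void SaveAndReload(const Pire::SlowScanner& original, Pire::SlowScanner& reloaded) {
    std::stringstream buffer;
    original.Save(&buffer);  // writes the versioned Header, then the aligned tables
    reloaded.Load(&buffer);  // validates the Header and rebuilds the in-memory layout
}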
diff --git a/contrib/libs/pire/pire/scanners/common.h b/contrib/libs/pire/pire/scanners/common.h
index 4d03c1e4bcb..de5ea0af7ba 100644
--- a/contrib/libs/pire/pire/scanners/common.h
+++ b/contrib/libs/pire/pire/scanners/common.h
@@ -1,35 +1,35 @@
-/*
- * common.h -- common declaration for Pire scanners
+/*
+ * common.h -- common declaration for Pire scanners
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#ifndef PIRE_SCANNERS_COMMON_H_INCLUDED
-#define PIRE_SCANNERS_COMMON_H_INCLUDED
-
-#include <stdlib.h>
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#ifndef PIRE_SCANNERS_COMMON_H_INCLUDED
+#define PIRE_SCANNERS_COMMON_H_INCLUDED
+
+#include <stdlib.h>
#include <contrib/libs/pire/pire/align.h>
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/defs.h>
#include <contrib/libs/pire/pire/platform.h>
-
-namespace Pire {
+
+namespace Pire {
namespace ScannerIOTypes {
enum {
NoScanner = 0,
@@ -40,84 +40,84 @@ namespace Pire {
NoGlueLimitCountingScanner = 5,
};
}
-
- struct Header {
- ui32 Magic;
- ui32 Version;
- ui32 PtrSize;
- ui32 MaxWordSize;
- ui32 Type;
- ui32 HdrSize;
-
- static const ui32 MAGIC = 0x45524950; // "PIRE" on litte-endian
+
+ struct Header {
+ ui32 Magic;
+ ui32 Version;
+ ui32 PtrSize;
+ ui32 MaxWordSize;
+ ui32 Type;
+ ui32 HdrSize;
+
+ static const ui32 MAGIC = 0x45524950; // "PIRE" on little-endian
static const ui32 RE_VERSION = 7; // Should be incremented each time when the format of serialized scanner changes
static const ui32 RE_VERSION_WITH_MACTIONS = 6; // LoadedScanner with m_actions, which is ignored
-
- explicit Header(ui32 type, size_t hdrsize)
- : Magic(MAGIC)
- , Version(RE_VERSION)
- , PtrSize(sizeof(void*))
- , MaxWordSize(sizeof(Impl::MaxSizeWord))
- , Type(type)
+
+ explicit Header(ui32 type, size_t hdrsize)
+ : Magic(MAGIC)
+ , Version(RE_VERSION)
+ , PtrSize(sizeof(void*))
+ , MaxWordSize(sizeof(Impl::MaxSizeWord))
+ , Type(type)
, HdrSize((ui32)hdrsize)
- {}
-
- void Validate(ui32 type, size_t hdrsize) const
- {
- if (Magic != MAGIC || PtrSize != sizeof(void*) || MaxWordSize != sizeof(Impl::MaxSizeWord))
- throw Error("Serialized regexp incompatible with your system");
+ {}
+
+ void Validate(ui32 type, size_t hdrsize) const
+ {
+ if (Magic != MAGIC || PtrSize != sizeof(void*) || MaxWordSize != sizeof(Impl::MaxSizeWord))
+ throw Error("Serialized regexp incompatible with your system");
if (Version != RE_VERSION && Version != RE_VERSION_WITH_MACTIONS)
- throw Error("You are trying to used an incompatible version of a serialized regexp");
+ throw Error("You are trying to used an incompatible version of a serialized regexp");
if (type != ScannerIOTypes::NoScanner && type != Type &&
!(type == ScannerIOTypes::LoadedScanner && Type == ScannerIOTypes::NoGlueLimitCountingScanner)) {
- throw Error("Serialized regexp incompatible with your system");
+ throw Error("Serialized regexp incompatible with your system");
}
if (hdrsize != 0 && HdrSize != hdrsize)
throw Error("Serialized regexp incompatible with your system");
- }
- };
-
- namespace Impl {
- inline const void* AdvancePtr(const size_t*& ptr, size_t& size, size_t delta)
- {
- ptr = (const size_t*) ((const char*) ptr + delta);
- size -= delta;
- return (const void*) ptr;
- }
-
- template<class T>
- inline void MapPtr(T*& field, size_t count, const size_t*& p, size_t& size)
- {
- if (size < count * sizeof(*field))
- throw Error("EOF reached while mapping Pire::SlowScanner");
- field = (T*) p;
- Impl::AdvancePtr(p, size, count * sizeof(*field));
- Impl::AlignPtr(p, size);
- }
-
- inline void CheckAlign(const void* ptr, size_t bound = sizeof(size_t))
- {
- if (!IsAligned(ptr, bound))
- throw Error("Tried to mmap scanner at misaligned address");
- }
-
+ }
+ };
+
+ namespace Impl {
+ inline const void* AdvancePtr(const size_t*& ptr, size_t& size, size_t delta)
+ {
+ ptr = (const size_t*) ((const char*) ptr + delta);
+ size -= delta;
+ return (const void*) ptr;
+ }
+
+ template<class T>
+ inline void MapPtr(T*& field, size_t count, const size_t*& p, size_t& size)
+ {
+ if (size < count * sizeof(*field))
+ throw Error("EOF reached while mapping Pire::SlowScanner");
+ field = (T*) p;
+ Impl::AdvancePtr(p, size, count * sizeof(*field));
+ Impl::AlignPtr(p, size);
+ }
+
+ inline void CheckAlign(const void* ptr, size_t bound = sizeof(size_t))
+ {
+ if (!IsAligned(ptr, bound))
+ throw Error("Tried to mmap scanner at misaligned address");
+ }
+
inline Header ValidateHeader(const size_t*& ptr, size_t& size, ui32 type, size_t hdrsize)
- {
- const Header* hdr;
- MapPtr(hdr, 1, ptr, size);
- hdr->Validate(type, hdrsize);
+ {
+ const Header* hdr;
+ MapPtr(hdr, 1, ptr, size);
+ hdr->Validate(type, hdrsize);
return *hdr;
- }
-
+ }
+
inline Header ValidateHeader(yistream* s, ui32 type, size_t hdrsize)
- {
+ {
Header hdr(ScannerIOTypes::NoScanner, 0);
- LoadPodType(s, hdr);
- AlignLoad(s, sizeof(hdr));
- hdr.Validate(type, hdrsize);
+ LoadPodType(s, hdr);
+ AlignLoad(s, sizeof(hdr));
+ hdr.Validate(type, hdrsize);
return hdr;
- }
- }
-}
-
-#endif
+ }
+ }
+}
+
+#endif
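A small worked example of the MAGIC constant used by the Header above: 0x45524950 is simply the bytes 'P' 'I' 'R' 'E' read as a little-endian ui32, which is why a scanner serialized with a different byte order (or pointer size, see Validate()) is rejected early. The check below is self-contained and only illustrates the encoding.

#include <cassert>

// Sketch: the Header magic, decoded byte by byte on a little-endian machine.
void CheckPireMagic() {
    const unsigned magic = 0x45524950; // ('E' << 24) | ('R' << 16) | ('I' << 8) | 'P'
    const char* bytes = reinterpret_cast<const char*>(&magic);
    assert(bytes[0] == 'P' && bytes[1] == 'I' && bytes[2] == 'R' && bytes[3] == 'E');
}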
diff --git a/contrib/libs/pire/pire/scanners/loaded.h b/contrib/libs/pire/pire/scanners/loaded.h
index 7d5d6a50d7c..120dc403b75 100644
--- a/contrib/libs/pire/pire/scanners/loaded.h
+++ b/contrib/libs/pire/pire/scanners/loaded.h
@@ -1,108 +1,108 @@
-/*
- * loaded.h -- a definition of the LoadedScanner
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * loaded.h -- a definition of the LoadedScanner
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_SCANNERS_LOADED_H
-#define PIRE_SCANNERS_LOADED_H
-
-#include <string.h>
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_SCANNERS_LOADED_H
+#define PIRE_SCANNERS_LOADED_H
+
+#include <string.h>
#include <contrib/libs/pire/pire/approx_matching.h>
#include <contrib/libs/pire/pire/fsm.h>
#include <contrib/libs/pire/pire/partition.h>
-#include "common.h"
-
-#ifdef PIRE_DEBUG
-#include <iostream>
-#endif
-
-namespace Pire {
-
-/**
-* A loaded scanner -- the deterministic scanner having actions
-* associated with states and transitions
-*
-* Not a complete scanner itself (hence abstract), this class provides
-* infrastructure for regexp-based algorithms (e.g. counts or captures),
-* supporting major part of scanner construction, (de)serialization,
-* mmap()-ing, etc.
-*
-* It is a good idea to override copy ctor, operator= and swap()
-* in subclasses to avoid mixing different scanner types in these methods.
-* Also please note that subclasses should not have any data members of thier own.
-*/
-class LoadedScanner {
-public:
- typedef ui8 Letter;
- typedef ui32 Action;
- typedef ui8 Tag;
-
- typedef size_t InternalState;
-
- union Transition {
- size_t raw; // alignment hint for compiler
- struct {
- ui32 shift;
- Action action;
- };
- };
-
- // Override in subclass, if neccessary
+#include "common.h"
+
+#ifdef PIRE_DEBUG
+#include <iostream>
+#endif
+
+namespace Pire {
+
+/**
+* A loaded scanner -- the deterministic scanner having actions
+* associated with states and transitions
+*
+* Not a complete scanner itself (hence abstract), this class provides
+* infrastructure for regexp-based algorithms (e.g. counts or captures),
+* supporting the major part of scanner construction, (de)serialization,
+* mmap()-ing, etc.
+*
+* It is a good idea to override copy ctor, operator= and swap()
+* in subclasses to avoid mixing different scanner types in these methods.
+* Also please note that subclasses should not have any data members of their own.
+*/
+class LoadedScanner {
+public:
+ typedef ui8 Letter;
+ typedef ui32 Action;
+ typedef ui8 Tag;
+
+ typedef size_t InternalState;
+
+ union Transition {
+ size_t raw; // alignment hint for compiler
+ struct {
+ ui32 shift;
+ Action action;
+ };
+ };
+
+ // Override in subclass, if necessary
enum {
- FinalFlag = 0,
- DeadFlag = 0
- };
-
+ FinalFlag = 0,
+ DeadFlag = 0
+ };
+
static const size_t MAX_RE_COUNT = 16;
protected:
- LoadedScanner() { Alias(Null()); }
+ LoadedScanner() { Alias(Null()); }
- LoadedScanner(const LoadedScanner& s): m(s.m)
- {
- if (s.m_buffer) {
+ LoadedScanner(const LoadedScanner& s): m(s.m)
+ {
+ if (s.m_buffer) {
m_buffer = BufferType(new char [BufSize()]);
memcpy(m_buffer.Get(), s.m_buffer.Get(), BufSize());
Markup(m_buffer.Get());
- m.initial = (InternalState)m_jumps + (s.m.initial - (InternalState)s.m_jumps);
- } else {
- Alias(s);
- }
- }
-
- void Swap(LoadedScanner& s)
- {
- DoSwap(m_buffer, s.m_buffer);
- DoSwap(m.statesCount, s.m.statesCount);
- DoSwap(m.lettersCount, s.m.lettersCount);
- DoSwap(m.regexpsCount, s.m.regexpsCount);
- DoSwap(m.initial, s.m.initial);
- DoSwap(m_letters, s.m_letters);
- DoSwap(m_jumps, s.m_jumps);
- DoSwap(m_tags, s.m_tags);
- }
-
- LoadedScanner& operator = (const LoadedScanner& s) { LoadedScanner(s).Swap(*this); return *this; }
+ m.initial = (InternalState)m_jumps + (s.m.initial - (InternalState)s.m_jumps);
+ } else {
+ Alias(s);
+ }
+ }
+
+ void Swap(LoadedScanner& s)
+ {
+ DoSwap(m_buffer, s.m_buffer);
+ DoSwap(m.statesCount, s.m.statesCount);
+ DoSwap(m.lettersCount, s.m.lettersCount);
+ DoSwap(m.regexpsCount, s.m.regexpsCount);
+ DoSwap(m.initial, s.m.initial);
+ DoSwap(m_letters, s.m_letters);
+ DoSwap(m_jumps, s.m_jumps);
+ DoSwap(m_tags, s.m_tags);
+ }
+
+ LoadedScanner& operator = (const LoadedScanner& s) { LoadedScanner(s).Swap(*this); return *this; }
LoadedScanner (LoadedScanner&& other) : LoadedScanner() {
Swap(other);
}
@@ -110,14 +110,14 @@ protected:
Swap(other);
return *this;
}
-
-public:
- size_t Size() const { return m.statesCount; }
-
- bool Empty() const { return m_jumps == Null().m_jumps; }
-
- size_t RegexpsCount() const { return Empty() ? 0 : m.regexpsCount; }
-
+
+public:
+ size_t Size() const { return m.statesCount; }
+
+ bool Empty() const { return m_jumps == Null().m_jumps; }
+
+ size_t RegexpsCount() const { return Empty() ? 0 : m.regexpsCount; }
+
size_t LettersCount() const { return m.lettersCount; }
const void* Mmap(const void* ptr, size_t size) {
@@ -125,93 +125,93 @@ public:
}
const void* Mmap(const void* ptr, size_t size, ui32* type)
- {
- Impl::CheckAlign(ptr);
- LoadedScanner s;
- const size_t* p = reinterpret_cast<const size_t*>(ptr);
+ {
+ Impl::CheckAlign(ptr);
+ LoadedScanner s;
+ const size_t* p = reinterpret_cast<const size_t*>(ptr);
Header header = Impl::ValidateHeader(p, size, ScannerIOTypes::LoadedScanner, sizeof(s.m));
if (type) {
*type = header.Type;
}
-
- Locals* locals;
- Impl::MapPtr(locals, 1, p, size);
- memcpy(&s.m, locals, sizeof(s.m));
- Impl::MapPtr(s.m_letters, MaxChar, p, size);
- Impl::MapPtr(s.m_jumps, s.m.statesCount * s.m.lettersCount, p, size);
+ Locals* locals;
+ Impl::MapPtr(locals, 1, p, size);
+ memcpy(&s.m, locals, sizeof(s.m));
+
+ Impl::MapPtr(s.m_letters, MaxChar, p, size);
+ Impl::MapPtr(s.m_jumps, s.m.statesCount * s.m.lettersCount, p, size);
if (header.Version == Header::RE_VERSION_WITH_MACTIONS) {
Action* actions = 0;
Impl::MapPtr(actions, s.m.statesCount * s.m.lettersCount, p, size);
}
- Impl::MapPtr(s.m_tags, s.m.statesCount, p, size);
-
- s.m.initial += reinterpret_cast<size_t>(s.m_jumps);
- Swap(s);
-
- return (const void*) p;
- }
-
+ Impl::MapPtr(s.m_tags, s.m.statesCount, p, size);
+
+ s.m.initial += reinterpret_cast<size_t>(s.m_jumps);
+ Swap(s);
+
+ return (const void*) p;
+ }
+
void Save(yostream*, ui32 type) const;
- void Save(yostream*) const;
+ void Save(yostream*) const;
void Load(yistream*, ui32* type);
- void Load(yistream*);
-
- template<class Eq>
- void Init(size_t states, const Partition<Char, Eq>& letters, size_t startState, size_t regexpsCount = 1)
- {
- m.statesCount = states;
- m.lettersCount = letters.Size();
- m.regexpsCount = regexpsCount;
+ void Load(yistream*);
+
+ template<class Eq>
+ void Init(size_t states, const Partition<Char, Eq>& letters, size_t startState, size_t regexpsCount = 1)
+ {
+ m.statesCount = states;
+ m.lettersCount = letters.Size();
+ m.regexpsCount = regexpsCount;
m_buffer = BufferType(new char[BufSize()]);
memset(m_buffer.Get(), 0, BufSize());
Markup(m_buffer.Get());
-
- m.initial = reinterpret_cast<size_t>(m_jumps + startState * m.lettersCount);
-
- // Build letter translation table
+
+ m.initial = reinterpret_cast<size_t>(m_jumps + startState * m.lettersCount);
+
+ // Build letter translation table
Fill(m_letters, m_letters + MaxChar, 0);
for (auto&& letter : letters)
for (auto&& character : letter.second.second)
m_letters[character] = letter.second.first;
- }
-
+ }
+
size_t StateSize() const
{
return m.lettersCount * sizeof(*m_jumps);
}
-
+
size_t TransitionIndex(size_t state, Char c) const
{
return state * m.lettersCount + m_letters[c];
}
- void SetJump(size_t oldState, Char c, size_t newState, Action action)
- {
+ void SetJump(size_t oldState, Char c, size_t newState, Action action)
+ {
Y_ASSERT(m_buffer);
Y_ASSERT(oldState < m.statesCount);
Y_ASSERT(newState < m.statesCount);
-
+
size_t shift = (newState - oldState) * StateSize();
- Transition tr;
+ Transition tr;
tr.shift = (ui32)shift;
- tr.action = action;
+ tr.action = action;
m_jumps[TransitionIndex(oldState, c)] = tr;
- }
-
- Action RemapAction(Action action) { return action; }
-
+ }
+
+ Action RemapAction(Action action) { return action; }
+
void SetInitial(size_t state) { Y_ASSERT(m_buffer); m.initial = reinterpret_cast<size_t>(m_jumps + state * m.lettersCount); }
void SetTag(size_t state, Tag tag) { Y_ASSERT(m_buffer); m_tags[state] = tag; }
- void FinishBuild() {}
-
- size_t StateIdx(InternalState s) const
- {
- return (reinterpret_cast<Transition*>(s) - m_jumps) / m.lettersCount;
- }
-
- i64 SignExtend(i32 i) const { return i; }
-
+ void FinishBuild() {}
+
+ size_t StateIdx(InternalState s) const
+ {
+ return (reinterpret_cast<Transition*>(s) - m_jumps) / m.lettersCount;
+ }
+
+ i64 SignExtend(i32 i) const { return i; }
+
size_t BufSize() const
{
return
@@ -221,74 +221,74 @@ public:
;
}
-protected:
-
+protected:
+
static const Action IncrementMask = (1 << MAX_RE_COUNT) - 1;
static const Action ResetMask = IncrementMask << MAX_RE_COUNT;
-
- // TODO: maybe, put fields in private section and provide data accessors
-
- struct Locals {
- ui32 statesCount;
- ui32 lettersCount;
- ui32 regexpsCount;
- size_t initial;
- } m;
-
+
+ // TODO: maybe, put fields in private section and provide data accessors
+
+ struct Locals {
+ ui32 statesCount;
+ ui32 lettersCount;
+ ui32 regexpsCount;
+ size_t initial;
+ } m;
+
using BufferType = TArrayHolder<char>;
BufferType m_buffer;
-
- Letter* m_letters;
- Transition* m_jumps;
- Tag* m_tags;
-
- virtual ~LoadedScanner();
-
-private:
+
+ Letter* m_letters;
+ Transition* m_jumps;
+ Tag* m_tags;
+
+ virtual ~LoadedScanner();
+
+private:
explicit LoadedScanner(Fsm& fsm, size_t distance = 0)
- {
+ {
if (distance) {
fsm = CreateApproxFsm(fsm, distance);
}
- fsm.Canonize();
- Init(fsm.Size(), fsm.Letters(), fsm.Initial());
- BuildScanner(fsm, *this);
- }
-
- inline static const LoadedScanner& Null()
- {
- static const LoadedScanner n = Fsm::MakeFalse().Compile<LoadedScanner>();
- return n;
- }
-
- void Markup(void* buf)
- {
- m_letters = reinterpret_cast<Letter*>(buf);
- m_jumps = reinterpret_cast<Transition*>(m_letters + MaxChar);
+ fsm.Canonize();
+ Init(fsm.Size(), fsm.Letters(), fsm.Initial());
+ BuildScanner(fsm, *this);
+ }
+
+ inline static const LoadedScanner& Null()
+ {
+ static const LoadedScanner n = Fsm::MakeFalse().Compile<LoadedScanner>();
+ return n;
+ }
+
+ void Markup(void* buf)
+ {
+ m_letters = reinterpret_cast<Letter*>(buf);
+ m_jumps = reinterpret_cast<Transition*>(m_letters + MaxChar);
m_tags = reinterpret_cast<Tag*>(m_jumps + m.statesCount * m.lettersCount);
- }
-
- void Alias(const LoadedScanner& s)
- {
- memcpy(&m, &s.m, sizeof(m));
- m_buffer = 0;
- m_letters = s.m_letters;
- m_jumps = s.m_jumps;
- m_tags = s.m_tags;
- }
-
- template<class Eq>
- LoadedScanner(size_t states, const Partition<Char, Eq>& letters, size_t startState, size_t regexpsCount = 1)
- {
- Init(states, letters, startState, regexpsCount);
- }
-
+ }
+
+ void Alias(const LoadedScanner& s)
+ {
+ memcpy(&m, &s.m, sizeof(m));
+ m_buffer = 0;
+ m_letters = s.m_letters;
+ m_jumps = s.m_jumps;
+ m_tags = s.m_tags;
+ }
+
+ template<class Eq>
+ LoadedScanner(size_t states, const Partition<Char, Eq>& letters, size_t startState, size_t regexpsCount = 1)
+ {
+ Init(states, letters, startState, regexpsCount);
+ }
+
friend class Fsm;
-};
+};
inline LoadedScanner::~LoadedScanner() = default;
-
-}
-
-
-#endif
+
+}
+
+
+#endif
diff --git a/contrib/libs/pire/pire/scanners/multi.h b/contrib/libs/pire/pire/scanners/multi.h
index 8b6c537836c..29679e416ed 100644
--- a/contrib/libs/pire/pire/scanners/multi.h
+++ b/contrib/libs/pire/pire/scanners/multi.h
@@ -1,31 +1,31 @@
-/*
- * multi.h -- definition of the Scanner
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_SCANNERS_MULTI_H
-#define PIRE_SCANNERS_MULTI_H
-
+/*
+ * multi.h -- definition of the Scanner
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_SCANNERS_MULTI_H
+#define PIRE_SCANNERS_MULTI_H
+
#include <cstring>
-#include <string.h>
+#include <string.h>
#include <contrib/libs/pire/pire/approx_matching.h>
#include <contrib/libs/pire/pire/fsm.h>
#include <contrib/libs/pire/pire/partition.h>
@@ -38,1094 +38,1094 @@
#include <contrib/libs/pire/pire/stub/saveload.h>
#include <contrib/libs/pire/pire/stub/lexical_cast.h>
-#include "common.h"
-
-namespace Pire {
-
-namespace Impl {
-
- inline static ssize_t SignExtend(i32 i) { return i; }
- template<class T>
- class ScannerGlueCommon;
-
- template<class T>
- class ScannerGlueTask;
-
-    // This strategy makes it possible to mmap() a saved representation of a scanner. This is achieved by
- // storing shifts instead of addresses in the transition table.
- struct Relocatable {
- static const size_t Signature = 1;
- // Please note that Transition size is hardcoded as 32 bits.
- // This limits size of transition table to 4G, but compresses
- // it twice compared to 64-bit transitions. In future Transition
- // can be made a template parameter if this is a concern.
- typedef ui32 Transition;
-
- typedef const void* RetvalForMmap;
-
- static size_t Go(size_t state, Transition shift) { return state + SignExtend(shift); }
- static Transition Diff(size_t from, size_t to) { return static_cast<Transition>(to - from); }
- };
-
-    // With this strategy the transition table stores absolute addresses. This makes the scanner faster
-    // than the relocatable (mmap()-able) one.
- struct Nonrelocatable {
- static const size_t Signature = 2;
- typedef size_t Transition;
-
- // Generates a compile-time error if Scanner<Nonrelocatable>::Mmap()
- // (which is unsupported) is mistakenly called
- typedef struct {} RetvalForMmap;
-
- static size_t Go(size_t /*state*/, Transition shift) { return shift; }
- static Transition Diff(size_t /*from*/, size_t to) { return to; }
- };
-
-
+#include "common.h"
+
+namespace Pire {
+
+namespace Impl {
+
+ inline static ssize_t SignExtend(i32 i) { return i; }
+ template<class T>
+ class ScannerGlueCommon;
+
+ template<class T>
+ class ScannerGlueTask;
+
+    // This strategy makes it possible to mmap() a saved representation of a scanner. This is achieved by
+ // storing shifts instead of addresses in the transition table.
+ struct Relocatable {
+ static const size_t Signature = 1;
+ // Please note that Transition size is hardcoded as 32 bits.
+ // This limits size of transition table to 4G, but compresses
+ // it twice compared to 64-bit transitions. In future Transition
+ // can be made a template parameter if this is a concern.
+ typedef ui32 Transition;
+
+ typedef const void* RetvalForMmap;
+
+ static size_t Go(size_t state, Transition shift) { return state + SignExtend(shift); }
+ static Transition Diff(size_t from, size_t to) { return static_cast<Transition>(to - from); }
+ };
+
+    // With this strategy the transition table stores absolute addresses. This makes the scanner faster
+    // than the relocatable (mmap()-able) one.
+ struct Nonrelocatable {
+ static const size_t Signature = 2;
+ typedef size_t Transition;
+
+ // Generates a compile-time error if Scanner<Nonrelocatable>::Mmap()
+ // (which is unsupported) is mistakenly called
+ typedef struct {} RetvalForMmap;
+
+ static size_t Go(size_t /*state*/, Transition shift) { return shift; }
+ static Transition Diff(size_t /*from*/, size_t to) { return to; }
+ };
+
+
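// A minimal sketch (RoundTripTransition is a hypothetical helper) of the
// contract both strategies satisfy: the table stores Diff(from, to), and
// Go(from, stored) must yield `to` again. For Relocatable this also assumes
// the distance between the two rows fits into the 32-bit Transition, per the
// size limit noted above.
template <class Relocation>
size_t RoundTripTransition(size_t fromState, size_t toState)
{
    typename Relocation::Transition stored = Relocation::Diff(fromState, toState);
    return Relocation::Go(fromState, stored); // == toState for both strategies
}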
// Scanner implementation parametrized by
-// - transition table representation strategy
-// - strategy for fast forwarding through memory ranges
-template<class Relocation, class Shortcutting>
-class Scanner {
-protected:
- enum {
- FinalFlag = 1,
- DeadFlag = 2,
- Flags = FinalFlag | DeadFlag
- };
-
- static const size_t End = static_cast<size_t>(-1);
-
-public:
- typedef typename Relocation::Transition Transition;
-
- typedef ui16 Letter;
- typedef ui32 Action;
- typedef ui8 Tag;
-
- /// Some properties of the particular state.
- struct CommonRowHeader {
- size_t Flags; ///< Holds FinalFlag, DeadFlag, etc...
-
- CommonRowHeader(): Flags(0) {}
-
- template <class OtherCommonRowHeader>
- CommonRowHeader& operator =(const OtherCommonRowHeader& other)
- {
- Flags = other.Flags;
- return *this;
- }
- };
-
- typedef typename Shortcutting::template ExtendedRowHeader<Scanner> ScannerRowHeader;
-
- Scanner() { Alias(Null()); }
+// - transition table representation strategy
+// - strategy for fast forwarding through memory ranges
+template<class Relocation, class Shortcutting>
+class Scanner {
+protected:
+ enum {
+ FinalFlag = 1,
+ DeadFlag = 2,
+ Flags = FinalFlag | DeadFlag
+ };
+
+ static const size_t End = static_cast<size_t>(-1);
+
+public:
+ typedef typename Relocation::Transition Transition;
+
+ typedef ui16 Letter;
+ typedef ui32 Action;
+ typedef ui8 Tag;
+
+ /// Some properties of the particular state.
+ struct CommonRowHeader {
+ size_t Flags; ///< Holds FinalFlag, DeadFlag, etc...
+
+ CommonRowHeader(): Flags(0) {}
+
+ template <class OtherCommonRowHeader>
+ CommonRowHeader& operator =(const OtherCommonRowHeader& other)
+ {
+ Flags = other.Flags;
+ return *this;
+ }
+ };
+
+ typedef typename Shortcutting::template ExtendedRowHeader<Scanner> ScannerRowHeader;
+
+ Scanner() { Alias(Null()); }
explicit Scanner(Fsm& fsm, size_t distance = 0)
- {
+ {
if (distance) {
fsm = CreateApproxFsm(fsm, distance);
}
- fsm.Canonize();
- Init(fsm.Size(), fsm.Letters(), fsm.Finals().size(), fsm.Initial(), 1);
- BuildScanner(fsm, *this);
- }
-
-
- size_t Size() const { return m.statesCount; }
- bool Empty() const { return m_transitions == Null().m_transitions; }
-
- typedef size_t State;
-
- size_t RegexpsCount() const { return Empty() ? 0 : m.regexpsCount; }
- size_t LettersCount() const { return m.lettersCount; }
-
- /// Checks whether specified state is in any of the final sets
- bool Final(const State& state) const { return (Header(state).Common.Flags & FinalFlag) != 0; }
-
- /// Checks whether specified state is 'dead' (i.e. scanner will never
- /// reach any final state from current one)
- bool Dead(const State& state) const { return (Header(state).Common.Flags & DeadFlag) != 0; }
-
- ypair<const size_t*, const size_t*> AcceptedRegexps(const State& state) const
- {
- size_t idx = (state - reinterpret_cast<size_t>(m_transitions)) /
- (RowSize() * sizeof(Transition));
- const size_t* b = m_final + m_finalIndex[idx];
- const size_t* e = b;
- while (*e != End)
- ++e;
- return ymake_pair(b, e);
- }
-
- /// Returns an initial state for this scanner
- void Initialize(State& state) const { state = m.initial; }
-
+ fsm.Canonize();
+ Init(fsm.Size(), fsm.Letters(), fsm.Finals().size(), fsm.Initial(), 1);
+ BuildScanner(fsm, *this);
+ }
+
+
+ size_t Size() const { return m.statesCount; }
+ bool Empty() const { return m_transitions == Null().m_transitions; }
+
+ typedef size_t State;
+
+ size_t RegexpsCount() const { return Empty() ? 0 : m.regexpsCount; }
+ size_t LettersCount() const { return m.lettersCount; }
+
+ /// Checks whether specified state is in any of the final sets
+ bool Final(const State& state) const { return (Header(state).Common.Flags & FinalFlag) != 0; }
+
+ /// Checks whether specified state is 'dead' (i.e. scanner will never
+ /// reach any final state from current one)
+ bool Dead(const State& state) const { return (Header(state).Common.Flags & DeadFlag) != 0; }
+
+ ypair<const size_t*, const size_t*> AcceptedRegexps(const State& state) const
+ {
+ size_t idx = (state - reinterpret_cast<size_t>(m_transitions)) /
+ (RowSize() * sizeof(Transition));
+ const size_t* b = m_final + m_finalIndex[idx];
+ const size_t* e = b;
+ while (*e != End)
+ ++e;
+ return ymake_pair(b, e);
+ }
+
+ /// Returns an initial state for this scanner
+ void Initialize(State& state) const { state = m.initial; }
+
Char Translate(Char ch) const
- {
+ {
return m_letters[static_cast<size_t>(ch)];
}
/// Handles one letter
Action NextTranslated(State& state, Char letter) const
{
- PIRE_IFDEBUG(
+ PIRE_IFDEBUG(
Y_ASSERT(state >= (size_t)m_transitions);
Y_ASSERT(state < (size_t)(m_transitions + RowSize()*Size()));
Y_ASSERT((state - (size_t)m_transitions) % (RowSize()*sizeof(Transition)) == 0);
- );
-
+ );
+
state = Relocation::Go(state, reinterpret_cast<const Transition*>(state)[letter]);
-
- PIRE_IFDEBUG(
+
+ PIRE_IFDEBUG(
Y_ASSERT(state >= (size_t)m_transitions);
Y_ASSERT(state < (size_t)(m_transitions + RowSize()*Size()));
Y_ASSERT((state - (size_t)m_transitions) % (RowSize()*sizeof(Transition)) == 0);
- );
-
- return 0;
- }
-
+ );
+
+ return 0;
+ }
+
/// Handles one character
Action Next(State& state, Char c) const
{
return NextTranslated(state, Translate(c));
}
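// Hedged sketch of the plain byte-by-byte loop these primitives support
// (the higher-level run helpers do the same thing faster via RunAligned below):
template <class ScannerT>
bool MatchesExample(const ScannerT& scanner, const char* begin, const char* end)
{
    typename ScannerT::State state;
    scanner.Initialize(state);
    for (const char* p = begin; p != end && !scanner.Dead(state); ++p)
        scanner.Next(state, static_cast<unsigned char>(*p));
    return scanner.Final(state);
}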
- void TakeAction(State&, Action) const {}
-
+ void TakeAction(State&, Action) const {}
+
Scanner(const Scanner& s): m(s.m)
- {
- if (!s.m_buffer) {
- // Empty or mmap()-ed scanner
- Alias(s);
- } else {
- // In-memory scanner
- DeepCopy(s);
- }
- }
-
+ {
+ if (!s.m_buffer) {
+ // Empty or mmap()-ed scanner
+ Alias(s);
+ } else {
+ // In-memory scanner
+ DeepCopy(s);
+ }
+ }
+
Scanner(Scanner&& s)
{
Alias(Null());
Swap(s);
}
- template<class AnotherRelocation>
+ template<class AnotherRelocation>
Scanner(const Scanner<AnotherRelocation, Shortcutting>& s)
- {
- if (s.Empty())
- Alias(Null());
- else
- DeepCopy(s);
- }
-
- void Swap(Scanner& s)
- {
+ {
+ if (s.Empty())
+ Alias(Null());
+ else
+ DeepCopy(s);
+ }
+
+ void Swap(Scanner& s)
+ {
Y_ASSERT(m.relocationSignature == s.m.relocationSignature);
Y_ASSERT(m.shortcuttingSignature == s.m.shortcuttingSignature);
- DoSwap(m_buffer, s.m_buffer);
- DoSwap(m.statesCount, s.m.statesCount);
- DoSwap(m.lettersCount, s.m.lettersCount);
- DoSwap(m.regexpsCount, s.m.regexpsCount);
- DoSwap(m.initial, s.m.initial);
- DoSwap(m_letters, s.m_letters);
- DoSwap(m.finalTableSize, s.m.finalTableSize);
- DoSwap(m_final, s.m_final);
- DoSwap(m_finalIndex, s.m_finalIndex);
- DoSwap(m_transitions, s.m_transitions);
- }
-
- Scanner& operator = (const Scanner& s) { Scanner(s).Swap(*this); return *this; }
-
- /*
- * Constructs the scanner from mmap()-ed memory range, returning a pointer
- * to unconsumed part of the buffer.
- */
- typename Relocation::RetvalForMmap Mmap(const void* ptr, size_t size)
- {
- Impl::CheckAlign(ptr, sizeof(size_t));
- Scanner s;
-
- const size_t* p = reinterpret_cast<const size_t*>(ptr);
+ DoSwap(m_buffer, s.m_buffer);
+ DoSwap(m.statesCount, s.m.statesCount);
+ DoSwap(m.lettersCount, s.m.lettersCount);
+ DoSwap(m.regexpsCount, s.m.regexpsCount);
+ DoSwap(m.initial, s.m.initial);
+ DoSwap(m_letters, s.m_letters);
+ DoSwap(m.finalTableSize, s.m.finalTableSize);
+ DoSwap(m_final, s.m_final);
+ DoSwap(m_finalIndex, s.m_finalIndex);
+ DoSwap(m_transitions, s.m_transitions);
+ }
+
+ Scanner& operator = (const Scanner& s) { Scanner(s).Swap(*this); return *this; }
+
+ /*
+ * Constructs the scanner from mmap()-ed memory range, returning a pointer
+ * to unconsumed part of the buffer.
+ */
+ typename Relocation::RetvalForMmap Mmap(const void* ptr, size_t size)
+ {
+ Impl::CheckAlign(ptr, sizeof(size_t));
+ Scanner s;
+
+ const size_t* p = reinterpret_cast<const size_t*>(ptr);
Impl::ValidateHeader(p, size, ScannerIOTypes::Scanner, sizeof(m));
- if (size < sizeof(s.m))
- throw Error("EOF reached while mapping Pire::Scanner");
-
- memcpy(&s.m, p, sizeof(s.m));
- if (s.m.relocationSignature != Relocation::Signature)
- throw Error("Type mismatch while mmapping Pire::Scanner");
- Impl::AdvancePtr(p, size, sizeof(s.m));
- Impl::AlignPtr(p, size);
-
- if (Shortcutting::Signature != s.m.shortcuttingSignature)
- throw Error("This scanner has different shortcutting type");
-
- bool empty = *((const bool*) p);
- Impl::AdvancePtr(p, size, sizeof(empty));
- Impl::AlignPtr(p, size);
-
- if (empty)
- s.Alias(Null());
- else {
- if (size < s.BufSize())
- throw Error("EOF reached while mapping NPire::Scanner");
- s.Markup(const_cast<size_t*>(p));
- Impl::AdvancePtr(p, size, s.BufSize());
- s.m.initial += reinterpret_cast<size_t>(s.m_transitions);
- }
-
- Swap(s);
- return Impl::AlignPtr(p, size);
- }
-
- size_t StateIndex(State s) const
- {
- return (s - reinterpret_cast<size_t>(m_transitions)) / (RowSize() * sizeof(Transition));
- }
-
- /**
- * Agglutinates two scanners together, producing a larger scanner.
-     * Checking a string against that scanner effectively checks it against both of the agglutinated regexps
- * (detailed information about matched regexps can be obtained with AcceptedRegexps()).
- *
-     * Returns a default-constructed scanner in case of failure
- * (consult Scanner::Empty() to find out whether the operation was successful).
- */
- static Scanner Glue(const Scanner& a, const Scanner& b, size_t maxSize = 0);
-
- // Returns the size of the memory buffer used (or required) by scanner.
- size_t BufSize() const
- {
- return AlignUp(
- MaxChar * sizeof(Letter) // Letters translation table
- + m.finalTableSize * sizeof(size_t) // Final table
- + m.statesCount * sizeof(size_t) // Final index
- + RowSize() * m.statesCount * sizeof(Transition), // Transitions table
- sizeof(size_t));
- }
-
- void Save(yostream*) const;
- void Load(yistream*);
-
- ScannerRowHeader& Header(State s) { return *(ScannerRowHeader*) s; }
- const ScannerRowHeader& Header(State s) const { return *(const ScannerRowHeader*) s; }
-
+ if (size < sizeof(s.m))
+ throw Error("EOF reached while mapping Pire::Scanner");
+
+ memcpy(&s.m, p, sizeof(s.m));
+ if (s.m.relocationSignature != Relocation::Signature)
+ throw Error("Type mismatch while mmapping Pire::Scanner");
+ Impl::AdvancePtr(p, size, sizeof(s.m));
+ Impl::AlignPtr(p, size);
+
+ if (Shortcutting::Signature != s.m.shortcuttingSignature)
+ throw Error("This scanner has different shortcutting type");
+
+ bool empty = *((const bool*) p);
+ Impl::AdvancePtr(p, size, sizeof(empty));
+ Impl::AlignPtr(p, size);
+
+ if (empty)
+ s.Alias(Null());
+ else {
+ if (size < s.BufSize())
+ throw Error("EOF reached while mapping NPire::Scanner");
+ s.Markup(const_cast<size_t*>(p));
+ Impl::AdvancePtr(p, size, s.BufSize());
+ s.m.initial += reinterpret_cast<size_t>(s.m_transitions);
+ }
+
+ Swap(s);
+ return Impl::AlignPtr(p, size);
+ }
+
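// Usage sketch (hedged): maps a saved relocatable scanner back without copying
// the transition table. `image` and `imageSize` are hypothetical names for a
// size_t-aligned buffer holding the output of Save(); only the Relocatable
// specialization provides a usable return type here.
template <class Shortcutting>
const void* MmapScannerExample(const void* image, size_t imageSize,
                               Scanner<Relocatable, Shortcutting>& sc)
{
    return sc.Mmap(image, imageSize); // throws Error on a signature/type mismatch
}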
+ size_t StateIndex(State s) const
+ {
+ return (s - reinterpret_cast<size_t>(m_transitions)) / (RowSize() * sizeof(Transition));
+ }
+
+ /**
+ * Agglutinates two scanners together, producing a larger scanner.
+     * Checking a string against that scanner effectively checks it against both of the agglutinated regexps
+ * (detailed information about matched regexps can be obtained with AcceptedRegexps()).
+ *
+     * Returns a default-constructed scanner in case of failure
+ * (consult Scanner::Empty() to find out whether the operation was successful).
+ */
+ static Scanner Glue(const Scanner& a, const Scanner& b, size_t maxSize = 0);
+
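// Hedged sketch of the documented Glue() + AcceptedRegexps() flow; `lhs` and
// `rhs` stand for two already-built scanners of the same type.
template <class Relocation, class Shortcutting>
void GlueExample(const Scanner<Relocation, Shortcutting>& lhs,
                 const Scanner<Relocation, Shortcutting>& rhs)
{
    typedef Scanner<Relocation, Shortcutting> Sc;
    Sc glued = Sc::Glue(lhs, rhs);
    if (glued.Empty())
        return; // gluing failed (e.g. the result would not fit into maxSize)
    typename Sc::State st;
    glued.Initialize(st);
    // ... feed the input through Next() here ...
    ypair<const size_t*, const size_t*> hits = glued.AcceptedRegexps(st);
    for (const size_t* i = hits.first; i != hits.second; ++i) {
        // *i indexes the glued regexps (lhs's regexps first, then rhs's)
    }
}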
+ // Returns the size of the memory buffer used (or required) by scanner.
+ size_t BufSize() const
+ {
+ return AlignUp(
+ MaxChar * sizeof(Letter) // Letters translation table
+ + m.finalTableSize * sizeof(size_t) // Final table
+ + m.statesCount * sizeof(size_t) // Final index
+ + RowSize() * m.statesCount * sizeof(Transition), // Transitions table
+ sizeof(size_t));
+ }
+
+ void Save(yostream*) const;
+ void Load(yistream*);
+
+ ScannerRowHeader& Header(State s) { return *(ScannerRowHeader*) s; }
+ const ScannerRowHeader& Header(State s) const { return *(const ScannerRowHeader*) s; }
+
protected:
-
- struct Locals {
- ui32 statesCount;
- ui32 lettersCount;
- ui32 regexpsCount;
- size_t initial;
- ui32 finalTableSize;
- size_t relocationSignature;
- size_t shortcuttingSignature;
- } m;
-
+
+ struct Locals {
+ ui32 statesCount;
+ ui32 lettersCount;
+ ui32 regexpsCount;
+ size_t initial;
+ ui32 finalTableSize;
+ size_t relocationSignature;
+ size_t shortcuttingSignature;
+ } m;
+
using BufferType = TArrayHolder<char>;
BufferType m_buffer;
- Letter* m_letters;
-
- size_t* m_final;
- size_t* m_finalIndex;
-
- Transition* m_transitions;
-
- inline static const Scanner& Null()
- {
- static const Scanner n = Fsm::MakeFalse().Compile< Scanner<Relocation, Shortcutting> >();
+ Letter* m_letters;
+
+ size_t* m_final;
+ size_t* m_finalIndex;
+
+ Transition* m_transitions;
+
+ inline static const Scanner& Null()
+ {
+ static const Scanner n = Fsm::MakeFalse().Compile< Scanner<Relocation, Shortcutting> >();
return n;
- }
-
-    // Returns the transition row size in Transitions. The row size in bytes should be a multiple of sizeof(MaxSizeWord)
- size_t RowSize() const { return AlignUp(m.lettersCount + HEADER_SIZE, sizeof(MaxSizeWord)/sizeof(Transition)); }
-
- static const size_t HEADER_SIZE = sizeof(ScannerRowHeader) / sizeof(Transition);
- PIRE_STATIC_ASSERT(sizeof(ScannerRowHeader) % sizeof(Transition) == 0);
-
- template<class Eq>
- void Init(size_t states, const Partition<Char, Eq>& letters, size_t finalStatesCount, size_t startState, size_t regexpsCount = 1)
- {
+ }
+
+    // Returns the transition row size in Transitions. The row size in bytes should be a multiple of sizeof(MaxSizeWord)
+ size_t RowSize() const { return AlignUp(m.lettersCount + HEADER_SIZE, sizeof(MaxSizeWord)/sizeof(Transition)); }
+
+ static const size_t HEADER_SIZE = sizeof(ScannerRowHeader) / sizeof(Transition);
+ PIRE_STATIC_ASSERT(sizeof(ScannerRowHeader) % sizeof(Transition) == 0);
+
+ template<class Eq>
+ void Init(size_t states, const Partition<Char, Eq>& letters, size_t finalStatesCount, size_t startState, size_t regexpsCount = 1)
+ {
std::memset(&m, 0, sizeof(m));
- m.relocationSignature = Relocation::Signature;
- m.shortcuttingSignature = Shortcutting::Signature;
- m.statesCount = states;
- m.lettersCount = letters.Size();
- m.regexpsCount = regexpsCount;
- m.finalTableSize = finalStatesCount + states;
-
+ m.relocationSignature = Relocation::Signature;
+ m.shortcuttingSignature = Shortcutting::Signature;
+ m.statesCount = states;
+ m.lettersCount = letters.Size();
+ m.regexpsCount = regexpsCount;
+ m.finalTableSize = finalStatesCount + states;
+
m_buffer = BufferType(new char[BufSize() + sizeof(size_t)]);
memset(m_buffer.Get(), 0, BufSize() + sizeof(size_t));
Markup(AlignUp(m_buffer.Get(), sizeof(size_t)));
-
- for (size_t i = 0; i != Size(); ++i)
- Header(IndexToState(i)) = ScannerRowHeader();
-
- m.initial = reinterpret_cast<size_t>(m_transitions + startState * RowSize());
-
- // Build letter translation table
+
+ for (size_t i = 0; i != Size(); ++i)
+ Header(IndexToState(i)) = ScannerRowHeader();
+
+ m.initial = reinterpret_cast<size_t>(m_transitions + startState * RowSize());
+
+ // Build letter translation table
for (auto&& letter : letters)
for (auto&& character : letter.second.second)
m_letters[character] = letter.second.first + HEADER_SIZE;
- }
-
- /*
- * Initializes pointers depending on buffer start, letters and states count
- */
- void Markup(void* ptr)
- {
- Impl::CheckAlign(ptr, sizeof(size_t));
- m_letters = reinterpret_cast<Letter*>(ptr);
- m_final = reinterpret_cast<size_t*>(m_letters + MaxChar);
- m_finalIndex = reinterpret_cast<size_t*>(m_final + m.finalTableSize);
- m_transitions = reinterpret_cast<Transition*>(m_finalIndex + m.statesCount);
- }
-
- // Makes a shallow ("weak") copy of the given scanner.
-    // The copied scanner does not keep the original's internals alive.
- void Alias(const Scanner<Relocation, Shortcutting>& s)
- {
- memcpy(&m, &s.m, sizeof(m));
+ }
+
+ /*
+ * Initializes pointers depending on buffer start, letters and states count
+ */
+ void Markup(void* ptr)
+ {
+ Impl::CheckAlign(ptr, sizeof(size_t));
+ m_letters = reinterpret_cast<Letter*>(ptr);
+ m_final = reinterpret_cast<size_t*>(m_letters + MaxChar);
+ m_finalIndex = reinterpret_cast<size_t*>(m_final + m.finalTableSize);
+ m_transitions = reinterpret_cast<Transition*>(m_finalIndex + m.statesCount);
+ }
+
+ // Makes a shallow ("weak") copy of the given scanner.
+    // The copied scanner does not keep the original's internals alive.
+ void Alias(const Scanner<Relocation, Shortcutting>& s)
+ {
+ memcpy(&m, &s.m, sizeof(m));
m_buffer.Reset();
- m_letters = s.m_letters;
- m_final = s.m_final;
- m_finalIndex = s.m_finalIndex;
- m_transitions = s.m_transitions;
- }
-
- template<class AnotherRelocation>
- void DeepCopy(const Scanner<AnotherRelocation, Shortcutting>& s)
- {
- // Don't want memory leaks, but we cannot free the buffer because there might be aliased instances
+ m_letters = s.m_letters;
+ m_final = s.m_final;
+ m_finalIndex = s.m_finalIndex;
+ m_transitions = s.m_transitions;
+ }
+
+ template<class AnotherRelocation>
+ void DeepCopy(const Scanner<AnotherRelocation, Shortcutting>& s)
+ {
+ // Don't want memory leaks, but we cannot free the buffer because there might be aliased instances
Y_ASSERT(m_buffer == nullptr);
-
- // Ensure that specializations of Scanner across different Relocations do not touch its Locals
+
+ // Ensure that specializations of Scanner across different Relocations do not touch its Locals
static_assert(sizeof(m) == sizeof(s.m), "sizeof(m) == sizeof(s.m)");
- memcpy(&m, &s.m, sizeof(s.m));
- m.relocationSignature = Relocation::Signature;
- m.shortcuttingSignature = Shortcutting::Signature;
+ memcpy(&m, &s.m, sizeof(s.m));
+ m.relocationSignature = Relocation::Signature;
+ m.shortcuttingSignature = Shortcutting::Signature;
m_buffer = BufferType(new char[BufSize() + sizeof(size_t)]);
std::memset(m_buffer.Get(), 0, BufSize() + sizeof(size_t));
Markup(AlignUp(m_buffer.Get(), sizeof(size_t)));
-
-        // Values in the letter-to-letter-class table take the row header size into account
- for (size_t c = 0; c < MaxChar; ++c) {
- m_letters[c] = s.m_letters[c] - s.HEADER_SIZE + HEADER_SIZE;
+
+        // Values in the letter-to-letter-class table take the row header size into account
+ for (size_t c = 0; c < MaxChar; ++c) {
+ m_letters[c] = s.m_letters[c] - s.HEADER_SIZE + HEADER_SIZE;
Y_ASSERT(c == Epsilon || m_letters[c] >= HEADER_SIZE);
Y_ASSERT(c == Epsilon || m_letters[c] < RowSize());
- }
- memcpy(m_final, s.m_final, m.finalTableSize * sizeof(*m_final));
- memcpy(m_finalIndex, s.m_finalIndex, m.statesCount * sizeof(*m_finalIndex));
-
- m.initial = IndexToState(s.StateIndex(s.m.initial));
-
- for (size_t st = 0; st != m.statesCount; ++st) {
- size_t oldstate = s.IndexToState(st);
- size_t newstate = IndexToState(st);
- Header(newstate) = s.Header(oldstate);
- const typename Scanner<AnotherRelocation, Shortcutting>::Transition* os
- = reinterpret_cast<const typename Scanner<AnotherRelocation, Shortcutting>::Transition*>(oldstate);
- Transition* ns = reinterpret_cast<Transition*>(newstate);
-
- for (size_t let = 0; let != LettersCount(); ++let) {
- size_t destIndex = s.StateIndex(AnotherRelocation::Go(oldstate, os[let + s.HEADER_SIZE]));
- Transition tr = Relocation::Diff(newstate, IndexToState(destIndex));
- ns[let + HEADER_SIZE] = tr;
+ }
+ memcpy(m_final, s.m_final, m.finalTableSize * sizeof(*m_final));
+ memcpy(m_finalIndex, s.m_finalIndex, m.statesCount * sizeof(*m_finalIndex));
+
+ m.initial = IndexToState(s.StateIndex(s.m.initial));
+
+ for (size_t st = 0; st != m.statesCount; ++st) {
+ size_t oldstate = s.IndexToState(st);
+ size_t newstate = IndexToState(st);
+ Header(newstate) = s.Header(oldstate);
+ const typename Scanner<AnotherRelocation, Shortcutting>::Transition* os
+ = reinterpret_cast<const typename Scanner<AnotherRelocation, Shortcutting>::Transition*>(oldstate);
+ Transition* ns = reinterpret_cast<Transition*>(newstate);
+
+ for (size_t let = 0; let != LettersCount(); ++let) {
+ size_t destIndex = s.StateIndex(AnotherRelocation::Go(oldstate, os[let + s.HEADER_SIZE]));
+ Transition tr = Relocation::Diff(newstate, IndexToState(destIndex));
+ ns[let + HEADER_SIZE] = tr;
Y_ASSERT(Relocation::Go(newstate, tr) >= (size_t)m_transitions);
Y_ASSERT(Relocation::Go(newstate, tr) < (size_t)(m_transitions + RowSize()*Size()));
- }
- }
- }
-
-
- size_t IndexToState(size_t stateIndex) const
- {
- return reinterpret_cast<size_t>(m_transitions + stateIndex * RowSize());
- }
-
- void SetJump(size_t oldState, Char c, size_t newState, unsigned long /*payload*/ = 0)
- {
+ }
+ }
+ }
+
+
+ size_t IndexToState(size_t stateIndex) const
+ {
+ return reinterpret_cast<size_t>(m_transitions + stateIndex * RowSize());
+ }
+
+ void SetJump(size_t oldState, Char c, size_t newState, unsigned long /*payload*/ = 0)
+ {
Y_ASSERT(m_buffer);
Y_ASSERT(oldState < m.statesCount);
Y_ASSERT(newState < m.statesCount);
-
- m_transitions[oldState * RowSize() + m_letters[c]]
- = Relocation::Diff(IndexToState(oldState), IndexToState(newState));
- }
-
- unsigned long RemapAction(unsigned long action) { return action; }
-
- void SetInitial(size_t state)
- {
+
+ m_transitions[oldState * RowSize() + m_letters[c]]
+ = Relocation::Diff(IndexToState(oldState), IndexToState(newState));
+ }
+
+ unsigned long RemapAction(unsigned long action) { return action; }
+
+ void SetInitial(size_t state)
+ {
Y_ASSERT(m_buffer);
- m.initial = IndexToState(state);
- }
-
- void SetTag(size_t state, size_t value)
- {
+ m.initial = IndexToState(state);
+ }
+
+ void SetTag(size_t state, size_t value)
+ {
Y_ASSERT(m_buffer);
- Header(IndexToState(state)).Common.Flags = value;
- }
-
- // Fill shortcut masks for all the states
- void BuildShortcuts()
- {
+ Header(IndexToState(state)).Common.Flags = value;
+ }
+
+ // Fill shortcut masks for all the states
+ void BuildShortcuts()
+ {
Y_ASSERT(m_buffer);
-
- // Build the mapping from letter classes to characters
+
+ // Build the mapping from letter classes to characters
TVector< TVector<char> > letters(RowSize());
- for (unsigned ch = 0; ch != 1 << (sizeof(char)*8); ++ch)
- letters[m_letters[ch]].push_back(ch);
-
- // Loop through all states in the transition table and
-        // check whether it is possible to set up shortcuts
- for (size_t i = 0; i != Size(); ++i) {
- State st = IndexToState(i);
- ScannerRowHeader& header = Header(st);
- Shortcutting::SetNoExit(header);
- size_t ind = 0;
- size_t let = HEADER_SIZE;
- for (; let != LettersCount() + HEADER_SIZE; ++let) {
- // Check if the transition is not the same state
- if (Relocation::Go(st, reinterpret_cast<const Transition*>(st)[let]) != st) {
- if (ind + letters[let].size() > Shortcutting::ExitMaskCount)
- break;
-                    // For each character set up a mask
+ for (unsigned ch = 0; ch != 1 << (sizeof(char)*8); ++ch)
+ letters[m_letters[ch]].push_back(ch);
+
+ // Loop through all states in the transition table and
+        // check whether it is possible to set up shortcuts
+ for (size_t i = 0; i != Size(); ++i) {
+ State st = IndexToState(i);
+ ScannerRowHeader& header = Header(st);
+ Shortcutting::SetNoExit(header);
+ size_t ind = 0;
+ size_t let = HEADER_SIZE;
+ for (; let != LettersCount() + HEADER_SIZE; ++let) {
+ // Check if the transition is not the same state
+ if (Relocation::Go(st, reinterpret_cast<const Transition*>(st)[let]) != st) {
+ if (ind + letters[let].size() > Shortcutting::ExitMaskCount)
+ break;
+                    // For each character set up a mask
for (auto&& character : letters[let]) {
Shortcutting::SetMask(header, ind, character);
- ++ind;
- }
- }
- }
-
- if (let != LettersCount() + HEADER_SIZE) {
- // Not enough space in ExitMasks, so reset all masks (which leads to bypassing the optimization)
- Shortcutting::SetNoShortcut(header);
- }
- // Fill the rest of the shortcut masks with the last used mask
- Shortcutting::FinishMasks(header, ind);
- }
- }
-
- // Fills final states table and builds shortcuts if possible
- void FinishBuild()
- {
+ ++ind;
+ }
+ }
+ }
+
+ if (let != LettersCount() + HEADER_SIZE) {
+ // Not enough space in ExitMasks, so reset all masks (which leads to bypassing the optimization)
+ Shortcutting::SetNoShortcut(header);
+ }
+ // Fill the rest of the shortcut masks with the last used mask
+ Shortcutting::FinishMasks(header, ind);
+ }
+ }
+
+ // Fills final states table and builds shortcuts if possible
+ void FinishBuild()
+ {
Y_ASSERT(m_buffer);
auto finalWriter = m_final;
- for (size_t state = 0; state != Size(); ++state) {
+ for (size_t state = 0; state != Size(); ++state) {
m_finalIndex[state] = finalWriter - m_final;
- if (Header(IndexToState(state)).Common.Flags & FinalFlag)
+ if (Header(IndexToState(state)).Common.Flags & FinalFlag)
*finalWriter++ = 0;
*finalWriter++ = static_cast<size_t>(-1);
- }
- BuildShortcuts();
- }
-
- size_t AcceptedRegexpsCount(size_t idx) const
- {
- const size_t* b = m_final + m_finalIndex[idx];
- const size_t* e = b;
- while (*e != End)
- ++e;
- return e - b;
- }
-
- template <class Scanner>
- friend void Pire::BuildScanner(const Fsm&, Scanner&);
-
- typedef State InternalState; // Needed for agglutination
- friend class ScannerGlueCommon<Scanner>;
- friend class ScannerGlueTask<Scanner>;
-
- template<class AnotherRelocation, class AnotherShortcutting>
- friend class Scanner;
-
- friend struct ScannerSaver;
-
-#ifndef PIRE_DEBUG
- friend struct AlignedRunner< Scanner<Relocation, Shortcutting> >;
-#endif
-};
-
-// Helper class for Save/Load partial specialization
-struct ScannerSaver {
- template<class Shortcutting>
- static void SaveScanner(const Scanner<Relocatable, Shortcutting>& scanner, yostream* s)
- {
- typedef Scanner<Relocatable, Shortcutting> ScannerType;
-
- typename ScannerType::Locals mc = scanner.m;
- mc.initial -= reinterpret_cast<size_t>(scanner.m_transitions);
+ }
+ BuildShortcuts();
+ }
+
+ size_t AcceptedRegexpsCount(size_t idx) const
+ {
+ const size_t* b = m_final + m_finalIndex[idx];
+ const size_t* e = b;
+ while (*e != End)
+ ++e;
+ return e - b;
+ }
+
+ template <class Scanner>
+ friend void Pire::BuildScanner(const Fsm&, Scanner&);
+
+ typedef State InternalState; // Needed for agglutination
+ friend class ScannerGlueCommon<Scanner>;
+ friend class ScannerGlueTask<Scanner>;
+
+ template<class AnotherRelocation, class AnotherShortcutting>
+ friend class Scanner;
+
+ friend struct ScannerSaver;
+
+#ifndef PIRE_DEBUG
+ friend struct AlignedRunner< Scanner<Relocation, Shortcutting> >;
+#endif
+};
+
+// Helper class for Save/Load partial specialization
+struct ScannerSaver {
+ template<class Shortcutting>
+ static void SaveScanner(const Scanner<Relocatable, Shortcutting>& scanner, yostream* s)
+ {
+ typedef Scanner<Relocatable, Shortcutting> ScannerType;
+
+ typename ScannerType::Locals mc = scanner.m;
+ mc.initial -= reinterpret_cast<size_t>(scanner.m_transitions);
SavePodType(s, Pire::Header(ScannerIOTypes::Scanner, sizeof(mc)));
- Impl::AlignSave(s, sizeof(Pire::Header));
- SavePodType(s, mc);
- Impl::AlignSave(s, sizeof(mc));
- SavePodType(s, scanner.Empty());
- Impl::AlignSave(s, sizeof(scanner.Empty()));
- if (!scanner.Empty())
+ Impl::AlignSave(s, sizeof(Pire::Header));
+ SavePodType(s, mc);
+ Impl::AlignSave(s, sizeof(mc));
+ SavePodType(s, scanner.Empty());
+ Impl::AlignSave(s, sizeof(scanner.Empty()));
+ if (!scanner.Empty())
Impl::AlignedSaveArray(s, scanner.m_buffer.Get(), scanner.BufSize());
- }
-
- template<class Shortcutting>
- static void LoadScanner(Scanner<Relocatable, Shortcutting>& scanner, yistream* s)
- {
- typedef Scanner<Relocatable, Shortcutting> ScannerType;
-
- Scanner<Relocatable, Shortcutting> sc;
+ }
+
+ template<class Shortcutting>
+ static void LoadScanner(Scanner<Relocatable, Shortcutting>& scanner, yistream* s)
+ {
+ typedef Scanner<Relocatable, Shortcutting> ScannerType;
+
+ Scanner<Relocatable, Shortcutting> sc;
Impl::ValidateHeader(s, ScannerIOTypes::Scanner, sizeof(sc.m));
- LoadPodType(s, sc.m);
- Impl::AlignLoad(s, sizeof(sc.m));
- if (Shortcutting::Signature != sc.m.shortcuttingSignature)
- throw Error("This scanner has different shortcutting type");
- bool empty;
- LoadPodType(s, empty);
- Impl::AlignLoad(s, sizeof(empty));
-
- if (empty) {
- sc.Alias(ScannerType::Null());
- } else {
+ LoadPodType(s, sc.m);
+ Impl::AlignLoad(s, sizeof(sc.m));
+ if (Shortcutting::Signature != sc.m.shortcuttingSignature)
+ throw Error("This scanner has different shortcutting type");
+ bool empty;
+ LoadPodType(s, empty);
+ Impl::AlignLoad(s, sizeof(empty));
+
+ if (empty) {
+ sc.Alias(ScannerType::Null());
+ } else {
sc.m_buffer = TArrayHolder<char>(new char[sc.BufSize()]);
Impl::AlignedLoadArray(s, sc.m_buffer.Get(), sc.BufSize());
sc.Markup(sc.m_buffer.Get());
- sc.m.initial += reinterpret_cast<size_t>(sc.m_transitions);
- }
- scanner.Swap(sc);
- }
-
-    // TODO: implement a more efficient serialization
-    // of the nonrelocatable scanner if necessary
-
- template<class Shortcutting>
- static void SaveScanner(const Scanner<Nonrelocatable, Shortcutting>& scanner, yostream* s)
- {
- Scanner<Relocatable, Shortcutting>(scanner).Save(s);
- }
-
- template<class Shortcutting>
- static void LoadScanner(Scanner<Nonrelocatable, Shortcutting>& scanner, yistream* s)
- {
- Scanner<Relocatable, Shortcutting> rs;
- rs.Load(s);
- Scanner<Nonrelocatable, Shortcutting>(rs).Swap(scanner);
- }
-};
-
-
-template<class Relocation, class Shortcutting>
-void Scanner<Relocation, Shortcutting>::Save(yostream* s) const
-{
- ScannerSaver::SaveScanner(*this, s);
-}
-
-template<class Relocation, class Shortcutting>
-void Scanner<Relocation, Shortcutting>::Load(yistream* s)
-{
- ScannerSaver::LoadScanner(*this, s);
-}
-
-// Shortcutting policy that checks state exit masks
-template <size_t MaskCount>
-class ExitMasks {
-private:
- enum {
- NO_SHORTCUT_MASK = 1, // the state doesn't have shortcuts
-        NO_EXIT_MASK = 2 // the state has only transitions to itself (we can stop the scan)
- };
-
- template<class ScannerRowHeader, unsigned N>
- struct MaskCheckerBase {
+ sc.m.initial += reinterpret_cast<size_t>(sc.m_transitions);
+ }
+ scanner.Swap(sc);
+ }
+
+    // TODO: implement a more efficient serialization
+    // of the nonrelocatable scanner if necessary
+
+ template<class Shortcutting>
+ static void SaveScanner(const Scanner<Nonrelocatable, Shortcutting>& scanner, yostream* s)
+ {
+ Scanner<Relocatable, Shortcutting>(scanner).Save(s);
+ }
+
+ template<class Shortcutting>
+ static void LoadScanner(Scanner<Nonrelocatable, Shortcutting>& scanner, yistream* s)
+ {
+ Scanner<Relocatable, Shortcutting> rs;
+ rs.Load(s);
+ Scanner<Nonrelocatable, Shortcutting>(rs).Swap(scanner);
+ }
+};
+
+
+template<class Relocation, class Shortcutting>
+void Scanner<Relocation, Shortcutting>::Save(yostream* s) const
+{
+ ScannerSaver::SaveScanner(*this, s);
+}
+
+template<class Relocation, class Shortcutting>
+void Scanner<Relocation, Shortcutting>::Load(yistream* s)
+{
+ ScannerSaver::LoadScanner(*this, s);
+}
+
+// Shortcutting policy that checks state exit masks
+template <size_t MaskCount>
+class ExitMasks {
+private:
+ enum {
+ NO_SHORTCUT_MASK = 1, // the state doesn't have shortcuts
+        NO_EXIT_MASK = 2 // the state has only transitions to itself (we can stop the scan)
+ };
+
+ template<class ScannerRowHeader, unsigned N>
+ struct MaskCheckerBase {
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool Check(const ScannerRowHeader& hdr, size_t alignOffset, Word chunk)
- {
- Word mask = CheckBytes(hdr.Mask(N, alignOffset), chunk);
- for (int i = N-1; i >= 0; --i) {
- mask = Or(mask, CheckBytes(hdr.Mask(i, alignOffset), chunk));
- }
- return !IsAnySet(mask);
- }
+ bool Check(const ScannerRowHeader& hdr, size_t alignOffset, Word chunk)
+ {
+ Word mask = CheckBytes(hdr.Mask(N, alignOffset), chunk);
+ for (int i = N-1; i >= 0; --i) {
+ mask = Or(mask, CheckBytes(hdr.Mask(i, alignOffset), chunk));
+ }
+ return !IsAnySet(mask);
+ }
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- const Word* DoRun(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
- {
- for (; begin != end && Check(hdr, alignOffset, ToLittleEndian(*begin)); ++begin) {}
- return begin;
- }
- };
-
- template<class ScannerRowHeader, unsigned N, unsigned Nmax>
- struct MaskChecker : MaskCheckerBase<ScannerRowHeader, N> {
- typedef MaskCheckerBase<ScannerRowHeader, N> Base;
- typedef MaskChecker<ScannerRowHeader, N+1, Nmax> Next;
+ const Word* DoRun(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
+ {
+ for (; begin != end && Check(hdr, alignOffset, ToLittleEndian(*begin)); ++begin) {}
+ return begin;
+ }
+ };
+
+ template<class ScannerRowHeader, unsigned N, unsigned Nmax>
+ struct MaskChecker : MaskCheckerBase<ScannerRowHeader, N> {
+ typedef MaskCheckerBase<ScannerRowHeader, N> Base;
+ typedef MaskChecker<ScannerRowHeader, N+1, Nmax> Next;
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- const Word* Run(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
- {
- if (hdr.Mask(N) == hdr.Mask(N + 1))
- return Base::DoRun(hdr, alignOffset, begin, end);
- else
- return Next::Run(hdr, alignOffset, begin, end);
- }
- };
-
- template<class ScannerRowHeader, unsigned N>
- struct MaskChecker<ScannerRowHeader, N, N> : MaskCheckerBase<ScannerRowHeader, N> {
- typedef MaskCheckerBase<ScannerRowHeader, N> Base;
+ const Word* Run(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
+ {
+ if (hdr.Mask(N) == hdr.Mask(N + 1))
+ return Base::DoRun(hdr, alignOffset, begin, end);
+ else
+ return Next::Run(hdr, alignOffset, begin, end);
+ }
+ };
+
+ template<class ScannerRowHeader, unsigned N>
+ struct MaskChecker<ScannerRowHeader, N, N> : MaskCheckerBase<ScannerRowHeader, N> {
+ typedef MaskCheckerBase<ScannerRowHeader, N> Base;
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- const Word* Run(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
- {
- return Base::DoRun(hdr, alignOffset, begin, end);
- }
+ const Word* Run(const ScannerRowHeader& hdr, size_t alignOffset, const Word* begin, const Word* end)
+ {
+ return Base::DoRun(hdr, alignOffset, begin, end);
+ }
};
-
-    // Compares the ExitMask[0] value without SSE reads, which seems to be faster
- template <class Relocation>
+
+    // Compares the ExitMask[0] value without SSE reads, which seems to be faster
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool CheckFirstMask(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state, size_t val)
- {
- return (scanner.Header(state).Mask(0) == val);
- }
-
-public:
-
- static const size_t ExitMaskCount = MaskCount;
- static const size_t Signature = 0x2000 + MaskCount;
-
- template <class Scanner>
- struct ExtendedRowHeader {
- private:
-        /// In order to allow the transition table to be aligned at sizeof(size_t) instead of
-        /// sizeof(Word), and still be able to read masks at Word-aligned addresses, each mask
-        /// occupies twice the space and only the properly aligned part of it is read
- enum {
- SizeTInMaxSizeWord = sizeof(MaxSizeWord) / sizeof(size_t),
- MaskSizeInSizeT = 2 * SizeTInMaxSizeWord,
- };
-
+ bool CheckFirstMask(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state, size_t val)
+ {
+ return (scanner.Header(state).Mask(0) == val);
+ }
+
+public:
+
+ static const size_t ExitMaskCount = MaskCount;
+ static const size_t Signature = 0x2000 + MaskCount;
+
+ template <class Scanner>
+ struct ExtendedRowHeader {
+ private:
+        /// In order to allow the transition table to be aligned at sizeof(size_t) instead of
+        /// sizeof(Word), and still be able to read masks at Word-aligned addresses, each mask
+        /// occupies twice the space and only the properly aligned part of it is read
+ enum {
+ SizeTInMaxSizeWord = sizeof(MaxSizeWord) / sizeof(size_t),
+ MaskSizeInSizeT = 2 * SizeTInMaxSizeWord,
+ };
+
public:
- static const size_t ExitMaskCount = MaskCount;
-
- inline
- const Word& Mask(size_t i, size_t alignOffset) const
- {
+ static const size_t ExitMaskCount = MaskCount;
+
+ inline
+ const Word& Mask(size_t i, size_t alignOffset) const
+ {
Y_ASSERT(i < ExitMaskCount);
Y_ASSERT(alignOffset < SizeTInMaxSizeWord);
- const Word* p = (const Word*)(ExitMasksArray + alignOffset + MaskSizeInSizeT * i);
+ const Word* p = (const Word*)(ExitMasksArray + alignOffset + MaskSizeInSizeT * i);
Y_ASSERT(IsAligned(p, sizeof(Word)));
- return *p;
- }
+ return *p;
+ }
PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- size_t Mask(size_t i) const
- {
+ size_t Mask(size_t i) const
+ {
Y_ASSERT(i < ExitMaskCount);
- return ExitMasksArray[MaskSizeInSizeT*i];
- }
-
- void SetMask(size_t i, size_t val)
- {
- for (size_t j = 0; j < MaskSizeInSizeT; ++j)
- ExitMasksArray[MaskSizeInSizeT*i + j] = val;
- }
-
- ExtendedRowHeader()
- {
- for (size_t i = 0; i < ExitMaskCount; ++i)
- SetMask(i, NO_SHORTCUT_MASK);
- }
-
- template <class OtherScanner>
- ExtendedRowHeader& operator =(const ExtendedRowHeader<OtherScanner>& other)
- {
- PIRE_STATIC_ASSERT(ExitMaskCount == ExtendedRowHeader<OtherScanner>::ExitMaskCount);
- Common = other.Common;
- for (size_t i = 0; i < ExitMaskCount; ++i)
- SetMask(i, other.Mask(i));
- return *this;
- }
-
- private:
-        /// If this state loops for all letters except a particular set
-        /// (a common situation when matching something like /.*[Aa]/),
- /// each ExitMask contains that letter in each byte of size_t.
- ///
- /// These masks are most commonly used for fast forwarding through parts
-        /// of the string matching /.*/ somewhere in the middle of the regexp.
- size_t ExitMasksArray[ExitMaskCount * MaskSizeInSizeT];
-
- public:
- typename Scanner::CommonRowHeader Common;
- };
-
- template <class Header>
- static void SetNoExit(Header& header)
- {
- header.SetMask(0, NO_EXIT_MASK);
- }
-
- template <class Header>
- static void SetNoShortcut(Header& header)
- {
- header.SetMask(0, NO_SHORTCUT_MASK);
- }
-
- template <class Header>
- static void SetMask(Header& header, size_t ind, char c)
- {
- header.SetMask(ind, FillSizeT(c));
- }
-
- template <class Header>
- static void FinishMasks(Header& header, size_t ind)
- {
- if (ind == 0)
- ind = 1;
- // Fill the rest of the shortcut masks with the last used mask
- size_t lastMask = header.Mask(ind - 1);
- while (ind != ExitMaskCount) {
- header.SetMask(ind, lastMask);
- ++ind;
- }
- }
-
- template <class Relocation>
+ return ExitMasksArray[MaskSizeInSizeT*i];
+ }
+
+ void SetMask(size_t i, size_t val)
+ {
+ for (size_t j = 0; j < MaskSizeInSizeT; ++j)
+ ExitMasksArray[MaskSizeInSizeT*i + j] = val;
+ }
+
+ ExtendedRowHeader()
+ {
+ for (size_t i = 0; i < ExitMaskCount; ++i)
+ SetMask(i, NO_SHORTCUT_MASK);
+ }
+
+ template <class OtherScanner>
+ ExtendedRowHeader& operator =(const ExtendedRowHeader<OtherScanner>& other)
+ {
+ PIRE_STATIC_ASSERT(ExitMaskCount == ExtendedRowHeader<OtherScanner>::ExitMaskCount);
+ Common = other.Common;
+ for (size_t i = 0; i < ExitMaskCount; ++i)
+ SetMask(i, other.Mask(i));
+ return *this;
+ }
+
+ private:
+        /// If this state loops for all letters except a particular set
+        /// (a common situation when matching something like /.*[Aa]/),
+ /// each ExitMask contains that letter in each byte of size_t.
+ ///
+ /// These masks are most commonly used for fast forwarding through parts
+        /// of the string matching /.*/ somewhere in the middle of the regexp.
+ size_t ExitMasksArray[ExitMaskCount * MaskSizeInSizeT];
+
+ public:
+ typename Scanner::CommonRowHeader Common;
+ };
+
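// Worked example of the mask layout (assuming a 64-bit size_t): for a state
// that loops on everything except [Aa], SetMask() below stores the exit
// letters byte-replicated, e.g. FillSizeT('A') == 0x4141414141414141 and
// FillSizeT('a') == 0x6161616161616161. MaskChecker above then keeps skipping
// whole Words of input while none of their bytes equals any stored exit letter.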
+ template <class Header>
+ static void SetNoExit(Header& header)
+ {
+ header.SetMask(0, NO_EXIT_MASK);
+ }
+
+ template <class Header>
+ static void SetNoShortcut(Header& header)
+ {
+ header.SetMask(0, NO_SHORTCUT_MASK);
+ }
+
+ template <class Header>
+ static void SetMask(Header& header, size_t ind, char c)
+ {
+ header.SetMask(ind, FillSizeT(c));
+ }
+
+ template <class Header>
+ static void FinishMasks(Header& header, size_t ind)
+ {
+ if (ind == 0)
+ ind = 1;
+ // Fill the rest of the shortcut masks with the last used mask
+ size_t lastMask = header.Mask(ind - 1);
+ while (ind != ExitMaskCount) {
+ header.SetMask(ind, lastMask);
+ ++ind;
+ }
+ }
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool NoExit(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state)
- {
- return CheckFirstMask(scanner, state, NO_EXIT_MASK);
- }
-
- template <class Relocation>
+ bool NoExit(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state)
+ {
+ return CheckFirstMask(scanner, state, NO_EXIT_MASK);
+ }
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool NoShortcut(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state)
- {
- return CheckFirstMask(scanner, state, NO_SHORTCUT_MASK);
- }
-
- template <class Relocation>
+ bool NoShortcut(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state)
+ {
+ return CheckFirstMask(scanner, state, NO_SHORTCUT_MASK);
+ }
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- const Word* Run(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state, size_t alignOffset, const Word* begin, const Word* end)
- {
- return MaskChecker<typename Scanner<Relocation, ExitMasks<MaskCount> >::ScannerRowHeader, 0, MaskCount - 1>::Run(scanner.Header(state), alignOffset, begin, end);
- }
-
-};
-
-
-// Shortcutting policy that doesn't do shortcuts
-struct NoShortcuts {
-
- static const size_t ExitMaskCount = 0;
- static const size_t Signature = 0x1000;
-
- template <class Scanner>
- struct ExtendedRowHeader {
- typename Scanner::CommonRowHeader Common;
-
- template <class OtherScanner>
- ExtendedRowHeader& operator =(const ExtendedRowHeader<OtherScanner>& other)
- {
- PIRE_STATIC_ASSERT(sizeof(ExtendedRowHeader) == sizeof(ExtendedRowHeader<OtherScanner>));
- Common = other.Common;
- return *this;
- }
- };
-
- template <class Header>
- static void SetNoExit(Header&) {}
-
- template <class Header>
- static void SetNoShortcut(Header&) {}
-
- template <class Header>
- static void SetMask(Header&, size_t, char) {}
-
- template <class Header>
- static void FinishMasks(Header&, size_t) {}
-
- template <class Relocation>
+ const Word* Run(const Scanner<Relocation, ExitMasks<MaskCount> >& scanner, typename Scanner<Relocation, ExitMasks<MaskCount> >::State state, size_t alignOffset, const Word* begin, const Word* end)
+ {
+ return MaskChecker<typename Scanner<Relocation, ExitMasks<MaskCount> >::ScannerRowHeader, 0, MaskCount - 1>::Run(scanner.Header(state), alignOffset, begin, end);
+ }
+
+};
+
+
+// Shortcutting policy that doesn't do shortcuts
+struct NoShortcuts {
+
+ static const size_t ExitMaskCount = 0;
+ static const size_t Signature = 0x1000;
+
+ template <class Scanner>
+ struct ExtendedRowHeader {
+ typename Scanner::CommonRowHeader Common;
+
+ template <class OtherScanner>
+ ExtendedRowHeader& operator =(const ExtendedRowHeader<OtherScanner>& other)
+ {
+ PIRE_STATIC_ASSERT(sizeof(ExtendedRowHeader) == sizeof(ExtendedRowHeader<OtherScanner>));
+ Common = other.Common;
+ return *this;
+ }
+ };
+
+ template <class Header>
+ static void SetNoExit(Header&) {}
+
+ template <class Header>
+ static void SetNoShortcut(Header&) {}
+
+ template <class Header>
+ static void SetMask(Header&, size_t, char) {}
+
+ template <class Header>
+ static void FinishMasks(Header&, size_t) {}
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool NoExit(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State)
- {
- // Cannot exit prematurely
- return false;
- }
-
- template <class Relocation>
+ bool NoExit(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State)
+ {
+ // Cannot exit prematurely
+ return false;
+ }
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- bool NoShortcut(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State)
- {
- // There's no shortcut regardless of the state
- return true;
- }
-
- template <class Relocation>
+ bool NoShortcut(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State)
+ {
+ // There's no shortcut regardless of the state
+ return true;
+ }
+
+ template <class Relocation>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- const Word* Run(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State, size_t, const Word* begin, const Word*)
- {
- // Stop shortcutting right at the beginning
- return begin;
- }
-};
-
-#ifndef PIRE_DEBUG
-
-// The purpose of this template is to produce a number of ProcessChunk() calls
-// instead of writing a for(...) { ProcessChunk(); } loop that GCC refuses to unroll.
-// Manually unrolled code proves to be faster.
-template <class Scanner, unsigned Count>
-struct MultiChunk {
-    // Process a Word-sized chunk which consists of >=1 size_t-sized chunks
- template<class Pred>
+ const Word* Run(const Scanner<Relocation, NoShortcuts>&, typename Scanner<Relocation, NoShortcuts>::State, size_t, const Word* begin, const Word*)
+ {
+ // Stop shortcutting right at the beginning
+ return begin;
+ }
+};
+
+#ifndef PIRE_DEBUG
+
+// The purpose of this template is to produce a number of ProcessChunk() calls
+// instead of writing a for(...) { ProcessChunk(); } loop that GCC refuses to unroll.
+// Manually unrolled code proves to be faster.
+template <class Scanner, unsigned Count>
+struct MultiChunk {
+    // Process a Word-sized chunk which consists of >=1 size_t-sized chunks
+ template<class Pred>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action Process(const Scanner& scanner, typename Scanner::State& state, const size_t* p, Pred pred)
- {
- if (RunChunk(scanner, state, p, 0, sizeof(void*), pred) == Continue)
- return MultiChunk<Scanner, Count-1>::Process(scanner, state, ++p, pred);
- else
- return Stop;
- }
-};
-
-template <class Scanner>
-struct MultiChunk<Scanner, 0> {
-    // Process a Word-sized chunk which consists of >=1 size_t-sized chunks
- template<class Pred>
+ Action Process(const Scanner& scanner, typename Scanner::State& state, const size_t* p, Pred pred)
+ {
+ if (RunChunk(scanner, state, p, 0, sizeof(void*), pred) == Continue)
+ return MultiChunk<Scanner, Count-1>::Process(scanner, state, ++p, pred);
+ else
+ return Stop;
+ }
+};
+
+template <class Scanner>
+struct MultiChunk<Scanner, 0> {
+    // Process a Word-sized chunk which consists of >=1 size_t-sized chunks
+ template<class Pred>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action Process(const Scanner&, typename Scanner::State, const size_t*, Pred)
- {
- return Continue;
- }
-};
-
-// Efficiently runs a scanner through size_t-aligned memory range
-template<class Relocation, class Shortcutting>
-struct AlignedRunner< Scanner<Relocation, Shortcutting> > {
-private:
- typedef Scanner<Relocation, Shortcutting> ScannerType;
-
-    // Processes a Word-sized chunk of memory (depending on the platform, a Word might
-    // consist of multiple size_t chunks)
- template <class Pred>
+ Action Process(const Scanner&, typename Scanner::State, const size_t*, Pred)
+ {
+ return Continue;
+ }
+};
+
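// What the recursion expands to when sizeof(Word) == 2 * sizeof(size_t)
// (an illustration of the unrolling, written out by hand):
//
//   MultiChunk<Sc, 2>::Process(sc, st, p, pred)
//     -> RunChunk(sc, st, p,     0, sizeof(void*), pred)   // first size_t
//     -> RunChunk(sc, st, p + 1, 0, sizeof(void*), pred)   // second size_t
//     -> MultiChunk<Sc, 0>::Process(...)                   // returns Continue
//
// i.e. exactly sizeof(Word)/sizeof(size_t) straight-line RunChunk() calls, with
// the chain cut short as soon as any of them returns Stop.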
+// Efficiently runs a scanner through size_t-aligned memory range
+template<class Relocation, class Shortcutting>
+struct AlignedRunner< Scanner<Relocation, Shortcutting> > {
+private:
+ typedef Scanner<Relocation, Shortcutting> ScannerType;
+
+    // Processes a Word-sized chunk of memory (depending on the platform, a Word might
+    // consist of multiple size_t chunks)
+ template <class Pred>
static PIRE_FORCED_INLINE PIRE_HOT_FUNCTION
- Action RunMultiChunk(const ScannerType& scanner, typename ScannerType::State& st, const size_t* begin, Pred pred)
- {
- return MultiChunk<ScannerType, sizeof(Word)/sizeof(size_t)>::Process(scanner, st, begin, pred);
- }
-
- // Asserts if the scanner changes state while processing the byte range that is
- // supposed to be skipped by a shortcut
- static void ValidateSkip(const ScannerType& scanner, typename ScannerType::State st, const char* begin, const char* end)
- {
- typename ScannerType::State stateBefore = st;
- for (const char* pos = begin; pos != end; ++pos) {
- Step(scanner, st, (unsigned char)*pos);
+ Action RunMultiChunk(const ScannerType& scanner, typename ScannerType::State& st, const size_t* begin, Pred pred)
+ {
+ return MultiChunk<ScannerType, sizeof(Word)/sizeof(size_t)>::Process(scanner, st, begin, pred);
+ }
+
+	// Asserts that the scanner does not change state while processing the byte range
+	// that is supposed to be skipped by a shortcut
+ static void ValidateSkip(const ScannerType& scanner, typename ScannerType::State st, const char* begin, const char* end)
+ {
+ typename ScannerType::State stateBefore = st;
+ for (const char* pos = begin; pos != end; ++pos) {
+ Step(scanner, st, (unsigned char)*pos);
Y_ASSERT(st == stateBefore);
- }
- }
-
-public:
-
- template<class Pred>
- static inline PIRE_HOT_FUNCTION
- Action RunAligned(const ScannerType& scanner, typename ScannerType::State& st, const size_t* begin, const size_t* end , Pred pred)
- {
+ }
+ }
+
+public:
+
+ template<class Pred>
+ static inline PIRE_HOT_FUNCTION
+ Action RunAligned(const ScannerType& scanner, typename ScannerType::State& st, const size_t* begin, const size_t* end , Pred pred)
+ {
typename ScannerType::State state = st;
- const Word* head = AlignUp((const Word*) begin, sizeof(Word));
- const Word* tail = AlignDown((const Word*) end, sizeof(Word));
- for (; begin != (const size_t*) head && begin != end; ++begin)
- if (RunChunk(scanner, state, begin, 0, sizeof(void*), pred) == Stop) {
- st = state;
- return Stop;
- }
-
- if (begin == end) {
- st = state;
- return Continue;
- }
- if (Shortcutting::NoExit(scanner, state)) {
- st = state;
- return pred(scanner, state, ((const char*) end));
- }
-
- // Row size should be a multiple of MaxSizeWord size. Then alignOffset is the same for any state
+ const Word* head = AlignUp((const Word*) begin, sizeof(Word));
+ const Word* tail = AlignDown((const Word*) end, sizeof(Word));
+ for (; begin != (const size_t*) head && begin != end; ++begin)
+ if (RunChunk(scanner, state, begin, 0, sizeof(void*), pred) == Stop) {
+ st = state;
+ return Stop;
+ }
+
+ if (begin == end) {
+ st = state;
+ return Continue;
+ }
+ if (Shortcutting::NoExit(scanner, state)) {
+ st = state;
+ return pred(scanner, state, ((const char*) end));
+ }
+
+ // Row size should be a multiple of MaxSizeWord size. Then alignOffset is the same for any state
Y_ASSERT((scanner.RowSize()*sizeof(typename ScannerType::Transition)) % sizeof(MaxSizeWord) == 0);
- size_t alignOffset = (AlignUp((size_t)scanner.m_transitions, sizeof(Word)) - (size_t)scanner.m_transitions) / sizeof(size_t);
-
- bool noShortcut = Shortcutting::NoShortcut(scanner, state);
-
- while (true) {
- // Do normal processing until a shortcut is possible
- while (noShortcut && head != tail) {
- if (RunMultiChunk(scanner, state, (const size_t*)head, pred) == Stop) {
- st = state;
- return Stop;
- }
- ++head;
- noShortcut = Shortcutting::NoShortcut(scanner, state);
- }
- if (head == tail)
- break;
-
- if (Shortcutting::NoExit(scanner, state)) {
- st = state;
- return pred(scanner, state, ((const char*) end));
- }
-
- // Do fast forwarding while it is possible
- const Word* skipEnd = Shortcutting::Run(scanner, state, alignOffset, head, tail);
- PIRE_IF_CHECKED(ValidateSkip(scanner, state, (const char*)head, (const char*)skipEnd));
- head = skipEnd;
- noShortcut = true;
- }
-
- for (size_t* p = (size_t*) tail; p != end; ++p) {
- if (RunChunk(scanner, state, p, 0, sizeof(void*), pred) == Stop) {
- st = state;
- return Stop;
- }
- }
-
- st = state;
- return Continue;
- }
-};
-
-#endif
-
-template<class Scanner>
-class ScannerGlueTask: public ScannerGlueCommon<Scanner> {
-public:
- typedef ScannerGlueCommon<Scanner> Base;
- typedef typename Base::State State;
- using Base::Lhs;
- using Base::Rhs;
- using Base::Sc;
- using Base::Letters;
-
- typedef GluedStateLookupTable<256*1024, typename Scanner::State> InvStates;
-
- ScannerGlueTask(const Scanner& lhs, const Scanner& rhs)
- : ScannerGlueCommon<Scanner>(lhs, rhs, LettersEquality<Scanner>(lhs.m_letters, rhs.m_letters))
- {
- }
+ size_t alignOffset = (AlignUp((size_t)scanner.m_transitions, sizeof(Word)) - (size_t)scanner.m_transitions) / sizeof(size_t);
+
+ bool noShortcut = Shortcutting::NoShortcut(scanner, state);
+
+ while (true) {
+ // Do normal processing until a shortcut is possible
+ while (noShortcut && head != tail) {
+ if (RunMultiChunk(scanner, state, (const size_t*)head, pred) == Stop) {
+ st = state;
+ return Stop;
+ }
+ ++head;
+ noShortcut = Shortcutting::NoShortcut(scanner, state);
+ }
+ if (head == tail)
+ break;
+
+ if (Shortcutting::NoExit(scanner, state)) {
+ st = state;
+ return pred(scanner, state, ((const char*) end));
+ }
+
+ // Do fast forwarding while it is possible
+ const Word* skipEnd = Shortcutting::Run(scanner, state, alignOffset, head, tail);
+ PIRE_IF_CHECKED(ValidateSkip(scanner, state, (const char*)head, (const char*)skipEnd));
+ head = skipEnd;
+ noShortcut = true;
+ }
+
+ for (size_t* p = (size_t*) tail; p != end; ++p) {
+ if (RunChunk(scanner, state, p, 0, sizeof(void*), pred) == Stop) {
+ st = state;
+ return Stop;
+ }
+ }
+
+ st = state;
+ return Continue;
+ }
+};
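The RunAligned() routine above splits a byte range into an unaligned head, a Word-aligned body and an unaligned tail. Below is a stripped-down sketch of just that split, with plain pointer arithmetic standing in for Pire's AlignUp/AlignDown and scanner plumbing; it is illustrative only.

// Sketch: process an arbitrary byte range as unaligned head, word-aligned body,
// and unaligned tail -- the same structure RunAligned uses above.
#include <cstddef>
#include <cstdint>

template <class ByteFn, class WordFn>
void ProcessAligned(const char* begin, const char* end, ByteFn onByte, WordFn onWord)
{
    const uintptr_t wordMask = sizeof(size_t) - 1;
    const char* head = reinterpret_cast<const char*>(
        (reinterpret_cast<uintptr_t>(begin) + wordMask) & ~wordMask);   // AlignUp
    const char* tail = reinterpret_cast<const char*>(
        reinterpret_cast<uintptr_t>(end) & ~wordMask);                  // AlignDown
    if (head > end) head = end;       // range shorter than one word
    if (tail < head) tail = head;

    for (const char* p = begin; p != head; ++p)                  // unaligned head
        onByte(*p);
    for (const char* p = head; p != tail; p += sizeof(size_t))   // aligned body
        onWord(*reinterpret_cast<const size_t*>(p));
    for (const char* p = tail; p != end; ++p)                    // unaligned tail
        onByte(*p);
}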
+
+#endif
+
+template<class Scanner>
+class ScannerGlueTask: public ScannerGlueCommon<Scanner> {
+public:
+ typedef ScannerGlueCommon<Scanner> Base;
+ typedef typename Base::State State;
+ using Base::Lhs;
+ using Base::Rhs;
+ using Base::Sc;
+ using Base::Letters;
+
+ typedef GluedStateLookupTable<256*1024, typename Scanner::State> InvStates;
+
+ ScannerGlueTask(const Scanner& lhs, const Scanner& rhs)
+ : ScannerGlueCommon<Scanner>(lhs, rhs, LettersEquality<Scanner>(lhs.m_letters, rhs.m_letters))
+ {
+ }
void AcceptStates(const TVector<State>& states)
- {
- // Make up a new scanner and fill in the final table
+ {
+ // Make up a new scanner and fill in the final table
- size_t finalTableSize = 0;
+ size_t finalTableSize = 0;
for (auto&& i : states)
finalTableSize += RangeLen(Lhs().AcceptedRegexps(i.first)) + RangeLen(Rhs().AcceptedRegexps(i.second));
this->SetSc(THolder<Scanner>(new Scanner));
- Sc().Init(states.size(), Letters(), finalTableSize, size_t(0), Lhs().RegexpsCount() + Rhs().RegexpsCount());
+ Sc().Init(states.size(), Letters(), finalTableSize, size_t(0), Lhs().RegexpsCount() + Rhs().RegexpsCount());
auto finalWriter = Sc().m_final;
- for (size_t state = 0; state != states.size(); ++state) {
+ for (size_t state = 0; state != states.size(); ++state) {
Sc().m_finalIndex[state] = finalWriter - Sc().m_final;
finalWriter = Shift(Lhs().AcceptedRegexps(states[state].first), 0, finalWriter);
finalWriter = Shift(Rhs().AcceptedRegexps(states[state].second), Lhs().RegexpsCount(), finalWriter);
*finalWriter++ = static_cast<size_t>(-1);
- Sc().SetTag(state, ((Lhs().Final(states[state].first) || Rhs().Final(states[state].second)) ? Scanner::FinalFlag : 0)
- | ((Lhs().Dead(states[state].first) && Rhs().Dead(states[state].second)) ? Scanner::DeadFlag : 0));
- }
- }
+ Sc().SetTag(state, ((Lhs().Final(states[state].first) || Rhs().Final(states[state].second)) ? Scanner::FinalFlag : 0)
+ | ((Lhs().Dead(states[state].first) && Rhs().Dead(states[state].second)) ? Scanner::DeadFlag : 0));
+ }
+ }
+
+ void Connect(size_t from, size_t to, Char letter) { Sc().SetJump(from, letter, to); }
- void Connect(size_t from, size_t to, Char letter) { Sc().SetJump(from, letter, to); }
-
- const Scanner& Success()
- {
- Sc().BuildShortcuts();
- return Sc();
- }
+ const Scanner& Success()
+ {
+ Sc().BuildShortcuts();
+ return Sc();
+ }
private:
- template<class Iter>
- size_t RangeLen(ypair<Iter, Iter> range) const
- {
- return std::distance(range.first, range.second);
- }
-
- template<class Iter, class OutIter>
- OutIter Shift(ypair<Iter, Iter> range, size_t shift, OutIter out) const
- {
- for (; range.first != range.second; ++range.first, ++out)
- *out = *range.first + shift;
- return out;
- }
-};
-
-}
-
-
-template<class Relocation, class Shortcutting>
-struct StDumper< Impl::Scanner<Relocation, Shortcutting> > {
-
- typedef Impl::Scanner<Relocation, Shortcutting> ScannerType;
-
- StDumper(const ScannerType& sc, typename ScannerType::State st): m_sc(&sc), m_st(st) {}
-
- void Dump(yostream& stream) const
- {
- stream << m_sc->StateIndex(m_st);
- if (m_sc->Final(m_st))
- stream << " [final]";
- if (m_sc->Dead(m_st))
- stream << " [dead]";
- }
-private:
- const ScannerType* m_sc;
- typename ScannerType::State m_st;
-};
-
-
-template<class Relocation, class Shortcutting>
-Impl::Scanner<Relocation, Shortcutting> Impl::Scanner<Relocation, Shortcutting>::Glue(const Impl::Scanner<Relocation, Shortcutting>& lhs, const Impl::Scanner<Relocation, Shortcutting>& rhs, size_t maxSize /* = 0 */)
-{
- if (lhs.Empty())
- return rhs;
- if (rhs.Empty())
- return lhs;
-
- static const size_t DefMaxSize = 80000;
- Impl::ScannerGlueTask< Impl::Scanner<Relocation, Shortcutting> > task(lhs, rhs);
- return Impl::Determine(task, maxSize ? maxSize : DefMaxSize);
-}
-
-
-/**
- * A compiled multiregexp.
- * Can only find out whether a string matches the regexps or not,
- * but takes O( str.length() ) time.
- *
- * In addition, multiple scanners can be agglutinated together,
- * producing a scanner which can be used for checking
- * strings against several regexps in a single pass.
- */
-typedef Impl::Scanner<Impl::Relocatable, Impl::ExitMasks<2> > Scanner;
-typedef Impl::Scanner<Impl::Relocatable, Impl::NoShortcuts> ScannerNoMask;
-
-/**
- * Same as above, but does not allow relocation or mmap()-ing.
- * On the other hand, runs almost twice as fast as the Scanner.
- */
-typedef Impl::Scanner<Impl::Nonrelocatable, Impl::ExitMasks<2> > NonrelocScanner;
-typedef Impl::Scanner<Impl::Nonrelocatable, Impl::NoShortcuts> NonrelocScannerNoMask;
-
-}
-
-namespace std {
+ template<class Iter>
+ size_t RangeLen(ypair<Iter, Iter> range) const
+ {
+ return std::distance(range.first, range.second);
+ }
+
+ template<class Iter, class OutIter>
+ OutIter Shift(ypair<Iter, Iter> range, size_t shift, OutIter out) const
+ {
+ for (; range.first != range.second; ++range.first, ++out)
+ *out = *range.first + shift;
+ return out;
+ }
+};
+
+}
+
+
+template<class Relocation, class Shortcutting>
+struct StDumper< Impl::Scanner<Relocation, Shortcutting> > {
+
+ typedef Impl::Scanner<Relocation, Shortcutting> ScannerType;
+
+ StDumper(const ScannerType& sc, typename ScannerType::State st): m_sc(&sc), m_st(st) {}
+
+ void Dump(yostream& stream) const
+ {
+ stream << m_sc->StateIndex(m_st);
+ if (m_sc->Final(m_st))
+ stream << " [final]";
+ if (m_sc->Dead(m_st))
+ stream << " [dead]";
+ }
+private:
+ const ScannerType* m_sc;
+ typename ScannerType::State m_st;
+};
+
+
+template<class Relocation, class Shortcutting>
+Impl::Scanner<Relocation, Shortcutting> Impl::Scanner<Relocation, Shortcutting>::Glue(const Impl::Scanner<Relocation, Shortcutting>& lhs, const Impl::Scanner<Relocation, Shortcutting>& rhs, size_t maxSize /* = 0 */)
+{
+ if (lhs.Empty())
+ return rhs;
+ if (rhs.Empty())
+ return lhs;
+
+ static const size_t DefMaxSize = 80000;
+ Impl::ScannerGlueTask< Impl::Scanner<Relocation, Shortcutting> > task(lhs, rhs);
+ return Impl::Determine(task, maxSize ? maxSize : DefMaxSize);
+}
+
+
+/**
+ * A compiled multiregexp.
+ * Can only find out whether a string matches the regexps or not,
+ * but takes O( str.length() ) time.
+ *
+ * In addition, multiple scanners can be agglutinated together,
+ * producing a scanner which can be used for checking
+ * strings against several regexps in a single pass.
+ */
+typedef Impl::Scanner<Impl::Relocatable, Impl::ExitMasks<2> > Scanner;
+typedef Impl::Scanner<Impl::Relocatable, Impl::NoShortcuts> ScannerNoMask;
+
+/**
+ * Same as above, but does not allow relocation or mmap()-ing.
+ * On the other hand, runs almost twice as fast as the Scanner.
+ */
+typedef Impl::Scanner<Impl::Nonrelocatable, Impl::ExitMasks<2> > NonrelocScanner;
+typedef Impl::Scanner<Impl::Nonrelocatable, Impl::NoShortcuts> NonrelocScannerNoMask;
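A hedged usage sketch of Glue() and the scanner typedefs above; it assumes sc1 and sc2 are Pire::Scanner objects compiled elsewhere and relies only on Glue, Initialize, Step, Final and AcceptedRegexps, which appear in this header.

// Sketch: check a buffer against two glued regexps in a single pass.
// Assumes sc1 and sc2 are already-compiled Pire::Scanner instances.
bool MatchesAnyOfTwo(const Pire::Scanner& sc1, const Pire::Scanner& sc2,
                     const char* begin, const char* end)
{
    Pire::Scanner glued = Pire::Scanner::Glue(sc1, sc2);   // one automaton for both regexps
    Pire::Scanner::State st;
    glued.Initialize(st);
    for (const char* p = begin; p != end; ++p)
        Pire::Step(glued, st, static_cast<unsigned char>(*p));
    // glued.AcceptedRegexps(st) would report which of the two regexps matched;
    // Final() only says whether at least one of them did.
    return glued.Final(st);
}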
+
+}
+
+namespace std {
inline void swap(Pire::Scanner& a, Pire::Scanner& b) {
- a.Swap(b);
- }
-
+ a.Swap(b);
+ }
+
inline void swap(Pire::NonrelocScanner& a, Pire::NonrelocScanner& b) {
- a.Swap(b);
- }
-}
-
-
-#endif
+ a.Swap(b);
+ }
+}
+
+
+#endif
diff --git a/contrib/libs/pire/pire/scanners/null.cpp b/contrib/libs/pire/pire/scanners/null.cpp
index 3a7fee7220d..f0e21ce4d3b 100644
--- a/contrib/libs/pire/pire/scanners/null.cpp
+++ b/contrib/libs/pire/pire/scanners/null.cpp
@@ -1,6 +1,6 @@
#include <contrib/libs/pire/pire/fsm.h>
-#include "multi.h"
+#include "multi.h"
#include "half_final.h"
-#include "simple.h"
-#include "slow.h"
-#include "loaded.h"
+#include "simple.h"
+#include "slow.h"
+#include "loaded.h"
diff --git a/contrib/libs/pire/pire/scanners/pair.h b/contrib/libs/pire/pire/scanners/pair.h
index 16fc14a59fa..c12338a2a06 100644
--- a/contrib/libs/pire/pire/scanners/pair.h
+++ b/contrib/libs/pire/pire/scanners/pair.h
@@ -1,99 +1,99 @@
-/*
- * pair.h -- definition of the pair of scanners
+/*
+ * pair.h -- definition of the pair of scanners
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#ifndef PIRE_SCANNER_PAIR_INCLUDED
-#define PIRE_SCANNER_PAIR_INCLUDED
-
-namespace Pire {
-
- /**
-	 * A pair of scanners, providing the interface of a scanner itself.
-	 * If you need to run two scanners on the same string, using ScannerPair
-	 * is usually faster than running those scanners sequentially.
- */
- template<class Scanner1, class Scanner2>
- class ScannerPair {
- public:
- typedef ypair<typename Scanner1::State, typename Scanner2::State> State;
- typedef ypair<typename Scanner1::Action, typename Scanner2::Action> Action;
-
- ScannerPair()
- : m_scanner1()
- , m_scanner2()
- {
- }
- ScannerPair(const Scanner1& s1, const Scanner2& s2)
- : m_scanner1(&s1)
- , m_scanner2(&s2)
- {
- }
-
- void Initialize(State& state) const
- {
- m_scanner1->Initialize(state.first);
- m_scanner2->Initialize(state.second);
- }
-
- Action Next(State& state, Char ch) const
- {
- return ymake_pair(
- m_scanner1->Next(state.first, ch),
- m_scanner2->Next(state.second, ch)
- );
- }
-
- void TakeAction(State& s, Action a) const
- {
- m_scanner1->TakeAction(s.first, a.first);
- m_scanner2->TakeAction(s.second, a.second);
- }
-
- bool Final(const State& state) const
- {
- return m_scanner1->Final(state.first) || m_scanner2->Final(state.second);
- }
-
- bool Dead(const State& state) const
- {
- return m_scanner1->Dead(state.first) && m_scanner2->Dead(state.second);
- }
-
- ypair<size_t, size_t> StateIndex(const State& state) const
- {
- return ymake_pair(m_scanner1->StateIndex(state.first), m_scanner2->StateIndex(state.second));
- }
-
- Scanner1& First() { return *m_scanner1; }
- Scanner2& Second() { return *m_scanner2; }
-
- const Scanner1& First() const { return *m_scanner1; }
- const Scanner2& Second() const { return *m_scanner2; }
-
- private:
- const Scanner1* m_scanner1;
- const Scanner2* m_scanner2;
- };
-
-
-}
-
-#endif
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#ifndef PIRE_SCANNER_PAIR_INCLUDED
+#define PIRE_SCANNER_PAIR_INCLUDED
+
+namespace Pire {
+
+ /**
+	 * A pair of scanners, providing the interface of a scanner itself.
+	 * If you need to run two scanners on the same string, using ScannerPair
+	 * is usually faster than running those scanners sequentially.
+ */
+ template<class Scanner1, class Scanner2>
+ class ScannerPair {
+ public:
+ typedef ypair<typename Scanner1::State, typename Scanner2::State> State;
+ typedef ypair<typename Scanner1::Action, typename Scanner2::Action> Action;
+
+ ScannerPair()
+ : m_scanner1()
+ , m_scanner2()
+ {
+ }
+ ScannerPair(const Scanner1& s1, const Scanner2& s2)
+ : m_scanner1(&s1)
+ , m_scanner2(&s2)
+ {
+ }
+
+ void Initialize(State& state) const
+ {
+ m_scanner1->Initialize(state.first);
+ m_scanner2->Initialize(state.second);
+ }
+
+ Action Next(State& state, Char ch) const
+ {
+ return ymake_pair(
+ m_scanner1->Next(state.first, ch),
+ m_scanner2->Next(state.second, ch)
+ );
+ }
+
+ void TakeAction(State& s, Action a) const
+ {
+ m_scanner1->TakeAction(s.first, a.first);
+ m_scanner2->TakeAction(s.second, a.second);
+ }
+
+ bool Final(const State& state) const
+ {
+ return m_scanner1->Final(state.first) || m_scanner2->Final(state.second);
+ }
+
+ bool Dead(const State& state) const
+ {
+ return m_scanner1->Dead(state.first) && m_scanner2->Dead(state.second);
+ }
+
+ ypair<size_t, size_t> StateIndex(const State& state) const
+ {
+ return ymake_pair(m_scanner1->StateIndex(state.first), m_scanner2->StateIndex(state.second));
+ }
+
+ Scanner1& First() { return *m_scanner1; }
+ Scanner2& Second() { return *m_scanner2; }
+
+ const Scanner1& First() const { return *m_scanner1; }
+ const Scanner2& Second() const { return *m_scanner2; }
+
+ private:
+ const Scanner1* m_scanner1;
+ const Scanner2* m_scanner2;
+ };
+
+
+}
+
+#endif
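A small usage sketch of ScannerPair, using only the members defined above (Initialize, Next, TakeAction, Final); both scanners are assumed to be built elsewhere.

// Sketch: scan a buffer once while advancing two scanners in lock-step.
template <class S1, class S2>
bool EitherMatches(const S1& sc1, const S2& sc2, const char* begin, const char* end)
{
    Pire::ScannerPair<S1, S2> pair(sc1, sc2);
    typename Pire::ScannerPair<S1, S2>::State st;
    pair.Initialize(st);
    for (const char* p = begin; p != end; ++p)
        pair.TakeAction(st, pair.Next(st, static_cast<unsigned char>(*p)));
    return pair.Final(st);    // true if at least one of the two scanners accepts
}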
diff --git a/contrib/libs/pire/pire/scanners/simple.h b/contrib/libs/pire/pire/scanners/simple.h
index 3175e105dac..ef959aeed13 100644
--- a/contrib/libs/pire/pire/scanners/simple.h
+++ b/contrib/libs/pire/pire/scanners/simple.h
@@ -1,190 +1,190 @@
-/*
- * simple.h -- the definition of the SimpleScanner
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * simple.h -- the definition of the SimpleScanner
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_SCANNERS_SIMPLE_H
-#define PIRE_SCANNERS_SIMPLE_H
-
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_SCANNERS_SIMPLE_H
+#define PIRE_SCANNERS_SIMPLE_H
+
#include <contrib/libs/pire/pire/approx_matching.h>
#include <contrib/libs/pire/pire/stub/stl.h>
#include <contrib/libs/pire/pire/stub/defaults.h>
#include <contrib/libs/pire/pire/stub/saveload.h>
-#include "common.h"
-
-namespace Pire {
-
-/**
- * A faster version than the Scanner, but it cannot store multiple
- * regexps and takes more memory for the same regexp.
- */
-class SimpleScanner {
-private:
- static const size_t STATE_ROW_SIZE = MaxChar + 1; // All characters + 1 element to store final state flag
-
-public:
- typedef size_t Transition;
- typedef ui16 Letter;
- typedef ui32 Action;
- typedef ui8 Tag;
-
- SimpleScanner() { Alias(Null()); }
+#include "common.h"
+
+namespace Pire {
+
+/**
+ * A faster version than the Scanner, but it cannot store multiple
+ * regexps and takes more memory for the same regexp.
+ */
+class SimpleScanner {
+private:
+ static const size_t STATE_ROW_SIZE = MaxChar + 1; // All characters + 1 element to store final state flag
+
+public:
+ typedef size_t Transition;
+ typedef ui16 Letter;
+ typedef ui32 Action;
+ typedef ui8 Tag;
+
+ SimpleScanner() { Alias(Null()); }
explicit SimpleScanner(Fsm& fsm, size_t distance = 0);
-
- size_t Size() const { return m.statesCount; }
- bool Empty() const { return m_transitions == Null().m_transitions; }
-
- typedef size_t State;
-
- size_t RegexpsCount() const { return Empty() ? 0 : 1; }
- size_t LettersCount() const { return MaxChar; }
-
-	/// Checks whether the specified state is in any of the final sets
- bool Final(const State& state) const { return *(((const Transition*) state) - 1) != 0; }
-
- bool Dead(const State&) const { return false; }
-
+
+ size_t Size() const { return m.statesCount; }
+ bool Empty() const { return m_transitions == Null().m_transitions; }
+
+ typedef size_t State;
+
+ size_t RegexpsCount() const { return Empty() ? 0 : 1; }
+ size_t LettersCount() const { return MaxChar; }
+
+	/// Checks whether the specified state is in any of the final sets
+ bool Final(const State& state) const { return *(((const Transition*) state) - 1) != 0; }
+
+ bool Dead(const State&) const { return false; }
+
ypair<const size_t*, const size_t*> AcceptedRegexps(const State& s) const {
return Final(s) ? Accept() : Deny();
}
-	/// Returns the initial state for this scanner
- void Initialize(State& state) const { state = m.initial; }
-
-	/// Handles one character
- Action Next(State& state, Char c) const
- {
- Transition shift = reinterpret_cast<const Transition*>(state)[c];
- state += shift;
- return 0;
- }
-
- bool TakeAction(State&, Action) const { return false; }
-
- SimpleScanner(const SimpleScanner& s): m(s.m)
- {
- if (!s.m_buffer) {
- // Empty or mmap()-ed scanner, just copy pointers
- m_buffer = 0;
- m_transitions = s.m_transitions;
- } else {
- // In-memory scanner, perform deep copy
+	/// Returns the initial state for this scanner
+ void Initialize(State& state) const { state = m.initial; }
+
+	/// Handles one character
+ Action Next(State& state, Char c) const
+ {
+ Transition shift = reinterpret_cast<const Transition*>(state)[c];
+ state += shift;
+ return 0;
+ }
+
+ bool TakeAction(State&, Action) const { return false; }
+
+ SimpleScanner(const SimpleScanner& s): m(s.m)
+ {
+ if (!s.m_buffer) {
+ // Empty or mmap()-ed scanner, just copy pointers
+ m_buffer = 0;
+ m_transitions = s.m_transitions;
+ } else {
+ // In-memory scanner, perform deep copy
m_buffer = BufferType(new char[BufSize()]);
memcpy(m_buffer.Get(), s.m_buffer.Get(), BufSize());
Markup(m_buffer.Get());
-
- m.initial += (m_transitions - s.m_transitions) * sizeof(Transition);
- }
- }
-
- // Makes a shallow ("weak") copy of the given scanner.
- // The copied scanner does not maintain lifetime of the original's entrails.
- void Alias(const SimpleScanner& s)
- {
- m = s.m;
+
+ m.initial += (m_transitions - s.m_transitions) * sizeof(Transition);
+ }
+ }
+
+ // Makes a shallow ("weak") copy of the given scanner.
+ // The copied scanner does not maintain lifetime of the original's entrails.
+ void Alias(const SimpleScanner& s)
+ {
+ m = s.m;
m_buffer.Reset();
- m_transitions = s.m_transitions;
- }
-
- void Swap(SimpleScanner& s)
- {
- DoSwap(m_buffer, s.m_buffer);
- DoSwap(m.statesCount, s.m.statesCount);
- DoSwap(m.initial, s.m.initial);
- DoSwap(m_transitions, s.m_transitions);
- }
-
- SimpleScanner& operator = (const SimpleScanner& s) { SimpleScanner(s).Swap(*this); return *this; }
-
+ m_transitions = s.m_transitions;
+ }
+
+ void Swap(SimpleScanner& s)
+ {
+ DoSwap(m_buffer, s.m_buffer);
+ DoSwap(m.statesCount, s.m.statesCount);
+ DoSwap(m.initial, s.m.initial);
+ DoSwap(m_transitions, s.m_transitions);
+ }
+
+ SimpleScanner& operator = (const SimpleScanner& s) { SimpleScanner(s).Swap(*this); return *this; }
+
~SimpleScanner() = default;
-
- /*
- * Constructs the scanner from mmap()-ed memory range, returning a pointer
- * to unconsumed part of the buffer.
- */
- const void* Mmap(const void* ptr, size_t size)
- {
- Impl::CheckAlign(ptr);
- SimpleScanner s;
-
- const size_t* p = reinterpret_cast<const size_t*>(ptr);
+
+ /*
+ * Constructs the scanner from mmap()-ed memory range, returning a pointer
+ * to unconsumed part of the buffer.
+ */
+ const void* Mmap(const void* ptr, size_t size)
+ {
+ Impl::CheckAlign(ptr);
+ SimpleScanner s;
+
+ const size_t* p = reinterpret_cast<const size_t*>(ptr);
Impl::ValidateHeader(p, size, ScannerIOTypes::SimpleScanner, sizeof(m));
- if (size < sizeof(s.m))
- throw Error("EOF reached while mapping NPire::Scanner");
-
- memcpy(&s.m, p, sizeof(s.m));
- Impl::AdvancePtr(p, size, sizeof(s.m));
- Impl::AlignPtr(p, size);
-
- bool empty = *((const bool*) p);
- Impl::AdvancePtr(p, size, sizeof(empty));
- Impl::AlignPtr(p, size);
-
- if (empty)
- s.Alias(Null());
- else {
- if (size < s.BufSize())
- throw Error("EOF reached while mapping NPire::Scanner");
- s.Markup(const_cast<size_t*>(p));
- s.m.initial += reinterpret_cast<size_t>(s.m_transitions);
-
- Swap(s);
- Impl::AdvancePtr(p, size, BufSize());
- }
- return Impl::AlignPtr(p, size);
- }
-
- size_t StateIndex(State s) const
- {
- return (s - reinterpret_cast<size_t>(m_transitions)) / (STATE_ROW_SIZE * sizeof(Transition));
- }
-
-	// Returns the size of the memory buffer used (or required) by the scanner.
- size_t BufSize() const
- {
- return STATE_ROW_SIZE * m.statesCount * sizeof(Transition); // Transitions table
- }
-
- void Save(yostream*) const;
- void Load(yistream*);
-
-protected:
- struct Locals {
- size_t statesCount;
- size_t initial;
- } m;
-
+ if (size < sizeof(s.m))
+ throw Error("EOF reached while mapping NPire::Scanner");
+
+ memcpy(&s.m, p, sizeof(s.m));
+ Impl::AdvancePtr(p, size, sizeof(s.m));
+ Impl::AlignPtr(p, size);
+
+ bool empty = *((const bool*) p);
+ Impl::AdvancePtr(p, size, sizeof(empty));
+ Impl::AlignPtr(p, size);
+
+ if (empty)
+ s.Alias(Null());
+ else {
+ if (size < s.BufSize())
+ throw Error("EOF reached while mapping NPire::Scanner");
+ s.Markup(const_cast<size_t*>(p));
+ s.m.initial += reinterpret_cast<size_t>(s.m_transitions);
+
+ Swap(s);
+ Impl::AdvancePtr(p, size, BufSize());
+ }
+ return Impl::AlignPtr(p, size);
+ }
+
+ size_t StateIndex(State s) const
+ {
+ return (s - reinterpret_cast<size_t>(m_transitions)) / (STATE_ROW_SIZE * sizeof(Transition));
+ }
+
+	// Returns the size of the memory buffer used (or required) by the scanner.
+ size_t BufSize() const
+ {
+ return STATE_ROW_SIZE * m.statesCount * sizeof(Transition); // Transitions table
+ }
+
+ void Save(yostream*) const;
+ void Load(yistream*);
+
+protected:
+ struct Locals {
+ size_t statesCount;
+ size_t initial;
+ } m;
+
using BufferType = TArrayHolder<char>;
BufferType m_buffer;
-
- Transition* m_transitions;
-
- inline static const SimpleScanner& Null()
- {
- static const SimpleScanner n = Fsm::MakeFalse().Compile<SimpleScanner>();
- return n;
- }
-
+
+ Transition* m_transitions;
+
+ inline static const SimpleScanner& Null()
+ {
+ static const SimpleScanner n = Fsm::MakeFalse().Compile<SimpleScanner>();
+ return n;
+ }
+
static ypair<const size_t*, const size_t*> Accept()
{
static size_t v[1] = { 0 };
@@ -197,65 +197,65 @@ protected:
return ymake_pair(v, v);
}
- /*
- * Initializes pointers depending on buffer start, letters and states count
- */
- void Markup(void* ptr)
- {
- m_transitions = reinterpret_cast<Transition*>(ptr);
- }
-
- void SetJump(size_t oldState, Char c, size_t newState)
- {
+ /*
+ * Initializes pointers depending on buffer start, letters and states count
+ */
+ void Markup(void* ptr)
+ {
+ m_transitions = reinterpret_cast<Transition*>(ptr);
+ }
+
+ void SetJump(size_t oldState, Char c, size_t newState)
+ {
Y_ASSERT(m_buffer);
Y_ASSERT(oldState < m.statesCount);
Y_ASSERT(newState < m.statesCount);
- m_transitions[oldState * STATE_ROW_SIZE + 1 + c]
- = (((newState - oldState) * STATE_ROW_SIZE) * sizeof(Transition));
- }
-
- unsigned long RemapAction(unsigned long action) { return action; }
-
- void SetInitial(size_t state)
- {
+ m_transitions[oldState * STATE_ROW_SIZE + 1 + c]
+ = (((newState - oldState) * STATE_ROW_SIZE) * sizeof(Transition));
+ }
+
+ unsigned long RemapAction(unsigned long action) { return action; }
+
+ void SetInitial(size_t state)
+ {
Y_ASSERT(m_buffer);
- m.initial = reinterpret_cast<size_t>(m_transitions + state * STATE_ROW_SIZE + 1);
- }
-
- void SetTag(size_t state, size_t tag)
- {
+ m.initial = reinterpret_cast<size_t>(m_transitions + state * STATE_ROW_SIZE + 1);
+ }
+
+ void SetTag(size_t state, size_t tag)
+ {
Y_ASSERT(m_buffer);
- m_transitions[state * STATE_ROW_SIZE] = tag;
- }
-
-};
+ m_transitions[state * STATE_ROW_SIZE] = tag;
+ }
+
+};
inline SimpleScanner::SimpleScanner(Fsm& fsm, size_t distance)
-{
+{
if (distance) {
fsm = CreateApproxFsm(fsm, distance);
}
- fsm.Canonize();
+ fsm.Canonize();
- m.statesCount = fsm.Size();
+ m.statesCount = fsm.Size();
m_buffer = BufferType(new char[BufSize()]);
memset(m_buffer.Get(), 0, BufSize());
Markup(m_buffer.Get());
- m.initial = reinterpret_cast<size_t>(m_transitions + fsm.Initial() * STATE_ROW_SIZE + 1);
- for (size_t state = 0; state < fsm.Size(); ++state)
- SetTag(state, fsm.Tag(state) | (fsm.IsFinal(state) ? 1 : 0));
-
- for (size_t from = 0; from != fsm.Size(); ++from)
+ m.initial = reinterpret_cast<size_t>(m_transitions + fsm.Initial() * STATE_ROW_SIZE + 1);
+ for (size_t state = 0; state < fsm.Size(); ++state)
+ SetTag(state, fsm.Tag(state) | (fsm.IsFinal(state) ? 1 : 0));
+
+ for (size_t from = 0; from != fsm.Size(); ++from)
for (auto&& i : fsm.Letters()) {
const auto& tos = fsm.Destinations(from, i.first);
- if (tos.empty())
- continue;
+ if (tos.empty())
+ continue;
for (auto&& l : i.second.second)
for (auto&& to : tos)
SetJump(from, l, to);
- }
-}
-
+ }
+}
+
+
+}
-}
-
-#endif
+#endif
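A hedged sketch of driving SimpleScanner by hand with the members defined above (Initialize, Next, Final); compiling the scanner itself from a single-regexp Fsm is assumed to happen elsewhere.

// Sketch: each Next() is a single table lookup plus an addition, which is
// what makes SimpleScanner faster than the multi-regexp Scanner.
inline bool SimpleMatches(const Pire::SimpleScanner& sc, const char* begin, const char* end)
{
    Pire::SimpleScanner::State st;
    sc.Initialize(st);
    for (const char* p = begin; p != end; ++p)
        sc.Next(st, static_cast<unsigned char>(*p));
    return sc.Final(st);
}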
diff --git a/contrib/libs/pire/pire/scanners/slow.h b/contrib/libs/pire/pire/scanners/slow.h
index fa449bb1c5c..6adfcb8c1d0 100644
--- a/contrib/libs/pire/pire/scanners/slow.h
+++ b/contrib/libs/pire/pire/scanners/slow.h
@@ -1,29 +1,29 @@
-/*
- * slow.h -- definition of the SlowScanner
- *
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
+/*
+ * slow.h -- definition of the SlowScanner
*
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_SCANNERS_SLOW_H
-#define PIRE_SCANNERS_SLOW_H
-
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_SCANNERS_SLOW_H
+#define PIRE_SCANNERS_SLOW_H
+
#include <contrib/libs/pire/pire/approx_matching.h>
#include <contrib/libs/pire/pire/partition.h>
#include <contrib/libs/pire/pire/vbitset.h>
@@ -32,271 +32,271 @@
#include <contrib/libs/pire/pire/stub/saveload.h>
#include <contrib/libs/pire/pire/stub/stl.h>
-#include "common.h"
-
-#ifdef PIRE_DEBUG
-#include <iostream>
+#include "common.h"
+
+#ifdef PIRE_DEBUG
+#include <iostream>
#include <contrib/libs/pire/pire/stub/lexical_cast.h>
-#endif
-
-namespace Pire {
-
-/**
- * A 'slow' scanner.
- * Takes O( str.length() * this->m_states.size() ) time to scan a string,
- * but does not require the FSM to be deterministic.
- * Thus it can be used to handle something like /x.{40}$/,
- * where the deterministic FSM contains 2^40 states and hence cannot fit
- * in memory.
- */
-class SlowScanner {
-public:
- typedef size_t Transition;
- typedef ui16 Letter;
- typedef ui32 Action;
- typedef ui8 Tag;
-
+#endif
+
+namespace Pire {
+
+/**
+ * A 'slow' scanner.
+ * Takes O( str.length() * this->m_states.size() ) time to scan a string,
+ * but does not require the FSM to be deterministic.
+ * Thus it can be used to handle something like /x.{40}$/,
+ * where the deterministic FSM contains 2^40 states and hence cannot fit
+ * in memory.
+ */
+class SlowScanner {
+public:
+ typedef size_t Transition;
+ typedef ui16 Letter;
+ typedef ui32 Action;
+ typedef ui8 Tag;
+
enum {
- FinalFlag = 1,
- DeadFlag = 0
- };
-
- struct State {
+ FinalFlag = 1,
+ DeadFlag = 0
+ };
+
+ struct State {
TVector<unsigned> states;
- BitSet flags;
-
- State() {}
- State(size_t size): flags(size) { states.reserve(size); }
- void Swap(State& s) { states.swap(s.states); flags.Swap(s.flags); }
-
-#ifdef PIRE_DEBUG
- friend yostream& operator << (yostream& stream, const State& state) { return stream << Join(state.states.begin(), state.states.end(), ", "); }
-#endif
- };
-
+ BitSet flags;
+
+ State() {}
+ State(size_t size): flags(size) { states.reserve(size); }
+ void Swap(State& s) { states.swap(s.states); flags.Swap(s.flags); }
+
+#ifdef PIRE_DEBUG
+ friend yostream& operator << (yostream& stream, const State& state) { return stream << Join(state.states.begin(), state.states.end(), ", "); }
+#endif
+ };
+
SlowScanner(bool needActions = false) {
Alias(Null());
need_actions = needActions;
}
-
+
size_t GetLettersCount() const {return m.lettersCount; };
size_t Size() const { return GetSize(); }
size_t GetSize() const { return m.statesCount; }
- bool Empty() const { return m_finals == Null().m_finals; }
-
- size_t Id() const {return (size_t) -1;}
- size_t RegexpsCount() const { return Empty() ? 0 : 1; }
-
- void Initialize(State& state) const
- {
- state.states.clear();
- state.states.reserve(m.statesCount);
- state.states.push_back(m.start);
- BitSet(m.statesCount).Swap(state.flags);
- }
-
+ bool Empty() const { return m_finals == Null().m_finals; }
+
+ size_t Id() const {return (size_t) -1;}
+ size_t RegexpsCount() const { return Empty() ? 0 : 1; }
+
+ void Initialize(State& state) const
+ {
+ state.states.clear();
+ state.states.reserve(m.statesCount);
+ state.states.push_back(m.start);
+ BitSet(m.statesCount).Swap(state.flags);
+ }
+
Char Translate(Char ch) const
- {
+ {
return m_letters[static_cast<size_t>(ch)];
}
Action NextTranslated(const State& current, State& next, Char l) const
{
- next.flags.Clear();
- next.states.clear();
+ next.flags.Clear();
+ next.states.clear();
for (auto&& state : current.states) {
- const unsigned* begin = 0;
- const unsigned* end = 0;
- if (!m_vecptr) {
+ const unsigned* begin = 0;
+ const unsigned* end = 0;
+ if (!m_vecptr) {
const size_t* pos = m_jumpPos + state * m.lettersCount + l;
- begin = m_jumps + pos[0];
- end = m_jumps + pos[1];
- } else {
+ begin = m_jumps + pos[0];
+ end = m_jumps + pos[1];
+ } else {
const auto& v = (*m_vecptr)[state * m.lettersCount + l];
- if (!v.empty()) {
- begin = &v[0];
- end = &v[0] + v.size();
- }
- }
-
- for (; begin != end; ++begin)
- if (!next.flags.Test(*begin)) {
- next.flags.Set(*begin);
- next.states.push_back(*begin);
- }
- }
-
- return 0;
- }
-
+ if (!v.empty()) {
+ begin = &v[0];
+ end = &v[0] + v.size();
+ }
+ }
+
+ for (; begin != end; ++begin)
+ if (!next.flags.Test(*begin)) {
+ next.flags.Set(*begin);
+ next.states.push_back(*begin);
+ }
+ }
+
+ return 0;
+ }
+
Action Next(const State& current, State& next, Char c) const
{
return NextTranslated(current, next, Translate(c));
}
- bool TakeAction(State&, Action) const { return false; }
-
+ bool TakeAction(State&, Action) const { return false; }
+
Action NextTranslated(State& s, Char l) const
- {
- State dest(m.statesCount);
+ {
+ State dest(m.statesCount);
Action a = NextTranslated(s, dest, l);
- s.Swap(dest);
- return a;
- }
-
+ s.Swap(dest);
+ return a;
+ }
+
Action Next(State& s, Char c) const
{
return NextTranslated(s, Translate(c));
}
- bool Final(const State& s) const
- {
+ bool Final(const State& s) const
+ {
for (auto&& state : s.states)
if (m_finals[state])
- return true;
- return false;
- }
-
- bool Dead(const State&) const
- {
- return false;
- }
-
- ypair<const size_t*, const size_t*> AcceptedRegexps(const State& s) const {
- return Final(s) ? Accept() : Deny();
- }
-
- bool CanStop(const State& s) const {
- return Final(s);
- }
-
- const void* Mmap(const void* ptr, size_t size)
- {
- Impl::CheckAlign(ptr);
- SlowScanner s;
- const size_t* p = reinterpret_cast<const size_t*>(ptr);
-
+ return true;
+ return false;
+ }
+
+ bool Dead(const State&) const
+ {
+ return false;
+ }
+
+ ypair<const size_t*, const size_t*> AcceptedRegexps(const State& s) const {
+ return Final(s) ? Accept() : Deny();
+ }
+
+ bool CanStop(const State& s) const {
+ return Final(s);
+ }
+
+ const void* Mmap(const void* ptr, size_t size)
+ {
+ Impl::CheckAlign(ptr);
+ SlowScanner s;
+ const size_t* p = reinterpret_cast<const size_t*>(ptr);
+
Impl::ValidateHeader(p, size, ScannerIOTypes::SlowScanner, sizeof(s.m));
- Locals* locals;
- Impl::MapPtr(locals, 1, p, size);
- memcpy(&s.m, locals, sizeof(s.m));
-
- bool empty = *((const bool*) p);
- Impl::AdvancePtr(p, size, sizeof(empty));
- Impl::AlignPtr(p, size);
-
- if (empty)
- s.Alias(Null());
- else {
- s.m_vecptr = 0;
- Impl::MapPtr(s.m_letters, MaxChar, p, size);
- Impl::MapPtr(s.m_finals, s.m.statesCount, p, size);
- Impl::MapPtr(s.m_jumpPos, s.m.statesCount * s.m.lettersCount + 1, p, size);
- Impl::MapPtr(s.m_jumps, s.m_jumpPos[s.m.statesCount * s.m.lettersCount], p, size);
+ Locals* locals;
+ Impl::MapPtr(locals, 1, p, size);
+ memcpy(&s.m, locals, sizeof(s.m));
+
+ bool empty = *((const bool*) p);
+ Impl::AdvancePtr(p, size, sizeof(empty));
+ Impl::AlignPtr(p, size);
+
+ if (empty)
+ s.Alias(Null());
+ else {
+ s.m_vecptr = 0;
+ Impl::MapPtr(s.m_letters, MaxChar, p, size);
+ Impl::MapPtr(s.m_finals, s.m.statesCount, p, size);
+ Impl::MapPtr(s.m_jumpPos, s.m.statesCount * s.m.lettersCount + 1, p, size);
+ Impl::MapPtr(s.m_jumps, s.m_jumpPos[s.m.statesCount * s.m.lettersCount], p, size);
if (need_actions)
Impl::MapPtr(s.m_actions, s.m_jumpPos[s.m.statesCount * s.m.lettersCount], p, size);
- Swap(s);
- }
- return (const void*) p;
- }
-
- void Swap(SlowScanner& s)
- {
- DoSwap(m_finals, s.m_finals);
- DoSwap(m_jumps, s.m_jumps);
+ Swap(s);
+ }
+ return (const void*) p;
+ }
+
+ void Swap(SlowScanner& s)
+ {
+ DoSwap(m_finals, s.m_finals);
+ DoSwap(m_jumps, s.m_jumps);
DoSwap(m_actions, s.m_actions);
- DoSwap(m_jumpPos, s.m_jumpPos);
- DoSwap(m.statesCount, s.m.statesCount);
- DoSwap(m.lettersCount, s.m.lettersCount);
- DoSwap(m.start, s.m.start);
- DoSwap(m_letters, s.m_letters);
- DoSwap(m_pool, s.m_pool);
- DoSwap(m_vec, s.m_vec);
-
- DoSwap(m_vecptr, s.m_vecptr);
+ DoSwap(m_jumpPos, s.m_jumpPos);
+ DoSwap(m.statesCount, s.m.statesCount);
+ DoSwap(m.lettersCount, s.m.lettersCount);
+ DoSwap(m.start, s.m.start);
+ DoSwap(m_letters, s.m_letters);
+ DoSwap(m_pool, s.m_pool);
+ DoSwap(m_vec, s.m_vec);
+
+ DoSwap(m_vecptr, s.m_vecptr);
DoSwap(need_actions, s.need_actions);
DoSwap(m_actionsvec, s.m_actionsvec);
- if (m_vecptr == &s.m_vec)
- m_vecptr = &m_vec;
- if (s.m_vecptr == &m_vec)
- s.m_vecptr = &s.m_vec;
- }
-
- SlowScanner(const SlowScanner& s)
- : m(s.m)
- , m_vec(s.m_vec)
+ if (m_vecptr == &s.m_vec)
+ m_vecptr = &m_vec;
+ if (s.m_vecptr == &m_vec)
+ s.m_vecptr = &s.m_vec;
+ }
+
+ SlowScanner(const SlowScanner& s)
+ : m(s.m)
+ , m_vec(s.m_vec)
, need_actions(s.need_actions)
, m_actionsvec(s.m_actionsvec)
- {
- if (s.m_vec.empty()) {
- // Empty or mmap()-ed scanner, just copy pointers
- m_finals = s.m_finals;
- m_jumps = s.m_jumps;
+ {
+ if (s.m_vec.empty()) {
+ // Empty or mmap()-ed scanner, just copy pointers
+ m_finals = s.m_finals;
+ m_jumps = s.m_jumps;
m_actions = s.m_actions;
- m_jumpPos = s.m_jumpPos;
- m_letters = s.m_letters;
- m_vecptr = 0;
- } else {
- // In-memory scanner, perform deep copy
- alloc(m_letters, MaxChar);
- memcpy(m_letters, s.m_letters, sizeof(*m_letters) * MaxChar);
- m_jumps = 0;
- m_jumpPos = 0;
+ m_jumpPos = s.m_jumpPos;
+ m_letters = s.m_letters;
+ m_vecptr = 0;
+ } else {
+ // In-memory scanner, perform deep copy
+ alloc(m_letters, MaxChar);
+ memcpy(m_letters, s.m_letters, sizeof(*m_letters) * MaxChar);
+ m_jumps = 0;
+ m_jumpPos = 0;
m_actions = 0;
- alloc(m_finals, m.statesCount);
- memcpy(m_finals, s.m_finals, sizeof(*m_finals) * m.statesCount);
- m_vecptr = &m_vec;
- }
- }
+ alloc(m_finals, m.statesCount);
+ memcpy(m_finals, s.m_finals, sizeof(*m_finals) * m.statesCount);
+ m_vecptr = &m_vec;
+ }
+ }
explicit SlowScanner(Fsm& fsm, bool needActions = false, bool removeEpsilons = true, size_t distance = 0)
: need_actions(needActions)
- {
+ {
if (distance) {
fsm = CreateApproxFsm(fsm, distance);
}
if (removeEpsilons)
fsm.RemoveEpsilons();
fsm.Sparse(!removeEpsilons);
-
- m.statesCount = fsm.Size();
- m.lettersCount = fsm.Letters().Size();
-
- m_vec.resize(m.statesCount * m.lettersCount);
+
+ m.statesCount = fsm.Size();
+ m.lettersCount = fsm.Letters().Size();
+
+ m_vec.resize(m.statesCount * m.lettersCount);
if (need_actions)
m_actionsvec.resize(m.statesCount * m.lettersCount);
- m_vecptr = &m_vec;
- alloc(m_letters, MaxChar);
- m_jumps = 0;
+ m_vecptr = &m_vec;
+ alloc(m_letters, MaxChar);
+ m_jumps = 0;
m_actions = 0;
- m_jumpPos = 0;
- alloc(m_finals, m.statesCount);
-
- // Build letter translation table
+ m_jumpPos = 0;
+ alloc(m_finals, m.statesCount);
+
+ // Build letter translation table
Fill(m_letters, m_letters + MaxChar, 0);
for (auto&& letter : fsm.Letters())
for (auto&& character : letter.second.second)
m_letters[character] = letter.second.first;
-
- m.start = fsm.Initial();
- BuildScanner(fsm, *this);
- }
-
-
- SlowScanner& operator = (const SlowScanner& s) { SlowScanner(s).Swap(*this); return *this; }
-
- ~SlowScanner()
- {
+
+ m.start = fsm.Initial();
+ BuildScanner(fsm, *this);
+ }
+
+
+ SlowScanner& operator = (const SlowScanner& s) { SlowScanner(s).Swap(*this); return *this; }
+
+ ~SlowScanner()
+ {
for (auto&& i : m_pool)
free(i);
- }
-
- void Save(yostream*) const;
- void Load(yistream*);
-
- const State& StateIndex(const State& s) const { return s; }
-
+ }
+
+ void Save(yostream*) const;
+ void Load(yistream*);
+
+ const State& StateIndex(const State& s) const { return s; }
+
protected:
bool IsMmaped() const
{
@@ -338,84 +338,84 @@ protected:
return m_finals[pos];
}
-private:
-
- struct Locals {
- size_t statesCount;
- size_t lettersCount;
- size_t start;
- } m;
-
- bool* m_finals;
- unsigned* m_jumps;
+private:
+
+ struct Locals {
+ size_t statesCount;
+ size_t lettersCount;
+ size_t start;
+ } m;
+
+ bool* m_finals;
+ unsigned* m_jumps;
Action* m_actions;
- size_t* m_jumpPos;
- size_t* m_letters;
-
+ size_t* m_jumpPos;
+ size_t* m_letters;
+
TVector<void*> m_pool;
TVector< TVector<unsigned> > m_vec, *m_vecptr;
-
+
bool need_actions;
TVector<TVector<Action>> m_actionsvec;
static const SlowScanner& Null();
-
- template<class T> void alloc(T*& p, size_t size)
- {
- p = static_cast<T*>(malloc(size * sizeof(T)));
- memset(p, 0, size * sizeof(T));
- m_pool.push_back(p);
- }
-
- void Alias(const SlowScanner& s)
+
+ template<class T> void alloc(T*& p, size_t size)
+ {
+ p = static_cast<T*>(malloc(size * sizeof(T)));
+ memset(p, 0, size * sizeof(T));
+ m_pool.push_back(p);
+ }
+
+ void Alias(const SlowScanner& s)
{
- memcpy(&m, &s.m, sizeof(m));
- m_vec.clear();
+ memcpy(&m, &s.m, sizeof(m));
+ m_vec.clear();
need_actions = s.need_actions;
m_actionsvec.clear();
- m_finals = s.m_finals;
- m_jumps = s.m_jumps;
+ m_finals = s.m_finals;
+ m_jumps = s.m_jumps;
m_actions = s.m_actions;
- m_jumpPos = s.m_jumpPos;
- m_letters = s.m_letters;
- m_vecptr = s.m_vecptr;
- m_pool.clear();
- }
+ m_jumpPos = s.m_jumpPos;
+ m_letters = s.m_letters;
+ m_vecptr = s.m_vecptr;
+ m_pool.clear();
+ }
void SetJump(size_t oldState, Char c, size_t newState, unsigned long action)
- {
+ {
Y_ASSERT(!m_vec.empty());
Y_ASSERT(oldState < m.statesCount);
Y_ASSERT(newState < m.statesCount);
-
- size_t idx = oldState * m.lettersCount + m_letters[c];
- m_vec[idx].push_back(newState);
+
+ size_t idx = oldState * m.lettersCount + m_letters[c];
+ m_vec[idx].push_back(newState);
if (need_actions)
m_actionsvec[idx].push_back(action);
- }
-
- unsigned long RemapAction(unsigned long action) { return action; }
-
- void SetInitial(size_t state) { m.start = state; }
- void SetTag(size_t state, ui8 tag) { m_finals[state] = (tag != 0); }
-
- void FinishBuild() {}
-
- static ypair<const size_t*, const size_t*> Accept()
- {
- static size_t v[1] = { 0 };
-
- return ymake_pair(v, v + 1);
- }
-
- static ypair<const size_t*, const size_t*> Deny()
- {
- static size_t v[1] = { 0 };
- return ymake_pair(v, v);
- }
-
- friend void BuildScanner<SlowScanner>(const Fsm&, SlowScanner&);
-};
-
+ }
+
+ unsigned long RemapAction(unsigned long action) { return action; }
+
+ void SetInitial(size_t state) { m.start = state; }
+ void SetTag(size_t state, ui8 tag) { m_finals[state] = (tag != 0); }
+
+ void FinishBuild() {}
+
+ static ypair<const size_t*, const size_t*> Accept()
+ {
+ static size_t v[1] = { 0 };
+
+ return ymake_pair(v, v + 1);
+ }
+
+ static ypair<const size_t*, const size_t*> Deny()
+ {
+ static size_t v[1] = { 0 };
+ return ymake_pair(v, v);
+ }
+
+ friend void BuildScanner<SlowScanner>(const Fsm&, SlowScanner&);
+};
+
template<>
inline SlowScanner Fsm::Compile(size_t distance) {
return SlowScanner(*this, false, true, distance);
@@ -428,27 +428,27 @@ inline const SlowScanner& SlowScanner::Null()
}
#ifndef PIRE_DEBUG
-/// A specialization of Run(), since its state is much heavier than other ones
-/// and we thus want to avoid copying states.
-template<>
+/// A specialization of Run(), since its state is much heavier than other ones
+/// and we thus want to avoid copying states.
+template<>
inline void Run<SlowScanner>(const SlowScanner& scanner, SlowScanner::State& state, TStringBuf str)
-{
- SlowScanner::State temp;
- scanner.Initialize(temp);
-
- SlowScanner::State* src = &state;
- SlowScanner::State* dest = &temp;
-
+{
+ SlowScanner::State temp;
+ scanner.Initialize(temp);
+
+ SlowScanner::State* src = &state;
+ SlowScanner::State* dest = &temp;
+
for (auto it = str.begin(); it != str.end(); ++it) {
scanner.Next(*src, *dest, static_cast<unsigned char>(*it));
- DoSwap(src, dest);
- }
- if (src != &state)
- state = *src;
-}
-#endif
-
-}
-
-
-#endif
+ DoSwap(src, dest);
+ }
+ if (src != &state)
+ state = *src;
+}
+#endif
+
+}
+
+
+#endif
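A short sketch of why the Run() specialization above exists: SlowScanner::State holds a whole set of NFA states, so it is initialized once and advanced in place instead of being copied per character.

// Sketch: match a string with a SlowScanner (non-deterministic, set-of-states).
inline bool SlowMatches(const Pire::SlowScanner& sc, TStringBuf str)
{
    Pire::SlowScanner::State st;
    sc.Initialize(st);
    Pire::Run(sc, st, str);   // dispatches to the copy-free specialization above
    return sc.Final(st);
}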
diff --git a/contrib/libs/pire/pire/static_assert.h b/contrib/libs/pire/pire/static_assert.h
index f56a899ae7a..90dd0ff4f01 100644
--- a/contrib/libs/pire/pire/static_assert.h
+++ b/contrib/libs/pire/pire/static_assert.h
@@ -1,36 +1,36 @@
-/*
- * static_assert.h -- compile-time assertions
+/*
+ * static_assert.h -- compile-time assertions
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-#ifndef PIRE_ASSERT_H_INCLUDED
-#define PIRE_ASSERT_H_INCLUDED
-
-namespace Pire { namespace Impl {
-
-	// A static (compile-time) assertion.
- // The idea was shamelessly borrowed from Boost.
- template<bool x> struct StaticAssertion;
- template<> struct StaticAssertion<true> {};
-#define PIRE_STATIC_ASSERT(x) \
- enum { PireStaticAssertion ## __LINE__ = sizeof(Pire::Impl::StaticAssertion<(bool) (x)>) }
-}}
-
-#endif
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+#ifndef PIRE_ASSERT_H_INCLUDED
+#define PIRE_ASSERT_H_INCLUDED
+
+namespace Pire { namespace Impl {
+
+	// A static (compile-time) assertion.
+ // The idea was shamelessly borrowed from Boost.
+ template<bool x> struct StaticAssertion;
+ template<> struct StaticAssertion<true> {};
+#define PIRE_STATIC_ASSERT(x) \
+ enum { PireStaticAssertion ## __LINE__ = sizeof(Pire::Impl::StaticAssertion<(bool) (x)>) }
+}}
+
+#endif
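A usage note for the macro above: the argument must be a compile-time constant, and a false condition fails to compile because StaticAssertion<false> is declared but never defined.

PIRE_STATIC_ASSERT(sizeof(size_t) >= sizeof(unsigned));   // OK: instantiates StaticAssertion<true>
// PIRE_STATIC_ASSERT(sizeof(char) > 1);                  // would not compile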
diff --git a/contrib/libs/pire/pire/stub/defaults.h b/contrib/libs/pire/pire/stub/defaults.h
index 50900a8731e..561c23251b8 100644
--- a/contrib/libs/pire/pire/stub/defaults.h
+++ b/contrib/libs/pire/pire/stub/defaults.h
@@ -1,3 +1,3 @@
-#include <util/system/defaults.h>
-#include <errno.h>
-
+#include <util/system/defaults.h>
+#include <errno.h>
+
diff --git a/contrib/libs/pire/pire/stub/hacks.h b/contrib/libs/pire/pire/stub/hacks.h
index 6172a0ee2af..07319b0b374 100644
--- a/contrib/libs/pire/pire/stub/hacks.h
+++ b/contrib/libs/pire/pire/stub/hacks.h
@@ -1,7 +1,7 @@
-#ifndef PIRE_STUB_USE_PIRE_H_INCLUDED
-#define PIRE_STUB_USE_PIRE_H_INCLUDED
-
-namespace Pire {}
-using namespace Pire;
-
-#endif
+#ifndef PIRE_STUB_USE_PIRE_H_INCLUDED
+#define PIRE_STUB_USE_PIRE_H_INCLUDED
+
+namespace Pire {}
+using namespace Pire;
+
+#endif
diff --git a/contrib/libs/pire/pire/stub/lexical_cast.h b/contrib/libs/pire/pire/stub/lexical_cast.h
index 68d6dc92ae8..a060c4dddbf 100644
--- a/contrib/libs/pire/pire/stub/lexical_cast.h
+++ b/contrib/libs/pire/pire/stub/lexical_cast.h
@@ -1 +1 @@
-#include <util/string/cast.h>
+#include <util/string/cast.h>
diff --git a/contrib/libs/pire/pire/stub/memstreams.h b/contrib/libs/pire/pire/stub/memstreams.h
index 5cfd9a7896e..92c75ca6c9e 100644
--- a/contrib/libs/pire/pire/stub/memstreams.h
+++ b/contrib/libs/pire/pire/stub/memstreams.h
@@ -1,11 +1,11 @@
-#include <util/stream/mem.h>
-#include <util/stream/aligned.h>
-#include <util/stream/buffer.h>
-#include <util/generic/buffer.h>
-
-namespace Pire {
- typedef TBuffer Buffer;
- typedef TBuffer::TIterator BufferIterator;
- typedef TBufferOutput BufferOutput;
- typedef TAlignedOutput AlignedOutput;
+#include <util/stream/mem.h>
+#include <util/stream/aligned.h>
+#include <util/stream/buffer.h>
+#include <util/generic/buffer.h>
+
+namespace Pire {
+ typedef TBuffer Buffer;
+ typedef TBuffer::TIterator BufferIterator;
+ typedef TBufferOutput BufferOutput;
+ typedef TAlignedOutput AlignedOutput;
}
diff --git a/contrib/libs/pire/pire/stub/noncopyable.h b/contrib/libs/pire/pire/stub/noncopyable.h
index 1791f43638a..ab18546e518 100644
--- a/contrib/libs/pire/pire/stub/noncopyable.h
+++ b/contrib/libs/pire/pire/stub/noncopyable.h
@@ -1,5 +1,5 @@
-#pragma once
-#include <util/generic/noncopyable.h>
-namespace Pire {
- typedef TNonCopyable NonCopyable;
-}
+#pragma once
+#include <util/generic/noncopyable.h>
+namespace Pire {
+ typedef TNonCopyable NonCopyable;
+}
diff --git a/contrib/libs/pire/pire/stub/saveload.h b/contrib/libs/pire/pire/stub/saveload.h
index 6808c7a4003..97768ff463a 100644
--- a/contrib/libs/pire/pire/stub/saveload.h
+++ b/contrib/libs/pire/pire/stub/saveload.h
@@ -1,2 +1,2 @@
-#pragma once
-#include <util/ysaveload.h>
+#pragma once
+#include <util/ysaveload.h>
diff --git a/contrib/libs/pire/pire/stub/singleton.h b/contrib/libs/pire/pire/stub/singleton.h
index 193817f1007..f24e9244607 100644
--- a/contrib/libs/pire/pire/stub/singleton.h
+++ b/contrib/libs/pire/pire/stub/singleton.h
@@ -1,8 +1,8 @@
-#pragma once
-#include <util/generic/singleton.h>
-namespace Pire {
- template<class T>
- const T& DefaultValue() {
- return Default<T>();
- }
-}
+#pragma once
+#include <util/generic/singleton.h>
+namespace Pire {
+ template<class T>
+ const T& DefaultValue() {
+ return Default<T>();
+ }
+}
diff --git a/contrib/libs/pire/pire/stub/stl.h b/contrib/libs/pire/pire/stub/stl.h
index 705981a7e67..98ebd9f7c6f 100644
--- a/contrib/libs/pire/pire/stub/stl.h
+++ b/contrib/libs/pire/pire/stub/stl.h
@@ -1,66 +1,66 @@
-#ifndef PIRE_COMPAT_H_INCLUDED
-#define PIRE_COMPAT_H_INCLUDED
-
-#include <bitset>
-#include <algorithm>
-#include <iterator>
-#include <functional>
-#include <utility>
-#include <memory>
-
+#ifndef PIRE_COMPAT_H_INCLUDED
+#define PIRE_COMPAT_H_INCLUDED
+
+#include <bitset>
+#include <algorithm>
+#include <iterator>
+#include <functional>
+#include <utility>
+#include <memory>
+
#include <util/generic/string.h>
-#include <util/generic/vector.h>
-#include <util/generic/deque.h>
-#include <util/generic/list.h>
-#include <util/generic/map.h>
-#include <util/generic/set.h>
-#include <util/generic/hash.h>
-#include <util/generic/hash_set.h>
-#include <util/generic/ptr.h>
-#include <util/generic/yexception.h>
-#include <util/generic/utility.h>
-#include <util/generic/algorithm.h>
-#include <util/stream/input.h>
-#include <util/stream/output.h>
+#include <util/generic/vector.h>
+#include <util/generic/deque.h>
+#include <util/generic/list.h>
+#include <util/generic/map.h>
+#include <util/generic/set.h>
+#include <util/generic/hash.h>
+#include <util/generic/hash_set.h>
+#include <util/generic/ptr.h>
+#include <util/generic/yexception.h>
+#include <util/generic/utility.h>
+#include <util/generic/algorithm.h>
+#include <util/stream/input.h>
+#include <util/stream/output.h>
#include <util/string/reverse.h>
-#include <util/string/vector.h>
-
+#include <util/string/vector.h>
+
namespace Pire {
using ystring = TString;
template<size_t N> using ybitset = std::bitset<N>;
template<typename T1, typename T2> using ypair = std::pair<T1, T2>;
template<typename T> using yauto_ptr = std::auto_ptr<T>;
template<typename Arg1, typename Arg2, typename Result> using ybinary_function = std::binary_function<Arg1, Arg2, Result>;
-
+
template<typename T1, typename T2>
inline ypair<T1, T2> ymake_pair(T1 v1, T2 v2) {
return std::make_pair(v1, v2);
}
-
+
template<typename T>
inline T ymax(T v1, T v2) {
return std::max(v1, v2);
}
-
+
template<typename T>
inline T ymin(T v1, T v2) {
return std::min(v1, v2);
}
-
+
template<class Iter, class T>
void Fill(Iter begin, Iter end, T t) { std::fill(begin, end, t); }
-
+
class Error: public yexception {
public:
Error(const char* msg) { *this << msg; }
Error(const ystring& msg) { *this << msg; }
};
-
+
typedef IOutputStream yostream;
typedef IInputStream yistream;
template<class Iter>
ystring Join(Iter begin, Iter end, const ystring& separator) { return JoinStrings(begin, end, separator); }
-}
-
-#endif
+}
+
+#endif
diff --git a/contrib/libs/pire/pire/stub/utf8.h b/contrib/libs/pire/pire/stub/utf8.h
index 189520d2cb0..51ea0479d4a 100644
--- a/contrib/libs/pire/pire/stub/utf8.h
+++ b/contrib/libs/pire/pire/stub/utf8.h
@@ -1,7 +1,7 @@
#pragma once
#include <library/cpp/charset/codepage.h>
-#include <util/charset/unidata.h>
+#include <util/charset/unidata.h>
inline wchar32 to_lower(wchar32 c) {
return ToLower(c);
diff --git a/contrib/libs/pire/pire/vbitset.h b/contrib/libs/pire/pire/vbitset.h
index 904c27d1cba..69cb5aeba3d 100644
--- a/contrib/libs/pire/pire/vbitset.h
+++ b/contrib/libs/pire/pire/vbitset.h
@@ -1,120 +1,120 @@
-/*
- * vbitset.h -- a bitset of variable size.
+/*
+ * vbitset.h -- a bitset of variable size.
+ *
+ * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
+ * Alexander Gololobov <agololobov@gmail.com>
+ *
+ * This file is part of Pire, the Perl Incompatible
+ * Regular Expressions library.
+ *
+ * Pire is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
*
- * Copyright (c) 2007-2010, Dmitry Prokoptsev <dprokoptsev@gmail.com>,
- * Alexander Gololobov <agololobov@gmail.com>
- *
- * This file is part of Pire, the Perl Incompatible
- * Regular Expressions library.
- *
- * Pire is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Pire is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser Public License for more details.
- * You should have received a copy of the GNU Lesser Public License
- * along with Pire. If not, see <http://www.gnu.org/licenses>.
- */
-
-
-#ifndef PIRE_VBITSET_H
-#define PIRE_VBITSET_H
-
-
-#include <string.h>
-
+ * Pire is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser Public License for more details.
+ * You should have received a copy of the GNU Lesser Public License
+ * along with Pire. If not, see <http://www.gnu.org/licenses>.
+ */
+
+
+#ifndef PIRE_VBITSET_H
+#define PIRE_VBITSET_H
+
+
+#include <string.h>
+
#include <contrib/libs/pire/pire/stub/stl.h>
-namespace Pire {
-
-#ifdef _DEBUG
-#define VBITSET_CHECK_SIZE(x) CheckSize(x)
-#else
-#define VBITSET_CHECK_SIZE(x) x
-#endif
-
-/// A bitset with variable width
-class BitSet {
-public:
- typedef size_t value_type;
- typedef size_t* pointer;
- typedef size_t& reference;
- typedef const size_t& const_reference;
-
- class const_iterator;
-
- BitSet()
- : m_data(1, 1)
- {
- }
- BitSet(size_t size)
- : m_data(RoundUp(size + 1) + 1)
- , m_size(size)
- {
- m_data[RoundDown(size)] |= (1U << Remainder(size));
- }
-
- void Swap(BitSet& s)
- {
- m_data.swap(s.m_data);
- DoSwap(m_size, s.m_size);
- }
-
- /// Sets the specified bit to 1.
- void Set(size_t pos) {
- m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] |= (1U << Remainder(pos));
- }
-
- /// Resets the specified bit to 0.
- void Reset(size_t pos) {
- m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] &= ~(1U << Remainder(pos));
- }
-
- /// Checks whether the specified bit is set to 1.
- bool Test(size_t pos) const {
- return (m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] & (1U << Remainder(pos))) != 0;
- }
-
- size_t Size() const {
- return m_size;
- }
-
- void Resize(size_t newsize)
- {
- m_data.resize(RoundUp(newsize + 1));
- if (Remainder(newsize) && !m_data.empty())
- m_data[m_data.size() - 1] &= ((1U << Remainder(newsize)) - 1); // Clear tail
- m_data[RoundDown(newsize)] |= (1U << Remainder(newsize));
- }
-
- /// Resets all bits to 0.
- void Clear() { memset(&m_data[0], 0, m_data.size() * sizeof(ContainerType)); }
-
-private:
- typedef unsigned char ContainerType;
- static const size_t ItemSize = sizeof(ContainerType) * 8;
+namespace Pire {
+
+#ifdef _DEBUG
+#define VBITSET_CHECK_SIZE(x) CheckSize(x)
+#else
+#define VBITSET_CHECK_SIZE(x) x
+#endif
+
+/// A bitset with variable width
+class BitSet {
+public:
+ typedef size_t value_type;
+ typedef size_t* pointer;
+ typedef size_t& reference;
+ typedef const size_t& const_reference;
+
+ class const_iterator;
+
+ BitSet()
+ : m_data(1, 1)
+ {
+ }
+ BitSet(size_t size)
+ : m_data(RoundUp(size + 1) + 1)
+ , m_size(size)
+ {
+ m_data[RoundDown(size)] |= (1U << Remainder(size));
+ }
+
+ void Swap(BitSet& s)
+ {
+ m_data.swap(s.m_data);
+ DoSwap(m_size, s.m_size);
+ }
+
+ /// Sets the specified bit to 1.
+ void Set(size_t pos) {
+ m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] |= (1U << Remainder(pos));
+ }
+
+ /// Resets the specified bit to 0.
+ void Reset(size_t pos) {
+ m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] &= ~(1U << Remainder(pos));
+ }
+
+ /// Checks whether the specified bit is set to 1.
+ bool Test(size_t pos) const {
+ return (m_data[RoundDown(VBITSET_CHECK_SIZE(pos))] & (1U << Remainder(pos))) != 0;
+ }
+
+ size_t Size() const {
+ return m_size;
+ }
+
+ void Resize(size_t newsize)
+ {
+ m_data.resize(RoundUp(newsize + 1));
+ if (Remainder(newsize) && !m_data.empty())
+ m_data[m_data.size() - 1] &= ((1U << Remainder(newsize)) - 1); // Clear tail
+ m_data[RoundDown(newsize)] |= (1U << Remainder(newsize));
+ }
+
+ /// Resets all bits to 0.
+ void Clear() { memset(&m_data[0], 0, m_data.size() * sizeof(ContainerType)); }
+
+private:
+ typedef unsigned char ContainerType;
+ static const size_t ItemSize = sizeof(ContainerType) * 8;
TVector<ContainerType> m_data;
- size_t m_size;
-
- static size_t RoundUp(size_t x) { return x / ItemSize + ((x % ItemSize) ? 1 : 0); }
- static size_t RoundDown(size_t x) { return x / ItemSize; }
- static size_t Remainder(size_t x) { return x % ItemSize; }
-
-#ifdef _DEBUG
- size_t CheckSize(size_t size) const
- {
- if (size < m_size)
- return size;
- else
- throw Error("BitSet: subscript out of range");
- }
-#endif
-};
-
-}
-
-#endif
+ size_t m_size;
+
+ static size_t RoundUp(size_t x) { return x / ItemSize + ((x % ItemSize) ? 1 : 0); }
+ static size_t RoundDown(size_t x) { return x / ItemSize; }
+ static size_t Remainder(size_t x) { return x % ItemSize; }
+
+#ifdef _DEBUG
+ size_t CheckSize(size_t size) const
+ {
+ if (size < m_size)
+ return size;
+ else
+ throw Error("BitSet: subscript out of range");
+ }
+#endif
+};
+
+}
+
+#endif
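
The header above documents a small API: Set/Reset/Test address individual bits, Size/Resize manage the logical width (with a sentinel bit kept one position past the end), and Clear zeroes the storage. A minimal usage sketch under that interface; the function name and the chosen sizes are illustrative, not part of Pire, and the include path just follows the in-tree convention the header itself uses:

    #include <contrib/libs/pire/pire/vbitset.h>

    // Mark a few positions in a variable-width bitset and grow it on demand.
    void MarkStates() {
        Pire::BitSet visited(128);      // bits 0..127, all clear initially
        visited.Set(5);
        visited.Set(64);

        bool hit = visited.Test(5);     // true
        bool miss = visited.Test(6);    // false
        (void)hit; (void)miss;

        visited.Resize(256);            // widen; previously set bits are preserved
        visited.Reset(64);
        visited.Clear();                // everything back to zero
    }
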
diff --git a/library/cpp/actors/core/actor.h b/library/cpp/actors/core/actor.h
index 7eabe6fbd1a..ed29bd14b9e 100644
--- a/library/cpp/actors/core/actor.h
+++ b/library/cpp/actors/core/actor.h
@@ -218,7 +218,7 @@ namespace NActors {
TActorIdentity SelfActorId;
i64 ElapsedTicks;
ui64 HandledEvents;
-
+
friend void DoActorInit(TActorSystem*, IActor*, const TActorId&, const TActorId&);
friend class TDecorator;
@@ -235,10 +235,10 @@ namespace NActors {
INTERCONNECT_COMMON = 171,
SELF_PING_ACTOR = 207,
TEST_ACTOR_RUNTIME = 283,
- INTERCONNECT_HANDSHAKE = 284,
- INTERCONNECT_POLLER = 285,
- INTERCONNECT_SESSION_KILLER = 286,
- ACTOR_SYSTEM_SCHEDULER_ACTOR = 312,
+ INTERCONNECT_HANDSHAKE = 284,
+ INTERCONNECT_POLLER = 285,
+ INTERCONNECT_SESSION_KILLER = 286,
+ ACTOR_SYSTEM_SCHEDULER_ACTOR = 312,
ACTOR_FUTURE_CALLBACK = 337,
INTERCONNECT_MONACTOR = 362,
INTERCONNECT_LOAD_ACTOR = 376,
@@ -418,10 +418,10 @@ namespace NActors {
}
protected:
- //* Comment this function to find unmarked activities
+ //* Comment this function to find unmarked activities
static constexpr IActor::EActivityType ActorActivityType() {
return EActorActivity::OTHER;
- } //*/
+ } //*/
// static constexpr char ActorName[] = "UNNAMED";
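
The enum values above (INTERCONNECT_HANDSHAKE, INTERCONNECT_POLLER, and so on) are what concrete actors return from their own ActorActivityType(), shadowing the default that returns OTHER; the interconnect, scheduler and self-ping hunks later in this patch do exactly that. A hedged sketch of the pattern, where TMyActor, its activity choice and the headers pulled in are illustrative assumptions:

    #include <library/cpp/actors/core/actor.h>
    #include <library/cpp/actors/core/hfunc.h>

    using namespace NActors;

    class TMyActor: public TActor<TMyActor> {
    public:
        // Shadow the base default (OTHER) so per-activity execution stats
        // are attributed to this actor's line in the monitoring output.
        static constexpr IActor::EActivityType ActorActivityType() {
            return IActor::INTERCONNECT_COMMON;   // any value from EActivityType
        }

        TMyActor()
            : TActor(&TMyActor::StateFunc)
        {
        }

        STFUNC(StateFunc) {
            switch (ev->GetTypeRewrite()) {
                default:
                    break;
            }
        }
    };
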
diff --git a/library/cpp/actors/core/actorsystem.h b/library/cpp/actors/core/actorsystem.h
index 242794ac6f4..40499d7586f 100644
--- a/library/cpp/actors/core/actorsystem.h
+++ b/library/cpp/actors/core/actorsystem.h
@@ -8,7 +8,7 @@
#include "event.h"
#include "log_settings.h"
#include "scheduler_cookie.h"
-#include "mon_stats.h"
+#include "mon_stats.h"
#include <library/cpp/threading/future/future.h>
#include <library/cpp/actors/util/ticket_lock.h>
@@ -58,7 +58,7 @@ namespace NActors {
, DestroyedActors(0)
{
}
-
+
virtual ~IExecutorPool() {
}
@@ -348,7 +348,7 @@ namespace NActors {
T* AppData() const {
return (T*)AppData0;
}
-
+
NLog::TSettings* LoggerSettings() const {
return LoggerSettings0.Get();
}
diff --git a/library/cpp/actors/core/defs.h b/library/cpp/actors/core/defs.h
index 6c50ab677c4..980b7d767bc 100644
--- a/library/cpp/actors/core/defs.h
+++ b/library/cpp/actors/core/defs.h
@@ -6,12 +6,12 @@
#include <util/generic/hash.h>
#include <util/string/printf.h>
-// Enables collection of
-// event send/receive counts
-// activation time histograms
-// event processing time histograms
-#define ACTORSLIB_COLLECT_EXEC_STATS
-
+// Enables collection of
+// event send/receive counts
+// activation time histograms
+// event processing time histograms
+#define ACTORSLIB_COLLECT_EXEC_STATS
+
namespace NActors {
using TPoolId = ui8;
using TPoolsMask = ui64;
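
Because ACTORSLIB_COLLECT_EXEC_STATS is a compile-time switch defined here, both the stats fields and every place that updates them sit under the same #ifdef, as the event.h and executor_pool_base.cpp hunks below show. A reduced sketch of that guard pattern; TMySendStats and its counter are illustrative names, not library types, and ui64 comes from the util headers defs.h pulls in:

    #include <library/cpp/actors/core/defs.h>   // defines ACTORSLIB_COLLECT_EXEC_STATS

    struct TMySendStats {
    #ifdef ACTORSLIB_COLLECT_EXEC_STATS
        ui64 SentEvents = 0;        // the field only exists when stats are compiled in
    #endif

        void OnSend() {
    #ifdef ACTORSLIB_COLLECT_EXEC_STATS
            ++SentEvents;           // ...and is only touched under the same guard
    #endif
        }
    };
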
diff --git a/library/cpp/actors/core/event.h b/library/cpp/actors/core/event.h
index 9d2b694cb29..6ff02aaf943 100644
--- a/library/cpp/actors/core/event.h
+++ b/library/cpp/actors/core/event.h
@@ -7,9 +7,9 @@
#include <library/cpp/actors/wilson/wilson_trace.h>
-#include <util/system/hp_timer.h>
+#include <util/system/hp_timer.h>
#include <util/generic/maybe.h>
-
+
namespace NActors {
class TChunkSerializer;
@@ -110,10 +110,10 @@ namespace NActors {
        // filled in if fed by an interconnect session
const TActorId InterconnectSession;
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
::NHPTimer::STime SendTime;
-#endif
-
+#endif
+
static const size_t ChannelBits = 12;
static const size_t ChannelShift = (sizeof(ui32) << 3) - ChannelBits;
@@ -174,9 +174,9 @@ namespace NActors {
, Sender(sender)
, Cookie(cookie)
, TraceId(std::move(traceId))
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
, SendTime(0)
-#endif
+#endif
, Event(ev)
, RewriteRecipient(Recipient)
, RewriteType(Type)
@@ -199,9 +199,9 @@ namespace NActors {
, Sender(sender)
, Cookie(cookie)
, TraceId(std::move(traceId))
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
, SendTime(0)
-#endif
+#endif
, Buffer(std::move(buffer))
, RewriteRecipient(Recipient)
, RewriteType(Type)
@@ -228,9 +228,9 @@ namespace NActors {
, OriginScopeId(originScopeId)
, TraceId(std::move(traceId))
, InterconnectSession(session)
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
, SendTime(0)
-#endif
+#endif
, Buffer(std::move(buffer))
, RewriteRecipient(Recipient)
, RewriteType(Type)
diff --git a/library/cpp/actors/core/event_pb.h b/library/cpp/actors/core/event_pb.h
index d7ff4a3cbed..d7546b901a0 100644
--- a/library/cpp/actors/core/event_pb.h
+++ b/library/cpp/actors/core/event_pb.h
@@ -127,27 +127,27 @@ namespace NActors {
static const size_t EventMaxByteSize = 67108000;
#endif
- template <typename TEv, typename TRecord /*protobuf record*/, ui32 TEventType, typename TRecHolder>
- class TEventPBBase: public TEventBase<TEv, TEventType> , public TRecHolder {
+ template <typename TEv, typename TRecord /*protobuf record*/, ui32 TEventType, typename TRecHolder>
+ class TEventPBBase: public TEventBase<TEv, TEventType> , public TRecHolder {
// a vector of data buffers referenced by record; if filled, then extended serialization mechanism applies
TVector<TRope> Payload;
public:
- using TRecHolder::Record;
-
- public:
+ using TRecHolder::Record;
+
+ public:
using ProtoRecordType = TRecord;
- TEventPBBase() = default;
+ TEventPBBase() = default;
- explicit TEventPBBase(const TRecord& rec)
+ explicit TEventPBBase(const TRecord& rec)
{
- Record = rec;
+ Record = rec;
}
- explicit TEventPBBase(TRecord&& rec)
+ explicit TEventPBBase(TRecord&& rec)
{
- Record = std::move(rec);
+ Record = std::move(rec);
}
TString ToStringHeader() const override {
@@ -231,7 +231,7 @@ namespace NActors {
}
static IEventBase* Load(TIntrusivePtr<TEventSerializedData> input) {
- THolder<TEventPBBase> ev(new TEv());
+ THolder<TEventPBBase> ev(new TEv());
if (!input->GetSize()) {
Y_PROTOBUF_SUPPRESS_NODISCARD ev->Record.ParseFromString(TString());
} else {
@@ -273,7 +273,7 @@ namespace NActors {
}
ev->CachedByteSize = input->GetSize();
return ev.Release();
- }
+ }
size_t GetCachedByteSize() const {
if (CachedByteSize == 0) {
@@ -369,43 +369,43 @@ namespace NActors {
}
};
- // Protobuf record not using arena
- template <typename TRecord>
- struct TRecordHolder {
- TRecord Record;
- };
-
- // Protobuf arena and a record allocated on it
- template <typename TRecord, size_t InitialBlockSize, size_t MaxBlockSize>
- struct TArenaRecordHolder {
- google::protobuf::Arena PbArena;
- TRecord& Record;
-
- static const google::protobuf::ArenaOptions GetArenaOptions() {
- google::protobuf::ArenaOptions opts;
- opts.initial_block_size = InitialBlockSize;
- opts.max_block_size = MaxBlockSize;
- return opts;
- }
-
- TArenaRecordHolder()
- : PbArena(GetArenaOptions())
- , Record(*google::protobuf::Arena::CreateMessage<TRecord>(&PbArena))
- {}
- };
-
+ // Protobuf record not using arena
+ template <typename TRecord>
+ struct TRecordHolder {
+ TRecord Record;
+ };
+
+ // Protobuf arena and a record allocated on it
+ template <typename TRecord, size_t InitialBlockSize, size_t MaxBlockSize>
+ struct TArenaRecordHolder {
+ google::protobuf::Arena PbArena;
+ TRecord& Record;
+
+ static const google::protobuf::ArenaOptions GetArenaOptions() {
+ google::protobuf::ArenaOptions opts;
+ opts.initial_block_size = InitialBlockSize;
+ opts.max_block_size = MaxBlockSize;
+ return opts;
+ }
+
+ TArenaRecordHolder()
+ : PbArena(GetArenaOptions())
+ , Record(*google::protobuf::Arena::CreateMessage<TRecord>(&PbArena))
+ {}
+ };
+
+ template <typename TEv, typename TRecord, ui32 TEventType>
+ class TEventPB : public TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > {
+ typedef TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > TPbBase;
+ // NOTE: No extra fields allowed: TEventPB must be a "template typedef"
+ public:
+ using TPbBase::TPbBase;
+ };
+
+ template <typename TEv, typename TRecord, ui32 TEventType, size_t InitialBlockSize = 512, size_t MaxBlockSize = 16*1024>
+ using TEventPBWithArena = TEventPBBase<TEv, TRecord, TEventType, TArenaRecordHolder<TRecord, InitialBlockSize, MaxBlockSize> >;
+
template <typename TEv, typename TRecord, ui32 TEventType>
- class TEventPB : public TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > {
- typedef TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > TPbBase;
- // NOTE: No extra fields allowed: TEventPB must be a "template typedef"
- public:
- using TPbBase::TPbBase;
- };
-
- template <typename TEv, typename TRecord, ui32 TEventType, size_t InitialBlockSize = 512, size_t MaxBlockSize = 16*1024>
- using TEventPBWithArena = TEventPBBase<TEv, TRecord, TEventType, TArenaRecordHolder<TRecord, InitialBlockSize, MaxBlockSize> >;
-
- template <typename TEv, typename TRecord, ui32 TEventType>
class TEventShortDebugPB: public TEventPB<TEv, TRecord, TEventType> {
public:
using TBase = TEventPB<TEv, TRecord, TEventType>;
@@ -455,7 +455,7 @@ namespace NActors {
base.Swap(&copy);
PreSerializedData.clear();
}
- return TBase::Record;
+ return TBase::Record;
}
const TRecord& GetRecord() const {
@@ -463,7 +463,7 @@ namespace NActors {
}
TRecord* MutableRecord() {
- GetRecord(); // Make sure PreSerializedData is parsed
+ GetRecord(); // Make sure PreSerializedData is parsed
return &(TBase::Record);
}
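
The two holders above differ only in where the protobuf record lives: TRecordHolder keeps it inline, while TArenaRecordHolder creates it on a google::protobuf::Arena configured with the given initial/maximum block sizes, and TEventPB / TEventPBWithArena simply plug those holders into TEventPBBase. A condensed sketch of declaring an event both ways, mirroring the unit-test diff below; the include paths and the EvMyMessage constant are assumptions made for the example:

    #include <library/cpp/actors/core/event_pb.h>
    #include <library/cpp/actors/core/events.h>
    #include <library/cpp/actors/protos/unittests.pb.h>   // TMessageWithPayload from the .proto below

    using namespace NActors;

    enum {
        EvMyMessage = EventSpaceBegin(TEvents::ES_PRIVATE),
    };

    // Plain variant: the TMessageWithPayload record is stored inline in the event.
    struct TEvMyPlainMessage: TEventPB<TEvMyPlainMessage, TMessageWithPayload, EvMyMessage> {
    };

    // Arena variant: the record is allocated on a protobuf Arena owned by the event,
    // with initial/maximum arena block sizes of 4000 and 32000 bytes.
    struct TEvMyArenaMessage: TEventPBWithArena<TEvMyArenaMessage, TMessageWithPayload, EvMyMessage, 4000, 32000> {
    };

    // Both expose Record through their holder and serialize to the same wire format,
    // which is what the Compatibility test below relies on.
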
diff --git a/library/cpp/actors/core/event_pb_payload_ut.cpp b/library/cpp/actors/core/event_pb_payload_ut.cpp
index 4dcc958bb17..eab007bc15d 100644
--- a/library/cpp/actors/core/event_pb_payload_ut.cpp
+++ b/library/cpp/actors/core/event_pb_payload_ut.cpp
@@ -8,7 +8,7 @@ using namespace NActors;
enum {
EvMessageWithPayload = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvArenaMessage,
+ EvArenaMessage,
EvArenaMessageBig,
EvMessageWithPayloadPreSerialized
};
@@ -38,81 +38,81 @@ TString MakeString(size_t len) {
Y_UNIT_TEST_SUITE(TEventProtoWithPayload) {
- template <class TEventFrom, class TEventTo>
- void TestSerializeDeserialize(size_t size1, size_t size2) {
- static_assert(TEventFrom::EventType == TEventTo::EventType, "Must be same event type");
+ template <class TEventFrom, class TEventTo>
+ void TestSerializeDeserialize(size_t size1, size_t size2) {
+ static_assert(TEventFrom::EventType == TEventTo::EventType, "Must be same event type");
- TEventFrom msg;
- msg.Record.SetMeta("hello, world!");
- msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size1))));
- msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size2))));
- msg.Record.AddSomeData(MakeString((size1 + size2) % 50 + 11));
+ TEventFrom msg;
+ msg.Record.SetMeta("hello, world!");
+ msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size1))));
+ msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size2))));
+ msg.Record.AddSomeData(MakeString((size1 + size2) % 50 + 11));
- auto serializer = MakeHolder<TAllocChunkSerializer>();
+ auto serializer = MakeHolder<TAllocChunkSerializer>();
msg.SerializeToArcadiaStream(serializer.Get());
- auto buffers = serializer->Release(msg.IsExtendedFormat());
- UNIT_ASSERT_VALUES_EQUAL(buffers->GetSize(), msg.CalculateSerializedSize());
- TString ser = buffers->GetString();
-
- TString chunkerRes;
- TCoroutineChunkSerializer chunker;
- chunker.SetSerializingEvent(&msg);
- while (!chunker.IsComplete()) {
- char buffer[4096];
+ auto buffers = serializer->Release(msg.IsExtendedFormat());
+ UNIT_ASSERT_VALUES_EQUAL(buffers->GetSize(), msg.CalculateSerializedSize());
+ TString ser = buffers->GetString();
+
+ TString chunkerRes;
+ TCoroutineChunkSerializer chunker;
+ chunker.SetSerializingEvent(&msg);
+ while (!chunker.IsComplete()) {
+ char buffer[4096];
auto range = chunker.FeedBuf(buffer, sizeof(buffer));
for (auto p = range.first; p != range.second; ++p) {
chunkerRes += TString(p->first, p->second);
}
}
- UNIT_ASSERT_VALUES_EQUAL(chunkerRes, ser);
-
+ UNIT_ASSERT_VALUES_EQUAL(chunkerRes, ser);
+
THolder<IEventBase> ev2 = THolder(TEventTo::Load(buffers));
- TEventTo& msg2 = static_cast<TEventTo&>(*ev2);
- UNIT_ASSERT_VALUES_EQUAL(msg2.Record.GetMeta(), msg.Record.GetMeta());
- UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(0)), msg.GetPayload(msg.Record.GetPayloadId(0)));
- UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(1)), msg.GetPayload(msg.Record.GetPayloadId(1)));
+ TEventTo& msg2 = static_cast<TEventTo&>(*ev2);
+ UNIT_ASSERT_VALUES_EQUAL(msg2.Record.GetMeta(), msg.Record.GetMeta());
+ UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(0)), msg.GetPayload(msg.Record.GetPayloadId(0)));
+ UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(1)), msg.GetPayload(msg.Record.GetPayloadId(1)));
+ }
+
+ template <class TEvent>
+ void TestAllSizes(size_t step1 = 100, size_t step2 = 111) {
+ for (size_t size1 = 0; size1 < 10000; size1 += step1) {
+ for (size_t size2 = 0; size2 < 10000; size2 += step2) {
+ TestSerializeDeserialize<TEvent, TEvent>(size1, size2);
+ }
+ }
}
-
- template <class TEvent>
- void TestAllSizes(size_t step1 = 100, size_t step2 = 111) {
- for (size_t size1 = 0; size1 < 10000; size1 += step1) {
- for (size_t size2 = 0; size2 < 10000; size2 += step2) {
- TestSerializeDeserialize<TEvent, TEvent>(size1, size2);
- }
- }
- }
-
+
#if (!defined(_tsan_enabled_))
- Y_UNIT_TEST(SerializeDeserialize) {
- TestAllSizes<TEvMessageWithPayload>();
- }
+ Y_UNIT_TEST(SerializeDeserialize) {
+ TestAllSizes<TEvMessageWithPayload>();
+ }
#endif
-
-
- struct TEvArenaMessage : TEventPBWithArena<TEvArenaMessage, TMessageWithPayload, EvArenaMessage> {
- };
-
- Y_UNIT_TEST(SerializeDeserializeArena) {
- TestAllSizes<TEvArenaMessage>(500, 111);
- }
-
-
- struct TEvArenaMessageBig : TEventPBWithArena<TEvArenaMessageBig, TMessageWithPayload, EvArenaMessageBig, 4000, 32000> {
- };
-
- Y_UNIT_TEST(SerializeDeserializeArenaBig) {
- TestAllSizes<TEvArenaMessageBig>(111, 500);
- }
-
-
- // Compatible with TEvArenaMessage but doesn't use arenas
- struct TEvArenaMessageWithoutArena : TEventPB<TEvArenaMessageWithoutArena, TMessageWithPayload, EvArenaMessage> {
- };
-
- Y_UNIT_TEST(Compatibility) {
- TestSerializeDeserialize<TEvArenaMessage, TEvArenaMessageWithoutArena>(200, 14010);
- TestSerializeDeserialize<TEvArenaMessageWithoutArena, TEvArenaMessage>(2000, 4010);
- }
+
+
+ struct TEvArenaMessage : TEventPBWithArena<TEvArenaMessage, TMessageWithPayload, EvArenaMessage> {
+ };
+
+ Y_UNIT_TEST(SerializeDeserializeArena) {
+ TestAllSizes<TEvArenaMessage>(500, 111);
+ }
+
+
+ struct TEvArenaMessageBig : TEventPBWithArena<TEvArenaMessageBig, TMessageWithPayload, EvArenaMessageBig, 4000, 32000> {
+ };
+
+ Y_UNIT_TEST(SerializeDeserializeArenaBig) {
+ TestAllSizes<TEvArenaMessageBig>(111, 500);
+ }
+
+
+ // Compatible with TEvArenaMessage but doesn't use arenas
+ struct TEvArenaMessageWithoutArena : TEventPB<TEvArenaMessageWithoutArena, TMessageWithPayload, EvArenaMessage> {
+ };
+
+ Y_UNIT_TEST(Compatibility) {
+ TestSerializeDeserialize<TEvArenaMessage, TEvArenaMessageWithoutArena>(200, 14010);
+ TestSerializeDeserialize<TEvArenaMessageWithoutArena, TEvArenaMessage>(2000, 4010);
+ }
Y_UNIT_TEST(PreSerializedCompatibility) {
        // ensure TEventPreSerializedPB and TEventPB are interchangeable with no compatibility issues
diff --git a/library/cpp/actors/core/executelater.h b/library/cpp/actors/core/executelater.h
index a2e380e4662..e7a13c10053 100644
--- a/library/cpp/actors/core/executelater.h
+++ b/library/cpp/actors/core/executelater.h
@@ -8,10 +8,10 @@ namespace NActors {
template <typename TCallback>
class TExecuteLater: public TActorBootstrapped<TExecuteLater<TCallback>> {
public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::ACTORLIB_COMMON;
- }
-
+ static constexpr IActor::EActivityType ActorActivityType() {
+ return IActor::ACTORLIB_COMMON;
+ }
+
TExecuteLater(
TCallback&& callback,
IActor::EActivityType activityType,
diff --git a/library/cpp/actors/core/executor_pool_base.cpp b/library/cpp/actors/core/executor_pool_base.cpp
index e3852eaf8d9..c3b99991680 100644
--- a/library/cpp/actors/core/executor_pool_base.cpp
+++ b/library/cpp/actors/core/executor_pool_base.cpp
@@ -16,9 +16,9 @@ namespace NActors {
: IExecutorPool(poolId)
, ActorSystem(nullptr)
, MailboxTable(new TMailboxTable)
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- , Stats(maxActivityType)
-#endif
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+ , Stats(maxActivityType)
+#endif
{}
TExecutorPoolBaseMailboxed::~TExecutorPoolBaseMailboxed() {
@@ -47,9 +47,9 @@ namespace NActors {
bool TExecutorPoolBaseMailboxed::Send(TAutoPtr<IEventHandle>& ev) {
Y_VERIFY_DEBUG(ev->GetRecipientRewrite().PoolID() == PoolId);
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
RelaxedStore(&ev->SendTime, (::NHPTimer::STime)GetCycleCountFast());
-#endif
+#endif
return MailboxTable->SendTo(ev, this);
}
@@ -59,21 +59,21 @@ namespace NActors {
TActorId TExecutorPoolBaseMailboxed::Register(IActor* actor, TMailboxType::EType mailboxType, ui64 revolvingWriteCounter, const TActorId& parentId) {
NHPTimer::STime hpstart = GetCycleCountFast();
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
ui32 at = actor->GetActivityType();
if (at >= Stats.MaxActivityType())
at = 0;
AtomicIncrement(Stats.ActorsAliveByActivity[at]);
-#endif
+#endif
AtomicIncrement(ActorRegistrations);
-
+
// first step - find good enough mailbox
ui32 hint = 0;
TMailboxHeader* mailbox = nullptr;
if (revolvingWriteCounter == 0)
revolvingWriteCounter = AtomicIncrement(RegisterRevolvingCounter);
-
+
{
ui32 hintBackoff = 0;
@@ -122,7 +122,7 @@ namespace NActors {
default:
Y_FAIL();
}
-
+
NHPTimer::STime elapsed = GetCycleCountFast() - hpstart;
if (elapsed > 1000000) {
LWPROBE(SlowRegisterNew, PoolId, NHPTimer::GetSeconds(elapsed) * 1000.0);
@@ -133,14 +133,14 @@ namespace NActors {
TActorId TExecutorPoolBaseMailboxed::Register(IActor* actor, TMailboxHeader* mailbox, ui32 hint, const TActorId& parentId) {
NHPTimer::STime hpstart = GetCycleCountFast();
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
ui32 at = actor->GetActivityType();
if (at >= Stats.MaxActivityType())
at = 0;
AtomicIncrement(Stats.ActorsAliveByActivity[at]);
-#endif
+#endif
AtomicIncrement(ActorRegistrations);
-
+
const ui64 localActorId = AllocateID();
mailbox->AttachActor(localActorId, actor);
diff --git a/library/cpp/actors/core/executor_pool_base.h b/library/cpp/actors/core/executor_pool_base.h
index 49129e8a8df..c84ce1af779 100644
--- a/library/cpp/actors/core/executor_pool_base.h
+++ b/library/cpp/actors/core/executor_pool_base.h
@@ -12,11 +12,11 @@ namespace NActors {
protected:
TActorSystem* ActorSystem;
THolder<TMailboxTable> MailboxTable;
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
+#ifdef ACTORSLIB_COLLECT_EXEC_STATS
// Need to have per pool object to collect stats like actor registrations (because
// registrations might be done in threads from other pools)
TExecutorThreadStats Stats;
-#endif
+#endif
TAtomic RegisterRevolvingCounter = 0;
ui64 AllocateID();
public:
diff --git a/library/cpp/actors/core/executor_pool_basic.cpp b/library/cpp/actors/core/executor_pool_basic.cpp
index fdd07ef84f7..4dce16939ae 100644
--- a/library/cpp/actors/core/executor_pool_basic.cpp
+++ b/library/cpp/actors/core/executor_pool_basic.cpp
@@ -21,8 +21,8 @@ namespace NActors {
TAffinity* affinity,
TDuration timePerMailbox,
ui32 eventsPerMailbox,
- int realtimePriority,
- ui32 maxActivityType)
+ int realtimePriority,
+ ui32 maxActivityType)
: TExecutorPoolBase(poolId, threads, affinity, maxActivityType)
, SpinThreshold(spinThreshold)
, SpinThresholdCycles(spinThreshold * NHPTimer::GetCyclesPerSecond() * 0.000001) // convert microseconds to cycles
@@ -195,7 +195,7 @@ namespace NActors {
return activation;
}
SpinLockPause();
- }
+ }
// stopping, die!
return 0;
@@ -245,8 +245,8 @@ namespace NActors {
for (size_t i = 0; i < PoolThreads; ++i) {
Threads[i].Thread->GetCurrentStats(statsCopy[i + 1]);
}
- }
-
+ }
+
void TBasicExecutorPool::Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) {
TAffinityGuard affinityGuard(Affinity());
diff --git a/library/cpp/actors/core/executor_pool_basic.h b/library/cpp/actors/core/executor_pool_basic.h
index 2a663ab3caf..023190f7fe3 100644
--- a/library/cpp/actors/core/executor_pool_basic.h
+++ b/library/cpp/actors/core/executor_pool_basic.h
@@ -77,8 +77,8 @@ namespace NActors {
TAffinity* affinity = nullptr,
TDuration timePerMailbox = DEFAULT_TIME_PER_MAILBOX,
ui32 eventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX,
- int realtimePriority = 0,
- ui32 maxActivityType = 1);
+ int realtimePriority = 0,
+ ui32 maxActivityType = 1);
explicit TBasicExecutorPool(const TBasicExecutorPoolConfig& cfg);
~TBasicExecutorPool();
diff --git a/library/cpp/actors/core/executor_pool_io.h b/library/cpp/actors/core/executor_pool_io.h
index b4f1472731f..e576d642a1d 100644
--- a/library/cpp/actors/core/executor_pool_io.h
+++ b/library/cpp/actors/core/executor_pool_io.h
@@ -26,7 +26,7 @@ namespace NActors {
public:
TIOExecutorPool(ui32 poolId, ui32 threads, const TString& poolName = "", TAffinity* affinity = nullptr,
- ui32 maxActivityType = 1);
+ ui32 maxActivityType = 1);
explicit TIOExecutorPool(const TIOExecutorPoolConfig& cfg);
~TIOExecutorPool();
diff --git a/library/cpp/actors/core/executor_pool_united.cpp b/library/cpp/actors/core/executor_pool_united.cpp
index c8c2b3af54b..dac6245635d 100644
--- a/library/cpp/actors/core/executor_pool_united.cpp
+++ b/library/cpp/actors/core/executor_pool_united.cpp
@@ -741,7 +741,7 @@ namespace NActors {
#endif
}
}
- }
+ }
void WakeFastWorker() {
#ifdef _linux_
diff --git a/library/cpp/actors/core/executor_thread.cpp b/library/cpp/actors/core/executor_thread.cpp
index 437f1d04b99..446b651efd2 100644
--- a/library/cpp/actors/core/executor_thread.cpp
+++ b/library/cpp/actors/core/executor_thread.cpp
@@ -21,9 +21,9 @@
#include <util/system/type_name.h>
#include <util/system/datetime.h>
-
-LWTRACE_USING(ACTORLIB_PROVIDER)
-
+
+LWTRACE_USING(ACTORLIB_PROVIDER)
+
namespace NActors {
constexpr TDuration TExecutorThread::DEFAULT_TIME_PER_MAILBOX;
@@ -98,10 +98,10 @@ namespace NActors {
}
}
- inline TString ActorTypeName(const IActor* actor, ui32 activityType) {
- return actor ? SafeTypeName(actor) : ("activityType_" + ToString(activityType) + " (destroyed)");
- }
-
+ inline TString ActorTypeName(const IActor* actor, ui32 activityType) {
+ return actor ? SafeTypeName(actor) : ("activityType_" + ToString(activityType) + " (destroyed)");
+ }
+
inline void LwTraceSlowDelivery(IEventHandle* ev, const IActor* actor, ui32 poolId, const TActorId& currentRecipient,
double delivMs, double sinceActivationMs, ui32 eventsExecutedBefore) {
const auto baseEv = (ev && ev->HasEvent()) ? ev->GetBase() : nullptr;
@@ -124,13 +124,13 @@ namespace NActors {
eventMs,
baseEv ? SafeTypeName(baseEv) : ToString(evTypeForTracing),
currentRecipient.ToString(),
- ActorTypeName(actor, activityType));
+ ActorTypeName(actor, activityType));
}
template <typename TMailbox>
void TExecutorThread::Execute(TMailbox* mailbox, ui32 hint) {
Y_VERIFY_DEBUG(DyingActors.empty());
-
+
bool reclaimAsFree = false;
NHPTimer::STime hpstart = GetCycleCountFast();
@@ -167,9 +167,9 @@ namespace NActors {
double sinceActivationMs = NHPTimer::GetSeconds(hpprev - hpstart) * 1000.0;
LwTraceSlowDelivery(ev.Get(), actor, Ctx.PoolId, CurrentRecipient, NHPTimer::GetSeconds(hpprev - ev->SendTime) * 1000.0, sinceActivationMs, executed);
}
-
+
ui32 evTypeForTracing = ev->Type;
-
+
ui32 activityType = actor->GetActivityType();
if (activityType != prevActivityType) {
prevActivityType = activityType;
@@ -184,10 +184,10 @@ namespace NActors {
DropUnregistered();
actor = nullptr;
}
-
+
            if (mailbox->IsEmpty()) // was not free and became free, so we must reclaim the mailbox
reclaimAsFree = true;
-
+
hpnow = GetCycleCountFast();
NHPTimer::STime elapsed = Ctx.AddEventProcessingStats(hpprev, hpnow, activityType, CurrentActorScheduledEventsCounter);
if (elapsed > 1000000) {
@@ -197,9 +197,9 @@ namespace NActors {
// The actor might have been destroyed
if (actor)
actor->AddElapsedTicks(elapsed);
-
+
CurrentRecipient = TActorId();
- } else {
+ } else {
TAutoPtr<IEventHandle> nonDelivered = ev->ForwardOnNondelivery(TEvents::TEvUndelivered::ReasonActorUnknown);
if (nonDelivered.Get()) {
ActorSystem->Send(nonDelivered);
@@ -207,7 +207,7 @@ namespace NActors {
Ctx.IncrementNonDeliveredEvents();
}
hpnow = GetCycleCountFast();
- }
+ }
hpprev = hpnow;
@@ -241,8 +241,8 @@ namespace NActors {
recipient.ToString(),
SafeTypeName(actor));
break;
- }
-
+ }
+
if (executed + 1 == Ctx.EventsPerMailbox) {
AtomicStore(&mailbox->ScheduleMoment, hpnow);
Ctx.IncrementMailboxPushedOutByEventCount();
@@ -255,8 +255,8 @@ namespace NActors {
Ctx.WorkerId,
recipient.ToString(),
SafeTypeName(actor));
- break;
- }
+ break;
+ }
} else {
if (executed == 0)
Ctx.IncrementEmptyMailboxActivation();
@@ -271,7 +271,7 @@ namespace NActors {
SafeTypeName(actor));
break; // empty queue, leave
}
- }
+ }
NProfiling::TMemoryTagScope::Reset(0);
TlsActivationContext = nullptr;
diff --git a/library/cpp/actors/core/executor_thread.h b/library/cpp/actors/core/executor_thread.h
index 6d6e527f9e5..9d3c573f0d6 100644
--- a/library/cpp/actors/core/executor_thread.h
+++ b/library/cpp/actors/core/executor_thread.h
@@ -54,15 +54,15 @@ namespace NActors {
#ifdef USE_ACTOR_CALLSTACK
ev->Callstack = TCallstack::GetTlsCallstack();
ev->Callstack.Trace();
-#endif
+#endif
Ctx.IncrementSentEvents();
return ActorSystem->Send(ev);
}
-
+
void GetCurrentStats(TExecutorThreadStats& statsCopy) const {
Ctx.GetCurrentStats(statsCopy);
}
-
+
TThreadId GetThreadId() const; // blocks, must be called after Start()
TWorkerId GetWorkerId() const { return Ctx.WorkerId; }
diff --git a/library/cpp/actors/core/log.cpp b/library/cpp/actors/core/log.cpp
index 3edc42012c1..5f63b5af580 100644
--- a/library/cpp/actors/core/log.cpp
+++ b/library/cpp/actors/core/log.cpp
@@ -164,8 +164,8 @@ namespace NActors {
std::shared_ptr<NMonitoring::TMetricRegistry> Metrics;
};
- TAtomic TLoggerActor::IsOverflow = 0;
-
+ TAtomic TLoggerActor::IsOverflow = 0;
+
TLoggerActor::TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
TAutoPtr<TLogBackend> logBackend,
TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
@@ -224,10 +224,10 @@ namespace NActors {
}
void TLoggerActor::Throttle(const NLog::TSettings& settings) {
- if (AtomicGet(IsOverflow))
+ if (AtomicGet(IsOverflow))
Sleep(settings.ThrottleDelay);
- }
-
+ }
+
void TLoggerActor::LogIgnoredCount(TInstant now) {
TString message = Sprintf("Ignored IgnoredCount# %" PRIu64 " log records due to logger overflow!", IgnoredCount);
if (!OutputRecord(now, NActors::NLog::EPrio::Error, Settings->LoggerComponent, message)) {
@@ -250,7 +250,7 @@ namespace NActors {
Metrics->IncActorMsgs();
const auto prio = ev.Level.ToPrio();
-
+
switch (prio) {
case ::NActors::NLog::EPrio::Alert:
Metrics->IncAlertMsgs();
@@ -267,28 +267,28 @@ namespace NActors {
void TLoggerActor::HandleLogEvent(NLog::TEvLog::TPtr& ev, const NActors::TActorContext& ctx) {
i64 delayMillisec = (ctx.Now() - ev->Get()->Stamp).MilliSeconds();
WriteMessageStat(*ev->Get());
- if (Settings->AllowDrop) {
- // Disable throttling if it was enabled previously
- if (AtomicGet(IsOverflow))
- AtomicSet(IsOverflow, 0);
-
- // Check if some records have to be dropped
+ if (Settings->AllowDrop) {
+ // Disable throttling if it was enabled previously
+ if (AtomicGet(IsOverflow))
+ AtomicSet(IsOverflow, 0);
+
+ // Check if some records have to be dropped
if ((PassedCount > 10 && delayMillisec > (i64)Settings->TimeThresholdMs) || IgnoredCount > 0) {
Metrics->IncIgnoredMsgs();
- if (IgnoredCount == 0) {
- ctx.Send(ctx.SelfID, new TLogIgnored());
- }
- ++IgnoredCount;
- PassedCount = 0;
- return;
+ if (IgnoredCount == 0) {
+ ctx.Send(ctx.SelfID, new TLogIgnored());
+ }
+ ++IgnoredCount;
+ PassedCount = 0;
+ return;
}
- PassedCount++;
- } else {
-            // Enable or disable throttling depending on the load
- if (delayMillisec > (i64)Settings->TimeThresholdMs && !AtomicGet(IsOverflow))
- AtomicSet(IsOverflow, 1);
- else if (delayMillisec <= (i64)Settings->TimeThresholdMs && AtomicGet(IsOverflow))
- AtomicSet(IsOverflow, 0);
+ PassedCount++;
+ } else {
+            // Enable or disable throttling depending on the load
+ if (delayMillisec > (i64)Settings->TimeThresholdMs && !AtomicGet(IsOverflow))
+ AtomicSet(IsOverflow, 1);
+ else if (delayMillisec <= (i64)Settings->TimeThresholdMs && AtomicGet(IsOverflow))
+ AtomicSet(IsOverflow, 0);
}
const auto prio = ev->Get()->Level.ToPrio();
@@ -376,8 +376,8 @@ namespace NActors {
bool hasPriority = false;
bool hasSamplingPriority = false;
bool hasSamplingRate = false;
- bool hasAllowDrop = false;
- int allowDrop = 0;
+ bool hasAllowDrop = false;
+ int allowDrop = 0;
if (params.Has("c")) {
if (TryFromString(params.Get("c"), component) && (component == NLog::InvalidComponent || Settings->IsValidComponent(component))) {
hasComponent = true;
@@ -402,11 +402,11 @@ namespace NActors {
}
}
}
- if (params.Has("allowdrop")) {
- if (TryFromString(params.Get("allowdrop"), allowDrop)) {
- hasAllowDrop = true;
- }
- }
+ if (params.Has("allowdrop")) {
+ if (TryFromString(params.Get("allowdrop"), allowDrop)) {
+ hasAllowDrop = true;
+ }
+ }
TStringStream str;
if (hasComponent && !hasPriority && !hasSamplingPriority && !hasSamplingRate) {
@@ -485,9 +485,9 @@ namespace NActors {
if (hasComponent && hasSamplingRate) {
Settings->SetSamplingRate(samplingRate, component, explanation);
}
- if (hasAllowDrop) {
- Settings->SetAllowDrop(allowDrop);
- }
+ if (hasAllowDrop) {
+ Settings->SetAllowDrop(allowDrop);
+ }
HTML(str) {
if (!explanation.empty()) {
@@ -559,10 +559,10 @@ namespace NActors {
str << "Drop log entries in case of overflow: "
<< (Settings->AllowDrop ? "Enabled" : "Disabled");
}
- str << "<form method=\"GET\">" << Endl;
+ str << "<form method=\"GET\">" << Endl;
str << "<input type=\"hidden\" name=\"allowdrop\" value=\"" << (Settings->AllowDrop ? "0" : "1") << "\"/>" << Endl;
str << "<input class=\"btn btn-primary\" type=\"submit\" value=\"" << (Settings->AllowDrop ? "Disable" : "Enable") << "\"/>" << Endl;
- str << "</form>" << Endl;
+ str << "</form>" << Endl;
}
}
Metrics->GetOutputHtml(str);
@@ -588,7 +588,7 @@ namespace NActors {
logRecord << time;
}
logRecord
- << Settings->MessagePrefix
+ << Settings->MessagePrefix
<< " :" << Settings->ComponentName(component)
<< " " << PriorityToString(priority)
<< ": " << formatted;
diff --git a/library/cpp/actors/core/log.h b/library/cpp/actors/core/log.h
index 4219194faa6..c11a7cf3c19 100644
--- a/library/cpp/actors/core/log.h
+++ b/library/cpp/actors/core/log.h
@@ -233,13 +233,13 @@ namespace NActors {
void Log(TInstant time, NLog::EPriority priority, NLog::EComponent component, const char* c, ...);
static void Throttle(const NLog::TSettings& settings);
-
+
private:
TIntrusivePtr<NLog::TSettings> Settings;
std::shared_ptr<TLogBackend> LogBackend;
ui64 IgnoredCount = 0;
ui64 PassedCount = 0;
- static TAtomic IsOverflow;
+ static TAtomic IsOverflow;
TDuration WakeupInterval{TDuration::Seconds(5)};
std::unique_ptr<ILoggerMetrics> Metrics;
diff --git a/library/cpp/actors/core/log_settings.cpp b/library/cpp/actors/core/log_settings.cpp
index e22760d149f..f52f2fc5d22 100644
--- a/library/cpp/actors/core/log_settings.cpp
+++ b/library/cpp/actors/core/log_settings.cpp
@@ -11,7 +11,7 @@ namespace NActors {
: LoggerActorId(loggerActorId)
, LoggerComponent(loggerComponent)
, TimeThresholdMs(timeThresholdMs)
- , AllowDrop(true)
+ , AllowDrop(true)
, ThrottleDelay(TDuration::MilliSeconds(100))
, MinVal(0)
, MaxVal(0)
@@ -33,7 +33,7 @@ namespace NActors {
: LoggerActorId(loggerActorId)
, LoggerComponent(loggerComponent)
, TimeThresholdMs(timeThresholdMs)
- , AllowDrop(true)
+ , AllowDrop(true)
, ThrottleDelay(TDuration::MilliSeconds(100))
, MinVal(0)
, MaxVal(0)
@@ -201,10 +201,10 @@ namespace NActors {
return (MinVal <= component) && (component <= MaxVal) && !ComponentNames[component].empty();
}
- void TSettings::SetAllowDrop(bool val) {
- AllowDrop = val;
- }
-
+ void TSettings::SetAllowDrop(bool val) {
+ AllowDrop = val;
+ }
+
void TSettings::SetThrottleDelay(TDuration value) {
ThrottleDelay = value;
}
diff --git a/library/cpp/actors/core/log_settings.h b/library/cpp/actors/core/log_settings.h
index acab6bb93e1..7fe4504edd9 100644
--- a/library/cpp/actors/core/log_settings.h
+++ b/library/cpp/actors/core/log_settings.h
@@ -72,7 +72,7 @@ namespace NActors {
TActorId LoggerActorId;
EComponent LoggerComponent;
ui64 TimeThresholdMs;
- bool AllowDrop;
+ bool AllowDrop;
TDuration ThrottleDelay;
TArrayHolder<TAtomic> ComponentInfo;
TVector<TString> ComponentNames;
@@ -92,7 +92,7 @@ namespace NActors {
ELogFormat Format;
TString ShortHostName;
TString ClusterName;
- TString MessagePrefix;
+ TString MessagePrefix;
// The best way to provide minVal, maxVal and func is to have
// protobuf enumeration of components. In this case protoc
@@ -161,7 +161,7 @@ namespace NActors {
static int PowerOf2Mask(int val);
static bool IsValidPriority(EPriority priority);
bool IsValidComponent(EComponent component);
- void SetAllowDrop(bool val);
+ void SetAllowDrop(bool val);
void SetThrottleDelay(TDuration value);
void SetUseLocalTimestamps(bool value);
diff --git a/library/cpp/actors/core/mailbox.cpp b/library/cpp/actors/core/mailbox.cpp
index 87337bfa9e3..d84b4f9e466 100644
--- a/library/cpp/actors/core/mailbox.cpp
+++ b/library/cpp/actors/core/mailbox.cpp
@@ -180,7 +180,7 @@ namespace NActors {
TSimpleMailbox* const mailbox = TSimpleMailbox::Get(lineHint, x);
#if (!defined(_tsan_enabled_))
Y_VERIFY_DEBUG(mailbox->Type == (ui32)x->MailboxType);
-#endif
+#endif
mailbox->Queue.Push(ev.Release());
if (mailbox->MarkForSchedule()) {
RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
@@ -200,11 +200,11 @@ namespace NActors {
"We expect that one line can store more simple mailboxes than revolving mailboxes");
if (lineHint > TRevolvingMailbox::MaxMailboxesInLine())
return false;
-
+
TRevolvingMailbox* const mailbox = TRevolvingMailbox::Get(lineHint, x);
#if (!defined(_tsan_enabled_))
Y_VERIFY_DEBUG(mailbox->Type == (ui32)x->MailboxType);
-#endif
+#endif
mailbox->QueueWriter.Push(ev.Release());
if (mailbox->MarkForSchedule()) {
RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
diff --git a/library/cpp/actors/core/mailbox.h b/library/cpp/actors/core/mailbox.h
index 1879a8aea6a..0bd9c4d314e 100644
--- a/library/cpp/actors/core/mailbox.h
+++ b/library/cpp/actors/core/mailbox.h
@@ -370,7 +370,7 @@ namespace NActors {
TRevolvingMailbox();
~TRevolvingMailbox();
-
+
IEventHandle* Pop() {
return QueueReader.Pop();
}
diff --git a/library/cpp/actors/core/mon_stats.h b/library/cpp/actors/core/mon_stats.h
index 2415537e712..d55552af0cb 100644
--- a/library/cpp/actors/core/mon_stats.h
+++ b/library/cpp/actors/core/mon_stats.h
@@ -1,16 +1,16 @@
-#pragma once
-
-#include "defs.h"
-#include "actor.h"
+#pragma once
+
+#include "defs.h"
+#include "actor.h"
#include <library/cpp/monlib/metrics/histogram_snapshot.h>
-#include <util/system/hp_timer.h>
-
-namespace NActors {
+#include <util/system/hp_timer.h>
+
+namespace NActors {
struct TLogHistogram : public NMonitoring::IHistogramSnapshot {
TLogHistogram() {
memset(Buckets, 0, sizeof(Buckets));
}
-
+
inline void Add(ui64 val, ui64 inc = 1) {
size_t ind = 0;
#if defined(__clang__) && __clang_major__ == 3 && __clang_minor__ == 7
@@ -27,15 +27,15 @@ namespace NActors {
RelaxedStore(&TotalSamples, RelaxedLoad(&TotalSamples) + inc);
RelaxedStore(&Buckets[ind], RelaxedLoad(&Buckets[ind]) + inc);
}
-
+
void Aggregate(const TLogHistogram& other) {
const ui64 inc = RelaxedLoad(&other.TotalSamples);
RelaxedStore(&TotalSamples, RelaxedLoad(&TotalSamples) + inc);
for (size_t i = 0; i < Y_ARRAY_SIZE(Buckets); ++i) {
Buckets[i] += RelaxedLoad(&other.Buckets[i]);
}
- }
-
+ }
+
// IHistogramSnapshot
ui32 Count() const override {
return Y_ARRAY_SIZE(Buckets);
@@ -57,7 +57,7 @@ namespace NActors {
ui64 TotalSamples = 0;
ui64 Buckets[65];
};
-
+
struct TExecutorPoolStats {
ui64 MaxUtilizationTime = 0;
};
@@ -86,7 +86,7 @@ namespace NActors {
ui64 MailboxPushedOutBySoftPreemption = 0;
ui64 MailboxPushedOutByTime = 0;
ui64 MailboxPushedOutByEventCount = 0;
-
+
        TExecutorThreadStats(size_t activityVecSize = 1) // must not be empty, as 0 is used as the default
: ElapsedTicksByActivity(activityVecSize)
, ReceivedEventsByActivity(activityVecSize)
@@ -103,7 +103,7 @@ namespace NActors {
for (size_t at = 0; at < otherSize; ++at)
self[at] += RelaxedLoad(&other[at]);
}
-
+
void Aggregate(const TExecutorThreadStats& other) {
SentEvents += RelaxedLoad(&other.SentEvents);
ReceivedEvents += RelaxedLoad(&other.ReceivedEvents);
@@ -115,9 +115,9 @@ namespace NActors {
ParkedTicks += RelaxedLoad(&other.ParkedTicks);
BlockedTicks += RelaxedLoad(&other.BlockedTicks);
MailboxPushedOutBySoftPreemption += RelaxedLoad(&other.MailboxPushedOutBySoftPreemption);
- MailboxPushedOutByTime += RelaxedLoad(&other.MailboxPushedOutByTime);
- MailboxPushedOutByEventCount += RelaxedLoad(&other.MailboxPushedOutByEventCount);
-
+ MailboxPushedOutByTime += RelaxedLoad(&other.MailboxPushedOutByTime);
+ MailboxPushedOutByEventCount += RelaxedLoad(&other.MailboxPushedOutByEventCount);
+
ActivationTimeHistogram.Aggregate(other.ActivationTimeHistogram);
EventDeliveryTimeHistogram.Aggregate(other.EventDeliveryTimeHistogram);
EventProcessingCountHistogram.Aggregate(other.EventProcessingCountHistogram);
@@ -143,5 +143,5 @@ namespace NActors {
return ActorsAliveByActivity.size();
}
};
-
+
}
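
TExecutorThreadStats above is kept per worker thread and folded together with Aggregate(), which is how the executor pools' GetCurrentStats() paths build a per-pool picture. A small sketch of that folding step; the helper is illustrative and assumes the per-thread copies were already filled (e.g. via GetCurrentStats as in executor_pool_basic.cpp above):

    #include <library/cpp/actors/core/mon_stats.h>
    #include <util/generic/vector.h>

    // Fold per-thread stats into one accumulator: plain counters are summed and the
    // activation/delivery/processing histograms are merged via Aggregate().
    NActors::TExecutorThreadStats SumThreadStats(const TVector<NActors::TExecutorThreadStats>& perThread) {
        const size_t activities = perThread.empty() ? 1 : perThread.front().ActorsAliveByActivity.size();
        NActors::TExecutorThreadStats total(activities);   // the activity vector must not be empty
        for (const auto& stats : perThread) {
            total.Aggregate(stats);
        }
        return total;
    }
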
diff --git a/library/cpp/actors/core/probes.cpp b/library/cpp/actors/core/probes.cpp
index ae475bb59f9..7ace83e1020 100644
--- a/library/cpp/actors/core/probes.cpp
+++ b/library/cpp/actors/core/probes.cpp
@@ -1,10 +1,10 @@
-#include "probes.h"
-
+#include "probes.h"
+
#include "actorsystem.h"
#include <util/string/builder.h>
-LWTRACE_DEFINE_PROVIDER(ACTORLIB_PROVIDER);
+LWTRACE_DEFINE_PROVIDER(ACTORLIB_PROVIDER);
namespace NActors {
TVector<NLWTrace::TDashboard> LWTraceDashboards(TActorSystemSetup* setup) {
diff --git a/library/cpp/actors/core/probes.h b/library/cpp/actors/core/probes.h
index 536200b9776..4912d6dd26c 100644
--- a/library/cpp/actors/core/probes.h
+++ b/library/cpp/actors/core/probes.h
@@ -1,8 +1,8 @@
-#pragma once
-
+#pragma once
+
#include <library/cpp/lwtrace/all.h>
#include <util/generic/vector.h>
-
+
#define LWACTORID(x) (x).RawX1(), (x).RawX2(), (x).NodeId(), (x).PoolID()
#define LWTYPE_ACTORID ui64, ui64, ui32, ui32
#define LWNAME_ACTORID(n) n "Raw1", n "Raw2", n "NodeId", n "PoolId"
@@ -167,8 +167,8 @@
TYPES(ui32, ui64, TString, TString, ui32), \
NAMES("fromPoolId", "toPoolId", "fromPool", "toPool", "cpu")) \
/**/
-
-LWTRACE_DECLARE_PROVIDER(ACTORLIB_PROVIDER)
+
+LWTRACE_DECLARE_PROVIDER(ACTORLIB_PROVIDER)
namespace NActors {
struct TActorSystemSetup;
diff --git a/library/cpp/actors/core/process_stats.cpp b/library/cpp/actors/core/process_stats.cpp
index f3a73419804..0e1dbd00314 100644
--- a/library/cpp/actors/core/process_stats.cpp
+++ b/library/cpp/actors/core/process_stats.cpp
@@ -1,25 +1,25 @@
-#include "actorsystem.h"
-#include "actor_bootstrapped.h"
-#include "hfunc.h"
+#include "actorsystem.h"
+#include "actor_bootstrapped.h"
+#include "hfunc.h"
#include "process_stats.h"
-
+
#include <library/cpp/monlib/dynamic_counters/counters.h>
#include <library/cpp/monlib/metrics/metric_registry.h>
-
+
#include <util/datetime/uptime.h>
-#include <util/system/defaults.h>
-#include <util/stream/file.h>
-#include <util/string/vector.h>
-#include <util/string/split.h>
-
-#ifndef _win_
+#include <util/system/defaults.h>
+#include <util/stream/file.h>
+#include <util/string/vector.h>
+#include <util/string/split.h>
+
+#ifndef _win_
#include <sys/user.h>
#include <sys/sysctl.h>
-#endif
-
-namespace NActors {
-#ifdef _linux_
-
+#endif
+
+namespace NActors {
+#ifdef _linux_
+
namespace {
template <typename TVal>
static bool ExtractVal(const TString& str, const TString& name, TVal& res) {
@@ -32,16 +32,16 @@ namespace NActors {
res = atol(str.data() + pos);
return true;
}
-
+
float TicksPerMillisec() {
-#ifdef _SC_CLK_TCK
+#ifdef _SC_CLK_TCK
return sysconf(_SC_CLK_TCK) / 1000.0;
-#else
+#else
return 1.f;
-#endif
- }
+#endif
+ }
}
-
+
bool TProcStat::Fill(pid_t pid) {
try {
TString strPid(ToString(pid));
@@ -57,9 +57,9 @@ namespace NActors {
}
// Convert from kB to bytes
Rss *= 1024;
-
+
float tickPerMillisec = TicksPerMillisec();
-
+
TFileInput procStat("/proc/" + strPid + "/stat");
procStat.ReadLine(str);
if (!str.empty()) {
@@ -114,28 +114,28 @@ namespace NActors {
} catch (...) {
return false;
- }
+ }
return true;
- }
-
+ }
+
long TProcStat::ObtainPageSize() {
long sz = sysconf(_SC_PAGESIZE);
return sz;
}
-#else
-
+#else
+
bool TProcStat::Fill(pid_t pid) {
Y_UNUSED(pid);
return false;
}
-
+
long TProcStat::ObtainPageSize() {
return 0;
}
-#endif
-
+#endif
+
namespace {
// Periodically collects process stats and exposes them as mon counters
template <typename TDerived>
@@ -144,7 +144,7 @@ namespace {
static constexpr IActor::EActivityType ActorActivityType() {
return IActor::ACTORLIB_STATS;
}
-
+
TProcStatCollectingActor(TDuration interval)
: Interval(interval)
{
@@ -154,19 +154,19 @@ namespace {
ctx.Schedule(Interval, new TEvents::TEvWakeup());
Self()->Become(&TDerived::StateWork);
}
-
+
STFUNC(StateWork) {
switch (ev->GetTypeRewrite()) {
CFunc(TEvents::TSystem::Wakeup, Wakeup);
}
}
-
+
private:
void Wakeup(const TActorContext& ctx) {
Self()->UpdateCounters(ProcStat);
ctx.Schedule(Interval, new TEvents::TEvWakeup());
- }
-
+ }
+
TDerived* Self() {
ProcStat.Fill(getpid());
return static_cast<TDerived*>(this);
@@ -176,7 +176,7 @@ namespace {
const TDuration Interval;
TProcStat ProcStat;
};
-
+
// Periodically collects process stats and exposes them as mon counters
class TDynamicCounterCollector: public TProcStatCollectingActor<TDynamicCounterCollector> {
using TBase = TProcStatCollectingActor<TDynamicCounterCollector>;
@@ -295,8 +295,8 @@ namespace {
IActor* CreateProcStatCollector(ui32 intervalSec, NMonitoring::TDynamicCounterPtr counters) {
return new TDynamicCounterCollector(intervalSec, counters);
- }
-
+ }
+
IActor* CreateProcStatCollector(TDuration interval, NMonitoring::TMetricRegistry& registry) {
return new TRegistryCollector(interval, registry);
}
diff --git a/library/cpp/actors/core/process_stats.h b/library/cpp/actors/core/process_stats.h
index 454c04e6f37..66346d0b5aa 100644
--- a/library/cpp/actors/core/process_stats.h
+++ b/library/cpp/actors/core/process_stats.h
@@ -1,15 +1,15 @@
-#pragma once
-
-#include "defs.h"
-#include "actor.h"
-
+#pragma once
+
+#include "defs.h"
+#include "actor.h"
+
#include <library/cpp/monlib/dynamic_counters/counters.h>
-
+
namespace NMonitoring {
class TMetricRegistry;
}
-namespace NActors {
+namespace NActors {
struct TProcStat {
ui64 Rss;
ui64 VolCtxSwtch;
diff --git a/library/cpp/actors/core/scheduler_actor.cpp b/library/cpp/actors/core/scheduler_actor.cpp
index 60d972d9aab..febc5e40dd2 100644
--- a/library/cpp/actors/core/scheduler_actor.cpp
+++ b/library/cpp/actors/core/scheduler_actor.cpp
@@ -61,7 +61,7 @@ namespace NActors {
public:
static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::ACTOR_SYSTEM_SCHEDULER_ACTOR;
+ return IActor::ACTOR_SYSTEM_SCHEDULER_ACTOR;
}
TSchedulerActor(const TSchedulerConfig& cfg)
diff --git a/library/cpp/actors/core/ya.make b/library/cpp/actors/core/ya.make
index 5a3a6c7a358..880a9d00dba 100644
--- a/library/cpp/actors/core/ya.make
+++ b/library/cpp/actors/core/ya.make
@@ -80,15 +80,15 @@ SRCS(
memory_tracker.cpp
memory_tracker.h
mon.h
- mon_stats.h
+ mon_stats.h
monotonic.cpp
monotonic.h
worker_context.cpp
worker_context.h
probes.cpp
- probes.h
- process_stats.cpp
- process_stats.h
+ probes.h
+ process_stats.cpp
+ process_stats.h
scheduler_actor.cpp
scheduler_actor.h
scheduler_basic.cpp
diff --git a/library/cpp/actors/helpers/selfping_actor.cpp b/library/cpp/actors/helpers/selfping_actor.cpp
index 2fa2ce3a450..f9bfaf8dc09 100644
--- a/library/cpp/actors/helpers/selfping_actor.cpp
+++ b/library/cpp/actors/helpers/selfping_actor.cpp
@@ -72,8 +72,8 @@ private:
public:
static constexpr auto ActorActivityType() {
return SELF_PING_ACTOR;
- }
-
+ }
+
TSelfPingActor(TDuration sendInterval, const NMonitoring::TDynamicCounters::TCounterPtr& counter,
const NMonitoring::TDynamicCounters::TCounterPtr& calculationTimeCounter)
: SendInterval(sendInterval)
diff --git a/library/cpp/actors/helpers/selfping_actor_ut.cpp b/library/cpp/actors/helpers/selfping_actor_ut.cpp
index 9df99d19498..459635fa24a 100644
--- a/library/cpp/actors/helpers/selfping_actor_ut.cpp
+++ b/library/cpp/actors/helpers/selfping_actor_ut.cpp
@@ -27,9 +27,9 @@ Y_UNIT_TEST_SUITE(TSelfPingTest) {
TDuration::MilliSeconds(100), // sendInterval (unused in test)
counter, counter2);
- UNIT_ASSERT_VALUES_EQUAL(counter->Val(), 0);
+ UNIT_ASSERT_VALUES_EQUAL(counter->Val(), 0);
UNIT_ASSERT_VALUES_EQUAL(counter2->Val(), 0);
-
+
const TActorId actorId = runtime->Register(actor);
Y_UNUSED(actorId);
diff --git a/library/cpp/actors/interconnect/interconnect_handshake.cpp b/library/cpp/actors/interconnect/interconnect_handshake.cpp
index ea09913bdd8..9ede998d8e7 100644
--- a/library/cpp/actors/interconnect/interconnect_handshake.cpp
+++ b/library/cpp/actors/interconnect/interconnect_handshake.cpp
@@ -98,10 +98,10 @@ namespace NActors {
TInstant Deadline;
public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::INTERCONNECT_HANDSHAKE;
- }
-
+ static constexpr IActor::EActivityType ActorActivityType() {
+ return IActor::INTERCONNECT_HANDSHAKE;
+ }
+
THandshakeActor(TInterconnectProxyCommon::TPtr common, const TActorId& self, const TActorId& peer,
ui32 nodeId, ui64 nextPacket, TString peerHostName, TSessionParams params)
: TActorCoroImpl(StackSize, true, true) // allow unhandled poison pills and dtors
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_session.h b/library/cpp/actors/interconnect/interconnect_tcp_session.h
index 846388edfc2..7fc00dbcc5a 100644
--- a/library/cpp/actors/interconnect/interconnect_tcp_session.h
+++ b/library/cpp/actors/interconnect/interconnect_tcp_session.h
@@ -518,10 +518,10 @@ namespace NActors {
TInterconnectProxyCommon::TPtr Common;
public:
- static constexpr EActivityType ActorActivityType() {
- return INTERCONNECT_SESSION_KILLER;
- }
-
+ static constexpr EActivityType ActorActivityType() {
+ return INTERCONNECT_SESSION_KILLER;
+ }
+
TInterconnectSessionKiller(TInterconnectProxyCommon::TPtr common)
: Common(common)
{
diff --git a/library/cpp/actors/interconnect/load.cpp b/library/cpp/actors/interconnect/load.cpp
index b40a01b3e8f..2a8443da71f 100644
--- a/library/cpp/actors/interconnect/load.cpp
+++ b/library/cpp/actors/interconnect/load.cpp
@@ -100,10 +100,10 @@ namespace NInterconnect {
}
public:
- static constexpr IActor::EActivityType ActorActivityType() {
+ static constexpr IActor::EActivityType ActorActivityType() {
return IActor::INTERCONNECT_LOAD_RESPONDER;
- }
-
+ }
+
TLoadResponderMasterActor()
{}
@@ -150,10 +150,10 @@ namespace NInterconnect {
std::shared_ptr<std::atomic_uint64_t> Traffic;
public:
- static constexpr IActor::EActivityType ActorActivityType() {
+ static constexpr IActor::EActivityType ActorActivityType() {
return IActor::INTERCONNECT_LOAD_ACTOR;
- }
-
+ }
+
TLoadActor(const TLoadParams& params)
: Params(params)
{}
diff --git a/library/cpp/actors/interconnect/poller_actor.cpp b/library/cpp/actors/interconnect/poller_actor.cpp
index 9bf3bd1d8d6..e75cbcaef43 100644
--- a/library/cpp/actors/interconnect/poller_actor.cpp
+++ b/library/cpp/actors/interconnect/poller_actor.cpp
@@ -246,10 +246,10 @@ namespace NActors {
std::shared_ptr<TPollerThread> PollerThread;
public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::INTERCONNECT_POLLER;
- }
-
+ static constexpr IActor::EActivityType ActorActivityType() {
+ return IActor::INTERCONNECT_POLLER;
+ }
+
void Bootstrap() {
PollerThread = std::make_shared<TPollerThread>(TlsActivationContext->ExecutorThread.ActorSystem);
Become(&TPollerActor::StateFunc);
diff --git a/library/cpp/actors/interconnect/ut/lib/node.h b/library/cpp/actors/interconnect/ut/lib/node.h
index 95666d3f7a8..ff30b1445e8 100644
--- a/library/cpp/actors/interconnect/ut/lib/node.h
+++ b/library/cpp/actors/interconnect/ut/lib/node.h
@@ -21,10 +21,10 @@ public:
TChannelsConfig channelsSettings = TChannelsConfig(),
ui32 numDynamicNodes = 0, ui32 numThreads = 1) {
TActorSystemSetup setup;
- setup.NodeId = nodeId;
- setup.ExecutorsCount = 1;
+ setup.NodeId = nodeId;
+ setup.ExecutorsCount = 1;
setup.Executors.Reset(new TAutoPtr<IExecutorPool>[setup.ExecutorsCount]);
- for (ui32 i = 0; i < setup.ExecutorsCount; ++i) {
+ for (ui32 i = 0; i < setup.ExecutorsCount; ++i) {
setup.Executors[i].Reset(new TBasicExecutorPool(i, numThreads, 20 /* magic number */));
}
setup.Scheduler.Reset(new TBasicSchedulerThread());
diff --git a/library/cpp/actors/protos/unittests.proto b/library/cpp/actors/protos/unittests.proto
index 64503bc1c19..a856b0942ad 100644
--- a/library/cpp/actors/protos/unittests.proto
+++ b/library/cpp/actors/protos/unittests.proto
@@ -1,5 +1,5 @@
-option cc_enable_arenas = true;
-
+option cc_enable_arenas = true;
+
message TSimple {
required string Str1 = 1;
optional string Str2 = 2;
@@ -16,5 +16,5 @@ message TBigMessage {
message TMessageWithPayload {
optional string Meta = 1;
repeated uint32 PayloadId = 2;
- repeated string SomeData = 3;
+ repeated string SomeData = 3;
}
diff --git a/library/cpp/actors/testlib/test_runtime.cpp b/library/cpp/actors/testlib/test_runtime.cpp
index fd61e4c7201..6fa25b99656 100644
--- a/library/cpp/actors/testlib/test_runtime.cpp
+++ b/library/cpp/actors/testlib/test_runtime.cpp
@@ -19,7 +19,7 @@
#include <util/string/printf.h>
#include <typeinfo>
-bool VERBOSE = false;
+bool VERBOSE = false;
const bool PRINT_EVENT_BODY = false;
namespace {
@@ -86,8 +86,8 @@ namespace NActors {
public:
static constexpr EActivityType ActorActivityType() {
return TEST_ACTOR_RUNTIME;
- }
-
+ }
+
TEdgeActor(TTestActorRuntimeBase* runtime)
: TActor(&TEdgeActor::StateFunc)
, Runtime(runtime)
@@ -722,9 +722,9 @@ namespace NActors {
}
void TTestActorRuntimeBase::SetVerbose(bool verbose) {
- VERBOSE = verbose;
- }
-
+ VERBOSE = verbose;
+ }
+
void TTestActorRuntimeBase::AddLocalService(const TActorId& actorId, const TActorSetupCmd& cmd, ui32 nodeIndex) {
Y_VERIFY(!IsInitialized);
Y_VERIFY(nodeIndex < NodeCount);
@@ -1038,10 +1038,10 @@ namespace NActors {
bool TTestActorRuntimeBase::DispatchEvents(const TDispatchOptions& options, TInstant simDeadline) {
TGuard<TMutex> guard(Mutex);
- return DispatchEventsInternal(options, simDeadline);
- }
-
- // Mutex must be locked by caller!
+ return DispatchEventsInternal(options, simDeadline);
+ }
+
+ // Mutex must be locked by caller!
bool TTestActorRuntimeBase::DispatchEventsInternal(const TDispatchOptions& options, TInstant simDeadline) {
TDispatchContext localContext;
localContext.Options = &options;
@@ -1253,9 +1253,9 @@ namespace NActors {
if (!localContext.FoundNonEmptyMailboxes.empty())
return true;
- if (options.CustomFinalCondition && options.CustomFinalCondition())
- return true;
-
+ if (options.CustomFinalCondition && options.CustomFinalCondition())
+ return true;
+
if (options.FinalEvents.empty()) {
for (auto& mbox : currentMailboxes) {
if (!mbox.second->IsActive(TInstant::MicroSeconds(CurrentTimestamp)))
@@ -1755,8 +1755,8 @@ namespace NActors {
public:
static constexpr EActivityType ActorActivityType() {
return TEST_ACTOR_RUNTIME;
- }
-
+ }
+
TReplyActor(TStrandingActorDecorator* owner)
: TActor(&TReplyActor::StateFunc)
, Owner(owner)
@@ -1771,8 +1771,8 @@ namespace NActors {
static constexpr EActivityType ActorActivityType() {
return TEST_ACTOR_RUNTIME;
- }
-
+ }
+
TStrandingActorDecorator(const TActorId& delegatee, bool isSync, const TVector<TActorId>& additionalActors,
TSimpleSharedPtr<TStrandingActorDecoratorContext> context, TTestActorRuntimeBase* runtime,
TReplyCheckerCreator createReplyChecker)
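The test_runtime.cpp hunks above keep a common locking split: the public DispatchEvents takes the mutex and immediately forwards to DispatchEventsInternal, which is documented as requiring the mutex to be held by the caller. Below is a minimal C++ sketch of that idiom; the names TEventQueueSketch, Dispatch and DispatchInternal are hypothetical, not the real test-runtime types.

#include <mutex>
#include <vector>

// Hypothetical sketch of the DispatchEvents/DispatchEventsInternal split above:
// the public method takes the lock once at the boundary, the *Internal variant
// assumes the caller already holds it, so other locked methods can reuse the
// same logic without re-locking.
class TEventQueueSketch {
public:
    bool Dispatch() {
        std::lock_guard<std::mutex> guard(Mutex);  // lock at the public boundary
        return DispatchInternal();                 // all real work runs under the lock
    }

private:
    // Mutex must be locked by the caller!
    bool DispatchInternal() {
        const bool dispatched = !Pending.empty();
        Pending.clear();                           // stand-in for the real dispatch loop
        return dispatched;
    }

    std::mutex Mutex;
    std::vector<int> Pending;
};

Keeping the lock acquisition at the public boundary lets other already-locked methods call the Internal variant directly without risking recursive locking.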
diff --git a/library/cpp/actors/testlib/test_runtime.h b/library/cpp/actors/testlib/test_runtime.h
index 90de87b5ac6..26e3b45c984 100644
--- a/library/cpp/actors/testlib/test_runtime.h
+++ b/library/cpp/actors/testlib/test_runtime.h
@@ -93,7 +93,7 @@ namespace NActors {
TVector<TFinalEventCondition> FinalEvents;
TVector<TEventMailboxId> NonEmptyMailboxes;
TVector<TEventMailboxId> OnlyMailboxes;
- std::function<bool()> CustomFinalCondition;
+ std::function<bool()> CustomFinalCondition;
bool Quiet = false;
};
@@ -219,8 +219,8 @@ namespace NActors {
TEventFilter SetEventFilter(TEventFilter filterFunc);
TScheduledEventFilter SetScheduledEventFilter(TScheduledEventFilter filterFunc);
TRegistrationObserver SetRegistrationObserverFunc(TRegistrationObserver observerFunc);
- static bool IsVerbose();
- static void SetVerbose(bool verbose);
+ static bool IsVerbose();
+ static void SetVerbose(bool verbose);
TDuration SetDispatchTimeout(TDuration timeout);
void SetDispatchedEventsLimit(ui64 limit) {
DispatchedEventsLimit = limit;
@@ -499,7 +499,7 @@ namespace NActors {
void ClearMailbox(ui32 nodeId, ui32 hint);
void HandleNonEmptyMailboxesForEachContext(TEventMailboxId mboxId);
void UpdateFinalEventsStatsForEachContext(IEventHandle& ev);
- bool DispatchEventsInternal(const TDispatchOptions& options, TInstant simDeadline);
+ bool DispatchEventsInternal(const TDispatchOptions& options, TInstant simDeadline);
private:
ui64 ScheduledCount;
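The TDispatchOptions::CustomFinalCondition member touched above lets a test stop dispatching as soon as an arbitrary predicate holds. A hedged usage sketch, assuming the usual NActors namespace and include path for the test library; the helper name and the condition are made up.

#include <library/cpp/actors/testlib/test_runtime.h>  // assumed include path
#include <util/datetime/base.h>

// Dispatch events until the caller-observed reply counter reaches three
// (or the dispatch machinery gives up). Hypothetical helper for illustration.
bool WaitForThreeReplies(NActors::TTestActorRuntimeBase& runtime, int& replies) {
    NActors::TDispatchOptions options;
    options.CustomFinalCondition = [&replies]() { return replies >= 3; };
    return runtime.DispatchEvents(options, TInstant::Max());
}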
diff --git a/library/cpp/execprofile/annotate_profile.pl b/library/cpp/execprofile/annotate_profile.pl
index c6923c8e943..1a8c5d65a15 100644
--- a/library/cpp/execprofile/annotate_profile.pl
+++ b/library/cpp/execprofile/annotate_profile.pl
@@ -1,360 +1,360 @@
-#!/usr/bin/env perl
-
-#
-# Takes a profile file as input and prints out annotated disassembly
-# Usage:
-# ./annotate_profile.pl <binary_name> <profile_name>
-#
-
-
-# Draws a bar of the specified length, filled up to the specified fill level
-sub DRAW_BAR($$) {
- my ($length, $filled) = @_;
- my $bar = "";
- --$filled;
- while ($filled > 0) {
- $bar = $bar . "X";
- $length--;
- $filled--;
- }
- while ($length > 0) {
- $bar = $bar . " ";
- $length--;
- }
- return $bar;
-}
-
-my $curFunc = "";
-my $curModule = "";
-my $allHits = 0;
-my %moduleHits;
-my %funcModule;
-my %funcHits;
-my %funcHottestCount;
-my %funcStart;
-my %funcEnd;
-my %funcNames;
-my %funcBaseAddrs;
-my %funcSizes;
-my %addrHits;
-my %addrFunc;
-my %moduleBaseAddr;
-my @funcSortByAddr;
-my %demangledNames;
-my %srcLineHits;
-my %srcFileHits;
-
-# Demangles a C++ function name
-sub DEMANGLE($) {
- my ($name) = @_;
- if (exists $demangledNames{$name}) {
- return $demangledNames{$name};
- }
- if ($name =~ /^_Z/) {
- my $cmd = "c++filt -p \'$name\' |";
- open(my $RES, $cmd ) || die "No c++filt";
- my $demangled_name = <$RES>;
- chomp($demangled_name);
- close $RES;
- if (length($demangled_name) !=0) {
- $name = $demangled_name;
- }
- }
- return $name;
-}
-
-# Saves function info
-sub AddFunc($$$$$)
-{
- my ($func, $bin_file, $baseAddr, $size, $name) = @_;
- $funcModule{$func} = $bin_file;
- $funcBaseAddrs{$func} = $baseAddr;
- # A function with the same base address can be mentioned multiple times with different sizes (0, and non-0, WTF??)
- if ((! exists $funcSizes{$func}) || ($funcSizes{$func} < $size)) {
- $funcSizes{$func} = $size;
- }
- $funcNames{$func} = $name;
- $funcStart{$func} = $func;
-# printf "%08x\t%08x\t%016x\t%s\t%s\n",
-# $funcBaseAddrs{$func}, $funcSizes{$func}, $moduleBaseAddr, $funcModule{$func}, $funcNames{$func};
-}
-
-# Reads list of all functions in a module
-sub ReadFunctionList($$) {
- my ($bin_file, $moduleBaseAddr) = @_;
- if (! -e $bin_file) {
- return;
- }
- my $readelf_cmd = "readelf -W -s $bin_file |";
-# print "$readelf_cmd\n";
- my $IN_FILE;
- open($IN_FILE, $readelf_cmd) || die "couldn't open the file!";
- while (my $line = <$IN_FILE>) {
- chomp($line);
- # " 33: 00000000000a0fc0 433 FUNC GLOBAL DEFAULT 10 getipnodebyaddr@@FBSD_1.0"
- if ($line =~ m/^\s*\d+:\s+([0-9a-fA-F]+)\s+(\d+)\s+FUNC\s+\w+\s+DEFAULT\s+\d+\s+(.*)$/) {
- # Read function info
- my $name = $3;
- my $baseAddr = hex($1) + $moduleBaseAddr;
- my $func = $baseAddr;
- my $size = $2;
- AddFunc($func, $bin_file, $baseAddr, $size, $name);
- }
- }
- close($IN_FILE);
- @funcSortByAddr = sort {$funcBaseAddrs{$a} <=> $funcBaseAddrs{$b} } keys %funcBaseAddrs;
-# printf "%016x\t%s\t%d\n", $moduleBaseAddr, $bin_file, $#funcSortByAddr+1;
-}
-
-# Reads the profile and attributes address hits to the functions
-sub ReadSamples() {
- # First pass saves all samples in a hash-table
- my $samples_file = $ARGV[1];
- my $IN_FILE;
- open($IN_FILE, $samples_file)|| die "couldn't open the file!";
- my $curFuncInd = 0;
- my $curFunc = 0;
- my $curFuncBegin = 0;
- my $curFuncEnd = 0;
- my $curModule = "";
- my $curModuleBase = 0;
- my $read_samples = 0;
- my $samplesStarted = 0;
- while (my $line = <$IN_FILE>) {
- chomp($line);
-
- if ($line =~ m/^samples:\s+(\d+)\s+unique:\s+(\d+)\s+dropped:\s+(\d+)\s+searchskips:\s+(\d+)$/) {
- $total_samples = $1;
- $unique_samples = $2;
- $dropped_samples = $3;
- $search_skips = $4;
- next;
- }
-
- if ($line =~ m/^Samples:$/) {
- $samplesStarted = 1;
- next;
- } elsif (!$samplesStarted) {
- print "$line\n";
- next;
- }
-
-# print "$line\n";
- if ($line =~ m/^Func\t\d+/) {
- # "Func 2073 0x803323000 0x803332fd0 /lib/libthr.so.3 pthread_cond_init"
- my @cols = split(/\t/, $line);
- $curModule = $cols[4];
- $curModuleBase = hex($cols[2]);
- if (0x400000 == $curModuleBase) {
- $curModuleBase = 0;
- }
- $curFunc = hex($cols[3]);
- if (! exists $moduleBaseAddr{$curModule}) {
- $moduleBaseAddr{$curModule} = $curModuleBase;
- ReadFunctionList($curModule, $curModuleBase);
- }
- if (! exists $funcNames{$curFunc}) {
- my $name = sprintf("unknown_0x%08x", $curFunc);
- AddFunc($curFunc, $curModule, $curFunc, 0, $name);
- }
- } elsif ($line =~ m/^\d+\t0x([0-9,a-f,A-F]+)\t(\d+)/) {
- # Read one sample for the current function
- $read_samples++;
- my $addr = hex($1);
-# print "$addr\n";
- if ($addr >= $curFuncEnd) {
- # Find the function the current address belongs to
- while ($curFuncInd <= $#funcSortByAddr) {
- my $f = $funcSortByAddr[$curFuncInd];
- my $begin = $funcBaseAddrs{$f};
- my $end = $funcBaseAddrs{$f} + $funcSizes{$f};
- if ($begin <= $addr and $addr < $end) {
- $curFunc = $f;
- $funcStart{$curFunc} = $addr;
- $curFuncBegin = $begin;
- $curFuncEnd = $end;
- last;
- } elsif ($addr < $begin) {
-# printf "X3: func:%08x\tname:%s\tbase:%08x\tsize:%08x\t%s\nline:%s\n",
-# $curFunc, $funcNames{$curFunc}, $funcBaseAddrs{$curFunc}, $funcSizes{$curFunc}, $curModule, $line;
- last;
- }
- ++$curFuncInd;
- }
- }
-
- $funcHits{$curFunc} += $2;
- if ($funcHottestCount{$curFunc} < $2) {
- $funcHottestCount{$curFunc} = $2;
- }
- $addrHits{$addr} = $2;
- $addrFunc{$addr} = $curFunc;
- $funcEnd{$curFunc} = $addr;
- $allHits += $2;
- $moduleHits{$curModule} += $2;
-
-# printf "%08x\t%08x\t%08x\t%08x\t%s\n", $addr, $curFunc, $curFuncBegin, $curFuncEnd, $funcNames{$curFunc};
- }
- }
- close($IN_FILE);
-
- printf "\nsamples: %d unique: %d dropped: %d searchskips: %d\n", $total_samples, $unique_samples, $dropped_samples, $search_skips;
- if ($read_samples != $unique_samples) {
- printf "\n-----------------------------------------------------------------------------------------------------\n";
- printf "!!!!WARNING: read %d samples, expected %d samples, profiling results might be not acqurate!!!!", $read_samples, $unique_samples;
- printf "\n-----------------------------------------------------------------------------------------------------\n";
- }
-}
-
-# Dumps module stats
-sub DumpModules() {
- # Sort functions by hit counts and dump the list
- my @modules = sort {$a <=> $b } keys %moduleHits;
- for (my $i = 0; $i <= $#modules; ++$i) {
- my $m = $modules[$i];
- my $cnt = $moduleHits{$m};
- my $perc = 100.0 * $cnt / $allHits;
- printf "%12d\t%6.2f%% |%s %s\n", $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), $m;
- }
-}
-
-# Dumps top N hot functions
-sub DumpHotFunc($) {
- my ($maxCnt) = @_;
- # Sort functions by hit counts and dump the list
- my @hotFunc = sort {$funcHits{$b} <=> $funcHits{$a} } keys %funcHits;
-# print $#hotFunc;
- for (my $i = 0; $i <= $#hotFunc && $i < $maxCnt; ++$i) {
- my $f = $hotFunc[$i];
- my $cnt = $funcHits{$f};
- my $perc = 100.0 * $cnt / $allHits;
- printf "%12d\t%6.2f%% |%s %s\n", $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), DEMANGLE($funcNames{$f});
- }
-}
-
-# Dumps top N hotspots (hot addresses)
-sub DumpHotSpots($) {
- my ($maxCnt) = @_;
- # Sort addresses by hit counts and dump the list
- my @hotSpots = sort {$addrHits{$b} <=> $addrHits{$a} } keys %addrHits;
- for (my $i = 0; $i <= $#hotSpots && $i < $maxCnt; ++$i) {
- my $s = $hotSpots[$i];
- my $cnt = $addrHits{$s};
- my $perc = 100.0 * $cnt / $allHits;
- my $f = $addrFunc{$s};
- my $fname = $funcNames{$f};
- printf "%12d\t%6.2f%% |%s 0x%016x\t%s + 0x%x\n",
- $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), $s, DEMANGLE($fname), $s - $funcBaseAddrs{$f};
- }
-}
-
-# Adds hit information to a disassembly line
-sub ANNOTATE_DISASSM($$$$) {
- my ($address, $disassm, $max_hit_count, $func_hit_count) = @_;
- my $hit_count = $addrHits{$address};
- my $perc = sprintf("% 7.2f%%", 100*$hit_count/$func_hit_count);
- $address = sprintf("% 8x", $address);
- print $address . " " . $hit_count . "\t" . $perc . " |" .
- DRAW_BAR(20, 20*$hit_count/$max_hit_count) . "\t" . $disassm . "\n";
-}
-
-# Dumps annotated disassembly of the specified function (actually not the whole function but
-# just the addresses between the first and last hit)
-sub DumpDisasm($) {
- my ($name) = @_;
- if (exists $funcStart{$name} && exists $funcEnd{$name} && $funcStart{$name}!=0) {
- my $module = $funcModule{$name};
- my $modBase = $moduleBaseAddr{$module};
- my $start_address = $funcStart{$name} - $modBase;
- my $stop_address = $funcEnd{$name} - $modBase + 1;
-# print " " . $funcStart{$name} . " " . $funcEnd{$name} . " $modBase ---";
- my $max_hit_count = $funcHits{$name};
- my $objdump_cmd = "objdump -C -d -l --start-address=" . $start_address .
- " --stop-address=" . $stop_address . " " . $module . " |";
- if ($stop_address - $start_address < 10000000) { # don't try to disaassemble more than 10MB, because most likely it's a bug
-# print STDERR $objdump_cmd . "\n";
- open(my $OBJDUMP, $objdump_cmd) || die "No objdump";
- my $srcLine = "func# ". $name;
- my $srcFile = $module;
- while (my $objdump_line = <$OBJDUMP>) {
- # filter disassembly lines
- if ($objdump_line =~ /^Disassembly of section/) {
- } elsif ($objdump_line =~ m/^\s*([0-9,a-f,A-F]+):\s*(.*)/) {
- my $addr = hex($1);
- my $hit_count = $addrHits{$addr};
- if ($hit_count > 0) {
- $srcLineHits{$srcLine} += $hit_count;
- $srcFileHits{$srcFile} += $hit_count;
- }
- ANNOTATE_DISASSM($addr + $modBase, $2, $funcHottestCount{$name}, $max_hit_count);
- } elsif ($objdump_line =~ m/^(\/.*):(\d+)$/) {
- $srcLine = $objdump_line;
- $srcFile = $1;
- chomp($srcLine);
- print $objdump_line;
- } else {
- print $objdump_line;
- }
- }
- close $OBJDUMP;
- }
- }
-}
-
-# Dumps disassembly for the top N hot functions
-sub DumpFuncDissasm($) {
- (my $maxCnt) = @_;
- my @funcs = sort {$funcHits{$b} <=> $funcHits{$a} } keys %funcHits;
- print $#funcs . "\n";
- for (my $i = 0; $i <= $#funcs && $i < $maxCnt; ++$i) {
- my $f = $funcs[$i];
- print "\n--------------------------------------------------------------------------------------------------------------\n";
- printf "hits:%d\t%7.2f%%\tbase:%08x\tstart:%08x\tend:%08x\t%s\n",
- $funcHits{$f}, 100*$funcHits{$f}/$allHits, $funcBaseAddrs{$f}, $funcStart{$f}, $funcEnd{$f}, DEMANGLE($funcNames{$f});
- print "--------------------------------------------------------------------------------------------------------------\n";
- DumpDisasm($f);
- }
-}
-
-sub DumpSrcFiles($) {
- (my $maxCnt) = @_;
- my @srcFiles = sort {$srcFileHits{$b} <=> $srcFileHits{$a} } keys %srcFileHits;
- for (my $i = 0; $i <= $#srcFiles && $i < $maxCnt; ++$i) {
- my $f = $srcFiles[$i];
- my $cnt = $srcFileHits{$f};
- printf "%12d\t%6.2f%% |%s %s\n", $cnt, 100*$cnt/$allHits, DRAW_BAR(20, 20*$cnt/$allHits), $f;
- }
-}
-
-sub DumpSrcLines($) {
- (my $maxCnt) = @_;
- my @srcLines = sort {$srcLineHits{$b} <=> $srcLineHits{$a} } keys %srcLineHits;
- for (my $i = 0; $i <= $#srcLines && $i < $maxCnt; ++$i) {
- my $l = $srcLines[$i];
- my $cnt = $srcLineHits{$l};
- printf "%12d\t%6.2f%% |%s %s\n", $cnt, 100*$cnt/$allHits, DRAW_BAR(20, 20*$cnt/$allHits), $l;
- }
-}
-
-ReadFunctionList($ARGV[0], 0);
-ReadSamples();
-print "\nModules:\n";
-DumpModules();
-print "\nHot functions:\n";
-DumpHotFunc(100);
-print "\nHotspots:\n";
-DumpHotSpots(100);
-DumpFuncDissasm(100);
-print "\nHot src files:\n";
-DumpSrcFiles(100);
-print "\nHot src lines:\n";
-DumpSrcLines(100);
-
-# my @funcs = sort {$funcBaseAddrs{$a} <=> $funcBaseAddrs{$b} } keys %funcHits;
-# printf "%d\n", $#funcs;
-# for (my $i = 0; $i <= $#funcs; ++$i) {
-# my $f = $funcs[$i];
-# printf "%s\t%d\tbase:%08x\tstart:%08x\tend:%08x\t%s\n",
-# $funcNames{$f}, $funcHits{$f}, $funcBaseAddrs{$f}, $funcStart{$f}, $funcEnd{$f}, $funcModule{$f};
-# #DumpDisasm($f);
-# }
+#!/usr/bin/env perl
+
+#
+# Takes a profile file as input and prints out annotated disassembly
+# Usage:
+# ./annotate_profile.pl <binary_name> <profile_name>
+#
+
+
+# Draws a bar of the specified length, filled up to the specified fill level
+sub DRAW_BAR($$) {
+ my ($length, $filled) = @_;
+ my $bar = "";
+ --$filled;
+ while ($filled > 0) {
+ $bar = $bar . "X";
+ $length--;
+ $filled--;
+ }
+ while ($length > 0) {
+ $bar = $bar . " ";
+ $length--;
+ }
+ return $bar;
+}
+
+my $curFunc = "";
+my $curModule = "";
+my $allHits = 0;
+my %moduleHits;
+my %funcModule;
+my %funcHits;
+my %funcHottestCount;
+my %funcStart;
+my %funcEnd;
+my %funcNames;
+my %funcBaseAddrs;
+my %funcSizes;
+my %addrHits;
+my %addrFunc;
+my %moduleBaseAddr;
+my @funcSortByAddr;
+my %demangledNames;
+my %srcLineHits;
+my %srcFileHits;
+
+# Demangles a C++ function name
+sub DEMANGLE($) {
+ my ($name) = @_;
+ if (exists $demangledNames{$name}) {
+ return $demangledNames{$name};
+ }
+ if ($name =~ /^_Z/) {
+ my $cmd = "c++filt -p \'$name\' |";
+ open(my $RES, $cmd ) || die "No c++filt";
+ my $demangled_name = <$RES>;
+ chomp($demangled_name);
+ close $RES;
+ if (length($demangled_name) !=0) {
+ $name = $demangled_name;
+ }
+ }
+ return $name;
+}
+
+# Saves function info
+sub AddFunc($$$$$)
+{
+ my ($func, $bin_file, $baseAddr, $size, $name) = @_;
+ $funcModule{$func} = $bin_file;
+ $funcBaseAddrs{$func} = $baseAddr;
+ # A function with the same base address can be mentioned multiple times with different sizes (0, and non-0, WTF??)
+ if ((! exists $funcSizes{$func}) || ($funcSizes{$func} < $size)) {
+ $funcSizes{$func} = $size;
+ }
+ $funcNames{$func} = $name;
+ $funcStart{$func} = $func;
+# printf "%08x\t%08x\t%016x\t%s\t%s\n",
+# $funcBaseAddrs{$func}, $funcSizes{$func}, $moduleBaseAddr, $funcModule{$func}, $funcNames{$func};
+}
+
+# Reads list of all functions in a module
+sub ReadFunctionList($$) {
+ my ($bin_file, $moduleBaseAddr) = @_;
+ if (! -e $bin_file) {
+ return;
+ }
+ my $readelf_cmd = "readelf -W -s $bin_file |";
+# print "$readelf_cmd\n";
+ my $IN_FILE;
+ open($IN_FILE, $readelf_cmd) || die "couldn't open the file!";
+ while (my $line = <$IN_FILE>) {
+ chomp($line);
+ # " 33: 00000000000a0fc0 433 FUNC GLOBAL DEFAULT 10 getipnodebyaddr@@FBSD_1.0"
+ if ($line =~ m/^\s*\d+:\s+([0-9a-fA-F]+)\s+(\d+)\s+FUNC\s+\w+\s+DEFAULT\s+\d+\s+(.*)$/) {
+ # Read function info
+ my $name = $3;
+ my $baseAddr = hex($1) + $moduleBaseAddr;
+ my $func = $baseAddr;
+ my $size = $2;
+ AddFunc($func, $bin_file, $baseAddr, $size, $name);
+ }
+ }
+ close($IN_FILE);
+ @funcSortByAddr = sort {$funcBaseAddrs{$a} <=> $funcBaseAddrs{$b} } keys %funcBaseAddrs;
+# printf "%016x\t%s\t%d\n", $moduleBaseAddr, $bin_file, $#funcSortByAddr+1;
+}
+
+# Reads the profile and attributes address hits to the functions
+sub ReadSamples() {
+ # First pass saves all samples in a hash-table
+ my $samples_file = $ARGV[1];
+ my $IN_FILE;
+ open($IN_FILE, $samples_file)|| die "couldn't open the file!";
+ my $curFuncInd = 0;
+ my $curFunc = 0;
+ my $curFuncBegin = 0;
+ my $curFuncEnd = 0;
+ my $curModule = "";
+ my $curModuleBase = 0;
+ my $read_samples = 0;
+ my $samplesStarted = 0;
+ while (my $line = <$IN_FILE>) {
+ chomp($line);
+
+ if ($line =~ m/^samples:\s+(\d+)\s+unique:\s+(\d+)\s+dropped:\s+(\d+)\s+searchskips:\s+(\d+)$/) {
+ $total_samples = $1;
+ $unique_samples = $2;
+ $dropped_samples = $3;
+ $search_skips = $4;
+ next;
+ }
+
+ if ($line =~ m/^Samples:$/) {
+ $samplesStarted = 1;
+ next;
+ } elsif (!$samplesStarted) {
+ print "$line\n";
+ next;
+ }
+
+# print "$line\n";
+ if ($line =~ m/^Func\t\d+/) {
+ # "Func 2073 0x803323000 0x803332fd0 /lib/libthr.so.3 pthread_cond_init"
+ my @cols = split(/\t/, $line);
+ $curModule = $cols[4];
+ $curModuleBase = hex($cols[2]);
+ if (0x400000 == $curModuleBase) {
+ $curModuleBase = 0;
+ }
+ $curFunc = hex($cols[3]);
+ if (! exists $moduleBaseAddr{$curModule}) {
+ $moduleBaseAddr{$curModule} = $curModuleBase;
+ ReadFunctionList($curModule, $curModuleBase);
+ }
+ if (! exists $funcNames{$curFunc}) {
+ my $name = sprintf("unknown_0x%08x", $curFunc);
+ AddFunc($curFunc, $curModule, $curFunc, 0, $name);
+ }
+ } elsif ($line =~ m/^\d+\t0x([0-9,a-f,A-F]+)\t(\d+)/) {
+ # Read one sample for the current function
+ $read_samples++;
+ my $addr = hex($1);
+# print "$addr\n";
+ if ($addr >= $curFuncEnd) {
+ # Find the function the current address belongs to
+ while ($curFuncInd <= $#funcSortByAddr) {
+ my $f = $funcSortByAddr[$curFuncInd];
+ my $begin = $funcBaseAddrs{$f};
+ my $end = $funcBaseAddrs{$f} + $funcSizes{$f};
+ if ($begin <= $addr and $addr < $end) {
+ $curFunc = $f;
+ $funcStart{$curFunc} = $addr;
+ $curFuncBegin = $begin;
+ $curFuncEnd = $end;
+ last;
+ } elsif ($addr < $begin) {
+# printf "X3: func:%08x\tname:%s\tbase:%08x\tsize:%08x\t%s\nline:%s\n",
+# $curFunc, $funcNames{$curFunc}, $funcBaseAddrs{$curFunc}, $funcSizes{$curFunc}, $curModule, $line;
+ last;
+ }
+ ++$curFuncInd;
+ }
+ }
+
+ $funcHits{$curFunc} += $2;
+ if ($funcHottestCount{$curFunc} < $2) {
+ $funcHottestCount{$curFunc} = $2;
+ }
+ $addrHits{$addr} = $2;
+ $addrFunc{$addr} = $curFunc;
+ $funcEnd{$curFunc} = $addr;
+ $allHits += $2;
+ $moduleHits{$curModule} += $2;
+
+# printf "%08x\t%08x\t%08x\t%08x\t%s\n", $addr, $curFunc, $curFuncBegin, $curFuncEnd, $funcNames{$curFunc};
+ }
+ }
+ close($IN_FILE);
+
+ printf "\nsamples: %d unique: %d dropped: %d searchskips: %d\n", $total_samples, $unique_samples, $dropped_samples, $search_skips;
+ if ($read_samples != $unique_samples) {
+ printf "\n-----------------------------------------------------------------------------------------------------\n";
+ printf "!!!!WARNING: read %d samples, expected %d samples, profiling results might be not acqurate!!!!", $read_samples, $unique_samples;
+ printf "\n-----------------------------------------------------------------------------------------------------\n";
+ }
+}
+
+# Dumps module stats
+sub DumpModules() {
+ # Sort functions by hit counts and dump the list
+ my @modules = sort {$a <=> $b } keys %moduleHits;
+ for (my $i = 0; $i <= $#modules; ++$i) {
+ my $m = $modules[$i];
+ my $cnt = $moduleHits{$m};
+ my $perc = 100.0 * $cnt / $allHits;
+ printf "%12d\t%6.2f%% |%s %s\n", $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), $m;
+ }
+}
+
+# Dumps top N hot functions
+sub DumpHotFunc($) {
+ my ($maxCnt) = @_;
+ # Sort functions by hit counts and dump the list
+ my @hotFunc = sort {$funcHits{$b} <=> $funcHits{$a} } keys %funcHits;
+# print $#hotFunc;
+ for (my $i = 0; $i <= $#hotFunc && $i < $maxCnt; ++$i) {
+ my $f = $hotFunc[$i];
+ my $cnt = $funcHits{$f};
+ my $perc = 100.0 * $cnt / $allHits;
+ printf "%12d\t%6.2f%% |%s %s\n", $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), DEMANGLE($funcNames{$f});
+ }
+}
+
+# Dumps top N hotspots (hot addresses)
+sub DumpHotSpots($) {
+ my ($maxCnt) = @_;
+ # Sort addresses by hit counts and dump the list
+ my @hotSpots = sort {$addrHits{$b} <=> $addrHits{$a} } keys %addrHits;
+ for (my $i = 0; $i <= $#hotSpots && $i < $maxCnt; ++$i) {
+ my $s = $hotSpots[$i];
+ my $cnt = $addrHits{$s};
+ my $perc = 100.0 * $cnt / $allHits;
+ my $f = $addrFunc{$s};
+ my $fname = $funcNames{$f};
+ printf "%12d\t%6.2f%% |%s 0x%016x\t%s + 0x%x\n",
+ $cnt, $perc, DRAW_BAR(20, 20*$cnt/$allHits), $s, DEMANGLE($fname), $s - $funcBaseAddrs{$f};
+ }
+}
+
+# Adds hit information to a disassembly line
+sub ANNOTATE_DISASSM($$$$) {
+ my ($address, $disassm, $max_hit_count, $func_hit_count) = @_;
+ my $hit_count = $addrHits{$address};
+ my $perc = sprintf("% 7.2f%%", 100*$hit_count/$func_hit_count);
+ $address = sprintf("% 8x", $address);
+ print $address . " " . $hit_count . "\t" . $perc . " |" .
+ DRAW_BAR(20, 20*$hit_count/$max_hit_count) . "\t" . $disassm . "\n";
+}
+
+# Dumps annotated disassembly of the specified function (actually not the whole function but
+# just the addresses between the first and last hit)
+sub DumpDisasm($) {
+ my ($name) = @_;
+ if (exists $funcStart{$name} && exists $funcEnd{$name} && $funcStart{$name}!=0) {
+ my $module = $funcModule{$name};
+ my $modBase = $moduleBaseAddr{$module};
+ my $start_address = $funcStart{$name} - $modBase;
+ my $stop_address = $funcEnd{$name} - $modBase + 1;
+# print " " . $funcStart{$name} . " " . $funcEnd{$name} . " $modBase ---";
+ my $max_hit_count = $funcHits{$name};
+ my $objdump_cmd = "objdump -C -d -l --start-address=" . $start_address .
+ " --stop-address=" . $stop_address . " " . $module . " |";
+ if ($stop_address - $start_address < 10000000) { # don't try to disaassemble more than 10MB, because most likely it's a bug
+# print STDERR $objdump_cmd . "\n";
+ open(my $OBJDUMP, $objdump_cmd) || die "No objdump";
+ my $srcLine = "func# ". $name;
+ my $srcFile = $module;
+ while (my $objdump_line = <$OBJDUMP>) {
+ # filter disassembly lines
+ if ($objdump_line =~ /^Disassembly of section/) {
+ } elsif ($objdump_line =~ m/^\s*([0-9,a-f,A-F]+):\s*(.*)/) {
+ my $addr = hex($1);
+ my $hit_count = $addrHits{$addr};
+ if ($hit_count > 0) {
+ $srcLineHits{$srcLine} += $hit_count;
+ $srcFileHits{$srcFile} += $hit_count;
+ }
+ ANNOTATE_DISASSM($addr + $modBase, $2, $funcHottestCount{$name}, $max_hit_count);
+ } elsif ($objdump_line =~ m/^(\/.*):(\d+)$/) {
+ $srcLine = $objdump_line;
+ $srcFile = $1;
+ chomp($srcLine);
+ print $objdump_line;
+ } else {
+ print $objdump_line;
+ }
+ }
+ close $OBJDUMP;
+ }
+ }
+}
+
+# Dumps disassembly for the top N hot functions
+sub DumpFuncDissasm($) {
+ (my $maxCnt) = @_;
+ my @funcs = sort {$funcHits{$b} <=> $funcHits{$a} } keys %funcHits;
+ print $#funcs . "\n";
+ for (my $i = 0; $i <= $#funcs && $i < $maxCnt; ++$i) {
+ my $f = $funcs[$i];
+ print "\n--------------------------------------------------------------------------------------------------------------\n";
+ printf "hits:%d\t%7.2f%%\tbase:%08x\tstart:%08x\tend:%08x\t%s\n",
+ $funcHits{$f}, 100*$funcHits{$f}/$allHits, $funcBaseAddrs{$f}, $funcStart{$f}, $funcEnd{$f}, DEMANGLE($funcNames{$f});
+ print "--------------------------------------------------------------------------------------------------------------\n";
+ DumpDisasm($f);
+ }
+}
+
+sub DumpSrcFiles($) {
+ (my $maxCnt) = @_;
+ my @srcFiles = sort {$srcFileHits{$b} <=> $srcFileHits{$a} } keys %srcFileHits;
+ for (my $i = 0; $i <= $#srcFiles && $i < $maxCnt; ++$i) {
+ my $f = $srcFiles[$i];
+ my $cnt = $srcFileHits{$f};
+ printf "%12d\t%6.2f%% |%s %s\n", $cnt, 100*$cnt/$allHits, DRAW_BAR(20, 20*$cnt/$allHits), $f;
+ }
+}
+
+sub DumpSrcLines($) {
+ (my $maxCnt) = @_;
+ my @srcLines = sort {$srcLineHits{$b} <=> $srcLineHits{$a} } keys %srcLineHits;
+ for (my $i = 0; $i <= $#srcLines && $i < $maxCnt; ++$i) {
+ my $l = $srcLines[$i];
+ my $cnt = $srcLineHits{$l};
+ printf "%12d\t%6.2f%% |%s %s\n", $cnt, 100*$cnt/$allHits, DRAW_BAR(20, 20*$cnt/$allHits), $l;
+ }
+}
+
+ReadFunctionList($ARGV[0], 0);
+ReadSamples();
+print "\nModules:\n";
+DumpModules();
+print "\nHot functions:\n";
+DumpHotFunc(100);
+print "\nHotspots:\n";
+DumpHotSpots(100);
+DumpFuncDissasm(100);
+print "\nHot src files:\n";
+DumpSrcFiles(100);
+print "\nHot src lines:\n";
+DumpSrcLines(100);
+
+# my @funcs = sort {$funcBaseAddrs{$a} <=> $funcBaseAddrs{$b} } keys %funcHits;
+# printf "%d\n", $#funcs;
+# for (my $i = 0; $i <= $#funcs; ++$i) {
+# my $f = $funcs[$i];
+# printf "%s\t%d\tbase:%08x\tstart:%08x\tend:%08x\t%s\n",
+# $funcNames{$f}, $funcHits{$f}, $funcBaseAddrs{$f}, $funcStart{$f}, $funcEnd{$f}, $funcModule{$f};
+# #DumpDisasm($f);
+# }
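The ReadSamples pass of annotate_profile.pl above attributes every sampled address to a function by walking functions sorted by base address and testing base <= addr < base + size. A self-contained C++ sketch of that lookup; the TFuncRange type and FindFunc name are illustrative only, and a binary search replaces the script's moving cursor.

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct TFuncRange {
    uint64_t Base = 0;   // load address of the function (readelf value + module base)
    uint64_t Size = 0;   // function size in bytes as reported by readelf
    std::string Name;    // symbol name
};

// Returns the function whose [Base, Base + Size) range contains addr,
// or nullptr if the address falls between known functions.
const TFuncRange* FindFunc(const std::vector<TFuncRange>& sortedByBase, uint64_t addr) {
    auto it = std::upper_bound(
        sortedByBase.begin(), sortedByBase.end(), addr,
        [](uint64_t a, const TFuncRange& f) { return a < f.Base; });
    if (it == sortedByBase.begin())
        return nullptr;                  // addr lies below the first known function
    const TFuncRange& cand = *(it - 1);  // last function starting at or before addr
    return addr < cand.Base + cand.Size ? &cand : nullptr;
}

The script can afford a simple forward-moving cursor instead of a search per sample because the profile it reads lists samples sorted by address (Stop() in profile.cpp below sorts them before dumping).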
diff --git a/library/cpp/execprofile/profile.cpp b/library/cpp/execprofile/profile.cpp
index 19003a9a51c..d05de20203f 100644
--- a/library/cpp/execprofile/profile.cpp
+++ b/library/cpp/execprofile/profile.cpp
@@ -1,190 +1,190 @@
#include <util/system/defaults.h>
-#include "profile.h"
-
+#include "profile.h"
+
#if defined(_unix_) && !defined(_bionic_) && !defined(_cygwin_)
-
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
+
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#if defined(_darwin_)
#include <sys/ucontext.h>
#else
-#include <ucontext.h>
+#include <ucontext.h>
#endif
#include <dlfcn.h>
-#include <util/system/platform.h>
+#include <util/system/platform.h>
#include <util/generic/hash.h>
#include <util/generic/map.h>
#include <util/generic/noncopyable.h>
#include <util/generic/algorithm.h>
#include <util/generic/vector.h>
-#include <util/stream/file.h>
-#include <util/string/util.h>
-#include <util/system/datetime.h>
-
-// This class sets SIGPROF handler and captures instruction pointer in it.
+#include <util/stream/file.h>
+#include <util/string/util.h>
+#include <util/system/datetime.h>
+
+// This class sets SIGPROF handler and captures instruction pointer in it.
class TExecutionSampler : TNonCopyable {
-public:
+public:
typedef TVector<std::pair<void*, size_t>> TSampleVector;
-
- struct TStats {
- ui64 SavedSamples;
- ui64 DroppedSamples;
- ui64 SearchSkipCount;
- };
-
- // NOTE: There is no synchronization here as the instance is supposed to be
- // created on the main thread.
+
+ struct TStats {
+ ui64 SavedSamples;
+ ui64 DroppedSamples;
+ ui64 SearchSkipCount;
+ };
+
+ // NOTE: There is no synchronization here as the instance is supposed to be
+ // created on the main thread.
static TExecutionSampler* Instance() {
if (SInstance == nullptr) {
- SInstance = new TExecutionSampler();
- }
-
- return SInstance;
- }
-
+ SInstance = new TExecutionSampler();
+ }
+
+ return SInstance;
+ }
+
void Start() {
- // Set signal handler
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &OldSignalHandler) != 0)
- return;
-
- // Set interval timer
- itimerval tv;
- tv.it_interval.tv_sec = tv.it_value.tv_sec = 0;
- tv.it_interval.tv_usec = tv.it_value.tv_usec = SAMPLE_INTERVAL;
- setitimer(ITIMER_PROF, &tv, &OldTimerValue);
-
- Started = true;
- }
-
+ // Set signal handler
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &OldSignalHandler) != 0)
+ return;
+
+ // Set interval timer
+ itimerval tv;
+ tv.it_interval.tv_sec = tv.it_value.tv_sec = 0;
+ tv.it_interval.tv_usec = tv.it_value.tv_usec = SAMPLE_INTERVAL;
+ setitimer(ITIMER_PROF, &tv, &OldTimerValue);
+
+ Started = true;
+ }
+
void Stop(TSampleVector& sampleVector, TStats& stats) {
- // Reset signal handler and timer
- if (Started) {
+ // Reset signal handler and timer
+ if (Started) {
setitimer(ITIMER_PROF, &OldTimerValue, nullptr);
- sleep(1);
- }
-
- WaitForWriteFlag();
-
- if (Started) {
+ sleep(1);
+ }
+
+ WaitForWriteFlag();
+
+ if (Started) {
sigaction(SIGPROF, &OldSignalHandler, nullptr);
- Started = false;
- }
-
- TExecutionSampler::TSampleVector hits;
- hits.reserve(Samples);
- for (size_t i = 0; i < SZ; ++i) {
+ Started = false;
+ }
+
+ TExecutionSampler::TSampleVector hits;
+ hits.reserve(Samples);
+ for (size_t i = 0; i < SZ; ++i) {
if (Ips[i].first != nullptr) {
- hits.push_back(Ips[i]);
- }
- }
- stats.SavedSamples = Samples;
- stats.DroppedSamples = AtomicGet(DroppedSamples);
- stats.SearchSkipCount = SearchSkipCount;
- AtomicUnlock(&WriteFlag);
-
+ hits.push_back(Ips[i]);
+ }
+ }
+ stats.SavedSamples = Samples;
+ stats.DroppedSamples = AtomicGet(DroppedSamples);
+ stats.SearchSkipCount = SearchSkipCount;
+ AtomicUnlock(&WriteFlag);
+
Sort(hits.begin(), hits.end(), TCompareFirst());
-
- sampleVector.swap(hits);
- }
-
+
+ sampleVector.swap(hits);
+ }
+
void ResetStats() {
- WaitForWriteFlag();
- Clear();
- AtomicUnlock(&WriteFlag);
- }
-
-private:
+ WaitForWriteFlag();
+ Clear();
+ AtomicUnlock(&WriteFlag);
+ }
+
+private:
static const size_t SZ = 2 * 1024 * 1024; // size of the hash table
// inserts work faster if it's a power of 2
static const int SAMPLE_INTERVAL = 1000; // in microseconds
-
- struct TCompareFirst {
+
+ struct TCompareFirst {
bool operator()(const std::pair<void*, size_t>& a, const std::pair<void*, size_t>& b) const {
- return a.first < b.first;
- }
- };
-
- TExecutionSampler()
- : Started(false)
- , Ips(SZ)
- , WriteFlag(0)
- , DroppedSamples(0)
- , Samples(0)
- , UniqueSamples(0)
- , SearchSkipCount(0)
- {
- }
-
+ return a.first < b.first;
+ }
+ };
+
+ TExecutionSampler()
+ : Started(false)
+ , Ips(SZ)
+ , WriteFlag(0)
+ , DroppedSamples(0)
+ , Samples(0)
+ , UniqueSamples(0)
+ , SearchSkipCount(0)
+ {
+ }
+
~TExecutionSampler() = default;
-
- // Signal handler is not allowed to do anything that can deadlock with activity
- // on the thread to which the signal is delivered or corrupt data structures that
- // were in process of update.
- // One such thing is memory allocation. That's why a fixed size vector is
- // preallocated at start.
+
+ // Signal handler is not allowed to do anything that can deadlock with activity
+ // on the thread to which the signal is delivered or corrupt data structures that
+ // were in process of update.
+ // One such thing is memory allocation. That's why a fixed size vector is
+ // preallocated at start.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- (void)info;
- if (signal != SIGPROF) {
- return;
- }
-
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ (void)info;
+ if (signal != SIGPROF) {
+ return;
+ }
+
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
Y_ASSERT(SInstance != nullptr);
-
- SInstance->CaptureIP(GetIp(&ucontext->uc_mcontext));
- }
-
+
+ SInstance->CaptureIP(GetIp(&ucontext->uc_mcontext));
+ }
+
void WaitForWriteFlag() {
- // Wait for write flag to be reset
- ui32 delay = 100;
- while (!AtomicTryLock(&WriteFlag)) {
- usleep(delay);
- delay += delay;
+ // Wait for write flag to be reset
+ ui32 delay = 100;
+ while (!AtomicTryLock(&WriteFlag)) {
+ usleep(delay);
+ delay += delay;
delay = Min(delay, (ui32)5000);
- }
- }
-
+ }
+ }
+
void CaptureIP(void* rip) {
- // Check if the handler on another thread is in the process of adding a sample
- // If this is the case, we just drop the current sample as this should happen
- // rarely.
- if (AtomicTryLock(&WriteFlag)) {
- AddSample(rip);
- AtomicUnlock(&WriteFlag);
- } else {
+ // Check if the handler on another thread is in the process of adding a sample
+ // If this is the case, we just drop the current sample as this should happen
+ // rarely.
+ if (AtomicTryLock(&WriteFlag)) {
+ AddSample(rip);
+ AtomicUnlock(&WriteFlag);
+ } else {
AtomicIncrement(DroppedSamples);
- }
- }
-
- // Hash function applied to the addresses
+ }
+ }
+
+ // Hash function applied to the addresses
static inline ui32 Hash(void* key) {
- return ((size_t)key + (size_t)key / SZ) % SZ;
- }
-
- // Get instruction pointer from the context
+ return ((size_t)key + (size_t)key / SZ) % SZ;
+ }
+
+ // Get instruction pointer from the context
static inline void* GetIp(const mcontext_t* mctx) {
-#if defined _freebsd_
+#if defined _freebsd_
#if defined _64_
- return (void*)mctx->mc_rip;
+ return (void*)mctx->mc_rip;
#else
- return (void*)mctx->mc_eip;
+ return (void*)mctx->mc_eip;
#endif
-#elif defined _linux_
+#elif defined _linux_
#if defined _64_
#if defined(_arm_)
return (void*)mctx->pc;
#else
- return (void*)mctx->gregs[REG_RIP];
+ return (void*)mctx->gregs[REG_RIP];
#endif
#else
- return (void*)mctx->gregs[REG_EIP];
+ return (void*)mctx->gregs[REG_EIP];
#endif
#elif defined _darwin_
#if defined _64_
@@ -199,67 +199,67 @@ private:
#else
return (void*)(*mctx)->__ss.__eip;
#endif
-#endif
#endif
- }
-
+#endif
+ }
+
inline bool AddSample(void* key) {
- ui32 slot = Hash(key);
- ui32 prevSlot = (slot - 1) % SZ;
-
- while (key != Ips[slot].first && !IsSlotEmpty(slot) && slot != prevSlot) {
- slot = (slot + 1) % SZ;
- SearchSkipCount++;
- }
-
- if (key == Ips[slot].first) {
- // increment the count
- Ips[slot].second++;
- ++Samples;
- } else if (InsertsAllowed()) {
- // add new sample and set the count to 1
- Ips[slot].first = key;
- Ips[slot].second = 1;
- ++UniqueSamples;
- ++Samples;
- } else {
- // don't insert new sample if the search is becoming too slow
+ ui32 slot = Hash(key);
+ ui32 prevSlot = (slot - 1) % SZ;
+
+ while (key != Ips[slot].first && !IsSlotEmpty(slot) && slot != prevSlot) {
+ slot = (slot + 1) % SZ;
+ SearchSkipCount++;
+ }
+
+ if (key == Ips[slot].first) {
+ // increment the count
+ Ips[slot].second++;
+ ++Samples;
+ } else if (InsertsAllowed()) {
+ // add new sample and set the count to 1
+ Ips[slot].first = key;
+ Ips[slot].second = 1;
+ ++UniqueSamples;
+ ++Samples;
+ } else {
+ // don't insert new sample if the search is becoming too slow
AtomicIncrement(DroppedSamples);
- return false;
- }
-
- return true;
- }
-
+ return false;
+ }
+
+ return true;
+ }
+
inline bool IsSlotEmpty(ui32 slot) const {
return Ips[slot].first == nullptr;
- }
-
+ }
+
inline bool InsertsAllowed() const {
- return UniqueSamples < SZ / 2;
- }
-
- void
+ return UniqueSamples < SZ / 2;
+ }
+
+ void
Clear() {
Y_ASSERT(WriteFlag == 1);
-
- for (size_t i = 0; i < SZ; ++i) {
+
+ for (size_t i = 0; i < SZ; ++i) {
Ips[i] = std::make_pair((void*)nullptr, (size_t)0);
- }
- Samples = 0;
- AtomicSet(DroppedSamples, 0);
- UniqueSamples = 0;
- SearchSkipCount = 0;
- }
-
+ }
+ Samples = 0;
+ AtomicSet(DroppedSamples, 0);
+ UniqueSamples = 0;
+ SearchSkipCount = 0;
+ }
+
bool Started;
struct sigaction OldSignalHandler;
itimerval OldTimerValue;
-
+
TVector<std::pair<void*, size_t>>
Ips; // The hash table storing addresses and their hitcounts
-
- // TODO: on a big multiproc cache line false sharing by the flag and count might become an issue
+
+ // TODO: on a big multiproc cache line false sharing by the flag and count might become an issue
TAtomic WriteFlag; // Is used to syncronize access to the hash table
TAtomic DroppedSamples; // "dropped sample" count will show how many times
// a sample was dropped either because of write conflict
@@ -267,87 +267,87 @@ private:
ui64 Samples; // Saved samples count
ui64 UniqueSamples; // Number of unique addresses
ui64 SearchSkipCount; // Total number of linear hash table probes due to collisions
-
- static TExecutionSampler* SInstance;
-};
-
-// Performs analysis of samples captured by TExecutionSampler
-class TSampleAnalyser : TNonCopyable {
-public:
- TSampleAnalyser(TExecutionSampler::TSampleVector& samples, const TExecutionSampler::TStats& stats, bool putTimeStamps = false)
- : Samples()
- , Stats(stats)
- , PutTimestamps(putTimeStamps)
- {
- Samples.swap(samples);
- }
-
+
+ static TExecutionSampler* SInstance;
+};
+
+// Performs analysis of samples captured by TExecutionSampler
+class TSampleAnalyser : TNonCopyable {
+public:
+ TSampleAnalyser(TExecutionSampler::TSampleVector& samples, const TExecutionSampler::TStats& stats, bool putTimeStamps = false)
+ : Samples()
+ , Stats(stats)
+ , PutTimestamps(putTimeStamps)
+ {
+ Samples.swap(samples);
+ }
+
~TSampleAnalyser() = default;
-
- void Analyze(FILE* out) const;
-
-private:
- TExecutionSampler::TSampleVector Samples;
+
+ void Analyze(FILE* out) const;
+
+private:
+ TExecutionSampler::TSampleVector Samples;
TExecutionSampler::TStats Stats;
- bool PutTimestamps;
-};
-
+ bool PutTimestamps;
+};
+
void TSampleAnalyser::Analyze(FILE* out) const {
fprintf(out, "samples: %" PRIu64 " unique: %" PRIu64 " dropped: %" PRIu64 " searchskips: %" PRIu64 "\n",
(ui64)Stats.SavedSamples, (ui64)Samples.size(),
(ui64)Stats.DroppedSamples, (ui64)Stats.SearchSkipCount);
-
- fprintf(out, "\nSamples:\n");
- size_t funcCnt = 0;
- void* prevModBase = (void*)-1;
- void* prevFunc = (void*)-1;
- for (size_t i = 0; i < Samples.size(); ++i) {
- // print cycle count once in a while to estimate time consumed by
- // dumping the samples
- if (PutTimestamps && (i % 1000 == 0)) {
- ui64 tm = GetCycleCount();
+
+ fprintf(out, "\nSamples:\n");
+ size_t funcCnt = 0;
+ void* prevModBase = (void*)-1;
+ void* prevFunc = (void*)-1;
+ for (size_t i = 0; i < Samples.size(); ++i) {
+ // print cycle count once in a while to estimate time consumed by
+ // dumping the samples
+ if (PutTimestamps && (i % 1000 == 0)) {
+ ui64 tm = GetCycleCount();
fprintf(out, "TM: %" PRIu64 "\n", tm);
- }
-
- Dl_info addrInfo;
- if (dladdr(Samples[i].first, &addrInfo)) {
- if (addrInfo.dli_fbase != prevModBase || addrInfo.dli_saddr != prevFunc) {
+ }
+
+ Dl_info addrInfo;
+ if (dladdr(Samples[i].first, &addrInfo)) {
+ if (addrInfo.dli_fbase != prevModBase || addrInfo.dli_saddr != prevFunc) {
fprintf(out, "Func\t%" PRISZT "\t%p\t%p\t%s\t%s\n",
funcCnt,
addrInfo.dli_fbase,
addrInfo.dli_saddr,
addrInfo.dli_fname,
addrInfo.dli_sname);
- prevModBase = addrInfo.dli_fbase;
- prevFunc = addrInfo.dli_saddr;
- ++funcCnt;
- }
- } else {
- fprintf(out, "[dladdr failed]\n");
- }
+ prevModBase = addrInfo.dli_fbase;
+ prevFunc = addrInfo.dli_saddr;
+ ++funcCnt;
+ }
+ } else {
+ fprintf(out, "[dladdr failed]\n");
+ }
fprintf(out, "%" PRISZT "\t%p\t%lu\n", i, Samples[i].first, Samples[i].second);
- }
-}
-
+ }
+}
+
TExecutionSampler* TExecutionSampler::SInstance = nullptr;
-
-// Starts capturing execution samples
-void BeginProfiling() {
- TExecutionSampler::Instance()->Start();
-}
-
-// Resets captured execution samples
-void ResetProfile() {
- TExecutionSampler::Instance()->ResetStats();
-}
-
-void DumpRUsage(FILE* out) {
- rusage ru;
- int e = getrusage(RUSAGE_SELF, &ru);
- if (e != 0)
- return;
-
- fprintf(out,
+
+// Starts capturing execution samples
+void BeginProfiling() {
+ TExecutionSampler::Instance()->Start();
+}
+
+// Resets captured execution samples
+void ResetProfile() {
+ TExecutionSampler::Instance()->ResetStats();
+}
+
+void DumpRUsage(FILE* out) {
+ rusage ru;
+ int e = getrusage(RUSAGE_SELF, &ru);
+ if (e != 0)
+ return;
+
+ fprintf(out,
"user time: %lf\n"
"system time: %lf\n"
"max RSS: %ld\n"
@@ -372,46 +372,46 @@ void DumpRUsage(FILE* out) {
ru.ru_msgsnd, ru.ru_msgrcv,
ru.ru_nsignals,
ru.ru_nvcsw, ru.ru_nivcsw);
-}
-
-// Pauses capturing execution samples and dumps them to the file
-// Samples are not cleared so that profiling can be continued by calling BeginProfiling()
-// or it can be started from scratch by calling ResetProfile() and then BeginProfiling()
-void EndProfiling(FILE* out) {
- DumpRUsage(out);
-
- TExecutionSampler::TSampleVector samples;
- TExecutionSampler::TStats stats;
- TExecutionSampler::Instance()->Stop(samples, stats);
-
- TSampleAnalyser analyzer(samples, stats);
- analyzer.Analyze(out);
-}
-
-void EndProfiling() {
- static unsigned cnt = 0;
- char nameBuf[256];
- snprintf(nameBuf, sizeof(nameBuf), "./%s.%d.%u.profile", getprogname(), (int)getpid(), cnt);
- FILE* out = fopen(nameBuf, "a");
- EndProfiling(out);
- fclose(out);
- ++cnt;
-}
-
+}
+
+// Pauses capturing execution samples and dumps them to the file
+// Samples are not cleared so that profiling can be continued by calling BeginProfiling()
+// or it can be started from scratch by calling ResetProfile() and then BeginProfiling()
+void EndProfiling(FILE* out) {
+ DumpRUsage(out);
+
+ TExecutionSampler::TSampleVector samples;
+ TExecutionSampler::TStats stats;
+ TExecutionSampler::Instance()->Stop(samples, stats);
+
+ TSampleAnalyser analyzer(samples, stats);
+ analyzer.Analyze(out);
+}
+
+void EndProfiling() {
+ static unsigned cnt = 0;
+ char nameBuf[256];
+ snprintf(nameBuf, sizeof(nameBuf), "./%s.%d.%u.profile", getprogname(), (int)getpid(), cnt);
+ FILE* out = fopen(nameBuf, "a");
+ EndProfiling(out);
+ fclose(out);
+ ++cnt;
+}
+
#else
-
-// NOTE: not supported on Windows
-
-void BeginProfiling() {
-}
-
-void ResetProfile() {
-}
-
+
+// NOTE: not supported on Windows
+
+void BeginProfiling() {
+}
+
+void ResetProfile() {
+}
+
void EndProfiling(FILE*) {
-}
-
-void EndProfiling() {
-}
-
+}
+
+void EndProfiling() {
+}
+
#endif
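The sampler restored above is driven by SIGPROF delivered from a CPU-time interval timer. The following self-contained C++ sketch reproduces only the setup performed in TExecutionSampler::Start(); the names StartSamplingSketch, OnProf and gTicks are made up, and the handler deliberately does nothing beyond async-signal-safe work, which is why the real handler only records the instruction pointer into a table preallocated at start.

#include <signal.h>
#include <sys/time.h>
#include <atomic>

// Sketch of the setup in TExecutionSampler::Start(): install a SIGPROF handler
// with SA_SIGINFO and arm ITIMER_PROF so the signal arrives roughly every
// intervalUsec microseconds of consumed CPU time.
static std::atomic<unsigned long> gTicks{0};

static void OnProf(int, siginfo_t*, void*) {
    // Only async-signal-safe work is allowed here; the real handler records
    // the instruction pointer into a preallocated hash table.
    gTicks.fetch_add(1, std::memory_order_relaxed);
}

bool StartSamplingSketch(long intervalUsec /* e.g. 1000, like SAMPLE_INTERVAL above */) {
    struct sigaction sa {};
    sa.sa_sigaction = OnProf;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO;
    if (sigaction(SIGPROF, &sa, nullptr) != 0)
        return false;

    itimerval tv {};
    tv.it_interval.tv_usec = intervalUsec;  // period between samples
    tv.it_value.tv_usec = intervalUsec;     // first expiry
    return setitimer(ITIMER_PROF, &tv, nullptr) == 0;
}

Note that ITIMER_PROF counts user plus system CPU time rather than wall-clock time, so threads that are blocked or idle are not sampled.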
diff --git a/library/cpp/execprofile/profile.h b/library/cpp/execprofile/profile.h
index bd90aad3aa1..ccb88666565 100644
--- a/library/cpp/execprofile/profile.h
+++ b/library/cpp/execprofile/profile.h
@@ -1,17 +1,17 @@
#pragma once
-
-#include <stdio.h>
-
-// Starts capturing execution samples
-void BeginProfiling();
-
-// Resets captured execution samples
-void ResetProfile();
-
-// Pauses capturing execution samples and dumps them to the file
-// Samples are not cleared so that profiling can be continued by calling BeginProfiling()
-// or it can be started from scratch by calling ResetProfile() and then BeginProfiling()
-void EndProfiling(FILE* out);
-
-// Dumps the profile to default file (basename.pid.N.profile)
-void EndProfiling();
+
+#include <stdio.h>
+
+// Starts capturing execution samples
+void BeginProfiling();
+
+// Resets captured execution samples
+void ResetProfile();
+
+// Pauses capturing execution samples and dumps them to the file
+// Samples are not cleared so that profiling can be continued by calling BeginProfiling()
+// or it can be started from scratch by calling ResetProfile() and then BeginProfiling()
+void EndProfiling(FILE* out);
+
+// Dumps the profile to default file (basename.pid.N.profile)
+void EndProfiling();
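Typical usage of the interface declared above presumably looks like the following sketch; the include path, output file name and the profiled workload are placeholders.

#include <library/cpp/execprofile/profile.h>  // assumed include path
#include <stdio.h>

int main() {
    BeginProfiling();                        // start SIGPROF sampling

    // ... run the workload to be profiled ...

    FILE* out = fopen("my.profile", "w");    // placeholder output path
    if (out) {
        EndProfiling(out);                   // pause sampling and dump samples
        fclose(out);
    }
    ResetProfile();                          // optional: discard samples before a new run
    return 0;
}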
diff --git a/library/cpp/execprofile/ya.make b/library/cpp/execprofile/ya.make
index ef002f15525..9d202ac4eb2 100644
--- a/library/cpp/execprofile/ya.make
+++ b/library/cpp/execprofile/ya.make
@@ -1,9 +1,9 @@
OWNER(g:cpp-contrib)
LIBRARY()
-
+
SRCS(
- profile.cpp
-)
-
-END()
+ profile.cpp
+)
+
+END()
diff --git a/library/cpp/grpc/server/grpc_request.h b/library/cpp/grpc/server/grpc_request.h
index d034f8e2b7f..5bd8d3902b5 100644
--- a/library/cpp/grpc/server/grpc_request.h
+++ b/library/cpp/grpc/server/grpc_request.h
@@ -59,8 +59,8 @@ public:
TRequestCallback requestCallback,
const char* name,
TLoggerPtr logger,
- ICounterBlockPtr counters,
- IGRpcRequestLimiterPtr limiter)
+ ICounterBlockPtr counters,
+ IGRpcRequestLimiterPtr limiter)
: TBaseAsyncContext<TService>(service, cq)
, Server_(server)
, Cb_(cb)
@@ -69,7 +69,7 @@ public:
, Name_(name)
, Logger_(std::move(logger))
, Counters_(std::move(counters))
- , RequestLimiter_(std::move(limiter))
+ , RequestLimiter_(std::move(limiter))
, Writer_(new grpc::ServerAsyncResponseWriter<TUniversalResponseRef<TOut>>(&this->Context))
, StateFunc_(&TThis::SetRequestDone)
{
@@ -87,8 +87,8 @@ public:
TStreamRequestCallback requestCallback,
const char* name,
TLoggerPtr logger,
- ICounterBlockPtr counters,
- IGRpcRequestLimiterPtr limiter)
+ ICounterBlockPtr counters,
+ IGRpcRequestLimiterPtr limiter)
: TBaseAsyncContext<TService>(service, cq)
, Server_(server)
, Cb_(cb)
@@ -97,7 +97,7 @@ public:
, Name_(name)
, Logger_(std::move(logger))
, Counters_(std::move(counters))
- , RequestLimiter_(std::move(limiter))
+ , RequestLimiter_(std::move(limiter))
, StreamWriter_(new grpc::ServerAsyncWriter<TUniversalResponse<TOut>>(&this->Context))
, StateFunc_(&TThis::SetRequestDone)
{
@@ -363,7 +363,7 @@ private:
return false;
}
- if (IncRequest()) {
+ if (IncRequest()) {
// Adjust counters.
RequestSize = Request_->ByteSize();
Counters_->StartProcessing(RequestSize);
@@ -405,7 +405,7 @@ private:
if (!ok) {
logCb(-1);
- DecRequest();
+ DecRequest();
Counters_->FinishProcessing(RequestSize, ResponseSize, ok, ResponseStatus,
TDuration::Seconds(RequestTimer.Passed()));
return false;
@@ -426,7 +426,7 @@ private:
GRPC_LOG_DEBUG(Logger_, "[%p] finished request Name# %s ok# %s peer# %s", this, Name_,
ok ? "true" : "false", this->Context.peer().c_str());
//PrintBackTrace();
- DecRequest();
+ DecRequest();
Counters_->FinishProcessing(RequestSize, ResponseSize, ok, ResponseStatus,
TDuration::Seconds(RequestTimer.Passed()));
return false;
@@ -436,7 +436,7 @@ private:
GRPC_LOG_DEBUG(Logger_, "[%p] finished request with error Name# %s ok# %s peer# %s", this, Name_,
ok ? "true" : "false", this->Context.peer().c_str());
if (!SkipUpdateCountersOnError) {
- DecRequest();
+ DecRequest();
Counters_->FinishProcessing(RequestSize, ResponseSize, ok, ResponseStatus,
TDuration::Seconds(RequestTimer.Passed()));
}
@@ -457,28 +457,28 @@ private:
}
}
- bool IncRequest() {
- if (!Server_->IncRequest())
- return false;
-
- if (!RequestLimiter_)
- return true;
-
- if (!RequestLimiter_->IncRequest()) {
- Server_->DecRequest();
- return false;
- }
-
- return true;
- }
-
- void DecRequest() {
- if (RequestLimiter_) {
- RequestLimiter_->DecRequest();
- }
- Server_->DecRequest();
- }
-
+ bool IncRequest() {
+ if (!Server_->IncRequest())
+ return false;
+
+ if (!RequestLimiter_)
+ return true;
+
+ if (!RequestLimiter_->IncRequest()) {
+ Server_->DecRequest();
+ return false;
+ }
+
+ return true;
+ }
+
+ void DecRequest() {
+ if (RequestLimiter_) {
+ RequestLimiter_->DecRequest();
+ }
+ Server_->DecRequest();
+ }
+
using TStateFunc = bool (TThis::*)(bool);
TService* Server_;
TOnRequest Cb_;
@@ -487,7 +487,7 @@ private:
const char* const Name_;
TLoggerPtr Logger_;
ICounterBlockPtr Counters_;
- IGRpcRequestLimiterPtr RequestLimiter_;
+ IGRpcRequestLimiterPtr RequestLimiter_;
THolder<grpc::ServerAsyncResponseWriter<TUniversalResponseRef<TOut>>> Writer_;
THolder<grpc::ServerAsyncWriterInterface<TUniversalResponse<TOut>>> StreamWriter_;
@@ -521,8 +521,8 @@ public:
typename TBase::TRequestCallback requestCallback,
const char* name,
TLoggerPtr logger,
- ICounterBlockPtr counters,
- IGRpcRequestLimiterPtr limiter = nullptr)
+ ICounterBlockPtr counters,
+ IGRpcRequestLimiterPtr limiter = nullptr)
: TBase{server, service, cq, std::move(cb), std::move(requestCallback), name, std::move(logger), std::move(counters), std::move(limiter)}
{
}
diff --git a/library/cpp/grpc/server/grpc_request_base.h b/library/cpp/grpc/server/grpc_request_base.h
index 2be84158c7b..fcfce1c181a 100644
--- a/library/cpp/grpc/server/grpc_request_base.h
+++ b/library/cpp/grpc/server/grpc_request_base.h
@@ -28,16 +28,16 @@ struct TAuthState {
EAuthState State;
};
-
-//! An interface that may be used to limit concurrency of requests
+
+//! An interface that may be used to limit concurrency of requests
class IGRpcRequestLimiter: public TThrRefBase {
-public:
- virtual bool IncRequest() = 0;
- virtual void DecRequest() = 0;
-};
-
-using IGRpcRequestLimiterPtr = TIntrusivePtr<IGRpcRequestLimiter>;
-
+public:
+ virtual bool IncRequest() = 0;
+ virtual void DecRequest() = 0;
+};
+
+using IGRpcRequestLimiterPtr = TIntrusivePtr<IGRpcRequestLimiter>;
+
//! State of current request
class IRequestContextBase: public TThrRefBase {
public:
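A concrete limiter derives from the IGRpcRequestLimiter interface shown above and implements IncRequest/DecRequest. The sketch below caps concurrent requests at a fixed number; the class name, the cap, the include paths and the omitted namespace qualification are assumptions, not part of the library.

#include <library/cpp/grpc/server/grpc_request_base.h>  // assumed include path
#include <util/generic/ptr.h>
#include <atomic>

// Hypothetical limiter capping concurrent requests at a fixed number.
// Only the IncRequest/DecRequest contract from the interface above is relied upon.
class TFixedLimiter: public IGRpcRequestLimiter {
public:
    explicit TFixedLimiter(long maxInFlight)
        : MaxInFlight_(maxInFlight)
    {}

    bool IncRequest() override {
        if (InFlight_.fetch_add(1) + 1 > MaxInFlight_) {
            InFlight_.fetch_sub(1);  // over the cap: undo and refuse the request
            return false;
        }
        return true;
    }

    void DecRequest() override {
        InFlight_.fetch_sub(1);
    }

private:
    const long MaxInFlight_;
    std::atomic<long> InFlight_{0};
};

// IGRpcRequestLimiterPtr limiter = MakeIntrusive<TFixedLimiter>(128);

Such a pointer would then be passed as the limiter argument restored in grpc_request.h above, where IncRequest rolls back the server-wide slot if the per-service limiter refuses.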
diff --git a/library/cpp/grpc/server/grpc_server.h b/library/cpp/grpc/server/grpc_server.h
index 4ce67a1b68c..d6814a90a0d 100644
--- a/library/cpp/grpc/server/grpc_server.h
+++ b/library/cpp/grpc/server/grpc_server.h
@@ -1,8 +1,8 @@
#pragma once
-#include "grpc_request_base.h"
+#include "grpc_request_base.h"
#include "logger.h"
-
+
#include <library/cpp/threading/future/future.h>
#include <util/generic/ptr.h>
@@ -123,10 +123,10 @@ public:
virtual ~ICancelableContext() = default;
};
-template <class TLimit>
-class TInFlightLimiterImpl {
+template <class TLimit>
+class TInFlightLimiterImpl {
public:
- explicit TInFlightLimiterImpl(const TLimit& limit)
+ explicit TInFlightLimiterImpl(const TLimit& limit)
: Limit_(limit)
{}
@@ -154,13 +154,13 @@ public:
}
private:
- const TLimit Limit_;
+ const TLimit Limit_;
TAtomic CurInFlightReqs_ = 0;
};
-using TGlobalLimiter = TInFlightLimiterImpl<i64>;
-
-
+using TGlobalLimiter = TInFlightLimiterImpl<i64>;
+
+
class IGRpcService: public TThrRefBase {
public:
virtual grpc::Service* GetService() = 0;
diff --git a/library/cpp/lfalloc/alloc_profiler/profiler.cpp b/library/cpp/lfalloc/alloc_profiler/profiler.cpp
index beb0ffb289b..0e30927a5a2 100644
--- a/library/cpp/lfalloc/alloc_profiler/profiler.cpp
+++ b/library/cpp/lfalloc/alloc_profiler/profiler.cpp
@@ -8,7 +8,7 @@
#include <util/generic/vector.h>
#include <util/stream/str.h>
-namespace NAllocProfiler {
+namespace NAllocProfiler {
namespace {
@@ -50,32 +50,32 @@ void DeallocationCallback(int stackId, int tag, size_t size, int sizeIdx)
////////////////////////////////////////////////////////////////////////////////
-bool StartAllocationSampling(bool profileAllThreads)
+bool StartAllocationSampling(bool profileAllThreads)
{
auto& collector = AllocationStackCollector();
collector.Clear();
- NAllocDbg::SetProfileAllThreads(profileAllThreads);
+ NAllocDbg::SetProfileAllThreads(profileAllThreads);
NAllocDbg::SetAllocationCallback(AllocationCallback);
NAllocDbg::SetDeallocationCallback(DeallocationCallback);
NAllocDbg::SetAllocationSamplingEnabled(true);
return true;
}
-bool StopAllocationSampling(IAllocationStatsDumper &out, int count)
+bool StopAllocationSampling(IAllocationStatsDumper &out, int count)
{
NAllocDbg::SetAllocationCallback(nullptr);
NAllocDbg::SetDeallocationCallback(nullptr);
NAllocDbg::SetAllocationSamplingEnabled(false);
auto& collector = AllocationStackCollector();
- collector.Dump(count, out);
+ collector.Dump(count, out);
return true;
}
-bool StopAllocationSampling(IOutputStream& out, int count) {
- TAllocationStatsDumper dumper(out);
- return StopAllocationSampling(dumper, count);
+bool StopAllocationSampling(IOutputStream& out, int count) {
+ TAllocationStatsDumper dumper(out);
+ return StopAllocationSampling(dumper, count);
}
} // namespace NProfiler
diff --git a/library/cpp/lfalloc/alloc_profiler/profiler.h b/library/cpp/lfalloc/alloc_profiler/profiler.h
index 592849b460e..4ea49b9dcc8 100644
--- a/library/cpp/lfalloc/alloc_profiler/profiler.h
+++ b/library/cpp/lfalloc/alloc_profiler/profiler.h
@@ -1,13 +1,13 @@
#pragma once
-#include "stackcollect.h"
+#include "stackcollect.h"
#include <library/cpp/lfalloc/dbg_info/dbg_info.h>
#include <util/generic/noncopyable.h>
#include <util/stream/output.h>
-namespace NAllocProfiler {
+namespace NAllocProfiler {
////////////////////////////////////////////////////////////////////////////////
@@ -21,9 +21,9 @@ inline bool SetProfileCurrentThread(bool value)
return NAllocDbg::SetProfileCurrentThread(value);
}
-bool StartAllocationSampling(bool profileAllThreads = false);
-bool StopAllocationSampling(IAllocationStatsDumper& out, int count = 100);
-bool StopAllocationSampling(IOutputStream& out, int count = 100);
+bool StartAllocationSampling(bool profileAllThreads = false);
+bool StopAllocationSampling(IAllocationStatsDumper& out, int count = 100);
+bool StopAllocationSampling(IOutputStream& out, int count = 100);
////////////////////////////////////////////////////////////////////////////////
@@ -42,4 +42,4 @@ public:
}
};
-} // namespace NAllocProfiler
+} // namespace NAllocProfiler
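Beyond the unit test that follows, the declarations above suggest the simplest use is to bracket an allocation-heavy region and dump the most allocating stacks into any IOutputStream. A sketch with an arbitrary file name and stack count; the include path is assumed.

#include <library/cpp/lfalloc/alloc_profiler/profiler.h>  // assumed include path
#include <util/stream/file.h>

void ProfileRegionSketch() {
    NAllocProfiler::StartAllocationSampling(/*profileAllThreads=*/true);

    // ... allocation-heavy code under investigation ...

    TFileOutput out("allocs.txt");                              // any IOutputStream works
    NAllocProfiler::StopAllocationSampling(out, /*count=*/50);  // dump the top 50 stacks
}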
diff --git a/library/cpp/lfalloc/alloc_profiler/profiler_ut.cpp b/library/cpp/lfalloc/alloc_profiler/profiler_ut.cpp
index 21b667e730b..4341dda6ed9 100644
--- a/library/cpp/lfalloc/alloc_profiler/profiler_ut.cpp
+++ b/library/cpp/lfalloc/alloc_profiler/profiler_ut.cpp
@@ -1,76 +1,76 @@
-#include "profiler.h"
-
+#include "profiler.h"
+
#include <library/cpp/testing/unittest/registar.h>
-
-namespace NAllocProfiler {
-
-////////////////////////////////////////////////////////////////////////////////
-
-Y_UNIT_TEST_SUITE(Profiler) {
- Y_UNIT_TEST(StackCollection)
- {
- TStringStream str;
-
- NAllocProfiler::StartAllocationSampling(true);
- TVector<TAutoPtr<int>> test;
- // Do many allocations and no deallocations
- for (int i = 0; i < 10000; ++i) {
- test.push_back(new int);
- }
- NAllocProfiler::StopAllocationSampling(str);
- //Cout << str.Str() << Endl;
-
-#if !defined(ARCH_AARCH64)
- /* Check that output resembles this:
-
- STACK #2: 0 Allocs: 10 Frees: 0 CurrentSize: 40
- 0000000000492353 ??
- 000000000048781F operator new(unsigned long) +1807
- 00000000003733FA NAllocProfiler::NTestSuiteProfiler::TTestCaseStackCollection::Execute_(NUnitTest::TTestContext&) +218
- 00000000004A1938 NUnitTest::TTestBase::Run(std::__y1::function<void ()>, TString, char const*, bool) +120
- 0000000000375656 NAllocProfiler::NTestSuiteProfiler::TCurrentTest::Execute() +342
- 00000000004A20CF NUnitTest::TTestFactory::Execute() +847
- 000000000049922D NUnitTest::RunMain(int, char**) +1965
- 00007FF665778F45 __libc_start_main +245
- */
-
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "StackCollection");
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::TTestBase::Run");
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NAllocProfiler::NTestSuiteProfiler::TCurrentTest::Execute");
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::TTestFactory::Execute");
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::RunMain");
-#endif
- }
-
- class TAllocDumper : public NAllocProfiler::TAllocationStatsDumper {
- public:
- explicit TAllocDumper(IOutputStream& out) : NAllocProfiler::TAllocationStatsDumper(out) {}
-
- TString FormatTag(int tag) override {
- UNIT_ASSERT_VALUES_EQUAL(tag, 42);
- return "TAG_NAME_42";
- }
- };
-
- Y_UNIT_TEST(TagNames)
- {
- TStringStream str;
-
- NAllocProfiler::StartAllocationSampling(true);
- TVector<TAutoPtr<int>> test;
- NAllocProfiler::TProfilingScope scope(42);
- // Do many allocations and no deallocations
- for (int i = 0; i < 10000; ++i) {
- test.push_back(new int);
- }
-
- TAllocDumper dumper(str);
- NAllocProfiler::StopAllocationSampling(dumper);
-
-#if !defined(ARCH_AARCH64)
- UNIT_ASSERT_STRING_CONTAINS(str.Str(), "TAG_NAME_42");
-#endif
- }
-}
-
-}
+
+namespace NAllocProfiler {
+
+////////////////////////////////////////////////////////////////////////////////
+
+Y_UNIT_TEST_SUITE(Profiler) {
+ Y_UNIT_TEST(StackCollection)
+ {
+ TStringStream str;
+
+ NAllocProfiler::StartAllocationSampling(true);
+ TVector<TAutoPtr<int>> test;
+ // Do many allocations and no deallocations
+ for (int i = 0; i < 10000; ++i) {
+ test.push_back(new int);
+ }
+ NAllocProfiler::StopAllocationSampling(str);
+ //Cout << str.Str() << Endl;
+
+#if !defined(ARCH_AARCH64)
+ /* Check that output resembles this:
+
+ STACK #2: 0 Allocs: 10 Frees: 0 CurrentSize: 40
+ 0000000000492353 ??
+ 000000000048781F operator new(unsigned long) +1807
+ 00000000003733FA NAllocProfiler::NTestSuiteProfiler::TTestCaseStackCollection::Execute_(NUnitTest::TTestContext&) +218
+ 00000000004A1938 NUnitTest::TTestBase::Run(std::__y1::function<void ()>, TString, char const*, bool) +120
+ 0000000000375656 NAllocProfiler::NTestSuiteProfiler::TCurrentTest::Execute() +342
+ 00000000004A20CF NUnitTest::TTestFactory::Execute() +847
+ 000000000049922D NUnitTest::RunMain(int, char**) +1965
+ 00007FF665778F45 __libc_start_main +245
+ */
+
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "StackCollection");
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::TTestBase::Run");
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NAllocProfiler::NTestSuiteProfiler::TCurrentTest::Execute");
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::TTestFactory::Execute");
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "NUnitTest::RunMain");
+#endif
+ }
+
+ class TAllocDumper : public NAllocProfiler::TAllocationStatsDumper {
+ public:
+ explicit TAllocDumper(IOutputStream& out) : NAllocProfiler::TAllocationStatsDumper(out) {}
+
+ TString FormatTag(int tag) override {
+ UNIT_ASSERT_VALUES_EQUAL(tag, 42);
+ return "TAG_NAME_42";
+ }
+ };
+
+ Y_UNIT_TEST(TagNames)
+ {
+ TStringStream str;
+
+ NAllocProfiler::StartAllocationSampling(true);
+ TVector<TAutoPtr<int>> test;
+ NAllocProfiler::TProfilingScope scope(42);
+ // Do many allocations and no deallocations
+ for (int i = 0; i < 10000; ++i) {
+ test.push_back(new int);
+ }
+
+ TAllocDumper dumper(str);
+ NAllocProfiler::StopAllocationSampling(dumper);
+
+#if !defined(ARCH_AARCH64)
+ UNIT_ASSERT_STRING_CONTAINS(str.Str(), "TAG_NAME_42");
+#endif
+ }
+}
+
+}
diff --git a/library/cpp/lfalloc/alloc_profiler/stackcollect.cpp b/library/cpp/lfalloc/alloc_profiler/stackcollect.cpp
index d608803e847..fded4e2fd1a 100644
--- a/library/cpp/lfalloc/alloc_profiler/stackcollect.cpp
+++ b/library/cpp/lfalloc/alloc_profiler/stackcollect.cpp
@@ -5,16 +5,16 @@
#include <util/generic/algorithm.h>
#include <util/generic/vector.h>
#include <util/stream/format.h>
-#include <util/stream/str.h>
-#include <util/string/cast.h>
-#include <util/string/printf.h>
+#include <util/stream/str.h>
+#include <util/string/cast.h>
+#include <util/string/printf.h>
#include <util/system/backtrace.h>
#include <util/system/spinlock.h>
#include <util/system/yassert.h>
-namespace NAllocProfiler {
-
+namespace NAllocProfiler {
+
////////////////////////////////////////////////////////////////////////////////
template <typename T>
@@ -87,11 +87,11 @@ public:
return Y_ARRAY_SIZE(Frames);
}
- void BackTrace(const TFrameInfo* stack, TStackVec<void*, 64>& frames) const
+ void BackTrace(const TFrameInfo* stack, TStackVec<void*, 64>& frames) const
{
- frames.clear();
+ frames.clear();
for (size_t i = 0; i < 100; ++i) {
- frames.push_back(stack->Addr);
+ frames.push_back(stack->Addr);
int prevInd = stack->PrevInd;
if (prevInd == -1) {
break;
@@ -174,11 +174,11 @@ private:
////////////////////////////////////////////////////////////////////////////////
-class TAllocationStackCollector::TImpl: public TStackCollector<TStats> {
- using TBase = TStackCollector<TStats>;
+class TAllocationStackCollector::TImpl: public TStackCollector<TStats> {
+ using TBase = TStackCollector<TStats>;
private:
- TStats Total;
+ TStats Total;
public:
int Alloc(void** stack, size_t frameCount, int tag, size_t size)
@@ -203,7 +203,7 @@ public:
Total.Clear();
}
- void Dump(int count, IAllocationStatsDumper& out) const
+ void Dump(int count, IAllocationStatsDumper& out) const
{
const TFrameInfo* frames = TBase::GetFrames();
size_t framesCount = TBase::GetFramesCount();
@@ -225,18 +225,18 @@ public:
: ls.Frees > rs.Frees;
});
- out.DumpTotal(Total);
+ out.DumpTotal(Total);
- TAllocationInfo allocInfo;
+ TAllocationInfo allocInfo;
int printedCount = 0;
for (const TFrameInfo* stack: stacks) {
- allocInfo.Clear();
- allocInfo.Tag = stack->Tag;
- allocInfo.Stats = stack->Stats;
- TBase::BackTrace(stack, allocInfo.Stack);
+ allocInfo.Clear();
+ allocInfo.Tag = stack->Tag;
+ allocInfo.Stats = stack->Stats;
+ TBase::BackTrace(stack, allocInfo.Stack);
+
+ out.DumpEntry(allocInfo);
- out.DumpEntry(allocInfo);
-
if (++printedCount >= count) {
break;
}
@@ -268,65 +268,65 @@ void TAllocationStackCollector::Clear()
Impl->Clear();
}
-void TAllocationStackCollector::Dump(int count, IAllocationStatsDumper &out) const
+void TAllocationStackCollector::Dump(int count, IAllocationStatsDumper &out) const
{
Impl->Dump(count, out);
}
-
-TString IAllocationStatsDumper::FormatTag(int tag) {
- return ToString(tag);
-}
-
-TString IAllocationStatsDumper::FormatSize(intptr_t sz) {
- return ToString(sz);
-}
-
-
-TAllocationStatsDumper::TAllocationStatsDumper(IOutputStream& out)
- : PrintedCount(0)
- , Out(out)
- , SymbolCache(2048)
-{}
-
-void TAllocationStatsDumper::DumpTotal(const TStats& total) {
- Out << "TOTAL"
- << "\tAllocs: " << total.Allocs
- << "\tFrees: " << total.Frees
- << "\tCurrentSize: " << FormatSize(total.CurrentSize)
- << Endl;
-}
-
-void TAllocationStatsDumper::DumpEntry(const TAllocationInfo& allocInfo) {
- Out << Endl
- << "STACK #" << PrintedCount+1 << ": " << FormatTag(allocInfo.Tag)
- << "\tAllocs: " << allocInfo.Stats.Allocs
- << "\tFrees: " << allocInfo.Stats.Frees
- << "\tCurrentSize: " << FormatSize(allocInfo.Stats.CurrentSize)
- << Endl;
- FormatBackTrace(allocInfo.Stack.data(), allocInfo.Stack.size());
- PrintedCount++;
-}
-
-void TAllocationStatsDumper::FormatBackTrace(void* const* stack, size_t sz) {
- char name[1024];
- for (size_t i = 0; i < sz; ++i) {
- TSymbol symbol;
- auto it = SymbolCache.Find(stack[i]);
- if (it != SymbolCache.End()) {
- symbol = it.Value();
- } else {
- TResolvedSymbol rs = ResolveSymbol(stack[i], name, sizeof(name));
- symbol = {rs.NearestSymbol, rs.Name};
- SymbolCache.Insert(stack[i], symbol);
- }
-
- Out << Hex((intptr_t)stack[i], HF_FULL) << "\t" << symbol.Name;
- intptr_t offset = (intptr_t)stack[i] - (intptr_t)symbol.Address;
- if (offset)
- Out << " +" << offset;
- Out << Endl;
- }
-}
-
-} // namespace NAllocProfiler
+
+TString IAllocationStatsDumper::FormatTag(int tag) {
+ return ToString(tag);
+}
+
+TString IAllocationStatsDumper::FormatSize(intptr_t sz) {
+ return ToString(sz);
+}
+
+
+TAllocationStatsDumper::TAllocationStatsDumper(IOutputStream& out)
+ : PrintedCount(0)
+ , Out(out)
+ , SymbolCache(2048)
+{}
+
+void TAllocationStatsDumper::DumpTotal(const TStats& total) {
+ Out << "TOTAL"
+ << "\tAllocs: " << total.Allocs
+ << "\tFrees: " << total.Frees
+ << "\tCurrentSize: " << FormatSize(total.CurrentSize)
+ << Endl;
+}
+
+void TAllocationStatsDumper::DumpEntry(const TAllocationInfo& allocInfo) {
+ Out << Endl
+ << "STACK #" << PrintedCount+1 << ": " << FormatTag(allocInfo.Tag)
+ << "\tAllocs: " << allocInfo.Stats.Allocs
+ << "\tFrees: " << allocInfo.Stats.Frees
+ << "\tCurrentSize: " << FormatSize(allocInfo.Stats.CurrentSize)
+ << Endl;
+ FormatBackTrace(allocInfo.Stack.data(), allocInfo.Stack.size());
+ PrintedCount++;
+}
+
+void TAllocationStatsDumper::FormatBackTrace(void* const* stack, size_t sz) {
+ char name[1024];
+ for (size_t i = 0; i < sz; ++i) {
+ TSymbol symbol;
+ auto it = SymbolCache.Find(stack[i]);
+ if (it != SymbolCache.End()) {
+ symbol = it.Value();
+ } else {
+ TResolvedSymbol rs = ResolveSymbol(stack[i], name, sizeof(name));
+ symbol = {rs.NearestSymbol, rs.Name};
+ SymbolCache.Insert(stack[i], symbol);
+ }
+
+ Out << Hex((intptr_t)stack[i], HF_FULL) << "\t" << symbol.Name;
+ intptr_t offset = (intptr_t)stack[i] - (intptr_t)symbol.Address;
+ if (offset)
+ Out << " +" << offset;
+ Out << Endl;
+ }
+}
+
+} // namespace NAllocProfiler
diff --git a/library/cpp/lfalloc/alloc_profiler/stackcollect.h b/library/cpp/lfalloc/alloc_profiler/stackcollect.h
index 7c10cd2ffdb..80715ed7cb5 100644
--- a/library/cpp/lfalloc/alloc_profiler/stackcollect.h
+++ b/library/cpp/lfalloc/alloc_profiler/stackcollect.h
@@ -2,89 +2,89 @@
#include <library/cpp/containers/stack_vector/stack_vec.h>
#include <library/cpp/cache/cache.h>
-
+
#include <util/generic/noncopyable.h>
#include <util/generic/ptr.h>
#include <util/stream/output.h>
-namespace NAllocProfiler {
-
-struct TStats {
- intptr_t Allocs = 0;
- intptr_t Frees = 0;
- intptr_t CurrentSize = 0;
-
- void Clear()
- {
- Allocs = 0;
- Frees = 0;
- CurrentSize = 0;
- }
-
- void Alloc(size_t size)
- {
- AtomicIncrement(Allocs);
- AtomicAdd(CurrentSize, size);
- }
-
- void Free(size_t size)
- {
- AtomicIncrement(Frees);
- AtomicSub(CurrentSize, size);
- }
-};
-
-struct TAllocationInfo {
- int Tag;
- TStats Stats;
- TStackVec<void*, 64> Stack;
-
- void Clear() {
- Tag = 0;
- Stats.Clear();
- Stack.clear();
- }
-};
-
-
-class IAllocationStatsDumper {
-public:
- virtual ~IAllocationStatsDumper() = default;
-
- // Total stats
- virtual void DumpTotal(const TStats& total) = 0;
-
- // Stats for individual stack
- virtual void DumpEntry(const TAllocationInfo& allocInfo) = 0;
-
- // App-specific tag printer
- virtual TString FormatTag(int tag);
-
- // Size printer (e.g. "10KB", "100MB", "over 9000")
- virtual TString FormatSize(intptr_t sz);
-};
-
-// Default implementation
-class TAllocationStatsDumper: public IAllocationStatsDumper {
-public:
- explicit TAllocationStatsDumper(IOutputStream& out);
- void DumpTotal(const TStats& total) override;
- void DumpEntry(const TAllocationInfo& allocInfo) override;
-
-private:
- void FormatBackTrace(void* const* stack, size_t sz);
-
-private:
- struct TSymbol {
- const void* Address;
- TString Name;
- };
-
- size_t PrintedCount;
- IOutputStream& Out;
- TLFUCache<void*, TSymbol> SymbolCache;
-};
-
+namespace NAllocProfiler {
+
+struct TStats {
+ intptr_t Allocs = 0;
+ intptr_t Frees = 0;
+ intptr_t CurrentSize = 0;
+
+ void Clear()
+ {
+ Allocs = 0;
+ Frees = 0;
+ CurrentSize = 0;
+ }
+
+ void Alloc(size_t size)
+ {
+ AtomicIncrement(Allocs);
+ AtomicAdd(CurrentSize, size);
+ }
+
+ void Free(size_t size)
+ {
+ AtomicIncrement(Frees);
+ AtomicSub(CurrentSize, size);
+ }
+};
+
+struct TAllocationInfo {
+ int Tag;
+ TStats Stats;
+ TStackVec<void*, 64> Stack;
+
+ void Clear() {
+ Tag = 0;
+ Stats.Clear();
+ Stack.clear();
+ }
+};
+
+
+class IAllocationStatsDumper {
+public:
+ virtual ~IAllocationStatsDumper() = default;
+
+ // Total stats
+ virtual void DumpTotal(const TStats& total) = 0;
+
+ // Stats for individual stack
+ virtual void DumpEntry(const TAllocationInfo& allocInfo) = 0;
+
+ // App-specific tag printer
+ virtual TString FormatTag(int tag);
+
+ // Size printer (e.g. "10KB", "100MB", "over 9000")
+ virtual TString FormatSize(intptr_t sz);
+};
+
+// Default implementation
+class TAllocationStatsDumper: public IAllocationStatsDumper {
+public:
+ explicit TAllocationStatsDumper(IOutputStream& out);
+ void DumpTotal(const TStats& total) override;
+ void DumpEntry(const TAllocationInfo& allocInfo) override;
+
+private:
+ void FormatBackTrace(void* const* stack, size_t sz);
+
+private:
+ struct TSymbol {
+ const void* Address;
+ TString Name;
+ };
+
+ size_t PrintedCount;
+ IOutputStream& Out;
+ TLFUCache<void*, TSymbol> SymbolCache;
+};
+
////////////////////////////////////////////////////////////////////////////////
class TAllocationStackCollector: private TNonCopyable {
@@ -101,7 +101,7 @@ public:
void Clear();
- void Dump(int count, IAllocationStatsDumper& out) const;
+ void Dump(int count, IAllocationStatsDumper& out) const;
};
-} // namespace NAllocProfiler
+} // namespace NAllocProfiler
diff --git a/library/cpp/lfalloc/alloc_profiler/ut/ya.make b/library/cpp/lfalloc/alloc_profiler/ut/ya.make
index c90a1278d53..8a7daa74af6 100644
--- a/library/cpp/lfalloc/alloc_profiler/ut/ya.make
+++ b/library/cpp/lfalloc/alloc_profiler/ut/ya.make
@@ -1,22 +1,22 @@
UNITTEST_FOR(library/cpp/lfalloc/alloc_profiler)
-
-OWNER(g:rtmr g:kikimr)
-
-PEERDIR(
+
+OWNER(g:rtmr g:kikimr)
+
+PEERDIR(
library/cpp/testing/unittest
-)
-
-IF (ARCH_AARCH64)
- PEERDIR(
- contrib/libs/jemalloc
- )
-ELSE()
- ALLOCATOR(LF_DBG)
-ENDIF()
-
-SRCS(
- profiler_ut.cpp
+)
+
+IF (ARCH_AARCH64)
+ PEERDIR(
+ contrib/libs/jemalloc
+ )
+ELSE()
+ ALLOCATOR(LF_DBG)
+ENDIF()
+
+SRCS(
+ profiler_ut.cpp
align_ut.cpp
-)
-
-END()
+)
+
+END()
diff --git a/library/cpp/lfalloc/alloc_profiler/ya.make b/library/cpp/lfalloc/alloc_profiler/ya.make
index dd1bfb09186..0f58d917678 100644
--- a/library/cpp/lfalloc/alloc_profiler/ya.make
+++ b/library/cpp/lfalloc/alloc_profiler/ya.make
@@ -1,17 +1,17 @@
-LIBRARY()
-
-OWNER(g:rtmr g:kikimr)
-
-SRCS(
- profiler.cpp
- stackcollect.cpp
-)
-
-PEERDIR(
+LIBRARY()
+
+OWNER(g:rtmr g:kikimr)
+
+SRCS(
+ profiler.cpp
+ stackcollect.cpp
+)
+
+PEERDIR(
library/cpp/lfalloc/dbg_info
library/cpp/cache
-)
-
-END()
-
-RECURSE(ut)
+)
+
+END()
+
+RECURSE(ut)
diff --git a/library/cpp/lfalloc/dbg_info/dbg_info.cpp b/library/cpp/lfalloc/dbg_info/dbg_info.cpp
index 7667e444a28..1fb9f7ad933 100644
--- a/library/cpp/lfalloc/dbg_info/dbg_info.cpp
+++ b/library/cpp/lfalloc/dbg_info/dbg_info.cpp
@@ -15,7 +15,7 @@ namespace NAllocDbg {
int& numSizes);
using TSetProfileCurrentThread = bool(bool newVal);
- using TSetProfileAllThreads = bool(bool newVal);
+ using TSetProfileAllThreads = bool(bool newVal);
using TSetAllocationSamplingEnabled = bool(bool newVal);
using TSetAllocationSampleRate = size_t(size_t newVal);
@@ -32,7 +32,7 @@ namespace NAllocDbg {
TGetPerTagAllocInfo* GetPerTagAllocInfo = nullptr;
TSetProfileCurrentThread* SetProfileCurrentThread = nullptr;
- TSetProfileAllThreads* SetProfileAllThreads = nullptr;
+ TSetProfileAllThreads* SetProfileAllThreads = nullptr;
TSetAllocationSamplingEnabled* SetAllocationSamplingEnabled = nullptr;
TSetAllocationSampleRate* SetAllocationSampleRate = nullptr;
@@ -51,7 +51,7 @@ namespace NAllocDbg {
GetPerTagAllocInfo = (TGetPerTagAllocInfo*)mallocInfo.GetParam("GetPerTagAllocInfo");
SetProfileCurrentThread = (TSetProfileCurrentThread*)mallocInfo.GetParam("SetProfileCurrentThread");
- SetProfileAllThreads = (TSetProfileAllThreads*)mallocInfo.GetParam("SetProfileAllThreads");
+ SetProfileAllThreads = (TSetProfileAllThreads*)mallocInfo.GetParam("SetProfileAllThreads");
SetAllocationSamplingEnabled = (TSetAllocationSamplingEnabled*)mallocInfo.GetParam("SetAllocationSamplingEnabled");
SetAllocationSampleRate = (TSetAllocationSampleRate*)mallocInfo.GetParam("SetAllocationSampleRate");
@@ -97,10 +97,10 @@ namespace NAllocDbg {
return AllocFn.SetProfileCurrentThread ? AllocFn.SetProfileCurrentThread(newVal) : false;
}
- bool SetProfileAllThreads(bool newVal) {
- return AllocFn.SetProfileAllThreads ? AllocFn.SetProfileAllThreads(newVal) : false;
- }
-
+ bool SetProfileAllThreads(bool newVal) {
+ return AllocFn.SetProfileAllThreads ? AllocFn.SetProfileAllThreads(newVal) : false;
+ }
+
bool SetAllocationSamplingEnabled(bool newVal) {
return AllocFn.SetAllocationSamplingEnabled ? AllocFn.SetAllocationSamplingEnabled(newVal) : false;
}
diff --git a/library/cpp/lfalloc/dbg_info/dbg_info.h b/library/cpp/lfalloc/dbg_info/dbg_info.h
index 8c6ead7180a..071562a81ab 100644
--- a/library/cpp/lfalloc/dbg_info/dbg_info.h
+++ b/library/cpp/lfalloc/dbg_info/dbg_info.h
@@ -1,6 +1,6 @@
#pragma once
-#include <util/generic/ptr.h>
+#include <util/generic/ptr.h>
#include <util/system/types.h>
namespace NAllocDbg {
@@ -60,7 +60,7 @@ namespace NAllocDbg {
// Allocation sampling could be used to collect detailed information
bool SetProfileCurrentThread(bool newVal);
- bool SetProfileAllThreads(bool newVal);
+ bool SetProfileAllThreads(bool newVal);
bool SetAllocationSamplingEnabled(bool newVal);
size_t SetAllocationSampleRate(size_t newVal);
diff --git a/library/cpp/lfalloc/lf_allocX64.h b/library/cpp/lfalloc/lf_allocX64.h
index 161d42243e1..fd2a906d6ff 100644
--- a/library/cpp/lfalloc/lf_allocX64.h
+++ b/library/cpp/lfalloc/lf_allocX64.h
@@ -128,11 +128,11 @@ static bool TransparentHugePages = false; // force MADV_HUGEPAGE for large alloc
static bool MapHugeTLB = false; // force MAP_HUGETLB for small allocs
static bool EnableDefrag = true;
-// Buffers that are larger than this size will not be filled with 0xcf
-#ifndef DBG_FILL_MAX_SIZE
-#define DBG_FILL_MAX_SIZE 0x01000000000000ULL
-#endif
-
+// Buffers that are larger than this size will not be filled with 0xcf
+#ifndef DBG_FILL_MAX_SIZE
+#define DBG_FILL_MAX_SIZE 0x01000000000000ULL
+#endif
+
template <class T>
inline T* DoCas(T* volatile* target, T* exchange, T* compare) {
#if defined(__has_builtin) && __has_builtin(__sync_val_compare_and_swap)
@@ -304,7 +304,7 @@ enum EMMapMode {
#ifndef _MSC_VER
inline void VerifyMmapResult(void* result) {
if (Y_UNLIKELY(result == MAP_FAILED))
- NMalloc::AbortFromCorruptedAllocator("negative size requested? or just out of mem");
+ NMalloc::AbortFromCorruptedAllocator("negative size requested? or just out of mem");
}
#endif
@@ -337,7 +337,7 @@ static char* AllocWithMMapLinuxImpl(uintptr_t sz, EMMapMode mode) {
char* nextAllocPtr = prevAllocPtr + sz;
if (uintptr_t(nextAllocPtr - (char*)nullptr) >= areaFinish) {
if (Y_UNLIKELY(wrapped)) {
- NMalloc::AbortFromCorruptedAllocator("virtual memory is over fragmented");
+ NMalloc::AbortFromCorruptedAllocator("virtual memory is over fragmented");
}
// wrap after all area is used
DoCas(areaPtr, areaStart, prevAllocPtr);
@@ -368,15 +368,15 @@ static char* AllocWithMMap(uintptr_t sz, EMMapMode mode) {
#ifdef _MSC_VER
char* largeBlock = (char*)VirtualAlloc(0, sz, MEM_RESERVE, PAGE_READWRITE);
if (Y_UNLIKELY(largeBlock == nullptr))
- NMalloc::AbortFromCorruptedAllocator("out of memory");
+ NMalloc::AbortFromCorruptedAllocator("out of memory");
if (Y_UNLIKELY(uintptr_t(((char*)largeBlock - ALLOC_START) + sz) >= N_MAX_WORKSET_SIZE))
- NMalloc::AbortFromCorruptedAllocator("out of working set, something has broken");
+ NMalloc::AbortFromCorruptedAllocator("out of working set, something has broken");
#else
#if defined(_freebsd_) || !defined(_64_)
char* largeBlock = (char*)mmap(0, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
VerifyMmapResult(largeBlock);
if (Y_UNLIKELY(uintptr_t(((char*)largeBlock - ALLOC_START) + sz) >= N_MAX_WORKSET_SIZE))
- NMalloc::AbortFromCorruptedAllocator("out of working set, something has broken");
+ NMalloc::AbortFromCorruptedAllocator("out of working set, something has broken");
#else
char* largeBlock = AllocWithMMapLinuxImpl(sz, mode);
if (TransparentHugePages) {
@@ -453,7 +453,7 @@ static void* LargeBlockAlloc(size_t _nSize, ELFAllocCounter counter) {
#ifdef _MSC_VER
char* pRes = (char*)VirtualAlloc(0, (pgCount + 1) * 4096ll, MEM_COMMIT, PAGE_READWRITE);
if (Y_UNLIKELY(pRes == 0)) {
- NMalloc::AbortFromCorruptedAllocator("out of memory");
+ NMalloc::AbortFromCorruptedAllocator("out of memory");
}
#else
@@ -784,7 +784,7 @@ static bool DefragmentMem() {
int* nFreeCount = (int*)SystemAlloc(N_CHUNKS * sizeof(int));
if (Y_UNLIKELY(!nFreeCount)) {
//__debugbreak();
- NMalloc::AbortFromCorruptedAllocator("debugbreak");
+ NMalloc::AbortFromCorruptedAllocator("debugbreak");
}
memset(nFreeCount, 0, N_CHUNKS * sizeof(int));
@@ -1004,7 +1004,7 @@ static Y_FORCE_INLINE void PutBlocksToGlobalFreeList(ptrdiff_t nSizeIdx, char**
//////////////////////////////////////////////////////////////////////////
static TAtomic GlobalCounters[CT_MAX];
const int MAX_LOCAL_UPDATES = 100;
-const intptr_t MAX_LOCAL_DELTA = 1*1024*1024;
+const intptr_t MAX_LOCAL_DELTA = 1*1024*1024;
struct TLocalCounter {
intptr_t Value;
@@ -1019,7 +1019,7 @@ struct TLocalCounter {
Y_FORCE_INLINE void Increment(size_t value) {
Value += value;
- if (++Updates > MAX_LOCAL_UPDATES || Value > MAX_LOCAL_DELTA) {
+ if (++Updates > MAX_LOCAL_UPDATES || Value > MAX_LOCAL_DELTA) {
Flush();
}
}
@@ -1344,13 +1344,13 @@ extern "C" bool SetProfileCurrentThread(bool newVal) {
return prevVal;
}
-static volatile bool ProfileAllThreads;
-extern "C" bool SetProfileAllThreads(bool newVal) {
- bool prevVal = ProfileAllThreads;
- ProfileAllThreads = newVal;
- return prevVal;
-}
-
+static volatile bool ProfileAllThreads;
+extern "C" bool SetProfileAllThreads(bool newVal) {
+ bool prevVal = ProfileAllThreads;
+ ProfileAllThreads = newVal;
+ return prevVal;
+}
+
static volatile bool AllocationSamplingEnabled;
extern "C" bool SetAllocationSamplingEnabled(bool newVal) {
bool prevVal = AllocationSamplingEnabled;
@@ -1394,7 +1394,7 @@ PERTHREAD bool InAllocationCallback;
static const int DBG_ALLOC_INVALID_COOKIE = -1;
static inline int SampleAllocation(TAllocHeader* p, int sizeIdx) {
int cookie = DBG_ALLOC_INVALID_COOKIE;
- if (AllocationSamplingEnabled && (ProfileCurrentThread || ProfileAllThreads) && !InAllocationCallback) {
+ if (AllocationSamplingEnabled && (ProfileCurrentThread || ProfileAllThreads) && !InAllocationCallback) {
if (p->Size > AllocationSampleMaxSize || ++AllocationsCount % AllocationSampleRate == 0) {
if (AllocationCallback) {
InAllocationCallback = true;
@@ -1556,7 +1556,7 @@ static Y_FORCE_INLINE void* LFAllocImpl(size_t _nSize) {
if (count == 0) {
count = LFAllocNoCacheMultiple(nSizeIdx, buf);
if (count == 0) {
- NMalloc::AbortFromCorruptedAllocator("no way LFAllocNoCacheMultiple() can fail");
+ NMalloc::AbortFromCorruptedAllocator("no way LFAllocNoCacheMultiple() can fail");
}
}
char** dstBuf = thr->FreePtrs[nSizeIdx] + freePtrIdx - 1;
@@ -1773,7 +1773,7 @@ static void DumpMemoryBlockUtilizationLocked() {
nBadPages += page == 3;
nTotalPages += page != 1;
}
- DebugTraceMMgr("entry = %lld; size = %lld; free = %lld; system %lld; utilisation = %g%%, fragmentation = %g%%\n",
+ DebugTraceMMgr("entry = %lld; size = %lld; free = %lld; system %lld; utilisation = %g%%, fragmentation = %g%%\n",
k, nSize, cs.FreeCount * nSize, csGB.FreeCount * nSize,
(N_CHUNK_SIZE - cs.FreeCount * nSize) * 100.0f / N_CHUNK_SIZE, 100.0f * nBadPages / Y_ARRAY_SIZE(pages));
nTotalAllocated += N_CHUNK_SIZE;
@@ -1781,10 +1781,10 @@ static void DumpMemoryBlockUtilizationLocked() {
nTotalBadPages += nBadPages;
}
SystemFree(entries);
- DebugTraceMMgr("Total allocated = %llu, free = %lld, system = %lld, locked for future use %lld, utilisation = %g, fragmentation = %g\n",
+ DebugTraceMMgr("Total allocated = %llu, free = %lld, system = %lld, locked for future use %lld, utilisation = %g, fragmentation = %g\n",
nTotalAllocated, nTotalFree, nTotalGroupBlocks, nTotalLocked,
100.0f * (nTotalAllocated - nTotalFree) / nTotalAllocated, 100.0f * nTotalBadPages / nTotalPages);
- DebugTraceMMgr("Total %lld bytes used, %lld bytes in used pages\n", nTotalUsed, nTotalPages * N_PAGE_SIZE);
+ DebugTraceMMgr("Total %lld bytes used, %lld bytes in used pages\n", nTotalUsed, nTotalPages * N_PAGE_SIZE);
for (int nSizeIdx = 0; nSizeIdx < N_SIZES; ++nSizeIdx)
globalFreeLists[nSizeIdx].ReturnWholeList(wholeLists[nSizeIdx]);
@@ -1850,7 +1850,7 @@ static const char* LFAlloc_GetParam(const char* param) {
#if defined(LFALLOC_DBG)
{"SetThreadAllocTag", (const char*)&SetThreadAllocTag},
{"SetProfileCurrentThread", (const char*)&SetProfileCurrentThread},
- {"SetProfileAllThreads", (const char*)&SetProfileAllThreads},
+ {"SetProfileAllThreads", (const char*)&SetProfileAllThreads},
{"SetAllocationSamplingEnabled", (const char*)&SetAllocationSamplingEnabled},
{"SetAllocationSampleRate", (const char*)&SetAllocationSampleRate},
{"SetAllocationSampleMaxSize", (const char*)&SetAllocationSampleMaxSize},
@@ -1870,11 +1870,11 @@ static const char* LFAlloc_GetParam(const char* param) {
static Y_FORCE_INLINE int LFPosixMemalign(void** memptr, size_t alignment, size_t size) {
if (Y_UNLIKELY(alignment > 4096)) {
- const char* error = "Larger alignment are not guaranteed with this implementation\n";
+ const char* error = "Larger alignment are not guaranteed with this implementation\n";
#ifdef _win_
- OutputDebugStringA(error);
+ OutputDebugStringA(error);
#endif
- NMalloc::AbortFromCorruptedAllocator(error);
+ NMalloc::AbortFromCorruptedAllocator(error);
}
size_t bigsize = size;
if (bigsize <= alignment) {
diff --git a/library/cpp/lwtrace/mon/mon_lwtrace.cpp b/library/cpp/lwtrace/mon/mon_lwtrace.cpp
index e9b8bb11a2b..a61ee9ce22d 100644
--- a/library/cpp/lwtrace/mon/mon_lwtrace.cpp
+++ b/library/cpp/lwtrace/mon/mon_lwtrace.cpp
@@ -1,5 +1,5 @@
-#include "mon_lwtrace.h"
-
+#include "mon_lwtrace.h"
+
#include <algorithm>
#include <iterator>
@@ -15,21 +15,21 @@
#include <util/string/escape.h>
#include <util/system/condvar.h>
#include <util/system/execpath.h>
-#include <util/system/hostname.h>
-
-using namespace NMonitoring;
-
+#include <util/system/hostname.h>
+
+using namespace NMonitoring;
+
#define WWW_CHECK(cond, ...) \
- do { \
- if (!(cond)) { \
- ythrow yexception() << Sprintf(__VA_ARGS__); \
- } \
- } while (false) \
- /**/
-
+ do { \
+ if (!(cond)) { \
+ ythrow yexception() << Sprintf(__VA_ARGS__); \
+ } \
+ } while (false) \
+ /**/
+
#define WWW_HTML_INNER(out) HTML(out) WITH_SCOPED(tmp, TScopedHtmlInner(out))
#define WWW_HTML(out) out << NMonitoring::HTTPOKHTML; WWW_HTML_INNER(out)
-
+
namespace NLwTraceMonPage {
struct TTrackLogRefs {
@@ -1537,35 +1537,35 @@ void SeriesSelectors(TStringStream& ss, const TCgiParameters& e,
}
}
-class TProbesHtmlPrinter {
-private:
+class TProbesHtmlPrinter {
+private:
TVector<TVector<TString>> TableData;
static constexpr int TimeoutSec = 15 * 60; // default timeout
-public:
- void Push(const NLWTrace::TProbe* probe)
- {
+public:
+ void Push(const NLWTrace::TProbe* probe)
+ {
TableData.emplace_back();
auto& row = TableData.back();
row.emplace_back();
TString& groups = row.back();
- bool first = true;
+ bool first = true;
for (const char* const* i = probe->Event.Groups; *i != nullptr; ++i, first = false) {
groups.append(TString(first? "": ", ") + GroupHtml(*i));
- }
+ }
row.push_back(ProbeHtml(probe->Event.GetProvider(), probe->Event.Name));
row.emplace_back();
TString& params = row.back();
- first = true;
- for (size_t i = 0; i < probe->Event.Signature.ParamCount; i++, first = false) {
+ first = true;
+ for (size_t i = 0; i < probe->Event.Signature.ParamCount; i++, first = false) {
params.append(TString(first? "": ", ") + probe->Event.Signature.ParamTypes[i]
+ " " + probe->Event.Signature.ParamNames[i]);
- }
+ }
row.emplace_back(ToString(probe->GetExecutorsCount()));
- }
+ }
void Output(IOutputStream& os)
{
@@ -1647,8 +1647,8 @@ private:
"</div>";
return ss.Str();
}
-};
-
+};
+
void TDashboardRegistry::Register(const NLWTrace::TDashboard& dashboard) {
TGuard<TMutex> g(Mutex);
Dashboards[dashboard.GetName()] = dashboard;
@@ -1835,27 +1835,27 @@ public:
}
};
-class TTracesHtmlPrinter {
-private:
+class TTracesHtmlPrinter {
+private:
IOutputStream& Os;
TInstant Now;
-public:
+public:
explicit TTracesHtmlPrinter(IOutputStream& os)
- : Os(os)
+ : Os(os)
, Now(TInstant::Now())
- {}
-
+ {}
+
void Push(ILogSource* src)
- {
+ {
TString id = src->GetId();
- Os << "<tr>";
- Os << "<td>";
- try {
+ Os << "<tr>";
+ Os << "<td>";
+ try {
Os << src->GetStartTime().ToStringUpToSeconds();
- } catch (...) {
- Os << "error: " << CurrentExceptionMessage();
- }
- Os << "</td>"
+ } catch (...) {
+ Os << "error: " << CurrentExceptionMessage();
+ }
+ Os << "</td>"
<< "<td><div class=\"dropdown\">"
"<a href=\"#\" data-toggle=\"dropdown\">" << TimeoutToString(src->GetTimeout(Now)) << "</a>"
"<ul class=\"dropdown-menu\">"
@@ -1867,12 +1867,12 @@ public:
"<li><a href=\"#\" onClick=\"$.redirectPost('?mode=settimeout&ui=y', {id:'" << id << "'});\">no timeout</a></li>"
"</ul>"
"</div></td>"
- << "<td>" << EncodeHtmlPcdata(id) << "</td>"
+ << "<td>" << EncodeHtmlPcdata(id) << "</td>"
<< "<td>" << src->GetEventsCount() << "</td>"
<< "<td>" << src->GetThreadsCount() << "</td>"
- << "<td><a href=\"?mode=log&id=" << id << "\">Text</a></td>"
- << "<td><a href=\"?mode=log&format=json&id=" << id << "\">Json</a></td>"
- << "<td><a href=\"?mode=query&id=" << id << "\">Query</a></td>"
+ << "<td><a href=\"?mode=log&id=" << id << "\">Text</a></td>"
+ << "<td><a href=\"?mode=log&format=json&id=" << id << "\">Json</a></td>"
+ << "<td><a href=\"?mode=query&id=" << id << "\">Query</a></td>"
<< "<td><a href=\"?mode=analytics&id=" << id << "\">Analytics</a></td>"
<< "<td><div class=\"dropdown navbar-right\">" // navbar-right is hack to drop left
"<a href=\"#\" data-toggle=\"dropdown\">Modify</a>"
@@ -1881,8 +1881,8 @@ public:
"<li><a href=\"#\" onClick=\"$.redirectPost('?mode=delete&ui=y', {id:'" << id << "'});\">Delete</a></li>"
"</ul>"
"</div></td>"
- << "</tr>\n";
- }
+ << "</tr>\n";
+ }
private:
static TString TimeoutToString(TDuration d)
{
@@ -1916,8 +1916,8 @@ private:
}
return ss.Str();
}
-};
-
+};
+
class TTracesLister {
private:
TVariants& Variants;
@@ -2004,8 +2004,8 @@ private:
}
};
-class TLogFilter {
-private:
+class TLogFilter {
+private:
struct TFilter {
TString ParamName;
TString ParamValue;
@@ -2042,31 +2042,31 @@ private:
THashSet<const NLWTrace::TSignature*> Signatures; // Just to list param names
TVariants ParamNames;
THashMap<TString, THashSet<TString>> FilteredParamValues; // paramName -> { paramValue }
-public:
+public:
explicit TLogFilter(const TVector<TString>& filters)
- {
+ {
for (const TString& subvalue : filters) {
TFilter filter(subvalue);
FilteredParamValues[filter.ParamName]; // just create empty set to gather values later
if (filter.Parsed) {
Filters.push_back(filter);
}
- }
- }
-
+ }
+ }
+
virtual ~TLogFilter() {}
template <class TLog>
bool Filter(const TLog& log)
- {
+ {
Gather(log);
for (const TFilter& filter : Filters) {
if (filter.Query.ExecuteQuery(log) != filter.Value) {
return false;
- }
- }
+ }
+ }
return true;
- }
+ }
void FilterSelectors(TStringStream& ss, const TCgiParameters& e, const TString& fparam)
{
@@ -2155,8 +2155,8 @@ private:
Sort(result.begin(), result.end());
return result;
}
-};
-
+};
+
static void EscapeJSONString(IOutputStream& os, const TString& s)
{
for (TString::const_iterator i = s.begin(), e = s.end(); i != e; ++i) {
@@ -2180,72 +2180,72 @@ static TString EscapeJSONString(const TString& s)
return ss.Str();
}
-class TLogJsonPrinter {
-private:
+class TLogJsonPrinter {
+private:
IOutputStream& Os;
- bool FirstThread;
- bool FirstItem;
-public:
+ bool FirstThread;
+ bool FirstItem;
+public:
explicit TLogJsonPrinter(IOutputStream& os)
- : Os(os)
- , FirstThread(true)
- , FirstItem(true)
- {}
-
- void OutputHeader()
- {
- Os << "{\n\t\"source\": \"" << HostName() << "\""
- "\n\t, \"items\": ["
- ;
- }
-
+ : Os(os)
+ , FirstThread(true)
+ , FirstItem(true)
+ {}
+
+ void OutputHeader()
+ {
+ Os << "{\n\t\"source\": \"" << HostName() << "\""
+ "\n\t, \"items\": ["
+ ;
+ }
+
void OutputFooter(const NLWTrace::TSession* trace)
- {
- Os << "\n\t\t]"
- "\n\t, \"threads\": ["
- ;
+ {
+ Os << "\n\t\t]"
+ "\n\t, \"threads\": ["
+ ;
trace->ReadThreads(*this);
- Os << "]"
+ Os << "]"
"\n\t, \"events_count\": " << trace->GetEventsCount() <<
"\n\t, \"threads_count\": " << trace->GetThreadsCount() <<
- "\n\t, \"timestamp\": " << Now().GetValue() <<
- "\n}"
- ;
- }
-
- void PushThread(TThread::TId tid)
- {
- Os << (FirstThread? "": ", ") << tid;
- FirstThread = false;
- }
-
+ "\n\t, \"timestamp\": " << Now().GetValue() <<
+ "\n}"
+ ;
+ }
+
+ void PushThread(TThread::TId tid)
+ {
+ Os << (FirstThread? "": ", ") << tid;
+ FirstThread = false;
+ }
+
void Push(TThread::TId tid, const NLWTrace::TLogItem& item)
- {
- Os << "\n\t\t" << (FirstItem? "": ", ");
- FirstItem = false;
-
- Os << "[" << tid <<
- ", " << item.Timestamp.GetValue() <<
- ", \"" << item.Probe->Event.GetProvider() << "\""
- ", \"" << item.Probe->Event.Name << "\""
- ", {"
- ;
- if (item.SavedParamsCount > 0) {
+ {
+ Os << "\n\t\t" << (FirstItem? "": ", ");
+ FirstItem = false;
+
+ Os << "[" << tid <<
+ ", " << item.Timestamp.GetValue() <<
+ ", \"" << item.Probe->Event.GetProvider() << "\""
+ ", \"" << item.Probe->Event.Name << "\""
+ ", {"
+ ;
+ if (item.SavedParamsCount > 0) {
TString ParamValues[LWTRACE_MAX_PARAMS];
- item.Probe->Event.Signature.SerializeParams(item.Params, ParamValues);
- bool first = true;
- for (size_t i = 0; i < item.SavedParamsCount; i++, first = false) {
- Os << (first? "": ", ") << "\"" << item.Probe->Event.Signature.ParamNames[i] << "\": \"";
- EscapeJSONString(Os, ParamValues[i]);
- Os << "\"";
- }
- }
- Os << "}]";
- }
-};
-
-class TLogTextPrinter : public TLogFilter {
-private:
+ item.Probe->Event.Signature.SerializeParams(item.Params, ParamValues);
+ bool first = true;
+ for (size_t i = 0; i < item.SavedParamsCount; i++, first = false) {
+ Os << (first? "": ", ") << "\"" << item.Probe->Event.Signature.ParamNames[i] << "\": \"";
+ EscapeJSONString(Os, ParamValues[i]);
+ Os << "\"";
+ }
+ }
+ Os << "}]";
+ }
+};
+
+class TLogTextPrinter : public TLogFilter {
+private:
TMultiMap<NLWTrace::TTypedParam, std::pair<TThread::TId, NLWTrace::TLogItem> > Items;
TMultiMap<NLWTrace::TTypedParam, NLWTrace::TTrackLog> Depot;
THashMap<NLWTrace::TProbe*, size_t> ProbeId;
@@ -2256,7 +2256,7 @@ private:
ui64 Head = 0;
ui64 Tail = 0;
bool ShowTs = false;
-public:
+public:
TLogTextPrinter(const TVector<TString>& filters, ui64 head, ui64 tail, const TString& order, bool reverseOrder, bool cutTs, bool showTs)
: TLogFilter(filters)
, CutTs(cutTs)
@@ -2284,11 +2284,11 @@ public:
};
void Output(IOutputStream& os) const
- {
+ {
OutputItems<Text>(os);
OutputDepot<Text>(os);
- }
-
+ }
+
void OutputJson(IOutputStream& os) const
{
os << "{\"depot\":[\n";
@@ -2315,13 +2315,13 @@ public:
}
void Push(TThread::TId tid, const NLWTrace::TLogItem& item)
- {
+ {
CutTs.Push(tid, item);
- if (Filter(item)) {
+ if (Filter(item)) {
AddId(item);
Items.emplace(GetKey(item), std::make_pair(tid, item));
- }
- }
+ }
+ }
void Push(TThread::TId tid, const NLWTrace::TTrackLog& tl)
{
@@ -2332,7 +2332,7 @@ public:
}
}
-private:
+private:
void AddId(const NLWTrace::TLogItem& item)
{
if (ProbeId.find(item.Probe) == ProbeId.end()) {
@@ -2412,7 +2412,7 @@ private:
template <EFormat Format, bool AsTrack = false>
void OutputItem(IOutputStream& os, TThread::TId tid, const NLWTrace::TLogItem& item, ui64 startTs, ui64 prevTs, bool& first) const
- {
+ {
if (CutTs.Skip(item)) {
return;
}
@@ -2427,7 +2427,7 @@ private:
}
if (tid) {
os << "<" << tid << "> ";
- }
+ }
if (item.Timestamp != TInstant::Zero()) {
os << "[" << item.Timestamp << "] ";
} else {
@@ -2464,9 +2464,9 @@ private:
}
os << "}]" << (AsTrack? "]":"");
}
- }
+ }
first = false;
- }
+ }
template <EFormat Format>
void OutputTrackLog(IOutputStream& os, const NLWTrace::TTrackLog& tl, bool& first) const
@@ -2486,8 +2486,8 @@ private:
}
os << "\n";
}
-};
-
+};
+
class TLogAnalyzer: public TLogFilter {
private:
TMultiMap<ui64, std::pair<TThread::TId, NLWTrace::TLogItem>> Items;
@@ -3773,52 +3773,52 @@ NLWTrace::TManager g_UnsafeManager(g_Probes, true);
TDashboardRegistry g_DashboardRegistry;
class TLWTraceMonPage : public NMonitoring::IMonPage {
-private:
+private:
NLWTrace::TManager* TraceMngr;
TString StartTime;
TTraceCleaner Cleaner;
TMutex SnapshotsMtx;
THashMap<TString, TAtomicSharedPtr<NLWTrace::TLogPb>> Snapshots;
-public:
+public:
explicit TLWTraceMonPage(bool allowUnsafe = false)
- : NMonitoring::IMonPage("trace", "Tracing")
+ : NMonitoring::IMonPage("trace", "Tracing")
, TraceMngr(&TraceManager(allowUnsafe))
, Cleaner(TraceMngr)
{
time_t stime = TInstant::Now().TimeT();
StartTime = CTimeR(&stime);
}
-
+
virtual void Output(NMonitoring::IMonHttpRequest& request) {
TStringStream out;
- try {
+ try {
if (request.GetParams().Get("mode") == "") {
OutputTracesAndSnapshots(request, out);
- } else if (request.GetParams().Get("mode") == "probes") {
+ } else if (request.GetParams().Get("mode") == "probes") {
OutputProbes(request, out);
} else if (request.GetParams().Get("mode") == "dashboards") {
OutputDashboards(request, out);
} else if (request.GetParams().Get("mode") == "dashboard") {
OutputDashboard(request, out);
- } else if (request.GetParams().Get("mode") == "log") {
+ } else if (request.GetParams().Get("mode") == "log") {
OutputLog(request, out);
- } else if (request.GetParams().Get("mode") == "query") {
+ } else if (request.GetParams().Get("mode") == "query") {
OutputQuery(request, out);
} else if (request.GetParams().Get("mode") == "builder") {
OutputBuilder(request, out);
} else if (request.GetParams().Get("mode") == "analytics") {
OutputAnalytics(request, out);
- } else if (request.GetParams().Get("mode") == "new") {
+ } else if (request.GetParams().Get("mode") == "new") {
PostNew(request, out);
- } else if (request.GetParams().Get("mode") == "delete") {
+ } else if (request.GetParams().Get("mode") == "delete") {
PostDelete(request, out);
} else if (request.GetParams().Get("mode") == "make_snapshot") {
PostSnapshot(request, out);
} else if (request.GetParams().Get("mode") == "settimeout") {
PostSetTimeout(request, out);
- } else {
- ythrow yexception() << "Bad request";
- }
+ } else {
+ ythrow yexception() << "Bad request";
+ }
} catch (TPageGenBase& gen) {
out.Clear();
out << gen.what();
@@ -4063,8 +4063,8 @@ private:
WWW_HTML(out) {
out << "<h2>Trace Query: " << id << "</h2><pre>" << queryStr;
}
- }
- }
+ }
+ }
void OutputBuilder(const NMonitoring::IMonHttpRequest& request, IOutputStream& out)
{
@@ -4669,8 +4669,8 @@ private:
}
return false;
}
-};
-
+};
+
void RegisterPages(NMonitoring::TMonService2* mon, bool allowUnsafe) {
THolder<NLwTraceMonPage::TLWTraceMonPage> p = MakeHolder<NLwTraceMonPage::TLWTraceMonPage>(allowUnsafe);
mon->Register(p.Release());
@@ -4706,7 +4706,7 @@ void RegisterPages(NMonitoring::TMonService2* mon, bool allowUnsafe) {
WWW_STATIC_FILE("lwtrace/mon/static/js/jquery.treegrid.min.js", JAVASCRIPT);
WWW_STATIC_FILE("lwtrace/mon/static/js/jquery.url.min.js", JAVASCRIPT);
#undef WWW_STATIC_FILE
-}
+}
NLWTrace::TProbeRegistry& ProbeRegistry() {
return g_Probes;
diff --git a/library/cpp/lwtrace/mon/mon_lwtrace.h b/library/cpp/lwtrace/mon/mon_lwtrace.h
index 2d5c4d38549..8030f6ea616 100644
--- a/library/cpp/lwtrace/mon/mon_lwtrace.h
+++ b/library/cpp/lwtrace/mon/mon_lwtrace.h
@@ -1,9 +1,9 @@
-#pragma once
+#pragma once
#include <library/cpp/lwtrace/protos/lwtrace.pb.h>
#include <library/cpp/monlib/service/monservice.h>
#include <library/cpp/lwtrace/control.h>
-
+
#include <util/generic/vector.h>
namespace NLwTraceMonPage {
diff --git a/library/cpp/lwtrace/protos/ya.make b/library/cpp/lwtrace/protos/ya.make
index 9262af6154f..503d5e515fd 100644
--- a/library/cpp/lwtrace/protos/ya.make
+++ b/library/cpp/lwtrace/protos/ya.make
@@ -1,11 +1,11 @@
PROTO_LIBRARY()
-
+
OWNER(serxa)
INCLUDE_TAGS(GO_PROTO)
-SRCS(
+SRCS(
lwtrace.proto
-)
-
-END()
+)
+
+END()
diff --git a/library/cpp/lwtrace/ya.make b/library/cpp/lwtrace/ya.make
index 746c1314fd5..d9accb30062 100644
--- a/library/cpp/lwtrace/ya.make
+++ b/library/cpp/lwtrace/ya.make
@@ -1,12 +1,12 @@
-LIBRARY()
-
+LIBRARY()
+
OWNER(serxa)
PEERDIR(
library/cpp/lwtrace/protos
)
-SRCS(
+SRCS(
check.cpp
control.cpp
custom_action.cpp
@@ -20,9 +20,9 @@ SRCS(
stderr_writer.cpp
symbol.cpp
trace.cpp
-)
-
-END()
+)
+
+END()
RECURSE(mon)
diff --git a/library/cpp/malloc/api/malloc.cpp b/library/cpp/malloc/api/malloc.cpp
index 9bda35814fb..eed1c58a383 100644
--- a/library/cpp/malloc/api/malloc.cpp
+++ b/library/cpp/malloc/api/malloc.cpp
@@ -1,5 +1,5 @@
#include <stdlib.h>
-#include <stdio.h>
+#include <stdio.h>
#include "malloc.h"
@@ -28,9 +28,9 @@ namespace NMalloc {
{
}
- void AbortFromCorruptedAllocator(const char* errorMessage) {
- errorMessage = errorMessage ? errorMessage : "<unspecified>";
- fprintf(stderr, "Allocator error: %s\n", errorMessage);
+ void AbortFromCorruptedAllocator(const char* errorMessage) {
+ errorMessage = errorMessage ? errorMessage : "<unspecified>";
+ fprintf(stderr, "Allocator error: %s\n", errorMessage);
IsAllocatorCorrupted = true;
abort();
}
diff --git a/library/cpp/malloc/api/malloc.h b/library/cpp/malloc/api/malloc.h
index d0d6e0c8d47..ebd545d6dd9 100644
--- a/library/cpp/malloc/api/malloc.h
+++ b/library/cpp/malloc/api/malloc.h
@@ -16,7 +16,7 @@ namespace NMalloc {
};
extern volatile bool IsAllocatorCorrupted;
- void AbortFromCorruptedAllocator(const char* errorMessage = nullptr);
+ void AbortFromCorruptedAllocator(const char* errorMessage = nullptr);
// this function should be implemented by malloc implementations
TMallocInfo MallocInfo();
diff --git a/library/cpp/messagebus/actor/executor.cpp b/library/cpp/messagebus/actor/executor.cpp
index b58e07f4bd7..7a2227a4589 100644
--- a/library/cpp/messagebus/actor/executor.cpp
+++ b/library/cpp/messagebus/actor/executor.cpp
@@ -18,7 +18,7 @@ using namespace NActor::NPrivate;
namespace {
struct THistoryInternal {
struct TRecord {
- TAtomic MaxQueueSize;
+ TAtomic MaxQueueSize;
TRecord()
: MaxQueueSize()
@@ -27,7 +27,7 @@ namespace {
TExecutorHistory::THistoryRecord Capture() {
TExecutorHistory::THistoryRecord r;
- r.MaxQueueSize = AtomicGet(MaxQueueSize);
+ r.MaxQueueSize = AtomicGet(MaxQueueSize);
return r;
}
};
@@ -237,14 +237,14 @@ size_t TExecutor::GetWorkQueueSize() const {
return WorkItems.Size();
}
-using namespace NTSAN;
-
+using namespace NTSAN;
+
ui32 TExecutor::GetMaxQueueSizeAndClear() const {
ui32 max = 0;
for (unsigned i = 0; i < WorkerThreads.size(); ++i) {
- TExecutorWorkerThreadLocalData* wtls = RelaxedLoad(&WorkerThreads[i]->ThreadLocalData);
- max = Max<ui32>(max, RelaxedLoad(&wtls->MaxQueueSize));
- RelaxedStore<ui32>(&wtls->MaxQueueSize, 0);
+ TExecutorWorkerThreadLocalData* wtls = RelaxedLoad(&WorkerThreads[i]->ThreadLocalData);
+ max = Max<ui32>(max, RelaxedLoad(&wtls->MaxQueueSize));
+ RelaxedStore<ui32>(&wtls->MaxQueueSize, 0);
}
return max;
}
@@ -269,7 +269,7 @@ TExecutorStatus TExecutor::GetStatusRecordInternal() const {
ss << "work items: " << GetWorkQueueSize() << "\n";
ss << "workers:\n";
for (unsigned i = 0; i < WorkerThreads.size(); ++i) {
- ss << "-- " << AtomicGet(*AtomicGet(WorkerThreads[i]->WhatThreadDoesLocation)) << "\n";
+ ss << "-- " << AtomicGet(*AtomicGet(WorkerThreads[i]->WhatThreadDoesLocation)) << "\n";
}
r.Status = ss.Str();
}
@@ -299,8 +299,8 @@ TAutoPtr<IWorkItem> TExecutor::DequeueWork() {
auto& wtls = TlsRef(WorkerThreadLocalData);
- if (queueSize > RelaxedLoad(&wtls.MaxQueueSize)) {
- RelaxedStore<ui32>(&wtls.MaxQueueSize, queueSize);
+ if (queueSize > RelaxedLoad(&wtls.MaxQueueSize)) {
+ RelaxedStore<ui32>(&wtls.MaxQueueSize, queueSize);
}
return wi;
diff --git a/library/cpp/messagebus/actor/thread_extra.h b/library/cpp/messagebus/actor/thread_extra.h
index e4f37a97604..b5aa1516185 100644
--- a/library/cpp/messagebus/actor/thread_extra.h
+++ b/library/cpp/messagebus/actor/thread_extra.h
@@ -2,28 +2,28 @@
#include <util/thread/singleton.h>
-namespace NTSAN {
+namespace NTSAN {
template <typename T>
inline void RelaxedStore(volatile T* a, T x) {
static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-#ifdef _win_
+#ifdef _win_
*a = x;
-#else
+#else
__atomic_store_n(a, x, __ATOMIC_RELAXED);
-#endif
+#endif
}
-
+
template <typename T>
inline T RelaxedLoad(volatile T* a) {
-#ifdef _win_
+#ifdef _win_
return *a;
-#else
+#else
return __atomic_load_n(a, __ATOMIC_RELAXED);
-#endif
+#endif
}
-
-}
-
+
+}
+
void SetCurrentThreadName(const char* name);
namespace NThreadExtra {
diff --git a/library/cpp/messagebus/actor/what_thread_does.cpp b/library/cpp/messagebus/actor/what_thread_does.cpp
index bce5ccd15e4..bebb6a888c2 100644
--- a/library/cpp/messagebus/actor/what_thread_does.cpp
+++ b/library/cpp/messagebus/actor/what_thread_does.cpp
@@ -1,6 +1,6 @@
#include "what_thread_does.h"
-#include "thread_extra.h"
+#include "thread_extra.h"
#include <util/system/tls.h>
@@ -8,13 +8,13 @@ Y_POD_STATIC_THREAD(const char*)
WhatThreadDoes;
const char* PushWhatThreadDoes(const char* what) {
- const char* r = NTSAN::RelaxedLoad(&WhatThreadDoes);
- NTSAN::RelaxedStore(&WhatThreadDoes, what);
+ const char* r = NTSAN::RelaxedLoad(&WhatThreadDoes);
+ NTSAN::RelaxedStore(&WhatThreadDoes, what);
return r;
}
void PopWhatThreadDoes(const char* prev) {
- NTSAN::RelaxedStore(&WhatThreadDoes, prev);
+ NTSAN::RelaxedStore(&WhatThreadDoes, prev);
}
const char** WhatThreadDoesLocation() {
diff --git a/library/cpp/messagebus/config/defs.h b/library/cpp/messagebus/config/defs.h
index 5db4ef4dae0..92b1df99698 100644
--- a/library/cpp/messagebus/config/defs.h
+++ b/library/cpp/messagebus/config/defs.h
@@ -70,7 +70,7 @@ namespace NBus {
inline bool IsBusKeyValid(TBusKey key) {
return key != YBUS_KEYINVALID && key != YBUS_KEYMAX && key > YBUS_KEYLOCAL;
}
-
+
#define YBUS_VERSION 0
#define YBUS_INFINITE (1u << 30u)
diff --git a/library/cpp/messagebus/latch.h b/library/cpp/messagebus/latch.h
index 3677ee7c299..373f4c0e13b 100644
--- a/library/cpp/messagebus/latch.h
+++ b/library/cpp/messagebus/latch.h
@@ -23,7 +23,7 @@ public:
}
TGuard<TMutex> guard(Mutex);
- while (AtomicGet(Locked) == 1) {
+ while (AtomicGet(Locked) == 1) {
CondVar.WaitI(Mutex);
}
}
@@ -39,7 +39,7 @@ public:
}
TGuard<TMutex> guard(Mutex);
- AtomicSet(Locked, 0);
+ AtomicSet(Locked, 0);
CondVar.BroadCast();
}
diff --git a/library/cpp/messagebus/local_tasks.h b/library/cpp/messagebus/local_tasks.h
index c92d197ca59..d8e801a4572 100644
--- a/library/cpp/messagebus/local_tasks.h
+++ b/library/cpp/messagebus/local_tasks.h
@@ -1,23 +1,23 @@
#pragma once
-#include <util/system/atomic.h>
-
+#include <util/system/atomic.h>
+
class TLocalTasks {
private:
- TAtomic GotTasks;
+ TAtomic GotTasks;
public:
TLocalTasks()
- : GotTasks(0)
+ : GotTasks(0)
{
}
void AddTask() {
- AtomicSet(GotTasks, 1);
+ AtomicSet(GotTasks, 1);
}
bool FetchTask() {
- bool gotTasks = AtomicCas(&GotTasks, 0, 1);
+ bool gotTasks = AtomicCas(&GotTasks, 0, 1);
return gotTasks;
}
};
diff --git a/library/cpp/messagebus/message.h b/library/cpp/messagebus/message.h
index bf57f13ddec..005ca10c652 100644
--- a/library/cpp/messagebus/message.h
+++ b/library/cpp/messagebus/message.h
@@ -155,7 +155,7 @@ namespace NBus {
inline bool IsVersionNegotiation(const NBus::TBusHeader& header) {
return header.Id == 0 && header.Size == sizeof(TBusHeader);
}
-
+
//////////////////////////////////////////////////////////
/// \brief Base class for all messages passed in the system
diff --git a/library/cpp/messagebus/oldmodule/module.cpp b/library/cpp/messagebus/oldmodule/module.cpp
index a322a2366f0..24bd778799f 100644
--- a/library/cpp/messagebus/oldmodule/module.cpp
+++ b/library/cpp/messagebus/oldmodule/module.cpp
@@ -777,7 +777,7 @@ void TBusModuleImpl::DestroyJob(TJobRunner* job) {
Y_VERIFY(jobCount >= 0, "decremented too much");
Jobs.erase(job->JobStorageIterator);
- if (AtomicGet(State) == STOPPED) {
+ if (AtomicGet(State) == STOPPED) {
if (jobCount == 0) {
ShutdownCondVar.BroadCast();
}
@@ -804,11 +804,11 @@ void TBusModuleImpl::OnMessageReceived(TAutoPtr<TBusMessage> msg0, TOnMessageCon
}
void TBusModuleImpl::Shutdown() {
- if (AtomicGet(State) != TBusModuleImpl::RUNNING) {
- AtomicSet(State, TBusModuleImpl::STOPPED);
+ if (AtomicGet(State) != TBusModuleImpl::RUNNING) {
+ AtomicSet(State, TBusModuleImpl::STOPPED);
return;
}
- AtomicSet(State, TBusModuleImpl::STOPPED);
+ AtomicSet(State, TBusModuleImpl::STOPPED);
for (auto& clientSession : ClientSessions) {
clientSession->Shutdown();
diff --git a/library/cpp/messagebus/protobuf/ybusbuf.cpp b/library/cpp/messagebus/protobuf/ybusbuf.cpp
index 90ff132942d..63415b3737f 100644
--- a/library/cpp/messagebus/protobuf/ybusbuf.cpp
+++ b/library/cpp/messagebus/protobuf/ybusbuf.cpp
@@ -75,12 +75,12 @@ TAutoPtr<TBusMessage> TBusBufferProtocol::Deserialize(ui16 messageType, TArrayRe
// clone the base
TAutoPtr<TBusBufferBase> bmess = messageTemplate->New();
- // Need to override protobuf message size limit
- // NOTE: the payload size has already been checked against session MaxMessageSize
- google::protobuf::io::CodedInputStream input(reinterpret_cast<const ui8*>(payload.data()), payload.size());
+ // Need to override protobuf message size limit
+ // NOTE: the payload size has already been checked against session MaxMessageSize
+ google::protobuf::io::CodedInputStream input(reinterpret_cast<const ui8*>(payload.data()), payload.size());
input.SetTotalBytesLimit(payload.size());
-
- bool ok = bmess->GetRecord()->ParseFromCodedStream(&input) && input.ConsumedEntireMessage();
+
+ bool ok = bmess->GetRecord()->ParseFromCodedStream(&input) && input.ConsumedEntireMessage();
if (!ok) {
return nullptr;
}
diff --git a/library/cpp/messagebus/remote_connection.cpp b/library/cpp/messagebus/remote_connection.cpp
index 2113b5622ff..22932569dbd 100644
--- a/library/cpp/messagebus/remote_connection.cpp
+++ b/library/cpp/messagebus/remote_connection.cpp
@@ -186,7 +186,7 @@ namespace NBus {
}
ReaderData.DropChannel();
-
+
ReaderData.Status.Fd = readSocket.Socket;
ReaderData.SocketVersion = readSocket.SocketVersion;
@@ -393,8 +393,8 @@ namespace NBus {
}
return true;
- }
-
+ }
+
bool TRemoteConnection::ReaderFillBuffer() {
if (!ReaderData.BufferMore())
return true;
diff --git a/library/cpp/messagebus/remote_connection_status.cpp b/library/cpp/messagebus/remote_connection_status.cpp
index c34c8755362..2c48b2a287c 100644
--- a/library/cpp/messagebus/remote_connection_status.cpp
+++ b/library/cpp/messagebus/remote_connection_status.cpp
@@ -202,7 +202,7 @@ TString TRemoteConnectionStatus::PrintToString() const {
p.AddRow("write buffer cap", LeftPad(WriterStatus.BufferSize, 12));
p.AddRow("read buffer cap", LeftPad(ReaderStatus.BufferSize, 12));
-
+
p.AddRow("write buffer drops", LeftPad(WriterStatus.Incremental.BufferDrops, 10));
p.AddRow("read buffer drops", LeftPad(ReaderStatus.Incremental.BufferDrops, 10));
diff --git a/library/cpp/messagebus/test/ut/messagebus_ut.cpp b/library/cpp/messagebus/test/ut/messagebus_ut.cpp
index e771a933cae..040f9b77022 100644
--- a/library/cpp/messagebus/test/ut/messagebus_ut.cpp
+++ b/library/cpp/messagebus/test/ut/messagebus_ut.cpp
@@ -313,9 +313,9 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
TMutex Lock_;
TDeque<TAutoPtr<TOnMessageContext>> DelayedMessages;
- TDelayReplyServer()
- : MessageReceivedEvent(TEventResetType::rAuto)
- {
+ TDelayReplyServer()
+ : MessageReceivedEvent(TEventResetType::rAuto)
+ {
Bus = CreateMessageQueue("TDelayReplyServer");
TBusServerSessionConfig sessionConfig;
sessionConfig.SendTimeout = 1000;
@@ -617,30 +617,30 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
}
Y_UNIT_TEST(ServerMessageReservedIds) {
- TObjectCountCheck objectCountCheck;
-
- TExampleServer server;
- TNetAddr serverAddr = server.GetActualListenAddr();
-
- TExampleClient client;
-
- client.SendMessagesWaitReplies(2, serverAddr);
-
-        // This test doesn't check 0, 1, YBUS_KEYINVALID because there are asserts() on the sending side
-
- TAutoPtr<TBusMessage> req(new TExampleRequest(&client.Proto.RequestCount));
- req->GetHeader()->Id = 2;
- client.Session->SendMessageAutoPtr(req, &serverAddr);
- client.MessageCount = 1;
- client.WaitForError(MESSAGE_DELIVERY_FAILED);
-
- req.Reset(new TExampleRequest(&client.Proto.RequestCount));
- req->GetHeader()->Id = YBUS_KEYLOCAL;
- client.Session->SendMessageAutoPtr(req, &serverAddr);
- client.MessageCount = 1;
- client.WaitForError(MESSAGE_DELIVERY_FAILED);
- }
-
+ TObjectCountCheck objectCountCheck;
+
+ TExampleServer server;
+ TNetAddr serverAddr = server.GetActualListenAddr();
+
+ TExampleClient client;
+
+ client.SendMessagesWaitReplies(2, serverAddr);
+
+        // This test doesn't check 0, 1, YBUS_KEYINVALID because there are asserts() on the sending side
+
+ TAutoPtr<TBusMessage> req(new TExampleRequest(&client.Proto.RequestCount));
+ req->GetHeader()->Id = 2;
+ client.Session->SendMessageAutoPtr(req, &serverAddr);
+ client.MessageCount = 1;
+ client.WaitForError(MESSAGE_DELIVERY_FAILED);
+
+ req.Reset(new TExampleRequest(&client.Proto.RequestCount));
+ req->GetHeader()->Id = YBUS_KEYLOCAL;
+ client.Session->SendMessageAutoPtr(req, &serverAddr);
+ client.MessageCount = 1;
+ client.WaitForError(MESSAGE_DELIVERY_FAILED);
+ }
+
Y_UNIT_TEST(TestGetInFlightForDestination) {
TObjectCountCheck objectCountCheck;
@@ -661,7 +661,7 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
break;
}
}
- UNIT_ASSERT_VALUES_EQUAL(server.GetDelayedMessageCount(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(server.GetDelayedMessageCount(), 2);
size_t inFlight = client.Session->GetInFlight(addr);
// 4 is for messagebus1 that adds inFlight counter twice for some reason
@@ -731,10 +731,10 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
// check reset is possible here
message->Reset();
- // intentionally don't destroy the message
- // we will try to resend it
+ // intentionally don't destroy the message
+ // we will try to resend it
Y_UNUSED(message.Release());
-
+
TestSync.CheckAndIncrement(1);
}
};
@@ -760,8 +760,8 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
// check reset is possible here
message->Reset();
client.TestSync.CheckAndIncrement(3);
-
- delete message;
+
+ delete message;
}
Y_UNIT_TEST(ResetAfterSendOneWayErrorInReturn) {
@@ -865,8 +865,8 @@ Y_UNIT_TEST_SUITE(TMessageBusTests) {
request.SetVersionInternal(0xF); // max
output.Write(&request, sizeof(request));
- UNIT_ASSERT_VALUES_EQUAL(IsVersionNegotiation(request), true);
-
+ UNIT_ASSERT_VALUES_EQUAL(IsVersionNegotiation(request), true);
+
TStreamSocketInput input(&socket);
TBusHeader response;
diff --git a/library/cpp/messagebus/test/ut/one_way_ut.cpp b/library/cpp/messagebus/test/ut/one_way_ut.cpp
index bc78c5238a9..9c21227e2bd 100644
--- a/library/cpp/messagebus/test/ut/one_way_ut.cpp
+++ b/library/cpp/messagebus/test/ut/one_way_ut.cpp
@@ -93,7 +93,7 @@ public:
TExampleProtocol Proto;
public:
- TAtomic NumMessages;
+ TAtomic NumMessages;
NullServer() {
NumMessages = 0;
@@ -119,7 +119,7 @@ public:
/// tell session to forget this message and never expect any reply
mess.ForgetRequest();
- AtomicIncrement(NumMessages);
+ AtomicIncrement(NumMessages);
}
/// this handler should not be called because this server does not send replies
@@ -139,10 +139,10 @@ Y_UNIT_TEST_SUITE(TMessageBusTests_OneWay) {
client.Work();
// wait until all client message are delivered
- UNIT_WAIT_FOR(AtomicGet(server.NumMessages) == 10);
+ UNIT_WAIT_FOR(AtomicGet(server.NumMessages) == 10);
// assert correct number of messages
- UNIT_ASSERT_VALUES_EQUAL(AtomicGet(server.NumMessages), 10);
+ UNIT_ASSERT_VALUES_EQUAL(AtomicGet(server.NumMessages), 10);
UNIT_ASSERT_VALUES_EQUAL(server.Session->GetInFlight(), 0);
UNIT_ASSERT_VALUES_EQUAL(client.Session->GetInFlight(), 0);
}
@@ -196,7 +196,7 @@ Y_UNIT_TEST_SUITE(TMessageBusTests_OneWay) {
TBusClientSessionConfig sessionConfig;
sessionConfig.SendTimeout = 1;
sessionConfig.ConnectTimeout = 1;
- sessionConfig.Secret.TimeoutPeriod = TDuration::MilliSeconds(10);
+ sessionConfig.Secret.TimeoutPeriod = TDuration::MilliSeconds(10);
return sessionConfig;
}
@@ -245,11 +245,11 @@ Y_UNIT_TEST_SUITE(TMessageBusTests_OneWay) {
first = false;
}
- // BUGBUG: The test is buggy: the client might not get any error when sending one-way messages.
-        // All the messages that the client has sent before it gets the first MESSAGE_BUSY error might get
-        // serialized and written to the socket buffer, so the write queue gets drained and there are
-        // no messages to time out when the periodic timeout check happens.
-
+ // BUGBUG: The test is buggy: the client might not get any error when sending one-way messages.
+        // All the messages that the client has sent before it gets the first MESSAGE_BUSY error might get
+        // serialized and written to the socket buffer, so the write queue gets drained and there are
+        // no messages to time out when the periodic timeout check happens.
+
client.GotError.WaitI();
}
}
diff --git a/library/cpp/monlib/counters/counters.cpp b/library/cpp/monlib/counters/counters.cpp
index 2781b8ab4eb..50dca4c577a 100644
--- a/library/cpp/monlib/counters/counters.cpp
+++ b/library/cpp/monlib/counters/counters.cpp
@@ -7,18 +7,18 @@ namespace NMonitoring {
unsigned i = 0;
i64 major = val;
i64 minor = 0;
- const unsigned imax = sizeof(shorts) / sizeof(char);
- for (i = 0; i < imax; i++) {
+ const unsigned imax = sizeof(shorts) / sizeof(char);
+ for (i = 0; i < imax; i++) {
if (major >> 10 == 0)
break;
else {
- minor = major - (major >> 10 << 10);
+ minor = major - (major >> 10 << 10);
major = major >> 10;
}
}
- minor = (minor * 10) >> 10;
+ minor = (minor * 10) >> 10;
- if (i == 0 || i >= imax)
+ if (i == 0 || i >= imax)
*buf = '\0';
else
snprintf(buf, size, "%" PRId64 ".%" PRId64 "%c", major, minor, shorts[i]);
@@ -28,15 +28,15 @@ namespace NMonitoring {
char* PrettyNum(i64 val, char* buf, size_t size) {
Y_ASSERT(buf);
- if (size < 4) {
- buf[0] = 0;
- return buf;
- }
+ if (size < 4) {
+ buf[0] = 0;
+ return buf;
+ }
PrettyNumShort(val, buf + 2, size - 3);
if (buf[2] == 0) {
*buf = '\0';
} else {
- size_t len = 2 + strnlen(buf + 2, size - 4);
+ size_t len = 2 + strnlen(buf + 2, size - 4);
Y_ASSERT(len < size);
buf[0] = ' ';
buf[1] = '(';
diff --git a/library/cpp/monlib/dynamic_counters/counters.h b/library/cpp/monlib/dynamic_counters/counters.h
index 5b0db006aed..dc178cfbe01 100644
--- a/library/cpp/monlib/dynamic_counters/counters.h
+++ b/library/cpp/monlib/dynamic_counters/counters.h
@@ -195,7 +195,7 @@ namespace NMonitoring {
private:
TRWMutex Lock;
- TCounterPtr LookupCounter; // Counts lookups by name
+ TCounterPtr LookupCounter; // Counts lookups by name
TOnLookupPtr OnLookup = nullptr; // Called on each lookup if not nullptr, intended for lightweight tracing.
typedef TIntrusivePtr<TCountableBase> TCountablePtr;
@@ -241,12 +241,12 @@ namespace NMonitoring {
~TDynamicCounters() override;
- // This counter allows to track lookups by name within the whole subtree
- void SetLookupCounter(TCounterPtr lookupCounter) {
+ // This counter allows tracking lookups by name within the whole subtree
+ void SetLookupCounter(TCounterPtr lookupCounter) {
TWriteGuard g(Lock);
- LookupCounter = lookupCounter;
- }
-
+ LookupCounter = lookupCounter;
+ }
+
void SetOnLookup(TOnLookupPtr onLookup) {
TWriteGuard g(Lock);
OnLookup = onLookup;
diff --git a/library/cpp/monlib/dynamic_counters/counters_ut.cpp b/library/cpp/monlib/dynamic_counters/counters_ut.cpp
index f3d87930f9c..3591037e0a7 100644
--- a/library/cpp/monlib/dynamic_counters/counters_ut.cpp
+++ b/library/cpp/monlib/dynamic_counters/counters_ut.cpp
@@ -284,33 +284,33 @@ Y_UNIT_TEST_SUITE(TDynamicCountersTest) {
" sensor:timeMillis = {1: 1, 2: 1, 4: 2, inf: 95}\n"
"}\n");
}
-
- Y_UNIT_TEST(CounterLookupCounter) {
- TDynamicCounterPtr rootGroup(new TDynamicCounters());
- TDynamicCounters::TCounterPtr lookups = rootGroup->GetCounter("Lookups", true);
- rootGroup->SetLookupCounter(lookups);
-
- // Create subtree and check that counter is inherited
- TDynamicCounterPtr serviceGroup = rootGroup->GetSubgroup("service", "MyService");
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 1);
-
- TDynamicCounterPtr subGroup = serviceGroup->GetSubgroup("component", "MyComponent");
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 2);
-
- auto counter = subGroup->GetNamedCounter("range", "20 msec", true);
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 3);
-
- auto hist = subGroup->GetHistogram("timeMsec", ExponentialHistogram(4, 2));
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 4);
-
- // Replace the counter for subGroup
- auto subGroupLookups = rootGroup->GetCounter("LookupsInMyComponent", true);
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 5);
- subGroup->SetLookupCounter(subGroupLookups);
- auto counter2 = subGroup->GetNamedCounter("range", "30 msec", true);
- UNIT_ASSERT_VALUES_EQUAL(subGroupLookups->Val(), 1);
- UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 5);
- }
+
+ Y_UNIT_TEST(CounterLookupCounter) {
+ TDynamicCounterPtr rootGroup(new TDynamicCounters());
+ TDynamicCounters::TCounterPtr lookups = rootGroup->GetCounter("Lookups", true);
+ rootGroup->SetLookupCounter(lookups);
+
+ // Create subtree and check that counter is inherited
+ TDynamicCounterPtr serviceGroup = rootGroup->GetSubgroup("service", "MyService");
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 1);
+
+ TDynamicCounterPtr subGroup = serviceGroup->GetSubgroup("component", "MyComponent");
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 2);
+
+ auto counter = subGroup->GetNamedCounter("range", "20 msec", true);
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 3);
+
+ auto hist = subGroup->GetHistogram("timeMsec", ExponentialHistogram(4, 2));
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 4);
+
+ // Replace the counter for subGroup
+ auto subGroupLookups = rootGroup->GetCounter("LookupsInMyComponent", true);
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 5);
+ subGroup->SetLookupCounter(subGroupLookups);
+ auto counter2 = subGroup->GetNamedCounter("range", "30 msec", true);
+ UNIT_ASSERT_VALUES_EQUAL(subGroupLookups->Val(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(lookups->Val(), 5);
+ }
Y_UNIT_TEST(FindCounters) {
TDynamicCounterPtr rootGroup(new TDynamicCounters());
diff --git a/library/cpp/monlib/service/pages/templates.h b/library/cpp/monlib/service/pages/templates.h
index a4de3628451..b4656f059fa 100644
--- a/library/cpp/monlib/service/pages/templates.h
+++ b/library/cpp/monlib/service/pages/templates.h
@@ -150,8 +150,8 @@ namespace NMonitoring {
}
~TTag() {
- try {
- Str << "</" << tag << ">";
+ try {
+ Str << "</" << tag << ">";
} catch (...) {
}
}
diff --git a/library/cpp/monlib/service/pages/version_mon_page.cpp b/library/cpp/monlib/service/pages/version_mon_page.cpp
index 50e62888092..41e29417da7 100644
--- a/library/cpp/monlib/service/pages/version_mon_page.cpp
+++ b/library/cpp/monlib/service/pages/version_mon_page.cpp
@@ -11,6 +11,6 @@ void TVersionMonPage::OutputText(IOutputStream& out, NMonitoring::IMonHttpReques
out << version;
if (!TString(version).EndsWith("\n"))
out << "\n";
- out << GetBuildInfo() << "\n\n";
+ out << GetBuildInfo() << "\n\n";
out << "linked with malloc: " << NMalloc::MallocInfo().Name << "\n";
}
diff --git a/library/cpp/regex/pire/extraencodings.cpp b/library/cpp/regex/pire/extraencodings.cpp
index 965a0c77de7..2e507e4b67f 100644
--- a/library/cpp/regex/pire/extraencodings.cpp
+++ b/library/cpp/regex/pire/extraencodings.cpp
@@ -1,13 +1,13 @@
-#include <util/system/defaults.h>
-#include <util/system/yassert.h>
+#include <util/system/defaults.h>
+#include <util/system/yassert.h>
#include <library/cpp/charset/codepage.h>
-#include <util/generic/singleton.h>
-#include <util/generic/yexception.h>
+#include <util/generic/singleton.h>
+#include <util/generic/yexception.h>
#include <library/cpp/charset/doccodes.h>
-#include "pire.h"
-
-namespace NPire {
+#include "pire.h"
+
+namespace NPire {
namespace {
// A one-byte encoding which is capable of transforming upper half of the character
// table to/from Unicode chars.
@@ -18,14 +18,14 @@ namespace NPire {
for (size_t i = 0; i < 256; ++i)
Reverse_.insert(std::make_pair(Table_[i], static_cast<char>(i)));
}
-
+
wchar32 FromLocal(const char*& begin, const char* end) const override {
if (begin != end)
return Table_[static_cast<unsigned char>(*begin++)];
else
ythrow yexception() << "EOF reached in Pire::OneByte::fromLocal()";
}
-
+
TString ToLocal(wchar32 c) const override {
THashMap<wchar32, char>::const_iterator i = Reverse_.find(c);
if (i != Reverse_.end())
@@ -33,16 +33,16 @@ namespace NPire {
else
return TString();
}
-
+
void AppendDot(TFsm& fsm) const override {
fsm.AppendDot();
}
-
+
private:
const wchar32* Table_;
THashMap<wchar32, char> Reverse_;
};
-
+
template <unsigned N>
struct TOneByteHelper: public TOneByte {
inline TOneByteHelper()
@@ -51,16 +51,16 @@ namespace NPire {
}
};
}
-
+
namespace NEncodings {
const NPire::TEncoding& Koi8r() {
return *Singleton<TOneByteHelper<CODES_KOI8>>();
}
-
+
const NPire::TEncoding& Cp1251() {
return *Singleton<TOneByteHelper<CODES_WIN>>();
- }
-
+ }
+
const NPire::TEncoding& Get(ECharset encoding) {
switch (encoding) {
case CODES_WIN:
@@ -75,7 +75,7 @@ namespace NPire {
ythrow yexception() << "Pire::Encodings::get(ECharset): unknown encoding " << (int)encoding;
}
}
-
- }
-
-}
+
+ }
+
+}
diff --git a/library/cpp/regex/pire/inline/ya.make b/library/cpp/regex/pire/inline/ya.make
index 78a44d80d74..d4850f7b457 100644
--- a/library/cpp/regex/pire/inline/ya.make
+++ b/library/cpp/regex/pire/inline/ya.make
@@ -1,5 +1,5 @@
PROGRAM(pire_inline)
-
+
CFLAGS(-DPIRE_NO_CONFIG)
OWNER(
@@ -9,14 +9,14 @@ OWNER(
PEERDIR(
ADDINCL library/cpp/regex/pire
-)
-
+)
+
SRCDIR(
contrib/libs/pire/pire
)
-
-SRCS(
+
+SRCS(
inline.l
-)
-
-END()
+)
+
+END()
diff --git a/library/cpp/regex/pire/pire.h b/library/cpp/regex/pire/pire.h
index e8f6f7cfd19..286fecd6930 100644
--- a/library/cpp/regex/pire/pire.h
+++ b/library/cpp/regex/pire/pire.h
@@ -1,19 +1,19 @@
-#pragma once
-
-#ifndef PIRE_NO_CONFIG
-#define PIRE_NO_CONFIG
-#endif
-
-#include <contrib/libs/pire/pire/pire.h>
-#include <contrib/libs/pire/pire/extra.h>
-
+#pragma once
+
+#ifndef PIRE_NO_CONFIG
+#define PIRE_NO_CONFIG
+#endif
+
+#include <contrib/libs/pire/pire/pire.h>
+#include <contrib/libs/pire/pire/extra.h>
+
#include <library/cpp/charset/doccodes.h>
-namespace NPire {
+namespace NPire {
using TChar = Pire::Char;
using Pire::MaxChar;
-
- // Scanner classes
+
+ // Scanner classes
using TScanner = Pire::Scanner;
using TNonrelocScanner = Pire::NonrelocScanner;
using TScannerNoMask = Pire::ScannerNoMask;
@@ -27,11 +27,11 @@ namespace NPire {
using TCapturingScanner = Pire::CapturingScanner;
using TSlowCapturingScanner = Pire::SlowCapturingScanner;
using TCountingScanner = Pire::CountingScanner;
-
+
template <typename T1, typename T2>
using TScannerPair = Pire::ScannerPair<T1, T2>;
- // Helper classes
+ // Helper classes
using TFsm = Pire::Fsm;
using TLexer = Pire::Lexer;
using TTerm = Pire::Term;
@@ -39,38 +39,38 @@ namespace NPire {
using TFeature = Pire::Feature;
using TFeaturePtr = Pire::Feature::Ptr;
using TError = Pire::Error;
-
- // Helper functions
+
+ // Helper functions
using Pire::LongestPrefix;
using Pire::LongestSuffix;
using Pire::Matches;
- using Pire::MmappedScanner;
- using Pire::Run;
+ using Pire::MmappedScanner;
+ using Pire::Run;
using Pire::Runner;
- using Pire::ShortestPrefix;
- using Pire::ShortestSuffix;
- using Pire::Step;
-
- using namespace Pire::SpecialChar;
- using namespace Pire::Consts;
-
- namespace NFeatures {
+ using Pire::ShortestPrefix;
+ using Pire::ShortestSuffix;
+ using Pire::Step;
+
+ using namespace Pire::SpecialChar;
+ using namespace Pire::Consts;
+
+ namespace NFeatures {
using Pire::Features::AndNotSupport;
using Pire::Features::Capture;
- using Pire::Features::CaseInsensitive;
- using Pire::Features::GlueSimilarGlyphs;
- }
-
- namespace NEncodings {
- using Pire::Encodings::Latin1;
- using Pire::Encodings::Utf8;
-
+ using Pire::Features::CaseInsensitive;
+ using Pire::Features::GlueSimilarGlyphs;
+ }
+
+ namespace NEncodings {
+ using Pire::Encodings::Latin1;
+ using Pire::Encodings::Utf8;
+
const NPire::TEncoding& Koi8r();
const NPire::TEncoding& Cp1251();
const NPire::TEncoding& Get(ECharset encoding);
- }
-
- namespace NTokenTypes {
- using namespace Pire::TokenTypes;
- }
-}
+ }
+
+ namespace NTokenTypes {
+ using namespace Pire::TokenTypes;
+ }
+}
diff --git a/library/cpp/regex/pire/regexp.h b/library/cpp/regex/pire/regexp.h
index aeb66a8a647..94bba4064b7 100644
--- a/library/cpp/regex/pire/regexp.h
+++ b/library/cpp/regex/pire/regexp.h
@@ -54,13 +54,13 @@ namespace NRegExp {
lexer.Assign(regexp.data(), regexp.data() + regexp.size());
} else {
TVector<wchar32> ucs4(regexp.size() + 1);
- size_t inRead = 0;
- size_t outWritten = 0;
+ size_t inRead = 0;
+ size_t outWritten = 0;
int recodeRes = RecodeToUnicode(opts.Charset, regexp.data(), ucs4.data(),
regexp.size(), regexp.size(), inRead, outWritten);
Y_ASSERT(recodeRes == RECODE_OK);
Y_ASSERT(outWritten < ucs4.size());
- ucs4[outWritten] = 0;
+ ucs4[outWritten] = 0;
lexer.Assign(ucs4.begin(),
ucs4.begin() + std::char_traits<wchar32>::length(ucs4.data()));
@@ -207,12 +207,12 @@ namespace NRegExp {
}
protected:
- inline void Run(const char* data, size_t len, bool addBegin, bool addEnd) noexcept {
- if (addBegin) {
+ inline void Run(const char* data, size_t len, bool addBegin, bool addEnd) noexcept {
+ if (addBegin) {
NPire::Step(GetScanner(), State, NPire::BeginMark);
}
NPire::Run(GetScanner(), State, data, data + len);
- if (addEnd) {
+ if (addEnd) {
NPire::Step(GetScanner(), State, NPire::EndMark);
}
}
@@ -236,8 +236,8 @@ namespace NRegExp {
{
}
- inline TMatcher& Match(const char* data, size_t len, bool addBegin = false, bool addEnd = false) noexcept {
- Run(data, len, addBegin, addEnd);
+ inline TMatcher& Match(const char* data, size_t len, bool addBegin = false, bool addEnd = false) noexcept {
+ Run(data, len, addBegin, addEnd);
return *this;
}
@@ -267,9 +267,9 @@ namespace NRegExp {
return GetState().Captured();
}
- inline TSearcher& Search(const char* data, size_t len, bool addBegin = true, bool addEnd = true) noexcept {
+ inline TSearcher& Search(const char* data, size_t len, bool addBegin = true, bool addEnd = true) noexcept {
Data = TStringBuf(data, len);
- Run(data, len, addBegin, addEnd);
+ Run(data, len, addBegin, addEnd);
return *this;
}
diff --git a/library/cpp/regex/pire/ut/regexp_ut.cpp b/library/cpp/regex/pire/ut/regexp_ut.cpp
index 7c517bc583b..e7206de9ad4 100644
--- a/library/cpp/regex/pire/ut/regexp_ut.cpp
+++ b/library/cpp/regex/pire/ut/regexp_ut.cpp
@@ -17,41 +17,41 @@ Y_UNIT_TEST_SUITE(TRegExp) {
}
Y_UNIT_TEST(Boundaries) {
- UNIT_ASSERT(!TMatcher(TFsm("qwb$", TFsm::TOptions().SetSurround(true))).Match("aqwb").Final());
- UNIT_ASSERT(!TMatcher(TFsm("^aqw", TFsm::TOptions().SetSurround(true))).Match("aqwb").Final());
+ UNIT_ASSERT(!TMatcher(TFsm("qwb$", TFsm::TOptions().SetSurround(true))).Match("aqwb").Final());
+ UNIT_ASSERT(!TMatcher(TFsm("^aqw", TFsm::TOptions().SetSurround(true))).Match("aqwb").Final());
UNIT_ASSERT(TMatcher(TFsm("qwb$", TFsm::TOptions().SetSurround(true))).Match(TStringBuf("aqwb"), true, true).Final());
UNIT_ASSERT(TMatcher(TFsm("^aqw", TFsm::TOptions().SetSurround(true))).Match(TStringBuf("aqwb"), true, true).Final());
UNIT_ASSERT(!TMatcher(TFsm("qw$", TFsm::TOptions().SetSurround(true))).Match(TStringBuf("aqwb"), true, true).Final());
UNIT_ASSERT(!TMatcher(TFsm("^qw", TFsm::TOptions().SetSurround(true))).Match(TStringBuf("aqwb"), true, true).Final());
-
- UNIT_ASSERT(TMatcher(TFsm("^aqwb$", TFsm::TOptions().SetSurround(true)))
+
+ UNIT_ASSERT(TMatcher(TFsm("^aqwb$", TFsm::TOptions().SetSurround(true)))
.Match(TStringBuf("a"), true, false)
.Match(TStringBuf("q"), false, false)
.Match(TStringBuf("w"), false, false)
.Match(TStringBuf("b"), false, true)
.Final());
- }
-
+ }
+
Y_UNIT_TEST(Case) {
UNIT_ASSERT(TMatcher(TFsm("qw", TFsm::TOptions().SetCaseInsensitive(true))).Match("Qw").Final());
UNIT_ASSERT(!TMatcher(TFsm("qw", TFsm::TOptions().SetCaseInsensitive(false))).Match("Qw").Final());
}
-
+
Y_UNIT_TEST(UnicodeCase) {
UNIT_ASSERT(TMatcher(TFsm("\\x{61}\\x{62}", TFsm::TOptions().SetCaseInsensitive(true))).Match("Ab").Final());
UNIT_ASSERT(!TMatcher(TFsm("\\x{61}\\x{62}", TFsm::TOptions().SetCaseInsensitive(false))).Match("Ab").Final());
}
Y_UNIT_TEST(Utf) {
- NRegExp::TFsmBase::TOptions opts;
- opts.Charset = CODES_UTF8;
- opts.Surround = true;
- UNIT_ASSERT(TMatcher(TFsm(".*", opts)).Match("wtf").Final());
- UNIT_ASSERT(TMatcher(TFsm(".*", opts)).Match("чзн").Final());
- UNIT_ASSERT(TMatcher(TFsm("ч.*", opts)).Match("чзн").Final());
- UNIT_ASSERT(!TMatcher(TFsm("чзн", opts)).Match("чзх").Final());
- }
-
+ NRegExp::TFsmBase::TOptions opts;
+ opts.Charset = CODES_UTF8;
+ opts.Surround = true;
+ UNIT_ASSERT(TMatcher(TFsm(".*", opts)).Match("wtf").Final());
+ UNIT_ASSERT(TMatcher(TFsm(".*", opts)).Match("чзн").Final());
+ UNIT_ASSERT(TMatcher(TFsm("ч.*", opts)).Match("чзн").Final());
+ UNIT_ASSERT(!TMatcher(TFsm("чзн", opts)).Match("чзх").Final());
+ }
+
Y_UNIT_TEST(AndNot) {
NRegExp::TFsmBase::TOptions opts;
opts.AndNotSupport = true;
@@ -84,15 +84,15 @@ Y_UNIT_TEST_SUITE(TRegExp) {
}
Y_UNIT_TEST(Glue) {
- TFsm glued =
- TFsm("qw", TFsm::TOptions().SetCaseInsensitive(true)) |
- TFsm("qw", TFsm::TOptions().SetCaseInsensitive(false)) |
- TFsm("abc", TFsm::TOptions().SetCaseInsensitive(false));
- UNIT_ASSERT(TMatcher(glued).Match("Qw").Final());
- UNIT_ASSERT(TMatcher(glued).Match("Qw").Final());
- UNIT_ASSERT(TMatcher(glued).Match("abc").Final());
- UNIT_ASSERT(!TMatcher(glued).Match("Abc").Final());
- }
+ TFsm glued =
+ TFsm("qw", TFsm::TOptions().SetCaseInsensitive(true)) |
+ TFsm("qw", TFsm::TOptions().SetCaseInsensitive(false)) |
+ TFsm("abc", TFsm::TOptions().SetCaseInsensitive(false));
+ UNIT_ASSERT(TMatcher(glued).Match("Qw").Final());
+ UNIT_ASSERT(TMatcher(glued).Match("Qw").Final());
+ UNIT_ASSERT(TMatcher(glued).Match("abc").Final());
+ UNIT_ASSERT(!TMatcher(glued).Match("Abc").Final());
+ }
Y_UNIT_TEST(Capture1) {
TCapturingFsm fsm("here we have user_id=([a-z0-9]+);");
diff --git a/library/cpp/regex/pire/ut/ya.make b/library/cpp/regex/pire/ut/ya.make
index d0a23018160..8776695f405 100644
--- a/library/cpp/regex/pire/ut/ya.make
+++ b/library/cpp/regex/pire/ut/ya.make
@@ -1,20 +1,20 @@
# this test is not linked into the build tree with RECURSE and is built by unittest/library
UNITTEST()
-
+
OWNER(
g:util
davenger
)
SET(PIRETESTSDIR contrib/libs/pire/ut)
-
+
CFLAGS(-DPIRE_NO_CONFIG)
-
+
PEERDIR(
library/cpp/regex/pire
)
-
+
SRCDIR(
${PIRETESTSDIR}
)
@@ -23,22 +23,22 @@ ADDINCL(
contrib/libs/pire/pire
contrib/libs/pire/ut
)
-
-SRCS(
- pire_ut.cpp
- capture_ut.cpp
- count_ut.cpp
+
+SRCS(
+ pire_ut.cpp
+ capture_ut.cpp
+ count_ut.cpp
glyph_ut.cpp
- easy_ut.cpp
+ easy_ut.cpp
read_unicode_ut.cpp
- regexp_ut.cpp
+ regexp_ut.cpp
approx_matching_ut.cpp
-)
-
+)
+
SIZE(MEDIUM)
TIMEOUT(600)
PIRE_INLINE(inline_ut.cpp)
-END()
+END()
diff --git a/library/cpp/regex/pire/ya.make b/library/cpp/regex/pire/ya.make
index 0f788b35b5e..c857e6d18bc 100644
--- a/library/cpp/regex/pire/ya.make
+++ b/library/cpp/regex/pire/ya.make
@@ -1,5 +1,5 @@
-LIBRARY()
-
+LIBRARY()
+
OWNER(
g:util
g:antiinfra
@@ -8,33 +8,33 @@ OWNER(
)
CFLAGS(-DPIRE_NO_CONFIG)
-
+
SRCDIR(contrib/libs/pire/pire)
-
-SRCS(
+
+SRCS(
pcre2pire.cpp
- classes.cpp
- encoding.cpp
- fsm.cpp
- scanner_io.cpp
- easy.cpp
- scanners/null.cpp
- extra/capture.cpp
- extra/count.cpp
- extra/glyphs.cpp
- re_lexer.cpp
+ classes.cpp
+ encoding.cpp
+ fsm.cpp
+ scanner_io.cpp
+ easy.cpp
+ scanners/null.cpp
+ extra/capture.cpp
+ extra/count.cpp
+ extra/glyphs.cpp
+ re_lexer.cpp
re_parser.y
read_unicode.cpp
- extraencodings.cpp
+ extraencodings.cpp
approx_matching.cpp
half_final_fsm.cpp
minimize.h
-)
-
+)
+
PEERDIR(
library/cpp/charset
)
-END()
+END()
RECURSE_FOR_TESTS(ut)
diff --git a/util/draft/holder_vector.h b/util/draft/holder_vector.h
index 0cf4689f6c6..1c62055bd95 100644
--- a/util/draft/holder_vector.h
+++ b/util/draft/holder_vector.h
@@ -20,8 +20,8 @@ public:
void Clear() {
for (typename TBase::iterator it = TBase::begin(); it != TBase::end(); ++it) {
- if (*it)
- D::Destroy(*it);
+ if (*it)
+ D::Destroy(*it);
}
TBase::clear();
}
@@ -33,11 +33,11 @@ public:
// TVector takes ownership of T
void PushBack(T* t) {
try {
- TBase::push_back(t);
+ TBase::push_back(t);
} catch (...) {
- if (t)
- D::Destroy(t);
- throw;
+ if (t)
+ D::Destroy(t);
+ throw;
}
}
@@ -65,11 +65,11 @@ public:
}
}
- T* Release(size_t i) {
- T* t = (*this)[i];
+ T* Release(size_t i) {
+ T* t = (*this)[i];
(*this)[i] = nullptr;
- return t;
- }
+ return t;
+ }
void Resize(size_t newSize) {
for (size_t i = newSize; i < size(); ++i) {
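
holder_vector.h above is likewise restored verbatim, and the touched lines are exactly the container's ownership contract: PushBack() destroys the element if the underlying push_back throws, and Release(i) hands ownership back to the caller by nulling the slot so that Clear() will not double-delete it. A toy restatement of that contract (illustrative names, plain delete instead of the real D::Destroy):

#include <cstddef>
#include <vector>

template <class T>
struct TToyHolderVector {
    std::vector<T*> Items;

    ~TToyHolderVector() {
        Clear();
    }

    void Clear() {
        for (T* p : Items)
            delete p; // deleting nullptr is a no-op, so released slots are safe
        Items.clear();
    }

    // The vector takes ownership of t, even if insertion fails.
    void PushBack(T* t) {
        try {
            Items.push_back(t);
        } catch (...) {
            delete t; // do not leak the element when push_back throws
            throw;
        }
    }

    // Give ownership of element i back to the caller.
    T* Release(std::size_t i) {
        T* t = Items[i];
        Items[i] = nullptr;
        return t;
    }
};

int main() {
    TToyHolderVector<int> v;
    v.PushBack(new int(42));
    int* owned = v.Release(0);
    delete owned; // the destructor's Clear() sees nullptr in slot 0 and skips it
}
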
diff --git a/util/generic/object_counter.h b/util/generic/object_counter.h
index 893d4b810f8..5257afa2e60 100644
--- a/util/generic/object_counter.h
+++ b/util/generic/object_counter.h
@@ -32,7 +32,7 @@ public:
}
static inline long ObjectCount() noexcept {
- return AtomicGet(Count_);
+ return AtomicGet(Count_);
}
/**
diff --git a/util/memory/benchmark/pool/main.cpp b/util/memory/benchmark/pool/main.cpp
index 91f781aab7c..0b4d6c94af1 100644
--- a/util/memory/benchmark/pool/main.cpp
+++ b/util/memory/benchmark/pool/main.cpp
@@ -1,11 +1,11 @@
#include <library/cpp/testing/benchmark/bench.h>
-
-#include <util/memory/pool.h>
-#include <util/generic/xrange.h>
-#include <util/stream/output.h>
-
+
+#include <util/memory/pool.h>
+#include <util/generic/xrange.h>
+#include <util/stream/output.h>
+
#define BENCHMARK_POOL_ALLOC(chunkSize, allocSize, allocAlign) \
- Y_CPU_BENCHMARK(MemroyPool_chunk##chunkSize##_alloc##allocSize##_align##allocAlign, p) { \
+ Y_CPU_BENCHMARK(MemroyPool_chunk##chunkSize##_alloc##allocSize##_align##allocAlign, p) { \
TMemoryPool pool(chunkSize); \
for (auto i : xrange<size_t>(0, p.Iterations())) { \
(void)i; \
@@ -15,12 +15,12 @@
Cerr << "Allocated: " << pool.MemoryAllocated() << Endl; \
Cerr << "Waste: " << pool.MemoryWaste() << Endl; \
*/ \
- }
-
-BENCHMARK_POOL_ALLOC(4096, 1, 1)
-BENCHMARK_POOL_ALLOC(4096, 2, 2)
-BENCHMARK_POOL_ALLOC(4096, 3, 4)
-BENCHMARK_POOL_ALLOC(4096, 7, 8)
-BENCHMARK_POOL_ALLOC(4096, 17, 16)
-BENCHMARK_POOL_ALLOC(4096, 40, 64)
-BENCHMARK_POOL_ALLOC(4096, 77, 128)
+ }
+
+BENCHMARK_POOL_ALLOC(4096, 1, 1)
+BENCHMARK_POOL_ALLOC(4096, 2, 2)
+BENCHMARK_POOL_ALLOC(4096, 3, 4)
+BENCHMARK_POOL_ALLOC(4096, 7, 8)
+BENCHMARK_POOL_ALLOC(4096, 17, 16)
+BENCHMARK_POOL_ALLOC(4096, 40, 64)
+BENCHMARK_POOL_ALLOC(4096, 77, 128)
diff --git a/util/memory/benchmark/pool/metrics/main.py b/util/memory/benchmark/pool/metrics/main.py
index 1738ef91dc4..cc17b7b8887 100644
--- a/util/memory/benchmark/pool/metrics/main.py
+++ b/util/memory/benchmark/pool/metrics/main.py
@@ -1,5 +1,5 @@
-import yatest.common as yc
-
-
-def test_export_metrics(metrics):
+import yatest.common as yc
+
+
+def test_export_metrics(metrics):
metrics.set_benchmark(yc.execute_benchmark('util/memory/benchmark/pool/pool', threads=8))
diff --git a/util/memory/benchmark/pool/metrics/ya.make b/util/memory/benchmark/pool/metrics/ya.make
index 413ab1ecb28..c671bc5c1c9 100644
--- a/util/memory/benchmark/pool/metrics/ya.make
+++ b/util/memory/benchmark/pool/metrics/ya.make
@@ -1,18 +1,18 @@
OWNER(g:util)
SUBSCRIBER(g:util-subscribers)
-
+
PY2TEST()
-
+
SIZE(LARGE)
-
-TAG(
+
+TAG(
ya:force_sandbox
- sb:intel_e5_2660v1
+ sb:intel_e5_2660v1
ya:fat
-)
-
+)
+
TEST_SRCS(main.py)
-
+
DEPENDS(util/memory/benchmark/pool)
-
-END()
+
+END()
diff --git a/util/memory/benchmark/pool/ya.make b/util/memory/benchmark/pool/ya.make
index 1b48c71ae31..5f4f7d3f15e 100644
--- a/util/memory/benchmark/pool/ya.make
+++ b/util/memory/benchmark/pool/ya.make
@@ -1,10 +1,10 @@
OWNER(g:util)
SUBSCRIBER(g:util-subscribers)
-
+
Y_BENCHMARK()
-
-SRCS(
- main.cpp
-)
-
-END()
+
+SRCS(
+ main.cpp
+)
+
+END()
diff --git a/util/memory/benchmark/ya.make b/util/memory/benchmark/ya.make
index 0d6de5c96e0..2259b9434e1 100644
--- a/util/memory/benchmark/ya.make
+++ b/util/memory/benchmark/ya.make
@@ -1,7 +1,7 @@
OWNER(g:util)
SUBSCRIBER(g:util-subscribers)
-
-RECURSE(
- pool
+
+RECURSE(
+ pool
pool/metrics
-)
+)
diff --git a/util/memory/pool.cpp b/util/memory/pool.cpp
index 138e5afc8ad..9a011f0e4f6 100644
--- a/util/memory/pool.cpp
+++ b/util/memory/pool.cpp
@@ -17,12 +17,12 @@ void TMemoryPool::AddChunk(size_t hint) {
}
TBlock nb = Alloc_->Allocate(allocSize);
- // Add previous chunk's stats
- if (Current_ != &Empty_) {
- MemoryAllocatedBeforeCurrent_ += Current_->Used();
- MemoryWasteBeforeCurrent_ += Current_->Left();
- }
-
+ // Add previous chunk's stats
+ if (Current_ != &Empty_) {
+ MemoryAllocatedBeforeCurrent_ += Current_->Used();
+ MemoryWasteBeforeCurrent_ += Current_->Left();
+ }
+
BlockSize_ = GrowPolicy_->Next(dataLen);
Current_ = new (nb.Data) TChunk(nb.Len - sizeof(TChunk));
Chunks_.PushBack(Current_);
@@ -38,7 +38,7 @@ void TMemoryPool::DoClear(bool keepfirst) noexcept {
Current_ = c;
BlockSize_ = c->BlockLength() - sizeof(TChunk);
MemoryAllocatedBeforeCurrent_ = 0;
- MemoryWasteBeforeCurrent_ = 0;
+ MemoryWasteBeforeCurrent_ = 0;
return;
}
@@ -50,6 +50,6 @@ void TMemoryPool::DoClear(bool keepfirst) noexcept {
Current_ = &Empty_;
BlockSize_ = Origin_;
- MemoryAllocatedBeforeCurrent_ = 0;
- MemoryWasteBeforeCurrent_ = 0;
+ MemoryAllocatedBeforeCurrent_ = 0;
+ MemoryWasteBeforeCurrent_ = 0;
}
diff --git a/util/memory/pool.h b/util/memory/pool.h
index a50403749ae..13c8b6b9ede 100644
--- a/util/memory/pool.h
+++ b/util/memory/pool.h
@@ -138,8 +138,8 @@ public:
, Alloc_(alloc)
, Options_(options)
, Origin_(initial)
- , MemoryAllocatedBeforeCurrent_(0)
- , MemoryWasteBeforeCurrent_(0)
+ , MemoryAllocatedBeforeCurrent_(0)
+ , MemoryWasteBeforeCurrent_(0)
{
}
@@ -245,11 +245,11 @@ public:
}
inline size_t MemoryAllocated() const noexcept {
- return MemoryAllocatedBeforeCurrent_ + (Current_ != &Empty_ ? Current_->Used() : 0);
+ return MemoryAllocatedBeforeCurrent_ + (Current_ != &Empty_ ? Current_->Used() : 0);
}
inline size_t MemoryWaste() const noexcept {
- return MemoryWasteBeforeCurrent_ + (Current_ != &Empty_ ? Current_->Left() : 0);
+ return MemoryWasteBeforeCurrent_ + (Current_ != &Empty_ ? Current_->Left() : 0);
}
template <class TOp>
@@ -277,14 +277,14 @@ protected:
}
inline void* RawAllocate(size_t len, size_t align) {
- Y_ASSERT(align > 0);
+ Y_ASSERT(align > 0);
void* ret = Current_->Allocate(len, align);
if (ret) {
return ret;
}
- AddChunk(len + align - 1);
+ AddChunk(len + align - 1);
return Current_->Allocate(len, align);
}
@@ -302,8 +302,8 @@ private:
TOptions Options_;
TChunkList Chunks_;
const size_t Origin_;
- size_t MemoryAllocatedBeforeCurrent_;
- size_t MemoryWasteBeforeCurrent_;
+ size_t MemoryAllocatedBeforeCurrent_;
+ size_t MemoryWasteBeforeCurrent_;
};
template <typename TPool>
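
The pool.h/pool.cpp hunks above restore the lines around MemoryAllocatedBeforeCurrent_ and MemoryWasteBeforeCurrent_: when AddChunk() starts a new chunk, the finished chunk's Used() and Left() are folded into these running totals, DoClear() resets them, and MemoryAllocated()/MemoryWaste() then only need to add the current chunk on top. A toy model of that accounting scheme (illustrative names, not the real TMemoryPool):

#include <cassert>
#include <cstddef>

struct TToyPool {
    struct TChunk {
        std::size_t Used = 0;
        std::size_t Left = 0;
    };

    TChunk Current;
    std::size_t AllocatedBeforeCurrent = 0;
    std::size_t WasteBeforeCurrent = 0;

    void StartNewChunk(std::size_t size) {
        // Fold the finished chunk's stats into the running totals, as AddChunk() does.
        AllocatedBeforeCurrent += Current.Used;
        WasteBeforeCurrent += Current.Left;
        Current = TChunk{};
        Current.Left = size;
    }

    void Allocate(std::size_t len) {
        Current.Used += len;
        Current.Left -= len;
    }

    std::size_t MemoryAllocated() const { return AllocatedBeforeCurrent + Current.Used; }
    std::size_t MemoryWaste() const { return WasteBeforeCurrent + Current.Left; }
};

int main() {
    TToyPool pool;
    pool.StartNewChunk(100);
    pool.Allocate(60);
    pool.StartNewChunk(100); // 60 used and 40 left over are folded into the totals
    pool.Allocate(10);
    assert(pool.MemoryAllocated() == 70);  // 60 + 10
    assert(pool.MemoryWaste() == 40 + 90); // old chunk's leftovers + current Left()
}
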
diff --git a/util/memory/pool_ut.cpp b/util/memory/pool_ut.cpp
index 3e963bb8ef0..1158a8ca423 100644
--- a/util/memory/pool_ut.cpp
+++ b/util/memory/pool_ut.cpp
@@ -126,12 +126,12 @@ private:
UNIT_ASSERT(m);
memset(m, 0, i);
}
-
- pool.Clear();
-
- UNIT_ASSERT_VALUES_EQUAL(0, pool.MemoryAllocated());
- UNIT_ASSERT_VALUES_EQUAL(0, pool.MemoryWaste());
- UNIT_ASSERT_VALUES_EQUAL(0, pool.Available());
+
+ pool.Clear();
+
+ UNIT_ASSERT_VALUES_EQUAL(0, pool.MemoryAllocated());
+ UNIT_ASSERT_VALUES_EQUAL(0, pool.MemoryWaste());
+ UNIT_ASSERT_VALUES_EQUAL(0, pool.Available());
}
alloc.CheckAtEnd();
@@ -180,14 +180,14 @@ private:
void* aligned8 = pool.Allocate(3, 8);
void* aligned1024 = pool.Allocate(3, 1024);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned16, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned2, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned128, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned4, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned256, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned8, nullptr);
- UNIT_ASSERT_VALUES_UNEQUAL(aligned1024, nullptr);
-
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned16, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned2, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned128, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned4, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned256, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned8, nullptr);
+ UNIT_ASSERT_VALUES_UNEQUAL(aligned1024, nullptr);
+
UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(aligned2) & 1, 0);
UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(aligned4) & 3, 0);
UNIT_ASSERT_VALUES_EQUAL(reinterpret_cast<uintptr_t>(aligned8) & 7, 0);
diff --git a/util/stream/aligned.h b/util/stream/aligned.h
index 92fef15ea71..70e7be05a99 100644
--- a/util/stream/aligned.h
+++ b/util/stream/aligned.h
@@ -69,8 +69,8 @@ public:
size_t GetCurrentOffset() const {
return Position_;
- }
-
+ }
+
/**
* Ensures alignment of the position in the output stream by writing
* some data.
diff --git a/util/string/cast.cpp b/util/string/cast.cpp
index 8c40983c848..aa1e65a8e90 100644
--- a/util/string/cast.cpp
+++ b/util/string/cast.cpp
@@ -640,9 +640,9 @@ DEF_FLT_SPEC(long double)
#undef DEF_FLT_SPEC
-// Using StrToD for float and double because it is faster than sscanf.
+// Using StrToD for float and double because it is faster than sscanf.
// Exception-free, specialized for float types
-template <>
+template <>
bool TryFromStringImpl<double>(const char* data, size_t len, double& result) {
if (!len) {
return false;
@@ -680,19 +680,19 @@ bool TryFromStringImpl<long double>(const char* data, size_t len, long double& r
// Exception-throwing, specialized for float types
template <>
-double FromStringImpl<double>(const char* data, size_t len) {
+double FromStringImpl<double>(const char* data, size_t len) {
double d = 0.0;
if (!TryFromStringImpl(data, len, d)) {
ythrow TFromStringException() << TStringBuf("cannot parse float(") << TStringBuf(data, len) << TStringBuf(")");
- }
- return d;
-}
-
-template <>
-float FromStringImpl<float>(const char* data, size_t len) {
+ }
+ return d;
+}
+
+template <>
+float FromStringImpl<float>(const char* data, size_t len) {
return static_cast<float>(FromStringImpl<double>(data, len));
-}
-
+}
+
double StrToD(const char* b, const char* e, char** se) {
struct TCvt: public StringToDoubleConverter {
inline TCvt()
diff --git a/util/string/cast_ut.cpp b/util/string/cast_ut.cpp
index 7f54dcde320..033450c38c4 100644
--- a/util/string/cast_ut.cpp
+++ b/util/string/cast_ut.cpp
@@ -163,7 +163,7 @@ Y_UNIT_TEST_SUITE(TCastTest) {
f = FromString<TFloat>(str);
UNIT_ASSERT_DOUBLES_EQUAL(f, canonValue, eps);
}
-
+
template <class TFloat>
void BadFloatTester(const char* str) {
const double eps = 10E-5;
@@ -175,7 +175,7 @@ Y_UNIT_TEST_SUITE(TCastTest) {
UNIT_ASSERT_EXCEPTION(f = FromString<TFloat>(str), TFromStringException);
Y_UNUSED(f); // shut up compiler about 'assigned value that is not used'
}
-
+
Y_UNIT_TEST(TestToFrom) {
test1(bool, true);
test1(bool, false);
@@ -315,7 +315,7 @@ Y_UNIT_TEST_SUITE(TCastTest) {
BadFloatTester<long double>("10e 2");
BadFloatTester<long double>(""); // IGNIETFERRO-300
}
-
+
Y_UNIT_TEST(TestLiteral) {
UNIT_ASSERT_VALUES_EQUAL(ToString("abc"), TString("abc"));
}
diff --git a/util/system/align.h b/util/system/align.h
index 9d7ba39a369..ea0bbc5b466 100644
--- a/util/system/align.h
+++ b/util/system/align.h
@@ -15,9 +15,9 @@ static inline T AlignUp(T len, T align) noexcept {
const T alignedResult = AlignDown(len + (align - 1), align);
Y_ASSERT(alignedResult >= len); // check for overflow
return alignedResult;
-}
-
-template <class T>
+}
+
+template <class T>
static inline T AlignUpSpace(T len, T align) noexcept {
Y_ASSERT(IsPowerOf2(align)); // align should be power of 2
return ((T)0 - len) & (align - 1); // AlignUp(len, align) - len;
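
For the align.h hunk above: AlignUpSpace(len, align) returns the padding needed to reach the next multiple of a power-of-two alignment, i.e. AlignUp(len, align) - len, computed with a two's-complement trick. A small self-contained check of those identities (assuming, as the originals assert, that align is a power of two):

#include <cassert>
#include <cstdint>

// Sketches of the helpers touched above; align must be a power of two.
template <class T>
static T AlignDownSketch(T len, T align) {
    return len & ~(align - 1);
}

template <class T>
static T AlignUpSketch(T len, T align) {
    return AlignDownSketch(len + (align - 1), align);
}

template <class T>
static T AlignUpSpaceSketch(T len, T align) {
    return ((T)0 - len) & (align - 1); // equals AlignUp(len, align) - len
}

int main() {
    assert(AlignUpSketch<std::uint64_t>(13, 8) == 16);
    assert(AlignUpSpaceSketch<std::uint64_t>(13, 8) == 3); // 16 - 13
    assert(AlignUpSpaceSketch<std::uint64_t>(16, 8) == 0); // already aligned
}
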
diff --git a/util/system/align_ut.cpp b/util/system/align_ut.cpp
index 6b24cbac521..3ba3a3442b9 100644
--- a/util/system/align_ut.cpp
+++ b/util/system/align_ut.cpp
@@ -1,13 +1,13 @@
-#include "align.h"
-
+#include "align.h"
+
#include <library/cpp/testing/unittest/registar.h>
-class TAlignTest: public TTestBase {
+class TAlignTest: public TTestBase {
UNIT_TEST_SUITE(TAlignTest);
UNIT_TEST(TestDown)
UNIT_TEST(TestUp)
UNIT_TEST_SUITE_END();
-
+
private:
inline void TestDown() {
UNIT_ASSERT(AlignDown(0, 4) == 0);
@@ -30,6 +30,6 @@ private:
UNIT_ASSERT(AlignUp(0, 8) == 0);
UNIT_ASSERT(AlignUp(1, 8) == 8);
}
-};
-
-UNIT_TEST_SUITE_REGISTRATION(TAlignTest);
+};
+
+UNIT_TEST_SUITE_REGISTRATION(TAlignTest);
diff --git a/util/system/context.h b/util/system/context.h
index 5f6ac024c63..d2a349bfc51 100644
--- a/util/system/context.h
+++ b/util/system/context.h
@@ -2,7 +2,7 @@
#include "align.h"
#include "defaults.h"
-#include "compiler.h"
+#include "compiler.h"
#include "sanitizers.h"
#include <util/generic/array_ref.h>
diff --git a/util/system/dynlib.cpp b/util/system/dynlib.cpp
index 8363db22dc4..9d2541c25f1 100644
--- a/util/system/dynlib.cpp
+++ b/util/system/dynlib.cpp
@@ -105,8 +105,8 @@ TDynamicLibrary::TDynamicLibrary(const TString& path, int flags) {
TDynamicLibrary::~TDynamicLibrary() = default;
-void TDynamicLibrary::Open(const char* path, int flags) {
- Impl_.Reset(TImpl::SafeCreate(path, flags));
+void TDynamicLibrary::Open(const char* path, int flags) {
+ Impl_.Reset(TImpl::SafeCreate(path, flags));
}
void TDynamicLibrary::Close() noexcept {
diff --git a/util/system/dynlib.h b/util/system/dynlib.h
index d2e426aebb2..66eaf4a5c1c 100644
--- a/util/system/dynlib.h
+++ b/util/system/dynlib.h
@@ -8,18 +8,18 @@
#define Y_GET_FUNC(dll, name) FUNC_##name((dll).Sym(#name))
#define Y_GET_FUNC_OPTIONAL(dll, name) FUNC_##name((dll).SymOptional(#name))
-#ifdef _win32_
+#ifdef _win32_
#define DEFAULT_DLLOPEN_FLAGS 0
-#else
+#else
#include <dlfcn.h>
-
+
#ifndef RTLD_GLOBAL
#define RTLD_GLOBAL (0)
#endif
-
+
#define DEFAULT_DLLOPEN_FLAGS (RTLD_NOW | RTLD_GLOBAL)
-#endif
-
+#endif
+
class TDynamicLibrary {
public:
TDynamicLibrary() noexcept;
diff --git a/util/system/sanitizers.h b/util/system/sanitizers.h
index c1896ca6378..965e5c751e4 100644
--- a/util/system/sanitizers.h
+++ b/util/system/sanitizers.h
@@ -60,9 +60,9 @@ namespace NSan {
return true;
#else
return false;
-#endif
+#endif
}
-
+
// Determines if tsan present
inline constexpr static bool TSanIsOn() noexcept {
#if defined(_tsan_enabled_)
diff --git a/util/system/thread.cpp b/util/system/thread.cpp
index 0cd8c078ea2..6236746c2d9 100644
--- a/util/system/thread.cpp
+++ b/util/system/thread.cpp
@@ -358,7 +358,7 @@ TThread::TId TThread::ImpossibleThreadId() noexcept {
namespace {
template <class T>
- static void* ThreadProcWrapper(void* param) {
+ static void* ThreadProcWrapper(void* param) {
return reinterpret_cast<T*>(param)->ThreadProc();
}
}
diff --git a/util/system/thread.h b/util/system/thread.h
index 612c25f86e3..a6e8abdb5be 100644
--- a/util/system/thread.h
+++ b/util/system/thread.h
@@ -163,7 +163,7 @@ public:
virtual ~ISimpleThread() = default;
- virtual void* ThreadProc() = 0;
+ virtual void* ThreadProc() = 0;
};
struct TCurrentThreadLimits {
diff --git a/util/thread/lfstack.h b/util/thread/lfstack.h
index 7a046e00b27..ca3d95f3c39 100644
--- a/util/thread/lfstack.h
+++ b/util/thread/lfstack.h
@@ -98,7 +98,7 @@ public:
bool Dequeue(T* res) {
AtomicAdd(DequeueCount, 1);
for (TNode* current = AtomicGet(Head); current; current = AtomicGet(Head)) {
- if (AtomicCas(&Head, AtomicGet(current->Next), current)) {
+ if (AtomicCas(&Head, AtomicGet(current->Next), current)) {
*res = std::move(current->Value);
// delete current; // ABA problem
// even more complex node deletion
@@ -109,7 +109,7 @@ public:
} else {
// Dequeue()s in progress, put node to free list
for (;;) {
- AtomicSet(current->Next, AtomicGet(FreePtr));
+ AtomicSet(current->Next, AtomicGet(FreePtr));
if (AtomicCas(&FreePtr, current, current->Next))
break;
}
@@ -145,7 +145,7 @@ public:
currentLast = currentLast->Next;
}
for (;;) {
- AtomicSet(currentLast->Next, AtomicGet(FreePtr));
+ AtomicSet(currentLast->Next, AtomicGet(FreePtr));
if (AtomicCas(&FreePtr, current, currentLast->Next))
break;
}
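
In the lfstack.h hunk above, a node that loses its place in the stack is not deleted (see the "ABA problem" comment) because concurrent Dequeue() calls may still hold a pointer to it; instead it is linked onto the FreePtr free list with a compare-and-swap retry loop and reclaimed later, once no dequeues are in flight. The same retry loop expressed with std::atomic, as a sketch of just that step:

#include <atomic>

struct TNode {
    int Value = 0;
    TNode* Next = nullptr;
};

std::atomic<TNode*> FreePtr{nullptr};

// Link a retired node in front of the free list; retry if another thread moved the head.
void PushToFreeList(TNode* node) {
    TNode* head = FreePtr.load();
    do {
        node->Next = head;
    } while (!FreePtr.compare_exchange_weak(head, node));
}

int main() {
    TNode n;
    PushToFreeList(&n);
}
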
diff --git a/util/thread/lfstack_ut.cpp b/util/thread/lfstack_ut.cpp
index 38371bfc716..e20a838f95b 100644
--- a/util/thread/lfstack_ut.cpp
+++ b/util/thread/lfstack_ut.cpp
@@ -1,7 +1,7 @@
#include <util/system/atomic.h>
#include <util/system/event.h>
-#include <util/generic/deque.h>
+#include <util/generic/deque.h>
#include <library/cpp/threading/future/legacy_future.h>
#include <library/cpp/testing/unittest/registar.h>
@@ -189,7 +189,7 @@ Y_UNIT_TEST_SUITE(TLockFreeStackTests) {
UNIT_ASSERT_VALUES_EQUAL(1, p.RefCount());
}
-
+
Y_UNIT_TEST(NoCopyTest) {
static unsigned copied = 0;
struct TCopyCount {
@@ -232,53 +232,53 @@ Y_UNIT_TEST_SUITE(TLockFreeStackTests) {
struct TMultiThreadTester {
using ThisType = TMultiThreadTester<TTest>;
- size_t Threads;
- size_t OperationsPerThread;
-
- TCountDownLatch StartLatch;
+ size_t Threads;
+ size_t OperationsPerThread;
+
+ TCountDownLatch StartLatch;
TLockFreeStack<typename TTest::ValueType> Stack;
-
+
TMultiThreadTester()
- : Threads(10)
- , OperationsPerThread(100000)
- , StartLatch(Threads)
- {
- }
-
- void Worker() {
- StartLatch.CountDown();
- StartLatch.Await();
-
+ : Threads(10)
+ , OperationsPerThread(100000)
+ , StartLatch(Threads)
+ {
+ }
+
+ void Worker() {
+ StartLatch.CountDown();
+ StartLatch.Await();
+
TVector<typename TTest::ValueType> unused;
- for (size_t i = 0; i < OperationsPerThread; ++i) {
- switch (GetCycleCount() % 4) {
- case 0: {
+ for (size_t i = 0; i < OperationsPerThread; ++i) {
+ switch (GetCycleCount() % 4) {
+ case 0: {
TTest::Enqueue(Stack, i);
- break;
- }
- case 1: {
+ break;
+ }
+ case 1: {
TTest::Dequeue(Stack);
- break;
- }
- case 2: {
+ break;
+ }
+ case 2: {
TTest::EnqueueAll(Stack);
- break;
- }
- case 3: {
+ break;
+ }
+ case 3: {
TTest::DequeueAll(Stack);
- break;
- }
- }
- }
- }
-
- void Run() {
- TDeque<NThreading::TLegacyFuture<>> futures;
-
- for (size_t i = 0; i < Threads; ++i) {
+ break;
+ }
+ }
+ }
+ }
+
+ void Run() {
+ TDeque<NThreading::TLegacyFuture<>> futures;
+
+ for (size_t i = 0; i < Threads; ++i) {
futures.emplace_back(std::bind(&ThisType::Worker, this));
- }
- futures.clear();
+ }
+ futures.clear();
TTest::DequeueAll(Stack);
}
};
@@ -288,7 +288,7 @@ Y_UNIT_TEST_SUITE(TLockFreeStackTests) {
static void Enqueue(TLockFreeStack<int>& stack, size_t i) {
stack.Enqueue(static_cast<int>(i));
- }
+ }
static void Dequeue(TLockFreeStack<int>& stack) {
int value;
@@ -304,12 +304,12 @@ Y_UNIT_TEST_SUITE(TLockFreeStackTests) {
TVector<int> value;
stack.DequeueAll(&value);
}
- };
-
- // Test for catching thread sanitizer problems
+ };
+
+ // Test for catching thread sanitizer problems
Y_UNIT_TEST(TestFreeList) {
TMultiThreadTester<TFreeListTest>().Run();
- }
+ }
struct TMoveTest {
using ValueType = THolder<int>;
diff --git a/ydb/core/actorlib_impl/async_destroyer.h b/ydb/core/actorlib_impl/async_destroyer.h
index 98153a58d06..9d0d6f4c4ca 100644
--- a/ydb/core/actorlib_impl/async_destroyer.h
+++ b/ydb/core/actorlib_impl/async_destroyer.h
@@ -1,35 +1,35 @@
-#pragma once
+#pragma once
#include <ydb/core/protos/services.pb.h>
#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <util/generic/ptr.h>
-
-namespace NKikimr {
-
-// Helps to destroy heavy objects
-template <class TVictim>
-class TAsyncDestroyer : public NActors::TActorBootstrapped<TAsyncDestroyer<TVictim>> {
-public:
+#include <util/generic/ptr.h>
+
+namespace NKikimr {
+
+// Helps to destroy heavy objects
+template <class TVictim>
+class TAsyncDestroyer : public NActors::TActorBootstrapped<TAsyncDestroyer<TVictim>> {
+public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::ASYNC_DESTROYER;
- }
-
- explicit TAsyncDestroyer(TAutoPtr<TVictim> victim)
- : Victim(victim)
- {}
-
- void Bootstrap(const NActors::TActorContext &ctx) {
- Victim.Destroy();
- this->Die(ctx);
- }
-
-private:
- TAutoPtr<TVictim> Victim;
-};
-
-template <class TVictim>
-void AsyncDestroy(TAutoPtr<TVictim> victim, const NActors::TActorContext &ctx, ui32 poolId = Max<ui32>()) {
- if (victim)
+ }
+
+ explicit TAsyncDestroyer(TAutoPtr<TVictim> victim)
+ : Victim(victim)
+ {}
+
+ void Bootstrap(const NActors::TActorContext &ctx) {
+ Victim.Destroy();
+ this->Die(ctx);
+ }
+
+private:
+ TAutoPtr<TVictim> Victim;
+};
+
+template <class TVictim>
+void AsyncDestroy(TAutoPtr<TVictim> victim, const NActors::TActorContext &ctx, ui32 poolId = Max<ui32>()) {
+ if (victim)
ctx.Register(new TAsyncDestroyer<TVictim>(victim), NActors::TMailboxType::HTSwap, poolId);
-}
-
-}
+}
+
+}
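
async_destroyer.h above is restored verbatim: AsyncDestroy() hands a heavy object to a one-shot TAsyncDestroyer actor whose Bootstrap() merely destroys the victim and then dies, so the expensive destructor never runs on the caller's actor. A framework-free analogue of the idea using a detached thread (illustrative only; the real helper registers an actor through ctx.Register):

#include <memory>
#include <thread>
#include <vector>

// Hand ownership of a heavy object to a short-lived background task whose only job is to
// run the destructor, keeping the calling thread responsive.
template <class T>
void AsyncDestroySketch(std::unique_ptr<T> victim) {
    if (!victim)
        return;
    std::thread([v = std::move(victim)]() mutable { v.reset(); }).detach();
}

int main() {
    auto heavy = std::make_unique<std::vector<int>>(1000000);
    AsyncDestroySketch(std::move(heavy)); // destruction happens off the calling thread
    // A real program would track or pool such background tasks instead of detaching them.
}
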
diff --git a/ydb/core/actorlib_impl/load_network.cpp b/ydb/core/actorlib_impl/load_network.cpp
index 6a170d7ce20..ae6f1b248a3 100644
--- a/ydb/core/actorlib_impl/load_network.cpp
+++ b/ydb/core/actorlib_impl/load_network.cpp
@@ -18,8 +18,8 @@ class TLoadNetwork: public NActors::TActorBootstrapped<TLoadNetwork> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TEST_ACTOR_RUNTIME;
- }
-
+ }
+
TLoadNetwork(ui32 selfNodeId, ui32 totalNodesCount)
: SelfNodeId(selfNodeId)
, TotalNodesCount(totalNodesCount)
diff --git a/ydb/core/actorlib_impl/ya.make b/ydb/core/actorlib_impl/ya.make
index 2a0dcd0befd..ba63f39617f 100644
--- a/ydb/core/actorlib_impl/ya.make
+++ b/ydb/core/actorlib_impl/ya.make
@@ -9,7 +9,7 @@ OWNER(
SRCS(
actor_tracker.cpp
actor_tracker.h
- async_destroyer.h
+ async_destroyer.h
connect_socket_protocol.cpp
connect_socket_protocol.h
defs.h
diff --git a/ydb/core/base/appdata.cpp b/ydb/core/base/appdata.cpp
index c2884f777d8..f9e517fc424 100644
--- a/ydb/core/base/appdata.cpp
+++ b/ydb/core/base/appdata.cpp
@@ -8,7 +8,7 @@ TAppData::TAppData(
TMap<TString, ui32> servicePools,
const NScheme::TTypeRegistry* typeRegistry,
const NMiniKQL::IFunctionRegistry* functionRegistry,
- const TFormatFactory* formatFactory,
+ const TFormatFactory* formatFactory,
TProgramShouldContinue *kikimrShouldContinue)
: Magic(MagicTag)
, SystemPoolId(sysPoolId)
@@ -18,7 +18,7 @@ TAppData::TAppData(
, ServicePools(servicePools)
, TypeRegistry(typeRegistry)
, FunctionRegistry(functionRegistry)
- , FormatFactory(formatFactory)
+ , FormatFactory(formatFactory)
, ProxySchemeCacheNodes(Max<ui64>() / 4)
, ProxySchemeCacheDistrNodes(Max<ui64>() / 4)
, CompilerSchemeCachePaths(Max<ui64>() / 4)
@@ -26,7 +26,7 @@ TAppData::TAppData(
, Mon(nullptr)
, BusMonPage(nullptr)
, Icb(new TControlBoard())
- , InFlightLimiterRegistry(new NGRpcService::TInFlightLimiterRegistry(Icb))
+ , InFlightLimiterRegistry(new NGRpcService::TInFlightLimiterRegistry(Icb))
, StaticBlobStorageConfig(new NKikimrBlobStorage::TNodeWardenServiceSet)
, KikimrShouldContinue(kikimrShouldContinue)
{}
diff --git a/ydb/core/base/appdata.h b/ydb/core/base/appdata.h
index bfc48fe2e15..c666f7468c0 100644
--- a/ydb/core/base/appdata.h
+++ b/ydb/core/base/appdata.h
@@ -61,8 +61,8 @@ namespace NPQ {
class IPersQueueMirrorReaderFactory;
}
-class TFormatFactory;
-
+class TFormatFactory;
+
struct TAppData {
static const ui32 MagicTag = 0x2991AAF8;
const ui32 Magic;
@@ -125,7 +125,7 @@ struct TAppData {
NMonitoring::TDynamicCounterPtr Counters;
NMonitoring::TBusNgMonPage* BusMonPage;
TIntrusivePtr<NKikimr::TControlBoard> Icb;
- TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> InFlightLimiterRegistry;
+ TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> InFlightLimiterRegistry;
TIntrusivePtr<NInterconnect::TPollerThreads> PollerThreads;
@@ -176,7 +176,7 @@ struct TAppData {
TMap<TString, ui32> servicePools,
const NScheme::TTypeRegistry* typeRegistry,
const NMiniKQL::IFunctionRegistry* functionRegistry,
- const TFormatFactory* formatFactory,
+ const TFormatFactory* formatFactory,
TProgramShouldContinue *kikimrShouldContinue);
};
diff --git a/ydb/core/base/blobstorage.h b/ydb/core/base/blobstorage.h
index d8d49885e63..a2faee326e5 100644
--- a/ydb/core/base/blobstorage.h
+++ b/ydb/core/base/blobstorage.h
@@ -1041,7 +1041,7 @@ struct TEvBlobStorage {
, ReportDetailedPartMap(reportDetailedPartMap)
{
Y_VERIFY(QuerySize > 0, "can't execute empty get queries");
- VerifySameTabletId();
+ VerifySameTabletId();
}
TEvGet(const TLogoBlobID &id, ui32 shift, ui32 size, TInstant deadline,
@@ -1094,7 +1094,7 @@ struct TEvBlobStorage {
TString ToString() const {
return Print(false);
}
-
+
ui32 CalculateSize() const {
return sizeof(*this) + QuerySize * sizeof(TQuery);
}
@@ -1102,14 +1102,14 @@ struct TEvBlobStorage {
std::unique_ptr<TEvGetResult> MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason,
ui32 groupId);
- private:
- void VerifySameTabletId() const {
- for (ui32 i = 1; i < QuerySize; ++i) {
- Y_VERIFY(Queries[i].Id.TabletID() == Queries[0].Id.TabletID(),
- "Trying to request blobs for different tablets in one request: %" PRIu64 ", %" PRIu64,
- Queries[0].Id.TabletID(), Queries[i].Id.TabletID());
- }
- }
+ private:
+ void VerifySameTabletId() const {
+ for (ui32 i = 1; i < QuerySize; ++i) {
+ Y_VERIFY(Queries[i].Id.TabletID() == Queries[0].Id.TabletID(),
+ "Trying to request blobs for different tablets in one request: %" PRIu64 ", %" PRIu64,
+ Queries[0].Id.TabletID(), Queries[i].Id.TabletID());
+ }
+ }
};
struct TEvGetResult : public TEventLocal<TEvGetResult, EvGetResult> {
diff --git a/ydb/core/base/board_lookup.cpp b/ydb/core/base/board_lookup.cpp
index 05e1f0e416a..890b88f6b9e 100644
--- a/ydb/core/base/board_lookup.cpp
+++ b/ydb/core/base/board_lookup.cpp
@@ -168,8 +168,8 @@ class TBoardLookupActor : public TActorBootstrapped<TBoardLookupActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::BOARD_LOOKUP_ACTOR;
- }
-
+ }
+
TBoardLookupActor(const TString &path, TActorId owner, EBoardLookupMode mode, ui32 groupId)
: Path(path)
, Owner(owner)
diff --git a/ydb/core/base/board_publish.cpp b/ydb/core/base/board_publish.cpp
index 76d0aeb27c6..cdd4bf6e489 100644
--- a/ydb/core/base/board_publish.cpp
+++ b/ydb/core/base/board_publish.cpp
@@ -49,8 +49,8 @@ class TBoardReplicaPublishActor : public TActorBootstrapped<TBoardReplicaPublish
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::BOARD_REPLICA_PUBLISH_ACTOR;
- }
-
+ }
+
TBoardReplicaPublishActor(const TString &path, const TString &payload, TActorId replica, TActorId publishActor)
: Path(path)
, Payload(payload)
@@ -148,8 +148,8 @@ class TBoardPublishActor : public TActorBootstrapped<TBoardPublishActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::BOARD_PUBLISH_ACTOR;
- }
-
+ }
+
TBoardPublishActor(const TString &path, const TString &payload, const TActorId &owner, ui32 groupId, ui32 ttlMs, bool reg)
: Path(path)
, Payload(payload)
diff --git a/ydb/core/base/counters.cpp b/ydb/core/base/counters.cpp
index 45dd0094578..b192fa36a1d 100644
--- a/ydb/core/base/counters.cpp
+++ b/ydb/core/base/counters.cpp
@@ -141,10 +141,10 @@ TIntrusivePtr<TDynamicCounters> GetServiceCounters(TIntrusivePtr<TDynamicCounter
res = SkipLabels(res, SERVICE_COUNTERS_EXTRA_LABELS);
- auto utils = root->GetSubgroup("counters", "utils");
+ auto utils = root->GetSubgroup("counters", "utils");
utils = SkipLabels(utils, SERVICE_COUNTERS_EXTRA_LABELS);
- auto lookupCounter = utils->GetSubgroup("component", service)->GetCounter("CounterLookups", true);
- res->SetLookupCounter(lookupCounter);
+ auto lookupCounter = utils->GetSubgroup("component", service)->GetCounter("CounterLookups", true);
+ res->SetLookupCounter(lookupCounter);
res->SetOnLookup(OnCounterLookup);
return res;
diff --git a/ydb/core/base/domain.h b/ydb/core/base/domain.h
index 34a6fa4d25e..05346e474aa 100644
--- a/ydb/core/base/domain.h
+++ b/ydb/core/base/domain.h
@@ -1,13 +1,13 @@
#pragma once
#include "defs.h"
#include "tabletid.h"
-#include "localdb.h"
+#include "localdb.h"
#include <ydb/core/protos/blobstorage_config.pb.h>
#include <util/generic/map.h>
-#include <util/generic/hash.h>
-#include <util/generic/ptr.h>
+#include <util/generic/hash.h>
+#include <util/generic/ptr.h>
namespace NKikimr {
@@ -60,16 +60,16 @@ struct TDomainsInfo : public TThrRefBase {
return MakeTabletID(domain, 0, uniqPart);
}
- static constexpr const char* SystemTableDefaultPoicyName() {
- return "SystemTableDefault";
- }
-
- static constexpr const char* UserTableDefaultPoicyName() {
- return "UserTableDefault";
- }
-
+ static constexpr const char* SystemTableDefaultPoicyName() {
+ return "SystemTableDefault";
+ }
+
+ static constexpr const char* UserTableDefaultPoicyName() {
+ return "UserTableDefault";
+ }
+
typedef THashMap<TString, TIntrusiveConstPtr<NLocalDb::TCompactionPolicy>> TNamedCompactionPolicies;
-
+
struct TDomain : public TThrRefBase {
using TPtr = TIntrusivePtr<TDomain>;
@@ -236,30 +236,30 @@ struct TDomainsInfo : public TThrRefBase {
TMap<ui32, TIntrusivePtr<TDomain>> DomainByStateStorageGroup;
TMap<ui32, TIntrusivePtr<TDomain>> DomainByHiveUid;
TMap<ui32, ui64> HivesByHiveUid;
- TNamedCompactionPolicies NamedCompactionPolicies;
+ TNamedCompactionPolicies NamedCompactionPolicies;
- TDomainsInfo() {
- // Add default configs. They can be overriden by user
+ TDomainsInfo() {
+ // Add default configs. They can be overridden by the user
NamedCompactionPolicies[SystemTableDefaultPoicyName()] = NLocalDb::CreateDefaultTablePolicy();
- NamedCompactionPolicies[UserTableDefaultPoicyName()] = NLocalDb::CreateDefaultUserTablePolicy();
- }
-
- TIntrusiveConstPtr<NLocalDb::TCompactionPolicy> GetDefaultSystemTablePolicy() const {
- return *NamedCompactionPolicies.FindPtr(SystemTableDefaultPoicyName());
- }
-
- TIntrusiveConstPtr<NLocalDb::TCompactionPolicy> GetDefaultUserTablePolicy() const {
- return *NamedCompactionPolicies.FindPtr(UserTableDefaultPoicyName());
- }
-
+ NamedCompactionPolicies[UserTableDefaultPoicyName()] = NLocalDb::CreateDefaultUserTablePolicy();
+ }
+
+ TIntrusiveConstPtr<NLocalDb::TCompactionPolicy> GetDefaultSystemTablePolicy() const {
+ return *NamedCompactionPolicies.FindPtr(SystemTableDefaultPoicyName());
+ }
+
+ TIntrusiveConstPtr<NLocalDb::TCompactionPolicy> GetDefaultUserTablePolicy() const {
+ return *NamedCompactionPolicies.FindPtr(UserTableDefaultPoicyName());
+ }
+
void AddCompactionPolicy(TString name, TIntrusiveConstPtr<NLocalDb::TCompactionPolicy> policy) {
- NamedCompactionPolicies[name] = policy;
- }
-
+ NamedCompactionPolicies[name] = policy;
+ }
+
void AddDomain(TDomain *domain) {
Y_VERIFY(domain->DomainUid <= MaxDomainId);
Domains[domain->DomainUid] = domain;
- DomainByName[domain->Name] = domain;
+ DomainByName[domain->Name] = domain;
Y_VERIFY(Domains.size() == DomainByName.size());
for (auto group: domain->StateStorageGroups) {
DomainByStateStorageGroup[group] = domain;
diff --git a/ydb/core/base/events.h b/ydb/core/base/events.h
index 789088ba556..f5fedfe19b2 100644
--- a/ydb/core/base/events.h
+++ b/ydb/core/base/events.h
@@ -59,7 +59,7 @@ struct TKikimrEvents : TEvents {
ES_NODE_WHITEBOARD,
ES_FLAT_TX_SCHEMESHARD, // 4137
ES_PQ,
- ES_YQL_KIKIMR_PROXY,
+ ES_YQL_KIKIMR_PROXY,
ES_PQ_META_CACHE,
ES_DEPRECATED_4141,
ES_PQ_L2_CACHE, //4142
@@ -97,7 +97,7 @@ struct TKikimrEvents : TEvents {
ES_CONFIGS_DISPATCHER,
ES_IAM_SERVICE,
ES_FOLDER_SERVICE,
- ES_GRPC_MON,
+ ES_GRPC_MON,
ES_QUOTA,
ES_COORDINATED_QUOTA,
ES_ACCESS_SERVICE,
@@ -127,7 +127,7 @@ struct TKikimrEvents : TEvents {
ES_CROSSREF,
ES_SCHEME_BOARD_MON,
ES_YQL_ANALYTICS_PROXY = NYq::TEventIds::ES_YQL_ANALYTICS_PROXY,
- ES_BLOB_CACHE,
+ ES_BLOB_CACHE,
ES_LONG_TX_SERVICE,
ES_TEST_SHARD,
ES_DATASTREAMS_PROXY,
diff --git a/ydb/core/base/localdb.cpp b/ydb/core/base/localdb.cpp
index 9f67978f83d..d07ee26fc1f 100644
--- a/ydb/core/base/localdb.cpp
+++ b/ydb/core/base/localdb.cpp
@@ -1,10 +1,10 @@
-#include "localdb.h"
+#include "localdb.h"
#include "compile_time_flags.h"
#include <ydb/core/protos/resource_broker.pb.h>
namespace NKikimr {
-namespace NLocalDb {
+namespace NLocalDb {
TCompactionPolicy::TBackgroundPolicy::TBackgroundPolicy()
: Threshold(101)
@@ -100,12 +100,12 @@ TCompactionPolicy::TCompactionPolicy()
: InMemSizeToSnapshot(4 * 1024 * 1024)
, InMemStepsToSnapshot(300)
, InMemForceStepsToSnapshot(500)
- , InMemForceSizeToSnapshot(16 * 1024 * 1024)
+ , InMemForceSizeToSnapshot(16 * 1024 * 1024)
, InMemCompactionBrokerQueue(0)
, InMemResourceBrokerTask(LegacyQueueIdToTaskName(0))
- , ReadAheadHiThreshold(64 * 1024 * 1024)
- , ReadAheadLoThreshold(16 * 1024 * 1024)
- , MinDataPageSize(7*1024)
+ , ReadAheadHiThreshold(64 * 1024 * 1024)
+ , ReadAheadLoThreshold(16 * 1024 * 1024)
+ , MinDataPageSize(7*1024)
, SnapshotCompactionBrokerQueue(0)
, SnapshotResourceBrokerTask(LegacyQueueIdToTaskName(0))
, BackupCompactionBrokerQueue(1)
@@ -117,8 +117,8 @@ TCompactionPolicy::TCompactionPolicy()
, DroppedRowsPercentToCompact(50)
, CompactionStrategy(NKikimrSchemeOp::CompactionStrategyUnset)
, KeepEraseMarkers(false)
-{}
-
+{}
+
TCompactionPolicy::TCompactionPolicy(const NKikimrSchemeOp::TCompactionPolicy& policyPb)
: InMemSizeToSnapshot(policyPb.HasInMemSizeToSnapshot() ? policyPb.GetInMemSizeToSnapshot() : 4 * 1024 * 1024)
, InMemStepsToSnapshot(policyPb.HasInMemStepsToSnapshot() ? policyPb.GetInMemStepsToSnapshot() : 300)
@@ -147,27 +147,27 @@ TCompactionPolicy::TCompactionPolicy(const NKikimrSchemeOp::TCompactionPolicy& p
SnapshotResourceBrokerTask = LegacyQueueIdToTaskName(SnapshotCompactionBrokerQueue);
if (!BackupResourceBrokerTask || IsLegacyQueueIdTaskName(BackupResourceBrokerTask))
BackupResourceBrokerTask = ScanTaskName;
- Generations.reserve(policyPb.GenerationSize());
- for (ui32 i = 0; i < policyPb.GenerationSize(); ++i) {
- const auto& g = policyPb.GetGeneration(i);
+ Generations.reserve(policyPb.GenerationSize());
+ for (ui32 i = 0; i < policyPb.GenerationSize(); ++i) {
+ const auto& g = policyPb.GetGeneration(i);
Y_VERIFY_DEBUG(g.GetGenerationId() == i);
Generations.emplace_back(g);
- }
+ }
if (policyPb.HasShardPolicy()) {
ShardPolicy.CopyFrom(policyPb.GetShardPolicy());
}
}
void TCompactionPolicy::Serialize(NKikimrSchemeOp::TCompactionPolicy& policyPb) const {
- policyPb.SetInMemSizeToSnapshot(InMemSizeToSnapshot);
- policyPb.SetInMemStepsToSnapshot(InMemStepsToSnapshot);
- policyPb.SetInMemForceStepsToSnapshot(InMemForceStepsToSnapshot);
- policyPb.SetInMemForceSizeToSnapshot(InMemForceSizeToSnapshot);
+ policyPb.SetInMemSizeToSnapshot(InMemSizeToSnapshot);
+ policyPb.SetInMemStepsToSnapshot(InMemStepsToSnapshot);
+ policyPb.SetInMemForceStepsToSnapshot(InMemForceStepsToSnapshot);
+ policyPb.SetInMemForceSizeToSnapshot(InMemForceSizeToSnapshot);
policyPb.SetInMemCompactionBrokerQueue(InMemCompactionBrokerQueue);
policyPb.SetInMemResourceBrokerTask(InMemResourceBrokerTask);
- policyPb.SetReadAheadHiThreshold(ReadAheadHiThreshold);
- policyPb.SetReadAheadLoThreshold(ReadAheadLoThreshold);
- policyPb.SetMinDataPageSize(MinDataPageSize);
+ policyPb.SetReadAheadHiThreshold(ReadAheadHiThreshold);
+ policyPb.SetReadAheadLoThreshold(ReadAheadLoThreshold);
+ policyPb.SetMinDataPageSize(MinDataPageSize);
policyPb.SetSnapBrokerQueue(SnapshotCompactionBrokerQueue);
policyPb.SetSnapshotResourceBrokerTask(SnapshotResourceBrokerTask);
policyPb.SetBackupBrokerQueue(BackupCompactionBrokerQueue);
@@ -187,13 +187,13 @@ void TCompactionPolicy::Serialize(NKikimrSchemeOp::TCompactionPolicy& policyPb)
policyPb.MutableShardPolicy()->CopyFrom(ShardPolicy);
}
- for (ui32 i = 0; i < Generations.size(); ++i) {
+ for (ui32 i = 0; i < Generations.size(); ++i) {
auto &g = *policyPb.AddGeneration();
- g.SetGenerationId(i);
+ g.SetGenerationId(i);
Generations[i].Serialize(g);
- }
-}
-
+ }
+}
+
TCompactionPolicyPtr CreateDefaultTablePolicy() {
TCompactionPolicyPtr policy = new TCompactionPolicy;
#if KIKIMR_DEFAULT_SHARDED_COMPACTION
@@ -203,9 +203,9 @@ TCompactionPolicyPtr CreateDefaultTablePolicy() {
return policy;
}
-TCompactionPolicyPtr CreateDefaultUserTablePolicy() {
- TCompactionPolicyPtr userPolicy = new TCompactionPolicy();
- userPolicy->Generations.reserve(3);
+TCompactionPolicyPtr CreateDefaultUserTablePolicy() {
+ TCompactionPolicyPtr userPolicy = new TCompactionPolicy();
+ userPolicy->Generations.reserve(3);
userPolicy->Generations.push_back({0, 8, 8, 128 * 1024 * 1024,
LegacyQueueIdToTaskName(1), true});
userPolicy->Generations.push_back({40 * 1024 * 1024, 5, 16, 512 * 1024 * 1024,
@@ -215,18 +215,18 @@ TCompactionPolicyPtr CreateDefaultUserTablePolicy() {
#if KIKIMR_DEFAULT_SHARDED_COMPACTION
userPolicy->CompactionStrategy = NKikimrSchemeOp::CompactionStrategySharded;
#endif
- return userPolicy;
+ return userPolicy;
}
bool ValidateCompactionPolicyChange(const TCompactionPolicy& oldPolicy, const TCompactionPolicy& newPolicy, TString& err) {
- if (newPolicy.Generations.size() < oldPolicy.Generations.size()) {
+ if (newPolicy.Generations.size() < oldPolicy.Generations.size()) {
err = Sprintf("Decreasing number of levels in compaction policy in not supported, old level count %u, new level count %u",
- (ui32)oldPolicy.Generations.size(), (ui32)newPolicy.Generations.size());
- return false;
- }
- return true;
-}
-
+ (ui32)oldPolicy.Generations.size(), (ui32)newPolicy.Generations.size());
+ return false;
+ }
+ return true;
+}
+
TString LegacyQueueIdToTaskName(ui32 id)
{
switch (id) {
@@ -264,4 +264,4 @@ const TString KqpResourceManagerTaskName = "kqp_query";
const TString KqpResourceManagerQueue = "queue_kqp_resource_manager";
const TString LegacyQueueIdTaskNamePrefix = "compaction_gen";
-}}
+}}
diff --git a/ydb/core/base/localdb.h b/ydb/core/base/localdb.h
index 9cea4d7e135..8679cb286fc 100644
--- a/ydb/core/base/localdb.h
+++ b/ydb/core/base/localdb.h
@@ -8,7 +8,7 @@
#include <google/protobuf/util/message_differencer.h>
namespace NKikimr {
-namespace NLocalDb {
+namespace NLocalDb {
struct TCompactionPolicy : public TThrRefBase {
struct TBackgroundPolicy {
@@ -77,9 +77,9 @@ struct TCompactionPolicy : public TThrRefBase {
ui64 InMemForceSizeToSnapshot;
ui32 InMemCompactionBrokerQueue; // TODO: remove deprecated field
TString InMemResourceBrokerTask;
- ui64 ReadAheadHiThreshold;
- ui64 ReadAheadLoThreshold;
- ui32 MinDataPageSize;
+ ui64 ReadAheadHiThreshold;
+ ui64 ReadAheadLoThreshold;
+ ui32 MinDataPageSize;
ui32 SnapshotCompactionBrokerQueue; // TODO: remove deprecated field
TString SnapshotResourceBrokerTask;
ui32 BackupCompactionBrokerQueue; // TODO: remove deprecated field
@@ -97,7 +97,7 @@ struct TCompactionPolicy : public TThrRefBase {
TCompactionPolicy();
explicit TCompactionPolicy(const NKikimrSchemeOp::TCompactionPolicy& policyPb);
-
+
void Serialize(NKikimrSchemeOp::TCompactionPolicy& policyPb) const;
bool operator ==(const TCompactionPolicy& p) const {
@@ -107,9 +107,9 @@ struct TCompactionPolicy : public TThrRefBase {
&& InMemForceSizeToSnapshot == p.InMemForceSizeToSnapshot
&& InMemCompactionBrokerQueue == p.InMemCompactionBrokerQueue
&& InMemResourceBrokerTask == p.InMemResourceBrokerTask
- && ReadAheadHiThreshold == p.ReadAheadHiThreshold
- && ReadAheadLoThreshold == p.ReadAheadLoThreshold
- && MinDataPageSize == p.MinDataPageSize
+ && ReadAheadHiThreshold == p.ReadAheadHiThreshold
+ && ReadAheadLoThreshold == p.ReadAheadLoThreshold
+ && MinDataPageSize == p.MinDataPageSize
&& Generations == p.Generations
&& SnapshotCompactionBrokerQueue == p.SnapshotCompactionBrokerQueue
&& SnapshotResourceBrokerTask == p.SnapshotResourceBrokerTask
@@ -126,13 +126,13 @@ struct TCompactionPolicy : public TThrRefBase {
}
};
-typedef TIntrusivePtr<TCompactionPolicy> TCompactionPolicyPtr;
+typedef TIntrusivePtr<TCompactionPolicy> TCompactionPolicyPtr;
TCompactionPolicyPtr CreateDefaultTablePolicy();
-TCompactionPolicyPtr CreateDefaultUserTablePolicy();
+TCompactionPolicyPtr CreateDefaultUserTablePolicy();
bool ValidateCompactionPolicyChange(const TCompactionPolicy& oldPolicy, const TCompactionPolicy& newPolicy, TString& err);
-
+
// Get Resource Broker task type name by Compaction Broker queue ID.
TString LegacyQueueIdToTaskName(ui32 id);
diff --git a/ydb/core/base/pool_stats_collector.cpp b/ydb/core/base/pool_stats_collector.cpp
index 71edb7cc7d2..7dcbf28954d 100644
--- a/ydb/core/base/pool_stats_collector.cpp
+++ b/ydb/core/base/pool_stats_collector.cpp
@@ -10,8 +10,8 @@
#include <library/cpp/actors/helpers/pool_stats_collector.h>
namespace NKikimr {
-
-// Periodically collects stats from executor threads and exposes them as mon counters
+
+// Periodically collects stats from executor threads and exposes them as mon counters
class TStatsCollectingActor : public NActors::TStatsCollectingActor {
public:
TStatsCollectingActor(
@@ -23,44 +23,44 @@ public:
MiniKQLPoolStats.Init(Counters.Get());
}
-private:
- class TMiniKQLPoolStats {
- public:
- void Init(NMonitoring::TDynamicCounters* group) {
- CounterGroup = group->GetSubgroup("subsystem", "mkqlalloc");
- TotalBytes = CounterGroup->GetCounter("GlobalPoolTotalBytes", false);
- }
-
- void Update() {
- *TotalBytes = TAlignedPagePool::GetGlobalPagePoolSize();
- }
-
- private:
- TIntrusivePtr<NMonitoring::TDynamicCounters> CounterGroup;
- NMonitoring::TDynamicCounters::TCounterPtr TotalBytes;
- };
-
+private:
+ class TMiniKQLPoolStats {
+ public:
+ void Init(NMonitoring::TDynamicCounters* group) {
+ CounterGroup = group->GetSubgroup("subsystem", "mkqlalloc");
+ TotalBytes = CounterGroup->GetCounter("GlobalPoolTotalBytes", false);
+ }
+
+ void Update() {
+ *TotalBytes = TAlignedPagePool::GetGlobalPagePoolSize();
+ }
+
+ private:
+ TIntrusivePtr<NMonitoring::TDynamicCounters> CounterGroup;
+ NMonitoring::TDynamicCounters::TCounterPtr TotalBytes;
+ };
+
void OnWakeup(const TActorContext &ctx) override {
- MiniKQLPoolStats.Update();
-
+ MiniKQLPoolStats.Update();
+
TVector<std::tuple<TString, double, ui32>> pools;
for (const auto& pool : PoolCounters) {
pools.emplace_back(pool.Name, pool.Usage, pool.Threads);
}
ctx.Send(NNodeWhiteboard::MakeNodeWhiteboardServiceId(ctx.SelfID.NodeId()), new NNodeWhiteboard::TEvWhiteboard::TEvSystemStateUpdate(pools));
- }
-
-private:
- TMiniKQLPoolStats MiniKQLPoolStats;
-};
-
-
-IActor *CreateStatsCollector(ui32 intervalSec,
- const TActorSystemSetup& setup,
+ }
+
+private:
+ TMiniKQLPoolStats MiniKQLPoolStats;
+};
+
+
+IActor *CreateStatsCollector(ui32 intervalSec,
+ const TActorSystemSetup& setup,
NMonitoring::TDynamicCounterPtr counters)
-{
+{
return new TStatsCollectingActor(intervalSec, setup, counters);
-}
-
+}
+
} // namespace NKikimr
diff --git a/ydb/core/base/tablet.h b/ydb/core/base/tablet.h
index f7021a1a006..602e39c6000 100644
--- a/ydb/core/base/tablet.h
+++ b/ydb/core/base/tablet.h
@@ -71,12 +71,12 @@ struct TEvTablet {
EvReadLocalBaseResult,
EvLocalMKQL,
EvLocalMKQLResponse,
- EvLocalSchemeTx,
- EvLocalSchemeTxResponse,
+ EvLocalSchemeTx,
+ EvLocalSchemeTxResponse,
EvGetCounters,
EvGetCountersResponse,
- EvLocalReadColumns,
- EvLocalReadColumnsResponse,
+ EvLocalReadColumns,
+ EvLocalReadColumnsResponse,
// from outside to leader
EvFollowerAttach = EvBoot + 2048,
@@ -474,25 +474,25 @@ struct TEvTablet {
};
struct TEvLocalSchemeTx : public TEventPB<TEvLocalSchemeTx, NKikimrTabletTxBase::TEvLocalSchemeTx, TEvTablet::EvLocalSchemeTx> {
- TEvLocalSchemeTx()
- {}
- };
-
+ TEvLocalSchemeTx()
+ {}
+ };
+
struct TEvLocalSchemeTxResponse : public TEventPB<TEvLocalSchemeTxResponse, NKikimrTabletTxBase::TEvLocalSchemeTxResponse, TEvTablet::EvLocalSchemeTxResponse> {
- TEvLocalSchemeTxResponse()
- {}
- };
-
- struct TEvLocalReadColumns : public TEventPB<TEvLocalReadColumns, NKikimrTabletTxBase::TEvLocalReadColumns, TEvTablet::EvLocalReadColumns> {
- TEvLocalReadColumns()
- {}
- };
-
- struct TEvLocalReadColumnsResponse : public TEventPB<TEvLocalReadColumnsResponse, NKikimrTabletTxBase::TEvLocalReadColumnsResponse, TEvTablet::EvLocalReadColumnsResponse> {
- TEvLocalReadColumnsResponse()
- {}
- };
-
+ TEvLocalSchemeTxResponse()
+ {}
+ };
+
+ struct TEvLocalReadColumns : public TEventPB<TEvLocalReadColumns, NKikimrTabletTxBase::TEvLocalReadColumns, TEvTablet::EvLocalReadColumns> {
+ TEvLocalReadColumns()
+ {}
+ };
+
+ struct TEvLocalReadColumnsResponse : public TEventPB<TEvLocalReadColumnsResponse, NKikimrTabletTxBase::TEvLocalReadColumnsResponse, TEvTablet::EvLocalReadColumnsResponse> {
+ TEvLocalReadColumnsResponse()
+ {}
+ };
+
struct TEvFollowerAttach : public TEventPB<TEvFollowerAttach, NKikimrTabletBase::TEvFollowerAttach, EvFollowerAttach> {
TEvFollowerAttach()
{}
diff --git a/ydb/core/base/tablet_pipe.h b/ydb/core/base/tablet_pipe.h
index f7ae49fb47f..a0420b86724 100644
--- a/ydb/core/base/tablet_pipe.h
+++ b/ydb/core/base/tablet_pipe.h
@@ -28,7 +28,7 @@ namespace NKikimr {
EvServerDestroyed,
EvActivate,
EvShutdown,
- EvClientRetry,
+ EvClientRetry,
EvClientCheckDelay,
EvClientShuttingDown,
EvMessage, // replacement for EvSend
@@ -210,10 +210,10 @@ namespace NKikimr {
struct TEvShutdown : public TEventLocal<TEvShutdown, EvShutdown> {
TEvShutdown() {}
};
-
- struct TEvClientRetry : public TEventLocal<TEvClientRetry, EvClientRetry> {
- TEvClientRetry() {}
- };
+
+ struct TEvClientRetry : public TEventLocal<TEvClientRetry, EvClientRetry> {
+ TEvClientRetry() {}
+ };
struct TEvClientCheckDelay : public TEventLocal<TEvClientCheckDelay, EvClientCheckDelay> {
TEvClientCheckDelay() {}
@@ -303,8 +303,8 @@ namespace NKikimr {
// Cleanup resources after reset
virtual void Erase(TEvTabletPipe::TEvServerDestroyed::TPtr &ev) = 0;
-
- virtual bool IsActive() const = 0;
+
+ virtual bool IsActive() const = 0;
virtual bool IsStopped() const = 0;
};
diff --git a/ydb/core/base/ticket_parser.h b/ydb/core/base/ticket_parser.h
index 4dbc54a38b1..62004b3a89d 100644
--- a/ydb/core/base/ticket_parser.h
+++ b/ydb/core/base/ticket_parser.h
@@ -36,13 +36,13 @@ namespace NKikimr {
        // if two identical permissions with different attributes are specified,
// only one of them will be processed. Which one is not guaranteed
- const std::vector<TEntry> Entries;
+ const std::vector<TEntry> Entries;
struct TInitializationFields {
TString Database;
TString Ticket;
TString PeerName;
- std::vector<TEntry> Entries;
+ std::vector<TEntry> Entries;
};
TEvAuthorizeTicket(TInitializationFields&& init)
diff --git a/ydb/core/base/ya.make b/ydb/core/base/ya.make
index e21dfaf7678..83db5825c3a 100644
--- a/ydb/core/base/ya.make
+++ b/ydb/core/base/ya.make
@@ -36,8 +36,8 @@ SRCS(
interconnect_channels.h
kikimr_issue.cpp
kikimr_issue.h
- localdb.cpp
- localdb.h
+ localdb.cpp
+ localdb.h
location.h
logoblob.cpp
logoblob.h
diff --git a/ydb/core/blobstorage/base/transparent.h b/ydb/core/blobstorage/base/transparent.h
index af37b28cf5d..2590ec7d9cf 100644
--- a/ydb/core/blobstorage/base/transparent.h
+++ b/ydb/core/blobstorage/base/transparent.h
@@ -17,9 +17,9 @@ namespace NKikimr {
/////////////////////////////////////////////////////////////////////////////////////////
class TTransparentMemoryPool {
public:
- TTransparentMemoryPool(TMemoryConsumer&& consumer, size_t initial, TMemoryPool::IGrowPolicy* growPolicy)
+ TTransparentMemoryPool(TMemoryConsumer&& consumer, size_t initial, TMemoryPool::IGrowPolicy* growPolicy)
: Consumer(std::move(consumer))
- , MemoryPool(initial, growPolicy)
+ , MemoryPool(initial, growPolicy)
, TotalAllocated(0)
{}
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
index 93c8359be19..2605a19a2e9 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
@@ -414,9 +414,9 @@ class TBlobStorageGroupDiscoverRequest : public TBlobStorageGroupRequestActor<TB
if (status == NKikimrProto::OK) {
if (record.GetIsRangeOverflow() && !record.ResultSize()) {
A_LOG_CRIT_S("BSD40", "Handle TEvVGetResult inconsistent IsRangeOverflow set with ResultSize# 0");
- replyStatus = NKikimrProto::ERROR;
- vDiskData.IsError = true;
- }
+ replyStatus = NKikimrProto::ERROR;
+ vDiskData.IsError = true;
+ }
} else {
replyStatus = NKikimrProto::ERROR;
vDiskData.IsError = true;
diff --git a/ydb/core/blobstorage/incrhuge/incrhuge_keeper.h b/ydb/core/blobstorage/incrhuge/incrhuge_keeper.h
index 7c0af1a1c59..3f45a0c0300 100644
--- a/ydb/core/blobstorage/incrhuge/incrhuge_keeper.h
+++ b/ydb/core/blobstorage/incrhuge/incrhuge_keeper.h
@@ -44,8 +44,8 @@ namespace NKikimr {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::BS_INCR_HUGE_KEEPER;
- }
-
+ }
+
TKeeper(const TKeeperSettings& settings);
void Bootstrap(const TActorContext& ctx);
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
index cd88aef6438..5935d13229e 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
@@ -19,7 +19,7 @@ void Run(TVector<IActor*> tests, TTestRunConfig runCfg) {
TTempDir tempDir;
TVector<TActorId> testIds;
TActorId pDiskId;
- TAppData appData(0, 0, 0, 0, TMap<TString, ui32>(), nullptr, nullptr, nullptr, nullptr);
+ TAppData appData(0, 0, 0, 0, TMap<TString, ui32>(), nullptr, nullptr, nullptr, nullptr);
auto ioContext = std::make_shared<NPDisk::TIoContextFactoryOSS>();
appData.IoContextFactory = ioContext.get();
diff --git a/ydb/core/blobstorage/testload/test_load_vdisk_write.cpp b/ydb/core/blobstorage/testload/test_load_vdisk_write.cpp
index b3ae7a00562..39ecca9dbda 100644
--- a/ydb/core/blobstorage/testload/test_load_vdisk_write.cpp
+++ b/ydb/core/blobstorage/testload/test_load_vdisk_write.cpp
@@ -73,8 +73,8 @@ namespace NKikimr {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::BS_LOAD_PDISK_WRITE;
- }
-
+ }
+
TVDiskLoadActor(const NKikimrBlobStorage::TEvTestLoadRequest::TVDiskLoadStart& cmd,
const NActors::TActorId& parent, ui64 tag)
: ParentActorId(parent)
diff --git a/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_impl.h b/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_impl.h
index 69aa7431898..3ebf8e8bea3 100644
--- a/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_impl.h
+++ b/ydb/core/blobstorage/vdisk/hulldb/fresh/fresh_segment_impl.h
@@ -93,7 +93,7 @@ namespace NKikimr {
class TIterator;
TFreshIndex(TVDiskContextPtr vctx)
- : LocalArena(TMemoryConsumer(vctx->FreshIndex), (8 << 20), TMemoryPool::TLinearGrow::Instance())
+ : LocalArena(TMemoryConsumer(vctx->FreshIndex), (8 << 20), TMemoryPool::TLinearGrow::Instance())
, Idx(LocalArena)
{}
diff --git a/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcommit.h b/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcommit.h
index 4f2511ec706..8b094e893bc 100644
--- a/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcommit.h
+++ b/ydb/core/blobstorage/vdisk/hullop/blobstorage_hullcommit.h
@@ -267,7 +267,7 @@ namespace NKikimr {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return DerivedActivityType;
+ return DerivedActivityType;
}
TBaseHullDbCommitter(
diff --git a/ydb/core/client/cancel_tx_ut.cpp b/ydb/core/client/cancel_tx_ut.cpp
index 6ae7de6bd93..4544a3154fb 100644
--- a/ydb/core/client/cancel_tx_ut.cpp
+++ b/ydb/core/client/cancel_tx_ut.cpp
@@ -1,156 +1,156 @@
-#include "flat_ut_client.h"
-
+#include "flat_ut_client.h"
+
#include <ydb/core/base/tablet_resolver.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/tx/datashard/datashard_failpoints.h>
#include <ydb/core/engine/mkql_engine_flat.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
#include <google/protobuf/text_format.h>
-
-namespace NKikimr {
-namespace NFlatTests {
-
-using namespace Tests;
+
+namespace NKikimr {
+namespace NFlatTests {
+
+using namespace Tests;
using NClient::TValue;
-
-Y_UNIT_TEST_SUITE(TCancelTx) {
-
- TServer PrepareTest(bool allowCancelROwithReadsets = false) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- if (false) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_NOTICE);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
- const char * table = R"(Name: "T"
- Columns { Name: "key" Type: "Uint32" }
- Columns { Name: "value" Type: "Uint32" }
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 2)";
-
- annoyingClient.InitRoot();
- annoyingClient.CreateTable("/dc-1", table);
-
- annoyingClient.FlatQuery(
- "("
- " (return (AsList"
- " (UpdateRow '/dc-1/T '('('key (Uint32 '0))) '('('value (Uint32 '11111))) )"
- " (UpdateRow '/dc-1/T '('('key (Uint32 '3000000000))) '('('value (Uint32 '22222))) )"
- " ))"
- ")"
- );
-
- TAtomic prevVal;
+
+Y_UNIT_TEST_SUITE(TCancelTx) {
+
+ TServer PrepareTest(bool allowCancelROwithReadsets = false) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ if (false) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_NOTICE);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * table = R"(Name: "T"
+ Columns { Name: "key" Type: "Uint32" }
+ Columns { Name: "value" Type: "Uint32" }
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 2)";
+
+ annoyingClient.InitRoot();
+ annoyingClient.CreateTable("/dc-1", table);
+
+ annoyingClient.FlatQuery(
+ "("
+ " (return (AsList"
+ " (UpdateRow '/dc-1/T '('('key (Uint32 '0))) '('('value (Uint32 '11111))) )"
+ " (UpdateRow '/dc-1/T '('('key (Uint32 '3000000000))) '('('value (Uint32 '22222))) )"
+ " ))"
+ ")"
+ );
+
+ TAtomic prevVal;
cleverServer.GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CanCancelROWithReadSets", allowCancelROwithReadsets ? 1 : 0 , prevVal);
-
- return cleverServer;
- }
-
- void TestMkqlTxCancellation(TString queryText, bool canBeCancelled, bool allowCancelROwithReadsets = false) {
- TServer server = PrepareTest(allowCancelROwithReadsets);
-        TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
-
- for (ui64 datashard : {72075186224037888, 72075186224037889}) {
- int failAt = 0;
- for (; failAt < 100; ++failAt) {
+
+ return cleverServer;
+ }
+
+ void TestMkqlTxCancellation(TString queryText, bool canBeCancelled, bool allowCancelROwithReadsets = false) {
+ TServer server = PrepareTest(allowCancelROwithReadsets);
+        TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
+
+ for (ui64 datashard : {72075186224037888, 72075186224037889}) {
+ int failAt = 0;
+ for (; failAt < 100; ++failAt) {
NDataShard::gCancelTxFailPoint.Enable(datashard, -1, failAt);
-
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
- annoyingClient.FlatQueryRaw(queryText, opts, response, 2);
-
- if (false)
- Cerr << response << Endl;
-
- bool requestFailed = (response.GetStatus() == NMsgBusProxy::MSTATUS_ERROR);
+
+ TFlatMsgBusClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
+ annoyingClient.FlatQueryRaw(queryText, opts, response, 2);
+
+ if (false)
+ Cerr << response << Endl;
+
+ bool requestFailed = (response.GetStatus() == NMsgBusProxy::MSTATUS_ERROR);
UNIT_ASSERT_VALUES_EQUAL_C(NDataShard::gCancelTxFailPoint.Hit, requestFailed, "Request should fail iff there was a cancellation");
- if (!canBeCancelled) {
- UNIT_ASSERT_VALUES_EQUAL_C(requestFailed, false, "Tx is not supposed to be cancelled");
- }
-
+ if (!canBeCancelled) {
+ UNIT_ASSERT_VALUES_EQUAL_C(requestFailed, false, "Tx is not supposed to be cancelled");
+ }
+
NDataShard::gCancelTxFailPoint.Disable();
-
- if (!requestFailed) {
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- break;
- }
- }
- if (canBeCancelled) {
- UNIT_ASSERT_C(failAt > 0, "Failpoint never fired");
- }
- }
- }
-
- Y_UNIT_TEST(CrossShardReadOnly) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let row2 '('('key (Uint32 '3000000000)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols))
- (let select2 (SelectRow '/dc-1/T row2 cols))
- (let ret (AsList
- (SetResult 'ret1 select1)
- (SetResult 'ret2 select2)
- ))
- (return ret)
- )
- )";
- TestMkqlTxCancellation(query, true);
- }
-
- Y_UNIT_TEST(CrossShardReadOnlyWithReadSets) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let row2 '('('key (Uint32 '3000000000)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols))
- (let val (IfPresent select1
- (lambda '(r) (block '(
- (let select2 (SelectRow '/dc-1/T row2 cols))
- (let res (SetResult 'ret2 select2))
- (return res)
- )))
- (Void)
- ))
- (let ret (AsList
- (SetResult 'ret1 select1)
- val
- ))
- (return ret)
- )
- )";
-
- // Normal scenario: cancellation is not allowed
- TestMkqlTxCancellation(query, false);
-
- // Hack scenario: cancellation is allowed by a flag
- TestMkqlTxCancellation(query, true, true);
- }
-
- Y_UNIT_TEST(ImmediateReadOnly) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let row2 '('('key (Uint32 '3000000000)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols 'head))
- (let select2 (SelectRow '/dc-1/T row2 cols 'head))
- (let ret (AsList
- (SetResult 'ret1 select1)
- (SetResult 'ret2 select2)
- ))
- (return ret)
- )
- )";
- TestMkqlTxCancellation(query, true);
- }
-}
-
-}}
+
+ if (!requestFailed) {
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ break;
+ }
+ }
+ if (canBeCancelled) {
+ UNIT_ASSERT_C(failAt > 0, "Failpoint never fired");
+ }
+ }
+ }
+
+ Y_UNIT_TEST(CrossShardReadOnly) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let row2 '('('key (Uint32 '3000000000)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols))
+ (let select2 (SelectRow '/dc-1/T row2 cols))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ (SetResult 'ret2 select2)
+ ))
+ (return ret)
+ )
+ )";
+ TestMkqlTxCancellation(query, true);
+ }
+
+ Y_UNIT_TEST(CrossShardReadOnlyWithReadSets) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let row2 '('('key (Uint32 '3000000000)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols))
+ (let val (IfPresent select1
+ (lambda '(r) (block '(
+ (let select2 (SelectRow '/dc-1/T row2 cols))
+ (let res (SetResult 'ret2 select2))
+ (return res)
+ )))
+ (Void)
+ ))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ val
+ ))
+ (return ret)
+ )
+ )";
+
+ // Normal scenario: cancellation is not allowed
+ TestMkqlTxCancellation(query, false);
+
+ // Hack scenario: cancellation is allowed by a flag
+ TestMkqlTxCancellation(query, true, true);
+ }
+
+ Y_UNIT_TEST(ImmediateReadOnly) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let row2 '('('key (Uint32 '3000000000)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols 'head))
+ (let select2 (SelectRow '/dc-1/T row2 cols 'head))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ (SetResult 'ret2 select2)
+ ))
+ (return ret)
+ )
+ )";
+ TestMkqlTxCancellation(query, true);
+ }
+}
+
+}}
diff --git a/ydb/core/client/client_ut.cpp b/ydb/core/client/client_ut.cpp
index 92e7a3204b4..b94c722588b 100644
--- a/ydb/core/client/client_ut.cpp
+++ b/ydb/core/client/client_ut.cpp
@@ -62,7 +62,7 @@ struct TTestTables {
bool OutOfOrder = false;
bool SoftUpdates = false;
ui32 FollowerCount = 0;
-
+
TOpts(EVariant var = OneShard_NoOpts, ui32 numFollowers = 0)
: Sharded(false)
, OutOfOrder(false)
@@ -460,21 +460,21 @@ Y_UNIT_TEST_SUITE(TClientTest) {
UNIT_ASSERT(resEmpty1.HaveValue() && bool(resEmpty1) == false);
UNIT_ASSERT(resEmpty2.HaveValue() && bool(resEmpty2) == false);
}
-
- const TString rangeQueryTemplate = R"___(
- (
- (let range '('('key (Uint64 '0) (Void))))
- (let select '('uint))
- (let options '('('ItemsLimit (Uint64 '30))) )
- (let result (SelectRange '/dc-1/Berkanavt/tables/Simple range select options __HEAD__))
- (return (AsList (SetResult 'result result) ))
- )
- )___";
-
- TString rangeQuery = rangeQueryTemplate;
+
+ const TString rangeQueryTemplate = R"___(
+ (
+ (let range '('('key (Uint64 '0) (Void))))
+ (let select '('uint))
+ (let options '('('ItemsLimit (Uint64 '30))) )
+ (let result (SelectRange '/dc-1/Berkanavt/tables/Simple range select options __HEAD__))
+ (return (AsList (SetResult 'result result) ))
+ )
+ )___";
+
+ TString rangeQuery = rangeQueryTemplate;
SubstGlobal(rangeQuery, "__HEAD__", !useHead ? (useFollower ? "'follower" : "'online") : "'head");
- NKikimrMiniKQL::TResult rangeRes;
- UNIT_ASSERT(client.FlatQuery(rangeQuery, rangeRes));
+ NKikimrMiniKQL::TResult rangeRes;
+ UNIT_ASSERT(client.FlatQuery(rangeQuery, rangeRes));
{
//Cerr << rangeRes << Endl;
@@ -686,24 +686,24 @@ Y_UNIT_TEST_SUITE(TClientTest) {
client.InitRootScheme();
TTestTables tables(client, TTestTables::Sharded_NoOpts);
- //server.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ //server.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
ReadWriteViaMiniKQLBody(client, true, false);
}
Y_UNIT_TEST(ReadWriteViaMiniKQLShardedFollower) {
TPortManager tp;
ui16 port = tp.GetPort(2134);
-
+
        // Currently followers cannot be started on the same node as the leader,
- // so we need 2 nodes
+ // so we need 2 nodes
auto settings = TServerSettings(port);
settings.SetNodeCount(2);
TServer server(settings);
TClient client(settings);
- server.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
+ server.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
client.InitRootScheme();
{
@@ -1454,54 +1454,54 @@ Y_UNIT_TEST_SUITE(TClientTest) {
TTestTables tables(client, TTestTables::OneShard_NoOpts);
MultiSelectBody(client, true);
}
-
+
{
TTestTables tables(client, TTestTables::Sharded_NoOpts);
MultiSelectBody(client, true);
}
- }
-
+ }
+
void PrepareTestData(TClient& client, bool allowFollowerPromotion) {
client.InitRootScheme();
- client.MkDir("/dc-1", "Berkanavt");
- client.MkDir("/dc-1/Berkanavt", "tables");
- client.CreateTable(TablePlacement, Sprintf(
- R"___(
- Name: "Simple"
- Columns { Name: "key" Type: "Uint64"}
- Columns { Name: "uint" Type: "Uint64"}
- KeyColumnNames: ["key"]
+ client.MkDir("/dc-1", "Berkanavt");
+ client.MkDir("/dc-1/Berkanavt", "tables");
+ client.CreateTable(TablePlacement, Sprintf(
+ R"___(
+ Name: "Simple"
+ Columns { Name: "key" Type: "Uint64"}
+ Columns { Name: "uint" Type: "Uint64"}
+ KeyColumnNames: ["key"]
PartitionConfig { FollowerCount: 1 AllowFollowerPromotion: %s }
)___", allowFollowerPromotion ? "true": "false"));
-
- NKikimrMiniKQL::TResult writeRes;
+
+ NKikimrMiniKQL::TResult writeRes;
const TString writeQuery = R"___(
- (
- (let table '/dc-1/Berkanavt/tables/Simple)
- (let row1 '('('key (Uint64 '2))))
- (return (AsList
- (UpdateRow table row1 '( '('uint (Uint64 '10))))
- ))
- )
- )___";
-
- UNIT_ASSERT(client.FlatQuery(writeQuery, writeRes));
- }
-
+ (
+ (let table '/dc-1/Berkanavt/tables/Simple)
+ (let row1 '('('key (Uint64 '2))))
+ (return (AsList
+ (UpdateRow table row1 '( '('uint (Uint64 '10))))
+ ))
+ )
+ )___";
+
+ UNIT_ASSERT(client.FlatQuery(writeQuery, writeRes));
+ }
+
void CheckRead(TClient& client, bool readFromFollower) {
const TString readQuery = Sprintf(
- R"___(
- (
- (let key '('('key (Uint64 '2))))
- (let columns '('uint 'key))
- (let row (SelectRow '/dc-1/Berkanavt/tables/Simple key columns %s))
- (return (AsList (SetResult 'row row)))
- )
+ R"___(
+ (
+ (let key '('('key (Uint64 '2))))
+ (let columns '('uint 'key))
+ (let row (SelectRow '/dc-1/Berkanavt/tables/Simple key columns %s))
+ (return (AsList (SetResult 'row row)))
+ )
)___", readFromFollower ? "'follower" : "");
-
- NKikimrMiniKQL::TResult readRes;
- UNIT_ASSERT(client.FlatQuery(readQuery, readRes));
-
+
+ NKikimrMiniKQL::TResult readRes;
+ UNIT_ASSERT(client.FlatQuery(readQuery, readRes));
+
{
//Cerr << readRes << Endl;
TValue value = TValue::Create(readRes.GetValue(), readRes.GetType());
@@ -1510,37 +1510,37 @@ Y_UNIT_TEST_SUITE(TClientTest) {
UNIT_ASSERT_VALUES_EQUAL(10, ui64(row["uint"]));
UNIT_ASSERT_VALUES_EQUAL(2, ui64(row["key"]));
}
- }
-
+ }
+
void WaitForLeaderStart(TClient& client, TTestActorRuntime* runtime, ui64 tabletId, const TDuration& timeout) {
- if (client.WaitForTabletAlive(runtime, tabletId, true, timeout))
- return;
-
+ if (client.WaitForTabletAlive(runtime, tabletId, true, timeout))
+ return;
+
UNIT_ASSERT(!"Timeout expired while waiting for leader start");
- }
-
+ }
+
void WaitForFollowerStart(TClient& client, TTestActorRuntime* runtime, ui64 tabletId, const TDuration& timeout) {
- if (client.WaitForTabletAlive(runtime, tabletId, false, timeout))
- return;
-
+ if (client.WaitForTabletAlive(runtime, tabletId, false, timeout))
+ return;
+
UNIT_ASSERT(!"Timeout expired while waiting for follower start");
- }
-
+ }
+
Y_UNIT_TEST(ReadFromFollower) {
- TPortManager tp;
- ui16 port = tp.GetPort(2134);
+ TPortManager tp;
+ ui16 port = tp.GetPort(2134);
auto settings = TServerSettings(port);
settings.SetNodeCount(2);
TServer server(settings);
TClient client(settings);
- SetupLogging(server);
-
- PrepareTestData(client, false);
- CheckRead(client, true);
- }
-
+ SetupLogging(server);
+
+ PrepareTestData(client, false);
+ CheckRead(client, true);
+ }
+
Y_UNIT_TEST(FollowerCacheRefresh) {
TPortManager tp;
ui16 port = tp.GetPort(2134);
@@ -1626,53 +1626,53 @@ Y_UNIT_TEST_SUITE(TClientTest) {
}
Y_UNIT_TEST(PromoteFollower) {
- TPortManager tp;
- ui16 port = tp.GetPort(2134);
-
+ TPortManager tp;
+ ui16 port = tp.GetPort(2134);
+
auto settings = TServerSettings(port);
settings.SetNodeCount(2);
TServer server(settings);
TClient client(settings);
- PrepareTestData(client, true);
-
- SetupLogging(server);
-
- const ui64 tabletId = 72075186224037888ull;
-
+ PrepareTestData(client, true);
+
+ SetupLogging(server);
+
+ const ui64 tabletId = 72075186224037888ull;
+
ui32 leaderNode = client.GetLeaderNode(server.GetRuntime(), tabletId);
-
+
WaitForLeaderStart(client, server.GetRuntime(), tabletId, TDuration::Seconds(5));
WaitForFollowerStart(client, server.GetRuntime(), tabletId, TDuration::Seconds(5));
-
+
TVector<ui32> followerNodes = client.GetFollowerNodes(server.GetRuntime(), tabletId);
UNIT_ASSERT_VALUES_EQUAL(1, followerNodes.size());
UNIT_ASSERT_VALUES_UNEQUAL(leaderNode, followerNodes[0]);
-
+
Cout << "Read from follower" << Endl;
- CheckRead(client, true);
+ CheckRead(client, true);
Cout << "Read from leader" << Endl;
- CheckRead(client, false);
-
+ CheckRead(client, false);
+
Cout << "Disable node, leader should move back" << Endl;
client.MarkNodeInHive(server.GetRuntime(), leaderNode, false);
client.KickNodeInHive(server.GetRuntime(), leaderNode);
WaitForLeaderStart(client, server.GetRuntime(), tabletId, TDuration::Seconds(5));
-
+
Cout << "Read from new leader" << Endl;
- CheckRead(client, false);
+ CheckRead(client, false);
ui32 newLeaderNode = client.GetLeaderNode(server.GetRuntime(), tabletId);
UNIT_ASSERT_VALUES_UNEQUAL_C(newLeaderNode, leaderNode, "Leader has moved");
-
+
Cout << "Reenable node, follower should start there" << Endl;
client.MarkNodeInHive(server.GetRuntime(), leaderNode, true);
WaitForFollowerStart(client, server.GetRuntime(), tabletId, TDuration::Seconds(5));
followerNodes = client.GetFollowerNodes(server.GetRuntime(), tabletId);
-
- CheckRead(client, true);
- }
-
+
+ CheckRead(client, true);
+ }
+
void DiagnosticsBody(TClient &client, bool testWrite, bool allowFollower = false) {
TString query;
if (testWrite) {
@@ -1695,7 +1695,7 @@ Y_UNIT_TEST_SUITE(TClientTest) {
))
))", allowFollower ? "'follower" : "");
}
-
+
NKikimrMiniKQL::TResult result;
UNIT_ASSERT(client.FlatQuery(query, result));
@@ -1760,98 +1760,98 @@ Y_UNIT_TEST_SUITE(TClientTest) {
TString DiffStrings(const TString& newStr, const TString& oldStr) {
TVector<NDiff::TChunk<char>> chunks;
- NDiff::InlineDiff(chunks, newStr, oldStr, "\n");
-
+ NDiff::InlineDiff(chunks, newStr, oldStr, "\n");
+
TString res;
- TStringOutput out(res);
- for (const auto& c : chunks) {
+ TStringOutput out(res);
+ for (const auto& c : chunks) {
TString left(c.Left.begin(), c.Left.end());
TString right(c.Right.begin(), c.Right.end());
- if (!left.empty() || !right.empty()) {
- out << ">>>>>" << Endl
- << left << Endl
- << "=====" << Endl
- << right << Endl
- << "<<<<<" << Endl;
- }
- }
- return res;
- }
-
+ if (!left.empty() || !right.empty()) {
+ out << ">>>>>" << Endl
+ << left << Endl
+ << "=====" << Endl
+ << right << Endl
+ << "<<<<<" << Endl;
+ }
+ }
+ return res;
+ }
+
TString ToString(const NTabletFlatScheme::TSchemeChanges& scheme) {
TString str;
- ::google::protobuf::TextFormat::PrintToString(scheme, &str);
- return str;
- }
-
+ ::google::protobuf::TextFormat::PrintToString(scheme, &str);
+ return str;
+ }
+
Y_UNIT_TEST(LocalSchemeTxRead) {
- TPortManager tp;
- ui16 port = tp.GetPort(2134);
-
+ TPortManager tp;
+ ui16 port = tp.GetPort(2134);
+
const auto settings = TServerSettings(port);
TServer server(settings);
TClient client(settings);
- NTabletFlatScheme::TSchemeChanges scheme1;
- NTabletFlatScheme::TSchemeChanges scheme2;
+ NTabletFlatScheme::TSchemeChanges scheme1;
+ NTabletFlatScheme::TSchemeChanges scheme2;
TString err;
- bool success = client.LocalSchemeTx(Tests::Hive, "", true, scheme1, err);
- UNIT_ASSERT(success);
- success = client.LocalSchemeTx(Tests::Hive, "", false, scheme2, err);
- UNIT_ASSERT(success);
-
- UNIT_ASSERT_VALUES_EQUAL(ToString(scheme1), ToString(scheme2));
- }
-
+ bool success = client.LocalSchemeTx(Tests::Hive, "", true, scheme1, err);
+ UNIT_ASSERT(success);
+ success = client.LocalSchemeTx(Tests::Hive, "", false, scheme2, err);
+ UNIT_ASSERT(success);
+
+ UNIT_ASSERT_VALUES_EQUAL(ToString(scheme1), ToString(scheme2));
+ }
+
Y_UNIT_TEST(LocalSchemeTxModify) {
- TPortManager tp;
- ui16 port = tp.GetPort(2134);
-
+ TPortManager tp;
+ ui16 port = tp.GetPort(2134);
+
const auto settings = TServerSettings(port);
TServer server(settings);
TClient client(settings);
- NTabletFlatScheme::TSchemeChanges scheme;
+ NTabletFlatScheme::TSchemeChanges scheme;
TString err;
- bool success = false;
-
- success = client.LocalSchemeTx(Tests::Hive, "", true, scheme, err);
- UNIT_ASSERT(success);
+ bool success = false;
+
+ success = client.LocalSchemeTx(Tests::Hive, "", true, scheme, err);
+ UNIT_ASSERT(success);
TString oldScheme = ToString(scheme);
-
+
TString change = R"___(
- Delta {
- DeltaType: AddColumn
- TableId: 10
- ColumnId: 1001
- ColumnName: "NewColumn"
- ColumnType: 2
- }
- Delta {
- DeltaType: UpdateExecutorInfo
- ExecutorCacheSize: 10000000
- }
- )___";
-
- // Dry run first
- success = client.LocalSchemeTx(Tests::Hive, change, true, scheme, err);
- UNIT_ASSERT(success);
+ Delta {
+ DeltaType: AddColumn
+ TableId: 10
+ ColumnId: 1001
+ ColumnName: "NewColumn"
+ ColumnType: 2
+ }
+ Delta {
+ DeltaType: UpdateExecutorInfo
+ ExecutorCacheSize: 10000000
+ }
+ )___";
+
+ // Dry run first
+ success = client.LocalSchemeTx(Tests::Hive, change, true, scheme, err);
+ UNIT_ASSERT(success);
TString dryRunScheme = ToString(scheme);
- // Re-read
- success = client.LocalSchemeTx(Tests::Hive, "", true, scheme, err);
+ // Re-read
+ success = client.LocalSchemeTx(Tests::Hive, "", true, scheme, err);
TString newScheme = ToString(scheme);
- UNIT_ASSERT_VALUES_EQUAL_C(newScheme, oldScheme, "Schema changed by dry-run");
-
- // Update
- success = client.LocalSchemeTx(Tests::Hive, change, false, scheme, err);
- UNIT_ASSERT(success);
- newScheme = ToString(scheme);
- UNIT_ASSERT_VALUES_EQUAL_C(newScheme, dryRunScheme, "Dry-run result is not equal");
-
+ UNIT_ASSERT_VALUES_EQUAL_C(newScheme, oldScheme, "Schema changed by dry-run");
+
+ // Update
+ success = client.LocalSchemeTx(Tests::Hive, change, false, scheme, err);
+ UNIT_ASSERT(success);
+ newScheme = ToString(scheme);
+ UNIT_ASSERT_VALUES_EQUAL_C(newScheme, dryRunScheme, "Dry-run result is not equal");
+
TString schemaDiff = DiffStrings(oldScheme, newScheme);
- Cout << schemaDiff << Endl;
- UNIT_ASSERT_C(!schemaDiff.empty(), "Schema not changed after update");
- }
+ Cout << schemaDiff << Endl;
+ UNIT_ASSERT_C(!schemaDiff.empty(), "Schema not changed after update");
+ }
Y_UNIT_TEST(LocalSchemeDropTable) {
TPortManager tp;
diff --git a/ydb/core/client/flat_ut.cpp b/ydb/core/client/flat_ut.cpp
index 928fa4db671..362c052aa11 100644
--- a/ydb/core/client/flat_ut.cpp
+++ b/ydb/core/client/flat_ut.cpp
@@ -1,5 +1,5 @@
-#include "flat_ut_client.h"
-
+#include "flat_ut_client.h"
+
#include <ydb/core/base/appdata.h>
#include <ydb/core/testlib/test_client.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
@@ -8,7 +8,7 @@
#include <ydb/core/tablet_flat/test/libs/rows/misc.h>
#include <ydb/public/lib/deprecated/kicli/kicli.h>
#include <ydb/core/engine/mkql_engine_flat.h>
-
+
#include <library/cpp/http/io/stream.h>
#include <library/cpp/http/server/http_ex.h>
#include <library/cpp/testing/unittest/tests_data.h>
@@ -17,13 +17,13 @@
#include <util/generic/xrange.h>
#include <util/random/mersenne.h>
#include <util/system/sanitizers.h>
-
-namespace NKikimr {
-namespace NFlatTests {
-
-using namespace Tests;
+
+namespace NKikimr {
+namespace NFlatTests {
+
+using namespace Tests;
using NClient::TValue;
-
+
namespace {
class TFailingMtpQueue: public TSimpleThreadPool {
private:
@@ -110,223 +110,223 @@ namespace {
Y_UNIT_TEST_SUITE(TFlatTest) {
-
+
Y_UNIT_TEST(Init) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
if (true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
- }
-
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
+ }
+
TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
+
+ annoyingClient.InitRoot();
auto status = annoyingClient.MkDir("/dc-1", "Berkanavt");
UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
status = annoyingClient.MkDir("/dc-1/Berkanavt", "tables");
UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
status = annoyingClient.MkDir("/dc-1/Berkanavt", "tables");
UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
-
- annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
- "Name: \"Table1\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+
+ annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
+ "Name: \"Table1\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
- );
-
- annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
- "Name: \"Students\""
- "Columns { Name: \"Id\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
+ );
+
+ annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
+ "Name: \"Students\""
+ "Columns { Name: \"Id\" Type: \"Uint32\"}"
"Columns { Name: \"Name\" Type: \"Utf8\"}"
"Columns { Name: \"LastName\" Type: \"Utf8\"}"
- "Columns { Name: \"Age\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"Id\"]"
- "UniformPartitionsCount: 10"
- );
- annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
- "Name: \"Classes\""
- "Columns { Name: \"Id\" Type: \"Uint32\"}"
+ "Columns { Name: \"Age\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"Id\"]"
+ "UniformPartitionsCount: 10"
+ );
+ annoyingClient.CreateTable("/dc-1/Berkanavt/tables",
+ "Name: \"Classes\""
+ "Columns { Name: \"Id\" Type: \"Uint32\"}"
"Columns { Name: \"Name\" Type: \"Utf8\"}"
"Columns { Name: \"ProfessorName\" Type: \"Utf8\"}"
- "Columns { Name: \"Level\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"Id\"]"
- );
-
- annoyingClient.MkDir("/dc-1/Berkanavt/tables", "Table1");
+ "Columns { Name: \"Level\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"Id\"]"
+ );
+
+ annoyingClient.MkDir("/dc-1/Berkanavt/tables", "Table1");
status = annoyingClient.MkDir("/dc-1/Berkanavt/tables/Table1", "col42");
UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_ERROR);
-
- annoyingClient.Ls("/");
- annoyingClient.Ls("/dc-100");
- annoyingClient.Ls("/dc-1/Argonaut");
- annoyingClient.Ls("/dc-1/Berkanavt/tabls");
- annoyingClient.Ls("/dc-1/Berkanavt/tables");
- annoyingClient.Ls("/dc-1/Berkanavt/tables/Table1");
- annoyingClient.Ls("/dc-1/Berkanavt/tables/Students");
- annoyingClient.Ls("/dc-1/Berkanavt/tables/Classes");
- annoyingClient.Ls("/dc-1/Berkanavt/tables/Table1/key1");
-
- annoyingClient.FlatQuery(
- "("
- " (return (AsList (SetResult 'res1 (Int32 '2016))))"
- ")");
-
- // Update
- annoyingClient.FlatQuery(
- "("
- "(let row '('('Id (Uint32 '42))))"
- "(let myUpd '("
+
+ annoyingClient.Ls("/");
+ annoyingClient.Ls("/dc-100");
+ annoyingClient.Ls("/dc-1/Argonaut");
+ annoyingClient.Ls("/dc-1/Berkanavt/tabls");
+ annoyingClient.Ls("/dc-1/Berkanavt/tables");
+ annoyingClient.Ls("/dc-1/Berkanavt/tables/Table1");
+ annoyingClient.Ls("/dc-1/Berkanavt/tables/Students");
+ annoyingClient.Ls("/dc-1/Berkanavt/tables/Classes");
+ annoyingClient.Ls("/dc-1/Berkanavt/tables/Table1/key1");
+
+ annoyingClient.FlatQuery(
+ "("
+ " (return (AsList (SetResult 'res1 (Int32 '2016))))"
+ ")");
+
+ // Update
+ annoyingClient.FlatQuery(
+ "("
+ "(let row '('('Id (Uint32 '42))))"
+ "(let myUpd '("
" '('Name (Utf8 'Robert))"
" '('LastName (Utf8 '\"\\\"); DROP TABLE Students; --\"))"
- " '('Age (Uint32 '21))))"
- "(let pgmReturn (AsList"
- " (UpdateRow '/dc-1/Berkanavt/tables/Students row myUpd)"
- "))"
- "(return pgmReturn)"
- ")");
-
- // SelectRow
- annoyingClient.FlatQuery(
- "("
- "(let row '('('Id (Uint32 '42))))"
- "(let select '('Name))"
- "(let pgmReturn (AsList"
- " (SetResult 'myRes (SelectRow '/dc-1/Berkanavt/tables/Students row select))"
- "))"
- "(return pgmReturn)"
- ")");
-
- // Cross-shard
- annoyingClient.FlatQuery(
- "("
- "(let row '('('Id (Uint32 '2))))"
- "(let select '('Name))"
- "(let selectRes (SelectRow '/dc-1/Berkanavt/tables/Classes row select))"
- "(let name (FlatMap selectRes (lambda '(x) (Member x 'Name))))"
- "(let row '('('Id (Uint32 '3))))"
- "(let myUpd '("
- " '('Name name)"
+ " '('Age (Uint32 '21))))"
+ "(let pgmReturn (AsList"
+ " (UpdateRow '/dc-1/Berkanavt/tables/Students row myUpd)"
+ "))"
+ "(return pgmReturn)"
+ ")");
+
+ // SelectRow
+ annoyingClient.FlatQuery(
+ "("
+ "(let row '('('Id (Uint32 '42))))"
+ "(let select '('Name))"
+ "(let pgmReturn (AsList"
+ " (SetResult 'myRes (SelectRow '/dc-1/Berkanavt/tables/Students row select))"
+ "))"
+ "(return pgmReturn)"
+ ")");
+
+ // Cross-shard
+ annoyingClient.FlatQuery(
+ "("
+ "(let row '('('Id (Uint32 '2))))"
+ "(let select '('Name))"
+ "(let selectRes (SelectRow '/dc-1/Berkanavt/tables/Classes row select))"
+ "(let name (FlatMap selectRes (lambda '(x) (Member x 'Name))))"
+ "(let row '('('Id (Uint32 '3))))"
+ "(let myUpd '("
+ " '('Name name)"
" '('LastName (Utf8 'Tables))"
- " '('Age (Uint32 '21))))"
- "(let pgmReturn (AsList"
- " (UpdateRow '/dc-1/Berkanavt/tables/Students row myUpd)"
- "))"
- " (return pgmReturn)"
- ")"
- );
-
- // SelectRange
- annoyingClient.FlatQuery(
- "("
- "(let range '('ExcFrom '('Id (Uint32 '2) (Void))))"
- "(let select '('Id 'Name 'LastName))"
- "(let options '())"
- "(let pgmReturn (AsList"
- " (SetResult 'myRes (SelectRange '/dc-1/Berkanavt/tables/Students range select options))"
- "))"
- "(return pgmReturn)"
- ")"
- );
-
- // Erase
- annoyingClient.FlatQuery(
- "("
- "(let row '('('Id (Uint32 '42))))"
- "(let pgmReturn (AsList"
- " (EraseRow '/dc-1/Berkanavt/tables/Students row)"
- "))"
- "(return pgmReturn)"
- ")"
- );
+ " '('Age (Uint32 '21))))"
+ "(let pgmReturn (AsList"
+ " (UpdateRow '/dc-1/Berkanavt/tables/Students row myUpd)"
+ "))"
+ " (return pgmReturn)"
+ ")"
+ );
+
+ // SelectRange
+ annoyingClient.FlatQuery(
+ "("
+ "(let range '('ExcFrom '('Id (Uint32 '2) (Void))))"
+ "(let select '('Id 'Name 'LastName))"
+ "(let options '())"
+ "(let pgmReturn (AsList"
+ " (SetResult 'myRes (SelectRange '/dc-1/Berkanavt/tables/Students range select options))"
+ "))"
+ "(return pgmReturn)"
+ ")"
+ );
+
+ // Erase
+ annoyingClient.FlatQuery(
+ "("
+ "(let row '('('Id (Uint32 '42))))"
+ "(let pgmReturn (AsList"
+ " (EraseRow '/dc-1/Berkanavt/tables/Students row)"
+ "))"
+ "(return pgmReturn)"
+ ")"
+ );
Cout << "Drop tables" << Endl;
annoyingClient.DeleteTable("/dc-1/Berkanavt/tables", "Table1");
annoyingClient.DeleteTable("/dc-1/Berkanavt/tables", "Students");
annoyingClient.DeleteTable("/dc-1/Berkanavt/tables", "Classes");
- }
-
+ }
+
Y_UNIT_TEST(SelectBigRangePerf) {
- // Scenario from https://st.yandex-team.ru/KIKIMR-2715
- // Increase N_ROWS and N_REQS for profiling
- const int N_ROWS = 100; // 10000
- const int N_REQS = 10; // 100500
-
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ // Scenario from https://st.yandex-team.ru/KIKIMR-2715
+ // Increase N_ROWS and N_REQS for profiling
+ const int N_ROWS = 100; // 10000
+ const int N_REQS = 10; // 100500
+
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- if (!true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "test");
- annoyingClient.MkDir("/dc-1/test", "perf");
-
- annoyingClient.CreateTable("/dc-1/test/perf",
- R"(Name: "FlatDaoPerfTestClient"
+ if (!true) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "test");
+ annoyingClient.MkDir("/dc-1/test", "perf");
+
+ annoyingClient.CreateTable("/dc-1/test/perf",
+ R"(Name: "FlatDaoPerfTestClient"
Columns { Name: "ls" Type: "Utf8"}
- Columns { Name: "kg" Type: "Uint32"}
- Columns { Name: "localId" Type: "Uint64"}
- Columns { Name: "createdSeconds" Type: "Uint64"}
- Columns { Name: "mode1" Type: "Uint32"}
- KeyColumnNames: ["ls"]
- )");
-
- // insert rows
- for (int i = 0; i < N_ROWS; ++i) {
- annoyingClient.FlatQuery(Sprintf(
- R"(
- (
+ Columns { Name: "kg" Type: "Uint32"}
+ Columns { Name: "localId" Type: "Uint64"}
+ Columns { Name: "createdSeconds" Type: "Uint64"}
+ Columns { Name: "mode1" Type: "Uint32"}
+ KeyColumnNames: ["ls"]
+ )");
+
+ // insert rows
+ for (int i = 0; i < N_ROWS; ++i) {
+ annoyingClient.FlatQuery(Sprintf(
+ R"(
+ (
(let key '('('ls (Utf8 '%d)) ))
- (let myUpd '(
- '('kg (Uint32 '101))
- '('localId (Uint64 '102))
- '('createdSeconds (Uint64 '103))
- '('mode1 (Uint32 '104))
- ))
- (let pgmReturn (AsList
- (UpdateRow '"/dc-1/test/perf/FlatDaoPerfTestClient" key myUpd)
- ))
- (return pgmReturn)
- )
- )", i));
- }
-
- Cerr << "insert finished" << Endl;
-
- // SelectRange
- for (int i = 0; i < N_REQS; ++i) {
- TInstant start = TInstant::Now();
- annoyingClient.FlatQuery(
- R"(
- ((return (AsList
- (SetResult 'x
- (SelectRange '"/dc-1/test/perf/FlatDaoPerfTestClient"
+ (let myUpd '(
+ '('kg (Uint32 '101))
+ '('localId (Uint64 '102))
+ '('createdSeconds (Uint64 '103))
+ '('mode1 (Uint32 '104))
+ ))
+ (let pgmReturn (AsList
+ (UpdateRow '"/dc-1/test/perf/FlatDaoPerfTestClient" key myUpd)
+ ))
+ (return pgmReturn)
+ )
+ )", i));
+ }
+
+ Cerr << "insert finished" << Endl;
+
+ // SelectRange
+ for (int i = 0; i < N_REQS; ++i) {
+ TInstant start = TInstant::Now();
+ annoyingClient.FlatQuery(
+ R"(
+ ((return (AsList
+ (SetResult 'x
+ (SelectRange '"/dc-1/test/perf/FlatDaoPerfTestClient"
'('ExcFrom 'ExcTo '('ls (Utf8 '"") (Void)))
- '('ls 'kg 'localId 'createdSeconds 'mode1)
- '('('BytesLimit (Uint64 '3000000)))
- )
- )
- )))
- )"
- );
- Cerr << (TInstant::Now()-start).MicroSeconds() << " usec" << Endl;
- }
- }
-
+ '('ls 'kg 'localId 'createdSeconds 'mode1)
+ '('('BytesLimit (Uint64 '3000000)))
+ )
+ )
+ )))
+ )"
+ );
+ Cerr << (TInstant::Now()-start).MicroSeconds() << " usec" << Endl;
+ }
+ }
+
Y_UNIT_TEST(SelectRowWithTargetParameter) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
@@ -377,145 +377,145 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
}
Y_UNIT_TEST(ModifyMultipleRowsCrossShardAllToAll) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- if (!true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "test");
- annoyingClient.MkDir("/dc-1/test", "perf");
-
- annoyingClient.CreateTable("/dc-1/test/perf",
- R"(Name: "FlatDaoPerfTestClient"
- Columns { Name: "hash" Type: "Uint32"}
+ if (!true) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "test");
+ annoyingClient.MkDir("/dc-1/test", "perf");
+
+ annoyingClient.CreateTable("/dc-1/test/perf",
+ R"(Name: "FlatDaoPerfTestClient"
+ Columns { Name: "hash" Type: "Uint32"}
Columns { Name: "ls" Type: "Utf8"}
- Columns { Name: "kg" Type: "Uint32"}
- Columns { Name: "localId" Type: "Uint64"}
- Columns { Name: "createdSeconds" Type: "Uint64"}
- Columns { Name: "mode1" Type: "Uint32"}
- KeyColumnNames: ["hash", "ls"]
- UniformPartitionsCount: 4
- )");
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- TString query = R"(
- (
- (let list_merges_updates
- (MapParameter
- (Parameter 'p
- (ListType
- (StructType
- '('hash (DataType 'Uint32))
+ Columns { Name: "kg" Type: "Uint32"}
+ Columns { Name: "localId" Type: "Uint64"}
+ Columns { Name: "createdSeconds" Type: "Uint64"}
+ Columns { Name: "mode1" Type: "Uint32"}
+ KeyColumnNames: ["hash", "ls"]
+ UniformPartitionsCount: 4
+ )");
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ TString query = R"(
+ (
+ (let list_merges_updates
+ (MapParameter
+ (Parameter 'p
+ (ListType
+ (StructType
+ '('hash (DataType 'Uint32))
'('ls (DataType 'Utf8))
- '('kg (DataType 'Uint32))
- '('localId (DataType 'Uint64))
- '('createdSeconds (DataType 'Uint64))
- '('mode1 (DataType 'Uint32))
- )
- )
- )
- (lambda '(item) (block '(
- (let merged_hash (Just (Member item 'hash)))
- (let merged_ls (Just (Member item 'ls)))
- (let row (SelectRow '"/dc-1/test/perf/FlatDaoPerfTestClient" '('('hash (Member item 'hash)) '('ls (Member item 'ls))) '('kg 'localId 'createdSeconds 'mode1)))
- (let merged_kg (IfPresent row (lambda '(x) (block '((return (Member x 'kg))))) (Just (Member item 'kg))))
- (let merged_localId (IfPresent row (lambda '(x) (block '((return (Member x 'localId))))) (Just (Member item 'localId))))
- (let merged_createdSeconds (IfPresent row (lambda '(x) (block '((return (Member x 'createdSeconds))))) (Just (Member item 'createdSeconds))))
- (let merged_mode1 (Just (Member item 'mode1)))
- (return '(
- (AsStruct
- '('hash merged_hash)
- '('ls merged_ls)
- '('kg merged_kg)
- '('localId merged_localId)
- '('createdSeconds merged_createdSeconds)
- '('mode1 merged_mode1)
- )
- (UpdateRow
- '"/dc-1/test/perf/FlatDaoPerfTestClient"
- '(
- '('hash (Member item 'hash))
- '('ls (Member item 'ls))
- )
- '(
- '('kg merged_kg)
- '('localId merged_localId)
- '('createdSeconds merged_createdSeconds)
- '('mode1 merged_mode1)
- )
- )
- ))
- )))
- )
- )
- (return
- (Append
- (Map list_merges_updates
- (lambda '(item) (block '(
- (return (Nth item '1))
- )))
- )
- (SetResult 'Result (AsStruct
- '('List (Map
- list_merges_updates
- (lambda '(item) (block '(
- (return (Nth item '0))
- )))
- ))
- '('Truncated (Bool 'False)))
- )
- )
- )
- )
- )";
-
- const TString params = R"(
- (
- (let params (Parameters))
- (let params (AddParameter params 'p (AsList
- (AsStruct
- '('hash (Uint32 '0))
+ '('kg (DataType 'Uint32))
+ '('localId (DataType 'Uint64))
+ '('createdSeconds (DataType 'Uint64))
+ '('mode1 (DataType 'Uint32))
+ )
+ )
+ )
+ (lambda '(item) (block '(
+ (let merged_hash (Just (Member item 'hash)))
+ (let merged_ls (Just (Member item 'ls)))
+ (let row (SelectRow '"/dc-1/test/perf/FlatDaoPerfTestClient" '('('hash (Member item 'hash)) '('ls (Member item 'ls))) '('kg 'localId 'createdSeconds 'mode1)))
+ (let merged_kg (IfPresent row (lambda '(x) (block '((return (Member x 'kg))))) (Just (Member item 'kg))))
+ (let merged_localId (IfPresent row (lambda '(x) (block '((return (Member x 'localId))))) (Just (Member item 'localId))))
+ (let merged_createdSeconds (IfPresent row (lambda '(x) (block '((return (Member x 'createdSeconds))))) (Just (Member item 'createdSeconds))))
+ (let merged_mode1 (Just (Member item 'mode1)))
+ (return '(
+ (AsStruct
+ '('hash merged_hash)
+ '('ls merged_ls)
+ '('kg merged_kg)
+ '('localId merged_localId)
+ '('createdSeconds merged_createdSeconds)
+ '('mode1 merged_mode1)
+ )
+ (UpdateRow
+ '"/dc-1/test/perf/FlatDaoPerfTestClient"
+ '(
+ '('hash (Member item 'hash))
+ '('ls (Member item 'ls))
+ )
+ '(
+ '('kg merged_kg)
+ '('localId merged_localId)
+ '('createdSeconds merged_createdSeconds)
+ '('mode1 merged_mode1)
+ )
+ )
+ ))
+ )))
+ )
+ )
+ (return
+ (Append
+ (Map list_merges_updates
+ (lambda '(item) (block '(
+ (return (Nth item '1))
+ )))
+ )
+ (SetResult 'Result (AsStruct
+ '('List (Map
+ list_merges_updates
+ (lambda '(item) (block '(
+ (return (Nth item '0))
+ )))
+ ))
+ '('Truncated (Bool 'False)))
+ )
+ )
+ )
+ )
+ )";
+
+ const TString params = R"(
+ (
+ (let params (Parameters))
+ (let params (AddParameter params 'p (AsList
+ (AsStruct
+ '('hash (Uint32 '0))
'('ls (Utf8 'A))
- '('kg (Uint32 '10))
- '('localId (Uint64 '20))
- '('createdSeconds (Uint64 '30))
- '('mode1 (Uint32 '40))
- )
- (AsStruct
- '('hash (Uint32 '1500000000))
+ '('kg (Uint32 '10))
+ '('localId (Uint64 '20))
+ '('createdSeconds (Uint64 '30))
+ '('mode1 (Uint32 '40))
+ )
+ (AsStruct
+ '('hash (Uint32 '1500000000))
'('ls (Utf8 'B))
- '('kg (Uint32 '10))
- '('localId (Uint64 '20))
- '('createdSeconds (Uint64 '30))
- '('mode1 (Uint32 '40))
- )
- (AsStruct
- '('hash (Uint32 '3000000000))
+ '('kg (Uint32 '10))
+ '('localId (Uint64 '20))
+ '('createdSeconds (Uint64 '30))
+ '('mode1 (Uint32 '40))
+ )
+ (AsStruct
+ '('hash (Uint32 '3000000000))
'('ls (Utf8 'C))
- '('kg (Uint32 '10))
- '('localId (Uint64 '20))
- '('createdSeconds (Uint64 '30))
- '('mode1 (Uint32 '40))
- )
- )))
- (return params)
- )
- )";
-
- NKikimrMiniKQL::TResult result;
- annoyingClient.FlatQueryParams(query, params, false, result);
- }
-
+ '('kg (Uint32 '10))
+ '('localId (Uint64 '20))
+ '('createdSeconds (Uint64 '30))
+ '('mode1 (Uint32 '40))
+ )
+ )))
+ (return params)
+ )
+ )";
+
+ NKikimrMiniKQL::TResult result;
+ annoyingClient.FlatQueryParams(query, params, false, result);
+ }
+
Y_UNIT_TEST(CrossRW) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
@@ -1014,290 +1014,290 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
}
Y_UNIT_TEST(MiniKQLRanges) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- if (!true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "spuchin");
-
- annoyingClient.CreateTable("/dc-1/spuchin",
- R"___(
- Name: "TestTable2"
- Columns { Name: "Group" Type: "Uint32" }
+ if (!true) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::HIVE, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "spuchin");
+
+ annoyingClient.CreateTable("/dc-1/spuchin",
+ R"___(
+ Name: "TestTable2"
+ Columns { Name: "Group" Type: "Uint32" }
Columns { Name: "Name" Type: "String" }
- Columns { Name: "Amount" Type: "Uint64" }
+ Columns { Name: "Amount" Type: "Uint64" }
Columns { Name: "Comment" Type: "String"}
- KeyColumnNames: "Group"
- KeyColumnNames: "Name"
- )___"
- );
-
- // SelectRange, where end < begin
- annoyingClient.FlatQuery(
- R"___(
- (
- (let $1 (VoidType))
- (let $2 (ListType $1))
- (let $3 (List $2))
- (let $4 (Uint32 '"1002"))
- (let $5 '('"Group" $4 $4))
+ KeyColumnNames: "Group"
+ KeyColumnNames: "Name"
+ )___"
+ );
+
+ // SelectRange, where end < begin
+ annoyingClient.FlatQuery(
+ R"___(
+ (
+ (let $1 (VoidType))
+ (let $2 (ListType $1))
+ (let $3 (List $2))
+ (let $4 (Uint32 '"1002"))
+ (let $5 '('"Group" $4 $4))
(let $6 (String '"Name2"))
(let $7 (String '"Name1"))
- (let $8 '('"Name" $6 $7))
- (let $9 '('"ExcFrom" '"IncTo" $5 $8))
- (let $10 '('"Group" '"Name" '"Amount"))
- (let $11 '())
- (let $12 (SelectRange '"/dc-1/spuchin/TestTable2" $9 $10 $11))
- (let $13 (Member $12 '"List"))
- (let $14 (lambda '($19) (block '(
- (let $21 (Member $19 '"Amount"))
- (let $22 '('"Amount" $21))
- (let $23 (AsStruct $22))
- (return $23)
- ))))
- (let $15 (Map $13 $14))
- (let $16 '($15))
- (let $17 (SetResult '"Result" $16))
- (let $18 (Append $3 $17))
- (return $18)
- )
- )___"
- );
- }
-
+ (let $8 '('"Name" $6 $7))
+ (let $9 '('"ExcFrom" '"IncTo" $5 $8))
+ (let $10 '('"Group" '"Name" '"Amount"))
+ (let $11 '())
+ (let $12 (SelectRange '"/dc-1/spuchin/TestTable2" $9 $10 $11))
+ (let $13 (Member $12 '"List"))
+ (let $14 (lambda '($19) (block '(
+ (let $21 (Member $19 '"Amount"))
+ (let $22 '('"Amount" $21))
+ (let $23 (AsStruct $22))
+ (return $23)
+ ))))
+ (let $15 (Map $13 $14))
+ (let $16 '($15))
+ (let $17 (SetResult '"Result" $16))
+ (let $18 (Append $3 $17))
+ (return $18)
+ )
+ )___"
+ );
+ }
+
void TestLsSuccess(TFlatMsgBusClient& annoyingClient, const TString& name, const TVector<TString>& children) {
TString selfName = name;
- if (selfName != "/") {
+ if (selfName != "/") {
selfName= name.substr(name.find_last_of('/')+1);
- }
+ }
TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.Ls(name);
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().GetSelf().GetName(), selfName, "Self name doesn't match");
-
- // Compare expected and actual children count
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().ChildrenSize(), children.size(),
- "Unexpected number of children for " + name);
-
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().GetSelf().GetName(), selfName, "Self name doesn't match");
+
+ // Compare expected and actual children count
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().ChildrenSize(), children.size(),
+ "Unexpected number of children for " + name);
+
THashSet<TString> actualChildren;
TString prevName;
- for (size_t i = 0; i < res->Record.GetPathDescription().ChildrenSize(); ++i) {
+ for (size_t i = 0; i < res->Record.GetPathDescription().ChildrenSize(); ++i) {
TString name = res->Record.GetPathDescription().GetChildren(i).GetName();
- bool res = actualChildren.insert(name).second;
- UNIT_ASSERT_C(res, "Repeating child: " + name);
- UNIT_ASSERT_C(prevName < name, "Children are not sorted: " + prevName + ", " + name);
- prevName = name;
- }
- // compare expected and actual children lists
- for (const auto& cname : children) {
- bool res = actualChildren.count(cname);
- UNIT_ASSERT_C(res, "Child not found: " + cname);
- }
- }
-
+ bool res = actualChildren.insert(name).second;
+ UNIT_ASSERT_C(res, "Repeating child: " + name);
+ UNIT_ASSERT_C(prevName < name, "Children are not sorted: " + prevName + ", " + name);
+ prevName = name;
+ }
+ // compare expected and actual children lists
+ for (const auto& cname : children) {
+ bool res = actualChildren.count(cname);
+ UNIT_ASSERT_C(res, "Child not found: " + cname);
+ }
+ }
+
void TestLsUknownPath(TFlatMsgBusClient& annoyingClient, const TString& name) {
TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.Ls(name);
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.HasPathDescription(), false,
-                                   "Unexpected description for " + name);
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR,
- "Unexpected status for " + name);
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.HasPathDescription(), false,
+                                   "Unexpected description for " + name);
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR,
+ "Unexpected status for " + name);
+ }
+
Y_UNIT_TEST(Ls) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetEnableSystemViews(false));
-
+
TFlatMsgBusClient annoyingClient(port);
annoyingClient.InitRoot();
-
- // Listing all domains always works
- TestLsSuccess(annoyingClient, "/", {"dc-1"});
- TestLsUknownPath(annoyingClient, "");
- TestLsUknownPath(annoyingClient, "//");
-
- TestLsSuccess(annoyingClient, "/", {"dc-1"});
- TestLsSuccess(annoyingClient, "/dc-1", {});
- TestLsUknownPath(annoyingClient, "/dc-11");
- TestLsUknownPath(annoyingClient, "/dc-2");
-
- annoyingClient.MkDir("/dc-1", "Berkanavt");
- TestLsSuccess(annoyingClient, "/", {"dc-1"});
- TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt"});
- TestLsSuccess(annoyingClient, "/dc-1/Berkanavt", {});
- annoyingClient.MkDir("/dc-1", "Berkanavt");
- TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt"});
-
- TestLsUknownPath(annoyingClient, "/dc-1/arcadia");
- annoyingClient.MkDir("/dc-1", "arcadia");
- TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt", "arcadia"});
- TestLsSuccess(annoyingClient, "/dc-1/arcadia", {});
- }
-
- void SetSchemeshardReadOnly(TServer& cleverServer, TFlatMsgBusClient& annoyingClient, bool isReadOnly) {
+
+ // Listing all domains always works
+ TestLsSuccess(annoyingClient, "/", {"dc-1"});
+ TestLsUknownPath(annoyingClient, "");
+ TestLsUknownPath(annoyingClient, "//");
+
+ TestLsSuccess(annoyingClient, "/", {"dc-1"});
+ TestLsSuccess(annoyingClient, "/dc-1", {});
+ TestLsUknownPath(annoyingClient, "/dc-11");
+ TestLsUknownPath(annoyingClient, "/dc-2");
+
+ annoyingClient.MkDir("/dc-1", "Berkanavt");
+ TestLsSuccess(annoyingClient, "/", {"dc-1"});
+ TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt"});
+ TestLsSuccess(annoyingClient, "/dc-1/Berkanavt", {});
+ annoyingClient.MkDir("/dc-1", "Berkanavt");
+ TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt"});
+
+ TestLsUknownPath(annoyingClient, "/dc-1/arcadia");
+ annoyingClient.MkDir("/dc-1", "arcadia");
+ TestLsSuccess(annoyingClient, "/dc-1", {"Berkanavt", "arcadia"});
+ TestLsSuccess(annoyingClient, "/dc-1/arcadia", {});
+ }
+
+ void SetSchemeshardReadOnly(TServer& cleverServer, TFlatMsgBusClient& annoyingClient, bool isReadOnly) {
ui64 schemeShardTabletId = Tests::ChangeStateStorage(Tests::SchemeRoot, Tests::TestDomain);
-
- NKikimrMiniKQL::TResult result;
- bool ok = annoyingClient.LocalQuery(schemeShardTabletId, Sprintf(R"(
- (
- (let key '('('Id (Uint64 '3)))) # SysParam_IsReadOnlyMode
+
+ NKikimrMiniKQL::TResult result;
+ bool ok = annoyingClient.LocalQuery(schemeShardTabletId, Sprintf(R"(
+ (
+ (let key '('('Id (Uint64 '3)))) # SysParam_IsReadOnlyMode
(let value '('('Value (Utf8 '"%s"))))
- (let ret (AsList (UpdateRow 'SysParams key value)))
- (return ret)
- ))", (isReadOnly ? "1" : "0")), result);
- // Cerr << result << "\n";
- UNIT_ASSERT(ok);
- annoyingClient.KillTablet(cleverServer, schemeShardTabletId);
-
- // Wait for schemeshard to restart
- TInstant waitStart = TInstant::Now();
- while (true) {
- Cerr << "Waiting for schemeshard to restart...\n";
- TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.Ls("/dc-1");
- if (res->Record.GetStatus() == NMsgBusProxy::MSTATUS_OK)
- break;
- Sleep(TDuration::MilliSeconds(20));
-            UNIT_ASSERT_C((TInstant::Now()-waitStart).MilliSeconds() < 5000, "Schemeshard took too long to restart");
- }
- }
-
+ (let ret (AsList (UpdateRow 'SysParams key value)))
+ (return ret)
+ ))", (isReadOnly ? "1" : "0")), result);
+ // Cerr << result << "\n";
+ UNIT_ASSERT(ok);
+ annoyingClient.KillTablet(cleverServer, schemeShardTabletId);
+
+ // Wait for schemeshard to restart
+ TInstant waitStart = TInstant::Now();
+ while (true) {
+ Cerr << "Waiting for schemeshard to restart...\n";
+ TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.Ls("/dc-1");
+ if (res->Record.GetStatus() == NMsgBusProxy::MSTATUS_OK)
+ break;
+ Sleep(TDuration::MilliSeconds(20));
+            UNIT_ASSERT_C((TInstant::Now()-waitStart).MilliSeconds() < 5000, "Schemeshard took too long to restart");
+ }
+ }
+
Y_UNIT_TEST(ReadOnlyMode) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetEnableMockOnSingleNode(false));
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- NMsgBusProxy::EResponseStatus status;
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-
- status = annoyingClient.MkDir("/dc-1", "Dir1");
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
-
- //
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_SERVER, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
-
- SetSchemeshardReadOnly(cleverServer, annoyingClient, true);
-
- status = annoyingClient.MkDir("/dc-1", "Dir222");
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_ERROR);
-
- SetSchemeshardReadOnly(cleverServer, annoyingClient, false);
-
- status = annoyingClient.MkDir("/dc-1", "Dir222");
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
- status = annoyingClient.MkDir("/dc-1", "Dir3333");
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
- }
-
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ NMsgBusProxy::EResponseStatus status;
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+
+ status = annoyingClient.MkDir("/dc-1", "Dir1");
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
+
+ //
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_SERVER, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_DEBUG);
+
+ SetSchemeshardReadOnly(cleverServer, annoyingClient, true);
+
+ status = annoyingClient.MkDir("/dc-1", "Dir222");
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_ERROR);
+
+ SetSchemeshardReadOnly(cleverServer, annoyingClient, false);
+
+ status = annoyingClient.MkDir("/dc-1", "Dir222");
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
+ status = annoyingClient.MkDir("/dc-1", "Dir3333");
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::MSTATUS_OK);
+ }
+
Y_UNIT_TEST(PathSorting) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetEnableSystemViews(false));
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir1");
- annoyingClient.MkDir("/dc-1", "Dir3");
- annoyingClient.MkDir("/dc-1", "B");
- annoyingClient.MkDir("/dc-1", "Dir4");
- annoyingClient.MkDir("/dc-1", "Dir2");
- annoyingClient.MkDir("/dc-1", "A");
- TestLsSuccess(annoyingClient, "/dc-1", {"A", "B", "Dir1", "Dir2", "Dir3", "Dir4"});
- }
-
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir1");
+ annoyingClient.MkDir("/dc-1", "Dir3");
+ annoyingClient.MkDir("/dc-1", "B");
+ annoyingClient.MkDir("/dc-1", "Dir4");
+ annoyingClient.MkDir("/dc-1", "Dir2");
+ annoyingClient.MkDir("/dc-1", "A");
+ TestLsSuccess(annoyingClient, "/dc-1", {"A", "B", "Dir1", "Dir2", "Dir3", "Dir4"});
+ }
+
void TestLsPathIdSuccess(TFlatMsgBusClient& annoyingClient, ui64 schemeshardId, ui64 pathId, const TString& selfName, const TVector<TString>& children) {
TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.LsPathId(schemeshardId, pathId);
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().GetSelf().GetName(), selfName, "Self name doesn't match");
-
- // Compare expected and actual children count
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().ChildrenSize(), children.size(),
- "Unexpected number of children for " + selfName);
-
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().GetSelf().GetName(), selfName, "Self name doesn't match");
+
+ // Compare expected and actual children count
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetPathDescription().ChildrenSize(), children.size(),
+ "Unexpected number of children for " + selfName);
+
THashSet<TString> actualChildren;
- for (size_t i = 0; i < res->Record.GetPathDescription().ChildrenSize(); ++i) {
+ for (size_t i = 0; i < res->Record.GetPathDescription().ChildrenSize(); ++i) {
TString n = res->Record.GetPathDescription().GetChildren(i).GetName();
- bool res = actualChildren.insert(n).second;
- UNIT_ASSERT_C(res, "Repeating child: " + n);
- }
- // compare expected and actual children lists
- for (const auto& cname : children) {
- bool res = actualChildren.count(cname);
- UNIT_ASSERT_C(res, "Child not found: " + cname);
- }
- }
-
- void TestLsUknownPathId(TFlatMsgBusClient& annoyingClient, ui64 schemeshardId, ui64 pathId) {
+ bool res = actualChildren.insert(n).second;
+ UNIT_ASSERT_C(res, "Repeating child: " + n);
+ }
+ // compare expected and actual children lists
+ for (const auto& cname : children) {
+ bool res = actualChildren.count(cname);
+ UNIT_ASSERT_C(res, "Child not found: " + cname);
+ }
+ }
+
+ void TestLsUknownPathId(TFlatMsgBusClient& annoyingClient, ui64 schemeshardId, ui64 pathId) {
TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.LsPathId(schemeshardId, pathId);
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.HasPathDescription(), false,
-                                   "Unexpected description for pathId " + ToString(pathId));
- UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR,
- "Unexpected status for pathId " + ToString(pathId));
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.HasPathDescription(), false,
+                                   "Unexpected description for pathId " + ToString(pathId));
+ UNIT_ASSERT_VALUES_EQUAL_C(res->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR,
+ "Unexpected status for pathId " + ToString(pathId));
+ }
+
Y_UNIT_TEST(LsPathId) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetEnableSystemViews(false));
-
+
TFlatMsgBusClient annoyingClient(port);
-
+
TAutoPtr<NMsgBusProxy::TBusResponse> res = annoyingClient.Ls("/");
- ui64 schemeshardId = res->Record.GetPathDescription().GetChildren(0).GetSchemeshardId();
-
- annoyingClient.InitRoot();
- TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {});
- TestLsUknownPathId(annoyingClient, schemeshardId, 2);
- annoyingClient.MkDir("/dc-1", "Berkanavt");
- TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {"Berkanavt"});
- TestLsPathIdSuccess(annoyingClient, schemeshardId, 2, "Berkanavt", {});
- TestLsUknownPathId(annoyingClient, schemeshardId, 3);
- annoyingClient.MkDir("/dc-1", "arcadia");
- TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {"Berkanavt", "arcadia"});
- TestLsPathIdSuccess(annoyingClient, schemeshardId, 3, "arcadia", {});
- }
-
+ ui64 schemeshardId = res->Record.GetPathDescription().GetChildren(0).GetSchemeshardId();
+
+ annoyingClient.InitRoot();
+ TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {});
+ TestLsUknownPathId(annoyingClient, schemeshardId, 2);
+ annoyingClient.MkDir("/dc-1", "Berkanavt");
+ TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {"Berkanavt"});
+ TestLsPathIdSuccess(annoyingClient, schemeshardId, 2, "Berkanavt", {});
+ TestLsUknownPathId(annoyingClient, schemeshardId, 3);
+ annoyingClient.MkDir("/dc-1", "arcadia");
+ TestLsPathIdSuccess(annoyingClient, schemeshardId, 1, "dc-1", {"Berkanavt", "arcadia"});
+ TestLsPathIdSuccess(annoyingClient, schemeshardId, 3, "arcadia", {});
+ }
+
ui32 TestInitRoot(TFlatMsgBusClient& annoyingClient, const TString& name) {
TAutoPtr<NBus::TBusMessage> reply = annoyingClient.InitRootSchemeWithReply(name);
- TAutoPtr<NMsgBusProxy::TBusResponse> res = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Release());
- UNIT_ASSERT(res);
- return res->Record.GetStatus();
- }
-
+ TAutoPtr<NMsgBusProxy::TBusResponse> res = dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Release());
+ UNIT_ASSERT(res);
+ return res->Record.GetStatus();
+ }
+
Y_UNIT_TEST(InitRoot) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- if (true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- }
-
+ if (true) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ }
+
TFlatMsgBusClient annoyingClient(port);
-
+
UNIT_ASSERT_VALUES_EQUAL(TestInitRoot(annoyingClient, "dc-1"), NMsgBusProxy::MSTATUS_OK);
-        // Reinitializing the same root is OK
+        // Reinitializing the same root is OK
UNIT_ASSERT_VALUES_EQUAL(TestInitRoot(annoyingClient, "dc-1"), NMsgBusProxy::MSTATUS_OK);
-
- // Unknown roots
+
+ // Unknown roots
UNIT_ASSERT_VALUES_EQUAL(TestInitRoot(annoyingClient, ""), NMsgBusProxy::MSTATUS_ERROR);
UNIT_ASSERT_VALUES_EQUAL(TestInitRoot(annoyingClient, "dc-11"), NMsgBusProxy::MSTATUS_ERROR);
UNIT_ASSERT_VALUES_EQUAL(TestInitRoot(annoyingClient, "dc-2"), NMsgBusProxy::MSTATUS_ERROR);
- }
+ }
Y_UNIT_TEST(CheckACL) {
TPortManager pm;
@@ -1351,7 +1351,7 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
UNIT_ASSERT_VALUES_EQUAL(response->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK);
response = annoyingClient.Ls("/dc-100");
UNIT_ASSERT_VALUES_EQUAL(response->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR);
- response = annoyingClient.Ls("/dc-1/Argonaut");
+ response = annoyingClient.Ls("/dc-1/Argonaut");
UNIT_ASSERT_VALUES_EQUAL(response->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR);
response = annoyingClient.Ls("/dc-1/Berkanavt/tabls");
UNIT_ASSERT_VALUES_EQUAL(response->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR);
@@ -1367,7 +1367,7 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
}
TTableId tabletId(ChangeStateStorage(Tests::SchemeRoot, TestDomain), studentsTableId);
-        annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN); // there should be something like "234ba4f44ef7c"
+        annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN); // there should be something like "234ba4f44ef7c"
annoyingClient.FlatQuery("((return (AsList (SetResult 'res1 (Int32 '2016)))))");
const char * updateProgram = R"((
@@ -1378,356 +1378,356 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
// Update
annoyingClient.FlatQuery(updateProgram,
NMsgBusProxy::MSTATUS_ERROR,
- TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
+ TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
        annoyingClient.SetSecurityToken("berkanavt@" BUILTIN_ACL_DOMAIN); // there should be something like "234ba4f44ef7c"
annoyingClient.FlatQuery(updateProgram); // as berkanavt@
NACLib::TDiffACL acl;
- acl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericWrite, "argonaut@" BUILTIN_ACL_DOMAIN);
+ acl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericWrite, "argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.ModifyACL("/", "dc-1", acl.SerializeAsString()); // as argonaut@
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.ModifyACL("/", "dc-1", acl.SerializeAsString()); // as argonaut@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
annoyingClient.FlatQuery(updateProgram,
NMsgBusProxy::MSTATUS_ERROR,
- TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
+ TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
annoyingClient.SetSecurityToken("berkanavt@" BUILTIN_ACL_DOMAIN);
annoyingClient.ModifyACL("/", "dc-1", acl.SerializeAsString()); // as berkanavt@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.FlatQuery(updateProgram); // as argonaut@
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.FlatQuery(updateProgram); // as argonaut@
// the same but without first '/'
annoyingClient.ModifyACL("", "dc-1", acl.SerializeAsString()); // as berkanavt@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.FlatQuery(updateProgram); // as argonaut@
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.FlatQuery(updateProgram); // as argonaut@
acl.ClearAccess();
- acl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericRead, "argonaut@" BUILTIN_ACL_DOMAIN);
+ acl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericRead, "argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.ModifyACL("/dc-1", "Berkanavt", acl.SerializeAsString()); // as argonaut@
+ annoyingClient.ModifyACL("/dc-1", "Berkanavt", acl.SerializeAsString()); // as argonaut@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.FlatQuery(updateProgram); // as argonaut@
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.FlatQuery(updateProgram); // as argonaut@
#if 0
annoyingClient.SetSecurityToken("berkanavt@" BUILTIN_ACL_DOMAIN);
annoyingClient.ModifyACL("/dc-1", "Berkanavt", acl.SerializeAsString()); // as berkanavt@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
annoyingClient.FlatQuery(updateProgram,
NMsgBusProxy::MSTATUS_ERROR,
- TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
+ TEvTxUserProxy::TResultStatus::AccessDenied); // as argonaut@
annoyingClient.SetSecurityToken("berkanavt@" BUILTIN_ACL_DOMAIN); // as berkanavt@
NACLib::TDiffACL newAcl;
newAcl.ClearAccess();
- newAcl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericWrite, "argonaut@" BUILTIN_ACL_DOMAIN);
+ newAcl.AddAccess(NACLib::EAccessType::Allow, NACLib::GenericWrite, "argonaut@" BUILTIN_ACL_DOMAIN);
annoyingClient.ModifyACL("/dc-1", "Berkanavt", newAcl.SerializeAsString()); // as berkanavt@
annoyingClient.ResetSchemeCache(cleverServer, tabletId);
- annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
- annoyingClient.FlatQuery(updateProgram); // as argonaut@
+ annoyingClient.SetSecurityToken("argonaut@" BUILTIN_ACL_DOMAIN);
+ annoyingClient.FlatQuery(updateProgram); // as argonaut@
#endif
}
-
+
Y_UNIT_TEST(OutOfDiskSpace) {
return; // TODO https://st.yandex-team.ru/KIKIMR-2279
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
NFake::TStorage diskParams;
- diskParams.DiskSize = 500ull*1024*1024;
- diskParams.SectorSize = 512;
- diskParams.ChunkSize = 50ull*1024*1024;
+ diskParams.DiskSize = 500ull*1024*1024;
+ diskParams.SectorSize = 512;
+ diskParams.ChunkSize = 50ull*1024*1024;
NKikimrConfig::TImmediateControlsConfig controls;
controls.MutableTxLimitControls()->SetPerRequestDataSizeLimit(1000000000);
controls.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(1000000000);
controls.MutableTxLimitControls()->SetPerShardReadSizeLimit(1000000000);
-
+
TServer cleverServer = TServer(TServerSettings(port)
.SetControls(controls)
.SetCustomDiskParams(diskParams)
.SetEnableMockOnSingleNode(false));
- TFlatMsgBusClient annoyingClient(port);
-
- const char * table = "Name: \"Table\""
- "Columns { Name: \"Key\" Type: \"Uint32\"}"
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * table = "Name: \"Table\""
+ "Columns { Name: \"Key\" Type: \"Uint32\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"Key\"]"
- "UniformPartitionsCount: 2";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", table);
-
+ "KeyColumnNames: [\"Key\"]"
+ "UniformPartitionsCount: 2";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", table);
+
TString insertRowQuery = "("
- "(let key '('('Key (Uint32 '%u))))"
+ "(let key '('('Key (Uint32 '%u))))"
"(let value '('('Value (Utf8 '%s))))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/Table key value)"
- "))"
- "(return ret_)"
- ")";
-
- TClient::TFlatQueryOptions opts;
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/Table key value)"
+ "))"
+ "(return ret_)"
+ ")";
+
+ TClient::TFlatQueryOptions opts;
NKikimrClient::TResponse response;
-
- int errorCount = 0;
- for (ui32 i = 0; i < 20; ++i) {
- Cout << "row " << i << Endl;
+
+ int errorCount = 0;
+ for (ui32 i = 0; i < 20; ++i) {
+ Cout << "row " << i << Endl;
ui32 status = annoyingClient.FlatQueryRaw(Sprintf(insertRowQuery.data(), i, TString(6000000, 'A').data()), opts, response);
UNIT_ASSERT(status == NMsgBusProxy::MSTATUS_OK || status == NMsgBusProxy::MSTATUS_REJECTED);
- if (status == NMsgBusProxy::MSTATUS_REJECTED) {
- ++errorCount;
- }
- }
-        UNIT_ASSERT_C(errorCount > 0, "Out of disk space error should have occurred");
-
+ if (status == NMsgBusProxy::MSTATUS_REJECTED) {
+ ++errorCount;
+ }
+ }
+        UNIT_ASSERT_C(errorCount > 0, "Out of disk space error should have occurred");
+
TString readQuery =
- "("
- "(let range '('ExcFrom '('Key (Uint32 '0) (%s))))"
- "(let select '('Key))"
- "(let options '())"
- "(let pgmReturn (AsList"
- " (SetResult 'myRes (SelectRange '/dc-1/Dir/Table range select options %s))"
- "))"
- "(return pgmReturn)"
- ")";
-
- ui32 status = 0;
+ "("
+ "(let range '('ExcFrom '('Key (Uint32 '0) (%s))))"
+ "(let select '('Key))"
+ "(let options '())"
+ "(let pgmReturn (AsList"
+ " (SetResult 'myRes (SelectRange '/dc-1/Dir/Table range select options %s))"
+ "))"
+ "(return pgmReturn)"
+ ")";
+
+ ui32 status = 0;
status = annoyingClient.FlatQueryRaw(Sprintf(readQuery.data(), "Uint32 '10", "'head"), opts, response);
- UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_OK, "Single-shard read query should not fail");
-
+ UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_OK, "Single-shard read query should not fail");
+
status = annoyingClient.FlatQueryRaw(Sprintf(readQuery.data(), "Uint32 '10", ""), opts, response);
- UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_OK, "Single-shard read query should not fail");
-
+ UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_OK, "Single-shard read query should not fail");
+
status = annoyingClient.FlatQueryRaw(Sprintf(readQuery.data(), "Uint32 '3000000000", ""), opts, response);
- UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_REJECTED, "Multi-shard read query should fail");
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(status, NMsgBusProxy::MSTATUS_REJECTED, "Multi-shard read query should fail");
+ }
+
void TestRejectByPerShardReadSize(const NKikimrConfig::TImmediateControlsConfig& controls,
TString tableConfig) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port)
.SetControls(controls));
-
- TFlatMsgBusClient annoyingClient(port);
-
+
+ TFlatMsgBusClient annoyingClient(port);
+
TString table =
- " Name: \"Table\""
- " Columns { Name: \"Key\" Type: \"Uint32\"}"
+ " Name: \"Table\""
+ " Columns { Name: \"Key\" Type: \"Uint32\"}"
" Columns { Name: \"Value\" Type: \"Utf8\"}"
- " KeyColumnNames: [\"Key\"]"
- " UniformPartitionsCount: 2 "
- + tableConfig;
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", table);
-
+ " KeyColumnNames: [\"Key\"]"
+ " UniformPartitionsCount: 2 "
+ + tableConfig;
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", table);
+
TString insertRowQuery = "("
- "(let key '('('Key (Uint32 '%u))))"
+ "(let key '('('Key (Uint32 '%u))))"
"(let value '('('Value (Utf8 '%s))))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/Table key value)"
- "))"
- "(return ret_)"
- ")";
-
- for (ui32 i = 0; i < 100; ++i) {
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/Table key value)"
+ "))"
+ "(return ret_)"
+ ")";
+
+ for (ui32 i = 0; i < 100; ++i) {
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(), i, TString(1000000, 'A').data()));
- }
-
- ui32 status = 0;
- TClient::TFlatQueryOptions opts;
+ }
+
+ ui32 status = 0;
+ TClient::TFlatQueryOptions opts;
NKikimrClient::TResponse response;
- status = annoyingClient.FlatQueryRaw(Sprintf(R"(
- (
- (let range '('ExcFrom 'ExcTo '('Key (Null) (Void))))
- (let data (Member (SelectRange '/dc-1/Dir/Table range '('Key 'Value) '()) 'List))
- (let result (Filter data (lambda '(row)
- (Coalesce (NotEqual (Member row 'Key) (Uint32 '1)) (Bool 'false))
- )))
- (return (AsList (SetResult 'Result result)))
- )
- )"
- ), opts, response);
-
+ status = annoyingClient.FlatQueryRaw(Sprintf(R"(
+ (
+ (let range '('ExcFrom 'ExcTo '('Key (Null) (Void))))
+ (let data (Member (SelectRange '/dc-1/Dir/Table range '('Key 'Value) '()) 'List))
+ (let result (Filter data (lambda '(row)
+ (Coalesce (NotEqual (Member row 'Key) (Uint32 '1)) (Bool 'false))
+ )))
+ (return (AsList (SetResult 'Result result)))
+ )
+ )"
+ ), opts, response);
+
UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, NMsgBusProxy::MSTATUS_ERROR, "Big read should fail");
- }
-
+ }
+
Y_UNIT_TEST(RejectByPerShardReadSize) {
NKikimrConfig::TImmediateControlsConfig controls;
controls.MutableTxLimitControls()->SetPerRequestDataSizeLimit(1000000000);
controls.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(1000000000);
controls.MutableTxLimitControls()->SetPerShardReadSizeLimit(1000000000);
-
- // Test per-table limit
+
+ // Test per-table limit
TestRejectByPerShardReadSize(controls, " PartitionConfig { TxReadSizeLimit: 10000 } ");
-
- // Test per-domain limit
+
+ // Test per-domain limit
controls.MutableTxLimitControls()->SetPerShardReadSizeLimit(10000);
TestRejectByPerShardReadSize(controls, "");
- }
-
+ }
+
Y_UNIT_TEST(RejectByPerRequestSize) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
NKikimrConfig::TImmediateControlsConfig controls;
controls.MutableTxLimitControls()->SetPerRequestDataSizeLimit(10000);
controls.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(1000000000);
controls.MutableTxLimitControls()->SetPerShardReadSizeLimit(1000000000);
-
+
TServer cleverServer = TServer(TServerSettings(port)
.SetControls(controls));
- TFlatMsgBusClient annoyingClient(port);
-
- const char * table =
- " Name: \"Table\""
- " Columns { Name: \"Key\" Type: \"Uint32\"}"
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * table =
+ " Name: \"Table\""
+ " Columns { Name: \"Key\" Type: \"Uint32\"}"
" Columns { Name: \"Value\" Type: \"Utf8\"}"
- " KeyColumnNames: [\"Key\"]"
- " SplitBoundary { KeyPrefix { Tuple { Optional { Uint32 : 10000 } } }}"
- " PartitionConfig { TxReadSizeLimit: 100000000 } ";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", table);
-
+ " KeyColumnNames: [\"Key\"]"
+ " SplitBoundary { KeyPrefix { Tuple { Optional { Uint32 : 10000 } } }}"
+ " PartitionConfig { TxReadSizeLimit: 100000000 } ";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", table);
+
TString insertRowQuery = "("
- "(let key '('('Key (Uint32 '%u))))"
+ "(let key '('('Key (Uint32 '%u))))"
"(let value '('('Value (Utf8 '%s))))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/Table key value)"
- "))"
- "(return ret_)"
- ")";
-
- for (ui32 i = 5000; i < 5020; ++i) {
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/Table key value)"
+ "))"
+ "(return ret_)"
+ ")";
+
+ for (ui32 i = 5000; i < 5020; ++i) {
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(), i, TString(1000000, 'A').data()));
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(), (i/2)+10000, TString(1000000, 'A').data()));
- }
-
- TString readQuery = R"(
- (
- (let range1 '('ExcFrom '('Key (Uint32 '0) (Void))))
- (let range2 '('ExcFrom '('Key (Uint32 '10) (Void))))
- (let select '('Key 'Value))
- (let options '())
-
- (let filterFunc (lambda '(row)
- (Coalesce (NotEqual (Member row 'Key) (Uint32 '1)) (Bool 'false))
- ))
-
- (let list1 (Member (SelectRange '/dc-1/Dir/Table range1 select options) 'List))
- (let list2 (Member (SelectRange '/dc-1/Dir/Table range2 select options) 'List))
-
- (let pgmReturn (AsList
- (SetResult 'myRes1 (Filter list1 filterFunc) )
- (SetResult 'myRes2 (Filter list2 filterFunc) )
- ))
- (return pgmReturn)
- )
-
- )";
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- ui32 status = 0;
- TClient::TFlatQueryOptions opts;
+ }
+
+ TString readQuery = R"(
+ (
+ (let range1 '('ExcFrom '('Key (Uint32 '0) (Void))))
+ (let range2 '('ExcFrom '('Key (Uint32 '10) (Void))))
+ (let select '('Key 'Value))
+ (let options '())
+
+ (let filterFunc (lambda '(row)
+ (Coalesce (NotEqual (Member row 'Key) (Uint32 '1)) (Bool 'false))
+ ))
+
+ (let list1 (Member (SelectRange '/dc-1/Dir/Table range1 select options) 'List))
+ (let list2 (Member (SelectRange '/dc-1/Dir/Table range2 select options) 'List))
+
+ (let pgmReturn (AsList
+ (SetResult 'myRes1 (Filter list1 filterFunc) )
+ (SetResult 'myRes2 (Filter list2 filterFunc) )
+ ))
+ (return pgmReturn)
+ )
+
+ )";
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ ui32 status = 0;
+ TClient::TFlatQueryOptions opts;
NKikimrClient::TResponse response;
- status = annoyingClient.FlatQueryRaw(readQuery, opts, response);
- UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, NMsgBusProxy::MSTATUS_ERROR, "Big read should fail");
- }
-
+ status = annoyingClient.FlatQueryRaw(readQuery, opts, response);
+ UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, NMsgBusProxy::MSTATUS_ERROR, "Big read should fail");
+ }
+
Y_UNIT_TEST(RejectByIncomingReadSetSize) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
NKikimrConfig::TImmediateControlsConfig controls;
controls.MutableTxLimitControls()->SetPerRequestDataSizeLimit(100000000);
controls.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(1000);
controls.MutableTxLimitControls()->SetPerShardReadSizeLimit(100000000);
-
+
TServer cleverServer = TServer(TServerSettings(port)
.SetControls(controls));
- TFlatMsgBusClient annoyingClient(port);
-
- const char * table =
- " Name: \"Table\""
- " Columns { Name: \"Key\" Type: \"Uint32\"}"
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * table =
+ " Name: \"Table\""
+ " Columns { Name: \"Key\" Type: \"Uint32\"}"
" Columns { Name: \"Value\" Type: \"Utf8\"}"
- " KeyColumnNames: [\"Key\"]"
- " UniformPartitionsCount: 2"
- " PartitionConfig { TxReadSizeLimit: 100000000 } ";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", table);
-
+ " KeyColumnNames: [\"Key\"]"
+ " UniformPartitionsCount: 2"
+ " PartitionConfig { TxReadSizeLimit: 100000000 } ";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", table);
+
TString insertRowQuery = "("
- "(let key '('('Key (Uint32 '%u))))"
+ "(let key '('('Key (Uint32 '%u))))"
"(let value '('('Value (Utf8 '%s))))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/Table key value)"
- "))"
- "(return ret_)"
- ")";
-
- for (ui32 i = 4241; i < 4281; ++i) {
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/Table key value)"
+ "))"
+ "(return ret_)"
+ ")";
+
+ for (ui32 i = 4241; i < 4281; ++i) {
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(), i, TString(1000000, 'A').data()));
- }
-
+ }
+
TString readQuery =
- "("
- "(let key1 '('('Key (Uint32 '4242))))"
- "(let row (SelectRow '/dc-1/Dir/Table key1 '('Value)))"
+ "("
+ "(let key1 '('('Key (Uint32 '4242))))"
+ "(let row (SelectRow '/dc-1/Dir/Table key1 '('Value)))"
"(let val (IfPresent row (lambda '(r) ( Coalesce (Member r 'Value) (Utf8 'AAA))) (Utf8 'BBB)))"
- "(let key2 '('('Key (Uint32 '3333333333))))"
- "(let upd (UpdateRow '/dc-1/Dir/Table key2 '('('Value val))))"
- "(let range1 '('ExcFrom '('Key (Uint32 '0) (Void))))"
- "(let range2 '('ExcFrom '('Key (Uint32 '10) (Void))))"
- "(let select '('Key 'Value))"
- "(let options '())"
- "(let pgmReturn (AsList upd))"
- "(return pgmReturn)"
- ")";
-
- ui32 status = 0;
- TClient::TFlatQueryOptions opts;
+ "(let key2 '('('Key (Uint32 '3333333333))))"
+ "(let upd (UpdateRow '/dc-1/Dir/Table key2 '('('Value val))))"
+ "(let range1 '('ExcFrom '('Key (Uint32 '0) (Void))))"
+ "(let range2 '('ExcFrom '('Key (Uint32 '10) (Void))))"
+ "(let select '('Key 'Value))"
+ "(let options '())"
+ "(let pgmReturn (AsList upd))"
+ "(return pgmReturn)"
+ ")";
+
+ ui32 status = 0;
+ TClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
+ status = annoyingClient.FlatQueryRaw(readQuery, opts, response);
+ UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, NMsgBusProxy::MSTATUS_ERROR, "Big read should fail");
+ }
+
+ void RunWriteQueryRetryOverloads(TFlatMsgBusClient& annoyingClient, TString query) {
+ i32 retries = 10;
+ TFlatMsgBusClient::TFlatQueryOptions opts;
NKikimrClient::TResponse response;
- status = annoyingClient.FlatQueryRaw(readQuery, opts, response);
- UNIT_ASSERT_VALUES_EQUAL_C((NMsgBusProxy::EResponseStatus)status, NMsgBusProxy::MSTATUS_ERROR, "Big read should fail");
- }
-
- void RunWriteQueryRetryOverloads(TFlatMsgBusClient& annoyingClient, TString query) {
- i32 retries = 10;
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
- while (retries) {
- annoyingClient.FlatQueryRaw(query, opts, response);
-
- if (response.GetStatus() != NMsgBusProxy::MSTATUS_REJECTED ||
- response.GetProxyErrorCode() != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardOverloaded) {
- break;
- }
-
- --retries;
- Sleep(TDuration::Seconds(1));
- }
- UNIT_ASSERT_C(retries > 0, "Failed to write row");
- UNIT_ASSERT_VALUES_EQUAL_C(response.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Failed to write row");
- UNIT_ASSERT_VALUES_EQUAL_C(response.GetExecutionEngineResponseStatus(), (ui32)NMiniKQL::IEngineFlat::EStatus::Complete, "Failed to write row");
- }
-
+ while (retries) {
+ annoyingClient.FlatQueryRaw(query, opts, response);
+
+ if (response.GetStatus() != NMsgBusProxy::MSTATUS_REJECTED ||
+ response.GetProxyErrorCode() != TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardOverloaded) {
+ break;
+ }
+
+ --retries;
+ Sleep(TDuration::Seconds(1));
+ }
+ UNIT_ASSERT_C(retries > 0, "Failed to write row");
+ UNIT_ASSERT_VALUES_EQUAL_C(response.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Failed to write row");
+ UNIT_ASSERT_VALUES_EQUAL_C(response.GetExecutionEngineResponseStatus(), (ui32)NMiniKQL::IEngineFlat::EStatus::Complete, "Failed to write row");
+ }
+
void WriteRow(TFlatMsgBusClient& annoyingClient, TString table, ui32 key, TString value, TArrayRef<const char> large) {
TString insertRowQuery = "("
"(let key '('('Key (Uint32 '%u))))"
@@ -1744,17 +1744,17 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
void WriteRow(TFlatMsgBusClient& annoyingClient, TString table, ui32 key, TString value) {
TString insertRowQuery = "("
- "(let key '('('Key (Uint32 '%u))))"
+ "(let key '('('Key (Uint32 '%u))))"
"(let value '('('Value (Utf8 '%s))))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/%s key value)"
- "))"
- "(return ret_)"
- ")";
-
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/%s key value)"
+ "))"
+ "(return ret_)"
+ ")";
+
RunWriteQueryRetryOverloads(annoyingClient, Sprintf(insertRowQuery.data(), key, value.data(), table.data()));
- }
-
+ }
+
void WriteRandomRows(TFlatMsgBusClient &client, TString table, ui64 seed, ui32 rows) {
TMersenne<ui64> rnd(seed);
NTable::NTest::TRandomString<decltype(rnd)> blobs(rnd);
@@ -1766,213 +1766,213 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
TString ReadRow(TFlatMsgBusClient& annoyingClient, TString table, ui32 key) {
TString query =
- R"(
- (
- (let row '('('Key (Uint32 '%u))))
- (let select '('Value))
- (let pgmReturn (AsList
- (SetResult 'row (SelectRow '/dc-1/Dir/%s row select))
- ))
- (return pgmReturn)
- )
- )";
-
- NKikimrMiniKQL::TResult readRes;
+ R"(
+ (
+ (let row '('('Key (Uint32 '%u))))
+ (let select '('Value))
+ (let pgmReturn (AsList
+ (SetResult 'row (SelectRow '/dc-1/Dir/%s row select))
+ ))
+ (return pgmReturn)
+ )
+ )";
+
+ NKikimrMiniKQL::TResult readRes;
bool res = annoyingClient.FlatQuery(Sprintf(query.data(), key, table.data()), readRes);
- UNIT_ASSERT(res);
-
+ UNIT_ASSERT(res);
+
//Cerr << readRes << Endl;
TValue value = TValue::Create(readRes.GetValue(), readRes.GetType());
TValue row = value["row"];
TString strRes(row["Value"]);
- return strRes;
- }
-
+ return strRes;
+ }
+
void PrepareSourceTable(TFlatMsgBusClient& annoyingClient, bool withFollowers = false) {
- const char * table = R"___(
- Name: "TableOld"
- Columns { Name: "Key" Type: "Uint32"}
+ const char * table = R"___(
+ Name: "TableOld"
+ Columns { Name: "Key" Type: "Uint32"}
Columns { Name: "Value" Type: "Utf8"}
Columns { Name: "Large" Type: "String" Family: 0 }
- Columns { Name: "unused001" Type: "Bool"}
- Columns { Name: "unused002" Type: "Uint32"}
- Columns { Name: "unused003" Type: "Int64"}
- Columns { Name: "unused004" Type: "Float"}
- KeyColumnNames: ["Key"]
- UniformPartitionsCount: 2
-
- PartitionConfig {
+ Columns { Name: "unused001" Type: "Bool"}
+ Columns { Name: "unused002" Type: "Uint32"}
+ Columns { Name: "unused003" Type: "Int64"}
+ Columns { Name: "unused004" Type: "Float"}
+ KeyColumnNames: ["Key"]
+ UniformPartitionsCount: 2
+
+ PartitionConfig {
FollowerCount: %d
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 2
- InMemForceStepsToSnapshot: 3
- InMemForceSizeToSnapshot: 1000000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 2
- ForceCountToCompact: 2
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
- }
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 2
+ InMemForceStepsToSnapshot: 3
+ InMemForceSizeToSnapshot: 1000000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 2
+ ForceCountToCompact: 2
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
+ }
ColumnFamilies {
Id: 0
Storage: ColumnStorageTest_1_2_1k
ColumnCache: ColumnCacheNone
}
EnableFilterByKey: true
- }
- )___";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
+ }
+ )___";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
annoyingClient.CreateTable("/dc-1/Dir", Sprintf(table, withFollowers ? 2 : 0));
-
+
TMersenne<ui64> rnd;
NTable::NTest::TRandomString<decltype(rnd)> blobs(rnd);
for (ui32 i = 0; i < 8; ++i) {
WriteRow(annoyingClient, "TableOld", i, "AAA", blobs.Do(rnd.Uniform(512, 1536)));
WriteRow(annoyingClient, "TableOld", 0x80000000 + i, "BBB", blobs.Do(rnd.Uniform(512, 1536)));
- }
- }
-
+ }
+ }
+
TString ReadFromTable(TFlatMsgBusClient& annoyingClient, TString table, ui32 fromKey = 0, bool follower = false) {
- const char* readQuery =
- "("
+ const char* readQuery =
+ "("
"(let range1 '('IncFrom '('Key (Uint32 '%d) (Void) )))"
"(let select '('Key 'Value 'Large))"
- "(let options '())"
- "(let pgmReturn (AsList"
+ "(let options '())"
+ "(let pgmReturn (AsList"
" (SetResult 'range1 (SelectRange '%s range1 select options (Uint32 '%d)))"
- "))"
- "(return pgmReturn)"
- ")";
-
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
+ "))"
+ "(return pgmReturn)"
+ ")";
+
+ TFlatMsgBusClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
annoyingClient.FlatQueryRaw(Sprintf(readQuery, fromKey, table.data(), follower ? TReadTarget::Follower().GetMode()
- : TReadTarget::Head().GetMode()), opts, response);
-
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- UNIT_ASSERT(response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
- NKikimrMiniKQL::TResult result;
- result.Swap(response.MutableExecutionEngineEvaluatedResponse());
-
+ : TReadTarget::Head().GetMode()), opts, response);
+
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT(response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
+ NKikimrMiniKQL::TResult result;
+ result.Swap(response.MutableExecutionEngineEvaluatedResponse());
+
TString strResult;
- ::google::protobuf::TextFormat::PrintToString(result.GetValue(), &strResult);
- return strResult;
- }
-
+ ::google::protobuf::TextFormat::PrintToString(result.GetValue(), &strResult);
+ return strResult;
+ }
+
template <class TSetType>
- void WaitForTabletsToBeDeletedInHive(TFlatMsgBusClient& annoyingClient, TTestActorRuntime* runtime,
+ void WaitForTabletsToBeDeletedInHive(TFlatMsgBusClient& annoyingClient, TTestActorRuntime* runtime,
const TSetType& tabletIds, const TDuration& timeout = TDuration::Seconds(20)) {
- TInstant waitStart = TInstant::Now();
- for (ui64 tabletId : tabletIds) {
- Cerr << "Check that tablet " << tabletId << " was deleted\n";
- while (annoyingClient.TabletExistsInHive(runtime, tabletId)) {
- UNIT_ASSERT_C((TInstant::Now()-waitStart) < timeout, "Tablet " << tabletId << " was not deleted");
- Sleep(TDuration::MilliSeconds(300));
- }
- }
- }
-
-
+ TInstant waitStart = TInstant::Now();
+ for (ui64 tabletId : tabletIds) {
+ Cerr << "Check that tablet " << tabletId << " was deleted\n";
+ while (annoyingClient.TabletExistsInHive(runtime, tabletId)) {
+ UNIT_ASSERT_C((TInstant::Now()-waitStart) < timeout, "Tablet " << tabletId << " was not deleted");
+ Sleep(TDuration::MilliSeconds(300));
+ }
+ }
+ }
+
+
Y_UNIT_TEST(CopyTableAndRead) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
-
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+
TServer cleverServer = TServer(TServerSettings(port));
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
-
- // Copy the table
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- Cerr << "Copy TableOld to Table" << Endl;
-
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
-
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
+
+ // Copy the table
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ Cerr << "Copy TableOld to Table" << Endl;
+
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+
TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
-
+
Cout << strResultOld << Endl;
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Make second copy of the old table
- Cerr << "Copy TableOld to Table2" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table2\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
- strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table2");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Make second copy of the old table
+ Cerr << "Copy TableOld to Table2" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table2\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+ strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table2");
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
}
-
+
Y_UNIT_TEST(CopyTableAndCompareColumnsSchema) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
-
- TServer cleverServer = TServer(TServerSettings(port));
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::BS_CONTROLLER, NActors::NLog::PRI_ERROR);
-
- TFlatMsgBusClient annoyingClient(port);
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
-
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+
+ TServer cleverServer = TServer(TServerSettings(port));
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::BS_CONTROLLER, NActors::NLog::PRI_ERROR);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+
for (int colCount = 1; colCount < 100; ++colCount) {
NKikimrSchemeOp::TTableDescription schema;
- TString name = "Table_" + ToString(colCount);
- schema.SetName(name);
- Cout << name << Endl;
- for (int ci : xrange(colCount)) {
- auto col = schema.AddColumns();
- col->SetName("col_" + ToString(ci));
- col->SetType("Int32");
- }
- schema.AddKeyColumnNames("col_0");
-
+ TString name = "Table_" + ToString(colCount);
+ schema.SetName(name);
+ Cout << name << Endl;
+ for (int ci : xrange(colCount)) {
+ auto col = schema.AddColumns();
+ col->SetName("col_" + ToString(ci));
+ col->SetType("Int32");
+ }
+ schema.AddKeyColumnNames("col_0");
+
while (annoyingClient.CreateTable("/dc-1/Dir", schema) != NMsgBusProxy::MSTATUS_OK) {}
while (annoyingClient.CreateTable("/dc-1/Dir", " Name: '" + name + "_Copy' CopyFromTable: '/dc-1/Dir/" + name + "'") != NMsgBusProxy::MSTATUS_OK) {}
-
- auto fnGetColumns = [](const NMsgBusProxy::TBusResponse* lsRes) {
- UNIT_ASSERT(lsRes);
- //Cout << lsRes->Record << Endl;
- UNIT_ASSERT_VALUES_EQUAL(lsRes->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- THashMap<ui32, TString> columns;
- for (const auto& ci : lsRes->Record.GetPathDescription().GetTable().GetColumns()) {
- columns[ci.GetId()] = ci.GetName();
- }
- UNIT_ASSERT(!columns.empty());
- return columns;
- };
-
- auto orig = annoyingClient.Ls("/dc-1/Dir/" + name);
- auto copy = annoyingClient.Ls("/dc-1/Dir/" + name + "_Copy");
-
- auto origColumns = fnGetColumns(orig.Get());
- auto copyColumns = fnGetColumns(copy.Get());
-
- UNIT_ASSERT_VALUES_EQUAL(origColumns.size(), copyColumns.size());
-
- for (const auto& oc : origColumns) {
- UNIT_ASSERT_VALUES_EQUAL(oc.second, copyColumns[oc.first]);
- }
- }
- }
-
-
+
+ auto fnGetColumns = [](const NMsgBusProxy::TBusResponse* lsRes) {
+ UNIT_ASSERT(lsRes);
+ //Cout << lsRes->Record << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(lsRes->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ THashMap<ui32, TString> columns;
+ for (const auto& ci : lsRes->Record.GetPathDescription().GetTable().GetColumns()) {
+ columns[ci.GetId()] = ci.GetName();
+ }
+ UNIT_ASSERT(!columns.empty());
+ return columns;
+ };
+
+ auto orig = annoyingClient.Ls("/dc-1/Dir/" + name);
+ auto copy = annoyingClient.Ls("/dc-1/Dir/" + name + "_Copy");
+
+ auto origColumns = fnGetColumns(orig.Get());
+ auto copyColumns = fnGetColumns(copy.Get());
+
+ UNIT_ASSERT_VALUES_EQUAL(origColumns.size(), copyColumns.size());
+
+ for (const auto& oc : origColumns) {
+ UNIT_ASSERT_VALUES_EQUAL(oc.second, copyColumns[oc.first]);
+ }
+ }
+ }
+
+
Y_UNIT_TEST(CopyCopiedTableAndRead) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
@@ -1982,12 +1982,12 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
+ PrepareSourceTable(annoyingClient);
// Copy the table
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- Cerr << "Copy TableOld to Table" << Endl;
+ Cerr << "Copy TableOld to Table" << Endl;
annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
@@ -1995,728 +1995,728 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
Cout << strResult << Endl;
UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-        // Make a copy of the copied table
-        Cerr << "Copy Table to Table3" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table3\" CopyFromTable: \"/dc-1/Dir/Table\"");
- strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table3");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
- }
+        // Make a copy of the copied table
+        Cerr << "Copy Table to Table3" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table3\" CopyFromTable: \"/dc-1/Dir/Table\"");
+ strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table3");
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+ }
Y_UNIT_TEST(CopyTableAndAddFollowers) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
+
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
-
- // Copy the table
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
+
+ // Copy the table
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- Cerr << "Copy TableOld to Table" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\""
+ Cerr << "Copy TableOld to Table" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\""
" PartitionConfig { FollowerCount: 1 }");
-
+
auto fnGetFollowerCount = [&annoyingClient] (const TString& path) {
- auto res = annoyingClient.Ls(path);
- UNIT_ASSERT(res);
- UNIT_ASSERT(res->Record.HasPathDescription());
- UNIT_ASSERT(res->Record.GetPathDescription().HasTable());
+ auto res = annoyingClient.Ls(path);
+ UNIT_ASSERT(res);
+ UNIT_ASSERT(res->Record.HasPathDescription());
+ UNIT_ASSERT(res->Record.GetPathDescription().HasTable());
return res->Record.GetPathDescription().GetTable().GetPartitionConfig().GetFollowerCount();
- };
-
+ };
+
UNIT_ASSERT_VALUES_EQUAL(fnGetFollowerCount("/dc-1/Dir/Table"), 1);
-
- TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
-
- Cout << strResult << Endl;
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
+
+ TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
+ TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
+
+ Cout << strResult << Endl;
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
// Make copy of copy and disable followers
-        Cerr << "Copy Table to Table2" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table2\" CopyFromTable: \"/dc-1/Dir/Table\""
+        Cerr << "Copy Table to Table2" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table2\" CopyFromTable: \"/dc-1/Dir/Table\""
" PartitionConfig { FollowerCount: 0 }");
-
+
UNIT_ASSERT_VALUES_EQUAL(fnGetFollowerCount("/dc-1/Dir/Table2"), 0);
-
- strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table2");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
- }
-
+
+ strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table2");
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+ }
+
Y_UNIT_TEST(CopyTableAndDropCopy) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
+ TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
+ PrepareSourceTable(annoyingClient);
TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- Cerr << strResultOld << Endl;
-
+ Cerr << strResultOld << Endl;
+
THashSet<ui64> datashards;
TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
- datashards.insert(partitions.begin(), partitions.end());
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Make a copy and delete it
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(partitions.begin(), partitions.end());
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
+ datashards.insert(partitions.begin(), partitions.end());
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Make a copy and delete it
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(partitions.begin(), partitions.end());
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Make a new copy
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(partitions.begin(), partitions.end());
-
- strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Delete original table
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- // Delete the copy
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
- // Recreate table with the same name
- PrepareSourceTable(annoyingClient);
-
- // Wait for all datashards to be deleted
- WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Make a new copy
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(partitions.begin(), partitions.end());
+
+ strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Delete original table
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ // Delete the copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
+ // Recreate table with the same name
+ PrepareSourceTable(annoyingClient);
+
+ // Wait for all datashards to be deleted
+ WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
+ }
+
Y_UNIT_TEST(CopyTableAndDropOriginal) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
+
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- Cerr << strResultOld << Endl;
-
+ Cerr << strResultOld << Endl;
+
THashSet<ui64> datashards;
- auto oldShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
- datashards.insert(oldShards.begin(), oldShards.end());
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Make a copy
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
- auto newShards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(newShards.begin(), newShards.end());
-
- // Drop original
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
-
+ auto oldShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
+ datashards.insert(oldShards.begin(), oldShards.end());
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Make a copy
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+ auto newShards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(newShards.begin(), newShards.end());
+
+ // Drop original
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Delete the copy
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
- // Wait for all datashards to be deleted
- WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Delete the copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
+ // Wait for all datashards to be deleted
+ WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
+ }
+
Y_UNIT_TEST(CopyTableAndReturnPartAfterCompaction) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
+
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
-
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
+
THashSet<ui64> datashards;
TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
- datashards.insert(partitions.begin(), partitions.end());
-
- // Copy the table
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- Cerr << "Copy TableOld to Table" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", R"___(
- Name: "Table"
- CopyFromTable: "/dc-1/Dir/TableOld"
-
- PartitionConfig {
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 2
- InMemForceStepsToSnapshot: 3
- InMemForceSizeToSnapshot: 1000000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 2
- ForceCountToCompact: 2
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
- }
+ datashards.insert(partitions.begin(), partitions.end());
+
+ // Copy the table
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ Cerr << "Copy TableOld to Table" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", R"___(
+ Name: "Table"
+ CopyFromTable: "/dc-1/Dir/TableOld"
+
+ PartitionConfig {
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 2
+ InMemForceStepsToSnapshot: 3
+ InMemForceSizeToSnapshot: 1000000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 2
+ ForceCountToCompact: 2
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
+ }
ColumnFamilies {
Id: 0
Storage: ColumnStorageTest_1_2_1k
ColumnCache: ColumnCacheNone
}
- }
- )___");
-
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(partitions.begin(), partitions.end());
-
- // Write new rows to the copy in order to trigger compaction
+ }
+ )___");
+
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(partitions.begin(), partitions.end());
+
+ // Write new rows to the copy in order to trigger compaction
WriteRandomRows(annoyingClient, "Table", 666, 100);
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
-
- // Delete original table
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
-
+
+ // Delete original table
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+
TString strResultAfter = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
- UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
-
- // Delete the copy
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
- WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
+
+ // Delete the copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
+ WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
+ }
+
Y_UNIT_TEST(CopyTableDropOriginalAndReturnPartAfterCompaction) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
+
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
-
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
+
THashSet<ui64> datashards;
TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
- datashards.insert(partitions.begin(), partitions.end());
-
- // Copy the table
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- Cerr << "Copy TableOld to Table" << Endl;
- annoyingClient.CreateTable("/dc-1/Dir", R"___(
- Name: "Table"
- CopyFromTable: "/dc-1/Dir/TableOld"
-
- PartitionConfig {
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 2
- InMemForceStepsToSnapshot: 3
- InMemForceSizeToSnapshot: 1000000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 2
- ForceCountToCompact: 2
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: true
+ datashards.insert(partitions.begin(), partitions.end());
+
+ // Copy the table
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ Cerr << "Copy TableOld to Table" << Endl;
+ annoyingClient.CreateTable("/dc-1/Dir", R"___(
+ Name: "Table"
+ CopyFromTable: "/dc-1/Dir/TableOld"
+
+ PartitionConfig {
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 2
+ InMemForceStepsToSnapshot: 3
+ InMemForceSizeToSnapshot: 1000000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 2
+ ForceCountToCompact: 2
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: true
ExtraCompactionPercent: 0
ExtraCompactionMinSize: 0
ExtraCompactionExpPercent: 0
ExtraCompactionExpMaxSize: 0
UpliftPartSize: 0
- }
- }
+ }
+ }
ColumnFamilies {
Id: 0
Storage: ColumnStorageTest_1_2_1k
ColumnCache: ColumnCacheNone
}
- }
- )___");
-
- // Delete original table
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
-
- // Write new rows to the copy in order to trigger compaction
+ }
+ )___");
+
+ // Delete original table
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+
+ // Write new rows to the copy in order to trigger compaction
WriteRandomRows(annoyingClient, "Table", 666, 100);
- // Check that first partition of the original table is deleted after part is returned
+ // Check that first partition of the original table is deleted after part is returned
WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), THashSet<ui64>({partitions[0]}));
-
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(partitions.begin(), partitions.end());
-
+
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(partitions.begin(), partitions.end());
+
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
- for (int i = 0; i < 5; ++i) {
+ for (int i = 0; i < 5; ++i) {
TString strResultAfter = ReadFromTable(annoyingClient, "/dc-1/Dir/Table");
- UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
- }
-
- // Delete the copy
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
- WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
+ }
+
+ // Delete the copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
+ WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
+ }
+
Y_UNIT_TEST(CopyCopiedTableAndDropFirstCopy) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
+
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
TString strResultOld = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- Cerr << strResultOld << Endl;
-
+ Cerr << strResultOld << Endl;
+
THashSet<ui64> datashards;
- auto oldShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
- datashards.insert(oldShards.begin(), oldShards.end());
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Make a copy
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
- auto shards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- datashards.insert(shards.begin(), shards.end());
-
- // Make a copy-of-copy
- annoyingClient.CreateTable("/dc-1/Dir", " Name: \"TableNew\" CopyFromTable: \"/dc-1/Dir/Table\"");
- auto newShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableNew");
- datashards.insert(newShards.begin(), newShards.end());
-
- // Drop first copy
- annoyingClient.DeleteTable("/dc-1/Dir", "Table");
-
+ auto oldShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
+ datashards.insert(oldShards.begin(), oldShards.end());
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Make a copy
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"Table\" CopyFromTable: \"/dc-1/Dir/TableOld\"");
+ auto shards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ datashards.insert(shards.begin(), shards.end());
+
+ // Make a copy-of-copy
+ annoyingClient.CreateTable("/dc-1/Dir", " Name: \"TableNew\" CopyFromTable: \"/dc-1/Dir/Table\"");
+ auto newShards = annoyingClient.GetTablePartitions("/dc-1/Dir/TableNew");
+ datashards.insert(newShards.begin(), newShards.end());
+
+ // Drop first copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "Table");
+
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/TableNew");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Drop original
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
-
- strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/TableNew");
- UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
-
- // Check that shards of Table are still alive
- for (ui64 tabletId : shards) {
- UNIT_ASSERT_C(annoyingClient.TabletExistsInHive(cleverServer.GetRuntime(), tabletId),
- "Partitions of dropped Table must not be deleted until TableNew returns all borrowed parts");
- }
- // Check that oldShards of TableOld are also alive
- for (ui64 tabletId : oldShards) {
- UNIT_ASSERT_C(annoyingClient.TabletExistsInHive(cleverServer.GetRuntime(), tabletId),
- "Partitions of dropped TableOld must not be deleted until TableNew returns all borrowed parts");
- }
-
- // Drop copy-of-copy
- annoyingClient.DeleteTable("/dc-1/Dir", "TableNew");
-
- // Wait for all datashards to be deleted
- WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Drop original
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+
+ strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/TableNew");
+ UNIT_ASSERT_NO_DIFF(strResult, strResultOld);
+
+ // Check that shards of Table are still alive
+ for (ui64 tabletId : shards) {
+ UNIT_ASSERT_C(annoyingClient.TabletExistsInHive(cleverServer.GetRuntime(), tabletId),
+ "Partitions of dropped Table must not be deleted until TableNew returns all borrowed parts");
+ }
+ // Check that oldShards of TableOld are also alive
+ for (ui64 tabletId : oldShards) {
+ UNIT_ASSERT_C(annoyingClient.TabletExistsInHive(cleverServer.GetRuntime(), tabletId),
+ "Partitions of dropped TableOld must not be deleted until TableNew returns all borrowed parts");
+ }
+
+ // Drop copy-of-copy
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableNew");
+
+ // Wait for all datashards to be deleted
+ WaitForTabletsToBeDeletedInHive(annoyingClient, cleverServer.GetRuntime(), datashards);
+ }
+
void DoSplitMergeTable(TFlatMsgBusClient& annoyingClient, TString table, const TVector<ui64>& srcPartitions, const TVector<ui32>& splitPoints) {
TVector<ui64> partitionsBefore;
- partitionsBefore = annoyingClient.GetTablePartitions(table);
- UNIT_ASSERT(partitionsBefore.size() > 0);
-
+ partitionsBefore = annoyingClient.GetTablePartitions(table);
+ UNIT_ASSERT(partitionsBefore.size() > 0);
+
TString strResultBefore = ReadFromTable(annoyingClient, table);
-
- TStringStream splitDescr;
- for (ui32 src : srcPartitions) {
- splitDescr << " SourceTabletId: " << partitionsBefore[src] << Endl;
- }
- for (ui32 p : splitPoints) {
- splitDescr << " SplitBoundary { KeyPrefix {Tuple { Optional { Uint32: " << p << " } } } }" << Endl;
- }
- annoyingClient.SplitTablePartition(table, splitDescr.Str());
-
+
+ TStringStream splitDescr;
+ for (ui32 src : srcPartitions) {
+ splitDescr << " SourceTabletId: " << partitionsBefore[src] << Endl;
+ }
+ for (ui32 p : splitPoints) {
+ splitDescr << " SplitBoundary { KeyPrefix {Tuple { Optional { Uint32: " << p << " } } } }" << Endl;
+ }
+ annoyingClient.SplitTablePartition(table, splitDescr.Str());
+
TVector<ui64> partitionsAfter;
- partitionsAfter = annoyingClient.GetTablePartitions(table);
- UNIT_ASSERT_VALUES_EQUAL(partitionsAfter.size(),
- partitionsBefore.size() - srcPartitions.size() + splitPoints.size() + 1);
- // TODO: check partitions that were not supposed to change
- //UNIT_ASSERT_VALUES_EQUAL(partitionsAfter.back(), partitionsBefore.back());
-
+ partitionsAfter = annoyingClient.GetTablePartitions(table);
+ UNIT_ASSERT_VALUES_EQUAL(partitionsAfter.size(),
+ partitionsBefore.size() - srcPartitions.size() + splitPoints.size() + 1);
+ // TODO: check partitions that were not supposed to change
+ //UNIT_ASSERT_VALUES_EQUAL(partitionsAfter.back(), partitionsBefore.back());
+
TString strResultAfter = ReadFromTable(annoyingClient, table);
- UNIT_ASSERT_NO_DIFF(strResultBefore, strResultAfter);
- }
-
+ UNIT_ASSERT_NO_DIFF(strResultBefore, strResultAfter);
+ }
+
void SplitTable(TFlatMsgBusClient& annoyingClient, TString table, ui64 partitionIdx, const TVector<ui32>& splitPoints) {
- DoSplitMergeTable(annoyingClient, table, {partitionIdx}, splitPoints);
- }
-
+ DoSplitMergeTable(annoyingClient, table, {partitionIdx}, splitPoints);
+ }
+
void MergeTable(TFlatMsgBusClient& annoyingClient, TString table, const TVector<ui64>& partitionIdxs) {
- DoSplitMergeTable(annoyingClient, table, partitionIdxs, {});
- }
-
- void DisableSplitMergePartCountLimit(TServer& cleverServer) {
- SetSplitMergePartCountLimit(cleverServer.GetRuntime(), -1);
- }
-
- Y_UNIT_TEST(SplitInvalidPath) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
-
- TFlatMsgBusClient annoyingClient(port);
-
- // cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir1");
-
- NKikimrClient::TResponse response;
- annoyingClient.TrySplitTablePartition("/dc-1/Dir1", "SourceTabletId: 100500 SplitBoundary { KeyPrefix {Tuple { Optional { Uint32: 42 } } } }", response);
- // Cerr << response;
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_ERROR);
+ DoSplitMergeTable(annoyingClient, table, partitionIdxs, {});
+ }
+
+ void DisableSplitMergePartCountLimit(TServer& cleverServer) {
+ SetSplitMergePartCountLimit(cleverServer.GetRuntime(), -1);
+ }
+
+ Y_UNIT_TEST(SplitInvalidPath) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ DisableSplitMergePartCountLimit(cleverServer);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ // cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir1");
+
+ NKikimrClient::TResponse response;
+ annoyingClient.TrySplitTablePartition("/dc-1/Dir1", "SourceTabletId: 100500 SplitBoundary { KeyPrefix {Tuple { Optional { Uint32: 42 } } } }", response);
+ // Cerr << response;
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_ERROR);
UNIT_ASSERT_VALUES_EQUAL(response.GetSchemeStatus(), NKikimrScheme::StatusNameConflict);
- }
-
+ }
+
Y_UNIT_TEST(SplitEmptyAndWrite) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"},
- {1001, "DDD"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- for (const auto& r : rows) {
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"},
+ {1001, "DDD"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
- Y_UNIT_TEST(SplitEmptyToMany) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
-
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- const ui32 shardsBefore = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld").size();
- const ui32 SHARD_COUNT = 10000;
- TVector<ui32> points;
- points.reserve(SHARD_COUNT);
- for (ui32 p = 1; p <= SHARD_COUNT - shardsBefore; ++p) {
- points.push_back(p);
- }
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, points);
-
- UNIT_ASSERT_VALUES_EQUAL(SHARD_COUNT, annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld").size());
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
+ Y_UNIT_TEST(SplitEmptyToMany) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ DisableSplitMergePartCountLimit(cleverServer);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
+
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ const ui32 shardsBefore = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld").size();
+ const ui32 SHARD_COUNT = 10000;
+ TVector<ui32> points;
+ points.reserve(SHARD_COUNT);
+ for (ui32 p = 1; p <= SHARD_COUNT - shardsBefore; ++p) {
+ points.push_back(p);
+ }
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, points);
+
+ UNIT_ASSERT_VALUES_EQUAL(SHARD_COUNT, annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld").size());
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
Y_UNIT_TEST(SplitEmptyTwice) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 2, {300, 400});
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 2, {300, 400});
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"},
- {1001, "DDD"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- for (const auto& r : rows) {
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"},
+ {1001, "DDD"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
Y_UNIT_TEST(MergeEmptyAndWrite) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"},
- {1001, "DDD"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- for (const auto& r : rows) {
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"},
+ {1001, "DDD"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
Y_UNIT_TEST(WriteMergeAndRead) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {3000000201, "CCC"},
- {3000001001, "DDD"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
-
- for (const auto& r : rows) {
+ {1, "AAA"},
+ {101, "BBB"},
+ {3000000201, "CCC"},
+ {3000001001, "DDD"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
- Y_UNIT_TEST(WriteSplitByPartialKeyAndRead) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
-
- TFlatMsgBusClient annoyingClient(port);
-
- const char * table = R"___(
- Name: "TableOld"
- Columns { Name: "Key" Type: "Uint32"}
- Columns { Name: "Key2" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- Columns { Name: "Large" Type: "Utf8"}
- KeyColumnNames: ["Key", "Key2"]
- )___";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", table);
-
- auto fnWriteRow = [&annoyingClient](TMaybe<ui32> key1, TMaybe<ui32> key2, TString value) {
- TString insertRowQuery = "("
- "(let key '('('Key (%s)) '('Key2 (%s))))"
- "(let value '('Value (Utf8 '%s)))"
- "(let ret_ (AsList"
- " (UpdateRow '/dc-1/Dir/TableOld key '(value))"
- "))"
- "(return ret_)"
- ")";
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
+ Y_UNIT_TEST(WriteSplitByPartialKeyAndRead) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ DisableSplitMergePartCountLimit(cleverServer);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * table = R"___(
+ Name: "TableOld"
+ Columns { Name: "Key" Type: "Uint32"}
+ Columns { Name: "Key2" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ Columns { Name: "Large" Type: "Utf8"}
+ KeyColumnNames: ["Key", "Key2"]
+ )___";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", table);
+
+ auto fnWriteRow = [&annoyingClient](TMaybe<ui32> key1, TMaybe<ui32> key2, TString value) {
+ TString insertRowQuery = "("
+ "(let key '('('Key (%s)) '('Key2 (%s))))"
+ "(let value '('Value (Utf8 '%s)))"
+ "(let ret_ (AsList"
+ " (UpdateRow '/dc-1/Dir/TableOld key '(value))"
+ "))"
+ "(return ret_)"
+ ")";
+
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(),
key1 ? Sprintf("Uint32 '%u", *key1).data() : "Nothing (OptionalType (DataType 'Uint32))",
key2 ? Sprintf("Uint32 '%u", *key2).data() : "Nothing (OptionalType (DataType 'Uint32))",
value.data()));
- };
-
- const ui32 splitKey = 100;
-
- fnWriteRow(0, 10000, "AAA");
-
- fnWriteRow(splitKey, {}, "BBB");
- fnWriteRow(splitKey, 0, "CCC");
- fnWriteRow(splitKey, -1, "DDD");
-
- fnWriteRow(splitKey+1, {}, "EEE");
- fnWriteRow(splitKey+1, 0, "FFF");
- fnWriteRow(splitKey+1, -1, "GGG");
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {splitKey});
- }
-
+ };
+
+ const ui32 splitKey = 100;
+
+ fnWriteRow(0, 10000, "AAA");
+
+ fnWriteRow(splitKey, {}, "BBB");
+ fnWriteRow(splitKey, 0, "CCC");
+ fnWriteRow(splitKey, -1, "DDD");
+
+ fnWriteRow(splitKey+1, {}, "EEE");
+ fnWriteRow(splitKey+1, 0, "FFF");
+ fnWriteRow(splitKey+1, -1, "GGG");
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {splitKey});
+ }
+
Y_UNIT_TEST(SplitThenMerge) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
- MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {1, 2});
- MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
+ MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {1, 2});
+ MergeTable(annoyingClient, "/dc-1/Dir/TableOld", {0, 1});
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"},
- {1001, "DDD"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- for (const auto& r : rows) {
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"},
+ {1001, "DDD"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
Y_UNIT_TEST(WriteSplitAndRead) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"}
- };
- for (const auto& r : rows) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"}
+ };
+ for (const auto& r : rows) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
TString strResult = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
-
- for (const auto& r : rows) {
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
+
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
TString strResultAfter = ReadFromTable(annoyingClient, "/dc-1/Dir/TableOld");
- UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
-
+ UNIT_ASSERT_NO_DIFF(strResultAfter, strResult);
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
+
Y_UNIT_TEST(WriteSplitAndReadFromFollower) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetNodeCount(2));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
@@ -2772,7 +2772,7 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
TPortManager pm;
ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
@@ -2817,455 +2817,455 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
}
Y_UNIT_TEST(SplitBoundaryRead) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write 1 row and split on its key
- WriteRow(annoyingClient, "TableOld", 11111, "Boundary");
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {11111});
- }
-
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write 1 row and split on its key
+ WriteRow(annoyingClient, "TableOld", 11111, "Boundary");
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {11111});
+ }
+
Y_UNIT_TEST(WriteSplitWriteSplit) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
- PrepareSourceTable(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write new rows to the copy in order to trigger compaction
+ TFlatMsgBusClient annoyingClient(port);
+ PrepareSourceTable(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write new rows to the copy in order to trigger compaction
TMap<ui32, TString> rows1 = {
- {1, "AAA"},
- {101, "BBB"},
- {201, "CCC"}
- };
- for (const auto& r : rows1) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
-
+ {1, "AAA"},
+ {101, "BBB"},
+ {201, "CCC"}
+ };
+ for (const auto& r : rows1) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 0, {100, 200});
+
TMap<ui32, TString> rows2 = {
- {2, "2222AAA"},
- {102, "2222BBB"},
- {202, "2222CCC"}
- };
- for (const auto& r : rows2) {
- WriteRow(annoyingClient, "TableOld", r.first, r.second);
- }
-
- SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 1, {101});
-
+ {2, "2222AAA"},
+ {102, "2222BBB"},
+ {202, "2222CCC"}
+ };
+ for (const auto& r : rows2) {
+ WriteRow(annoyingClient, "TableOld", r.first, r.second);
+ }
+
+ SplitTable(annoyingClient, "/dc-1/Dir/TableOld", 1, {101});
+
TMap<ui32, TString> rows = rows1;
- rows.insert(rows2.begin(), rows2.end());
- for (const auto& r : rows) {
+ rows.insert(rows2.begin(), rows2.end());
+ for (const auto& r : rows) {
TString val = ReadRow(annoyingClient, "TableOld", r.first);
- UNIT_ASSERT_VALUES_EQUAL(val, r.second);
- }
-
- annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
- annoyingClient.Ls("/dc-1/Dir/TableOld");
- }
+ UNIT_ASSERT_VALUES_EQUAL(val, r.second);
+ }
+
+ annoyingClient.DeleteTable("/dc-1/Dir", "TableOld");
+ annoyingClient.Ls("/dc-1/Dir/TableOld");
+ }
Y_UNIT_TEST(AutoSplitBySize) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
+ DisableSplitMergePartCountLimit(cleverServer);
cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_INFO);
- TFlatMsgBusClient annoyingClient(port);
-
- const char * tableDescr = R"___(
- Name: "T1"
- Columns { Name: "Key" Type: "String"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["Key"]
-
- PartitionConfig {
- PartitioningPolicy {
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * tableDescr = R"___(
+ Name: "T1"
+ Columns { Name: "Key" Type: "String"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["Key"]
+
+ PartitionConfig {
+ PartitioningPolicy {
SizeToSplit: 45000000
- }
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 2
- InMemForceSizeToSnapshot: 200000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 2
- ForceCountToCompact: 2
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
- }
- }
- )___";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
-
+ }
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 1
+ InMemForceStepsToSnapshot: 2
+ InMemForceSizeToSnapshot: 200000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 2
+ ForceCountToCompact: 2
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
+ }
+ }
+ )___";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
+
TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 1);
-
- // Force stats reporting without delays
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 1);
+
+ // Force stats reporting without delays
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write rows to trigger split
- auto fnWriteRow = [&](TString key, TString value) {
- Cerr << key << Endl;
- TString insertRowQuery = R"___(
- (
- (let key '('('Key (String '%s))))
- (let value '('('Value (Utf8 '%s))))
- (let ret_ (AsList
- (UpdateRow '/dc-1/Dir/%s key value)
- ))
- (return ret_)
- )
- )___";
-
- int retryCnt = 20;
- while (retryCnt--) {
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write rows to trigger split
+ auto fnWriteRow = [&](TString key, TString value) {
+ Cerr << key << Endl;
+ TString insertRowQuery = R"___(
+ (
+ (let key '('('Key (String '%s))))
+ (let value '('('Value (Utf8 '%s))))
+ (let ret_ (AsList
+ (UpdateRow '/dc-1/Dir/%s key value)
+ ))
+ (return ret_)
+ )
+ )___";
+
+ int retryCnt = 20;
+ while (retryCnt--) {
+ TFlatMsgBusClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
annoyingClient.FlatQueryRaw(Sprintf(insertRowQuery.data(), key.data(), value.data(), "T1"), opts, response);
- ui32 responseStatus = response.GetStatus();
- if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
- Sleep(TDuration::Seconds(1));
- } else {
- UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
- break;
- }
- }
- };
-
+ ui32 responseStatus = response.GetStatus();
+ if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
+ Sleep(TDuration::Seconds(1));
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
+ break;
+ }
+ }
+ };
+
TString bigValue(6*1024*1024, 'a');
-
- for (int i = 0; i < 4; ++i) {
- fnWriteRow(Sprintf("A-%d", i), bigValue);
- fnWriteRow(Sprintf("B-%d", i), bigValue);
- }
-
- // Check that split actually happened
- for (int retry = 0; retry < 30 && partitions.size() == 1; ++retry) {
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- Sleep(TDuration::Seconds(1));
- }
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 2);
-
- // Write some more rows to trigger another split
+
+ for (int i = 0; i < 4; ++i) {
+ fnWriteRow(Sprintf("A-%d", i), bigValue);
+ fnWriteRow(Sprintf("B-%d", i), bigValue);
+ }
+
+ // Check that split actually happened
+ for (int retry = 0; retry < 30 && partitions.size() == 1; ++retry) {
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ Sleep(TDuration::Seconds(1));
+ }
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 2);
+
+ // Write some more rows to trigger another split
for (int i = 0; i < 4; ++i) {
- fnWriteRow(Sprintf("C-%d", i), bigValue);
- }
-
- // Check that split actually happened
- for (int retry = 0; retry < 30 && partitions.size() == 2; ++retry) {
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- Sleep(TDuration::Seconds(1));
- }
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 3);
- }
-
- void WriteKVRow(TFlatMsgBusClient& annoyingClient, ui32 key, TString value) {
- Cerr << key << Endl;
- TString insertRowQuery = R"___(
- (
- (let key '('('Key (Uint32 '%u))))
- (let value '('('Value (Utf8 '%s))))
- (let ret_ (AsList
- (UpdateRow '/dc-1/Dir/%s key value)
- ))
- (return ret_)
- )
- )___";
-
- int retryCnt = 20;
- while (retryCnt--) {
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
+ fnWriteRow(Sprintf("C-%d", i), bigValue);
+ }
+
+ // Check that split actually happened
+ for (int retry = 0; retry < 30 && partitions.size() == 2; ++retry) {
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ Sleep(TDuration::Seconds(1));
+ }
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 3);
+ }
+
+ void WriteKVRow(TFlatMsgBusClient& annoyingClient, ui32 key, TString value) {
+ Cerr << key << Endl;
+ TString insertRowQuery = R"___(
+ (
+ (let key '('('Key (Uint32 '%u))))
+ (let value '('('Value (Utf8 '%s))))
+ (let ret_ (AsList
+ (UpdateRow '/dc-1/Dir/%s key value)
+ ))
+ (return ret_)
+ )
+ )___";
+
+ int retryCnt = 20;
+ while (retryCnt--) {
+ TFlatMsgBusClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
annoyingClient.FlatQueryRaw(Sprintf(insertRowQuery.data(), key, value.data(), "T1"), opts, response);
- ui32 responseStatus = response.GetStatus();
- if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
- Sleep(TDuration::Seconds(1));
- } else {
- UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
- break;
- }
- }
- }
-
- void EraseKVRow(TFlatMsgBusClient& annoyingClient, ui32 key) {
- Cerr << key << Endl;
- TString query = R"___(
- (
- (let key '('('Key (Uint32 '%u))))
- (let ret_ (AsList
- (EraseRow '/dc-1/Dir/%s key)
- ))
- (return ret_)
- )
- )___";
-
- int retryCnt = 20;
- while (retryCnt--) {
- TFlatMsgBusClient::TFlatQueryOptions opts;
- NKikimrClient::TResponse response;
+ ui32 responseStatus = response.GetStatus();
+ if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
+ Sleep(TDuration::Seconds(1));
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
+ break;
+ }
+ }
+ }
+
+ void EraseKVRow(TFlatMsgBusClient& annoyingClient, ui32 key) {
+ Cerr << key << Endl;
+ TString query = R"___(
+ (
+ (let key '('('Key (Uint32 '%u))))
+ (let ret_ (AsList
+ (EraseRow '/dc-1/Dir/%s key)
+ ))
+ (return ret_)
+ )
+ )___";
+
+ int retryCnt = 20;
+ while (retryCnt--) {
+ TFlatMsgBusClient::TFlatQueryOptions opts;
+ NKikimrClient::TResponse response;
annoyingClient.FlatQueryRaw(Sprintf(query.data(), key, "T1"), opts, response);
- ui32 responseStatus = response.GetStatus();
- if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
- Sleep(TDuration::Seconds(1));
- } else {
- UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
- break;
- }
- }
- }
-
+ ui32 responseStatus = response.GetStatus();
+ if (responseStatus == NMsgBusProxy::MSTATUS_REJECTED) {
+ Sleep(TDuration::Seconds(1));
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL(responseStatus, NMsgBusProxy::MSTATUS_OK);
+ break;
+ }
+ }
+ }
+
Y_UNIT_TEST(AutoMergeBySize) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- DisableSplitMergePartCountLimit(cleverServer);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_ERROR);
-
- TFlatMsgBusClient annoyingClient(port);
-
- const char * tableDescr = R"___(
- Name: "T1"
- Columns { Name: "Key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["Key"]
- UniformPartitionsCount: 4
- PartitionConfig {
- PartitioningPolicy {
- SizeToSplit: 50000000
- MaxPartitionsCount: 6
- }
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 2
- InMemForceSizeToSnapshot: 200000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 2
- ForceCountToCompact: 2
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
- }
- }
- )___";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
-
- TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 4);
-
- // Force stats reporting without delays
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ DisableSplitMergePartCountLimit(cleverServer);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_ERROR);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * tableDescr = R"___(
+ Name: "T1"
+ Columns { Name: "Key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["Key"]
+ UniformPartitionsCount: 4
+ PartitionConfig {
+ PartitioningPolicy {
+ SizeToSplit: 50000000
+ MaxPartitionsCount: 6
+ }
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 1
+ InMemForceStepsToSnapshot: 2
+ InMemForceSizeToSnapshot: 200000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 2
+ ForceCountToCompact: 2
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
+ }
+ }
+ )___";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
+
+ TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 4);
+
+ // Force stats reporting without delays
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- // Write rows to trigger split
- auto fnWriteRow = [&annoyingClient] (ui32 key, TString value) {
- return WriteKVRow(annoyingClient, key, value);
- };
-
- TString smallValue(10*1024, '0');
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ // Write rows to trigger split
+ auto fnWriteRow = [&annoyingClient] (ui32 key, TString value) {
+ return WriteKVRow(annoyingClient, key, value);
+ };
+
+ TString smallValue(10*1024, '0');
TString bigValue(7*1024*1024, '0');
-
- // Allow the table to shrink to 2 partitions
- annoyingClient.AlterTable("/dc-1/Dir", R"(
- Name: "T1"
- PartitionConfig {
- PartitioningPolicy {
- MinPartitionsCount: 2
- }
- }
- )");
-
- // Write some values to trigger stats update and merge
- for (int i = 0; i < 5; ++i) {
- fnWriteRow(0x42, smallValue);
- fnWriteRow(0x40000042, smallValue);
- fnWriteRow(0x80000042, smallValue);
- fnWriteRow(0xc0000042, smallValue);
- }
-
- // Check that merge actually happened
- for (int retry = 0; retry < 15 && partitions.size() > 2; ++retry) {
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- Sleep(TDuration::Seconds(1));
- }
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 2);
-
- // Write some more rows to trigger split
- for (int i = 0; i < 50; ++i) {
- fnWriteRow(0x20000000 + i, bigValue);
- }
-
- auto lsRes = annoyingClient.Ls("/dc-1/Dir/T1");
- Cout << lsRes->Record << Endl;
-
- // Check that split actually happened
- for (int retry = 0; retry < 15 && partitions.size() < 6; ++retry) {
- partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- Sleep(TDuration::Seconds(1));
- }
- UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 6);
- }
-
+
+ // Allow the table to shrink to 2 partitions
+ annoyingClient.AlterTable("/dc-1/Dir", R"(
+ Name: "T1"
+ PartitionConfig {
+ PartitioningPolicy {
+ MinPartitionsCount: 2
+ }
+ }
+ )");
+
+ // Write some values to trigger stats update and merge
+ for (int i = 0; i < 5; ++i) {
+ fnWriteRow(0x42, smallValue);
+ fnWriteRow(0x40000042, smallValue);
+ fnWriteRow(0x80000042, smallValue);
+ fnWriteRow(0xc0000042, smallValue);
+ }
+
+ // Check that merge actually happened
+ for (int retry = 0; retry < 15 && partitions.size() > 2; ++retry) {
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ Sleep(TDuration::Seconds(1));
+ }
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 2);
+
+ // Write some more rows to trigger split
+ for (int i = 0; i < 50; ++i) {
+ fnWriteRow(0x20000000 + i, bigValue);
+ }
+
+ auto lsRes = annoyingClient.Ls("/dc-1/Dir/T1");
+ Cout << lsRes->Record << Endl;
+
+ // Check that split actually happened
+ for (int retry = 0; retry < 15 && partitions.size() < 6; ++retry) {
+ partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ Sleep(TDuration::Seconds(1));
+ }
+ UNIT_ASSERT_VALUES_EQUAL(partitions.size(), 6);
+ }
+
Y_UNIT_TEST(AutoSplitMergeQueue) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port).SetEnableMvcc(false));
- DisableSplitMergePartCountLimit(cleverServer);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_ERROR);
- //cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
-
- TFlatMsgBusClient annoyingClient(port);
-
- const char * tableDescr = R"___(
- Name: "T1"
- Columns { Name: "Key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["Key"]
- PartitionConfig {
- PartitioningPolicy {
- SizeToSplit: 300000
- MaxPartitionsCount: 6
- MinPartitionsCount: 1
- }
- CompactionPolicy {
- InMemSizeToSnapshot: 10000
- InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 1
- InMemForceSizeToSnapshot: 20000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- }
- }
- )___";
-
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
-
- TVector<ui64> initialPartitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
-
- // Force stats reporting without delays
+ DisableSplitMergePartCountLimit(cleverServer);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_ERROR);
+ //cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char * tableDescr = R"___(
+ Name: "T1"
+ Columns { Name: "Key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["Key"]
+ PartitionConfig {
+ PartitioningPolicy {
+ SizeToSplit: 300000
+ MaxPartitionsCount: 6
+ MinPartitionsCount: 1
+ }
+ CompactionPolicy {
+ InMemSizeToSnapshot: 10000
+ InMemStepsToSnapshot: 1
+ InMemForceStepsToSnapshot: 1
+ InMemForceSizeToSnapshot: 20000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ }
+ }
+ )___";
+
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir", tableDescr);
+
+ TVector<ui64> initialPartitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+
+ // Force stats reporting without delays
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
NDataShard::gDbStatsDataSizeResolution = 80000;
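+    // Fine-grained (80 KB) size resolution so each ~100 KB write shows up in the reported stats and split/merge is re-evaluated promptly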
-
- TString bigValue(100*1024, '0');
- int key = 0;
-
- // Write some values to the tail and delete from the head
- for (; key < 300; ++key) {
- WriteKVRow(annoyingClient, key, bigValue);
- EraseKVRow(annoyingClient, key-30);
- if (key % 50 == 0) {
- TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- UNIT_ASSERT_C(partitions.size() <= 6, "Table grew beyond MaxPartitionsCount");
- }
- }
- TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
-    UNIT_ASSERT_VALUES_UNEQUAL_C(partitions.size(), 1, "Split didn't happen");
-
-    // Delete the rest of the rows
- for (key -= 30; key < 300; ++key) {
- EraseKVRow(annoyingClient, key);
- }
-
- // Wait for merge to happen
+
+ TString bigValue(100*1024, '0');
+ int key = 0;
+
+ // Write some values to the tail and delete from the head
+ for (; key < 300; ++key) {
+ WriteKVRow(annoyingClient, key, bigValue);
+ EraseKVRow(annoyingClient, key-30);
+ if (key % 50 == 0) {
+ TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ UNIT_ASSERT_C(partitions.size() <= 6, "Table grew beyond MaxPartitionsCount");
+ }
+ }
+ TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+    UNIT_ASSERT_VALUES_UNEQUAL_C(partitions.size(), 1, "Split didn't happen");
+
+    // Delete the rest of the rows
+ for (key -= 30; key < 300; ++key) {
+ EraseKVRow(annoyingClient, key);
+ }
+
+ // Wait for merge to happen
for (int retry = 0; retry < 45 && annoyingClient.GetTablePartitions("/dc-1/Dir/T1").size() != 1; ++retry) {
- Sleep(TDuration::Seconds(1));
- }
-
- TVector<ui64> finalPartitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
- UNIT_ASSERT_VALUES_EQUAL_C(finalPartitions.size(), 1, "Empty table didn't merge into 1 shard");
-    UNIT_ASSERT_VALUES_UNEQUAL_C(finalPartitions[0], initialPartitions[0], "Partitions didn't change");
- }
-
+ Sleep(TDuration::Seconds(1));
+ }
+
+ TVector<ui64> finalPartitions = annoyingClient.GetTablePartitions("/dc-1/Dir/T1");
+ UNIT_ASSERT_VALUES_EQUAL_C(finalPartitions.size(), 1, "Empty table didn't merge into 1 shard");
+    UNIT_ASSERT_VALUES_UNEQUAL_C(finalPartitions[0], initialPartitions[0], "Partitions didn't change");
+ }
+
Y_UNIT_TEST(GetTabletCounters) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareSourceTable(annoyingClient);
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareSourceTable(annoyingClient);
TVector<ui64> partitions = annoyingClient.GetTablePartitions("/dc-1/Dir/TableOld");
-
- TAutoPtr<NKikimr::NMsgBusProxy::TBusTabletCountersRequest> request(new NKikimr::NMsgBusProxy::TBusTabletCountersRequest());
- request->Record.SetTabletID(partitions[0]);
+
+ TAutoPtr<NKikimr::NMsgBusProxy::TBusTabletCountersRequest> request(new NKikimr::NMsgBusProxy::TBusTabletCountersRequest());
+ request->Record.SetTabletID(partitions[0]);
request->Record.SetConnectToFollower(false);
-
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus status = annoyingClient.SyncCall(request, reply);
-
- UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
- const NKikimr::NMsgBusProxy::TBusResponse* res = dynamic_cast<NKikimr::NMsgBusProxy::TBusResponse*>(reply.Get());
- UNIT_ASSERT(res);
-// Cerr << res->Record << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res->Record.GetStatus(), NKikimr::NMsgBusProxy::MSTATUS_OK);
- UNIT_ASSERT_VALUES_EQUAL(res->Record.GetTabletId(), partitions[0]);
- UNIT_ASSERT(res->Record.HasTabletCounters());
- bool found = false;
- for (const auto& sc : res->Record.GetTabletCounters().GetExecutorCounters().GetSimpleCounters()) {
+
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus status = annoyingClient.SyncCall(request, reply);
+
+ UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
+ const NKikimr::NMsgBusProxy::TBusResponse* res = dynamic_cast<NKikimr::NMsgBusProxy::TBusResponse*>(reply.Get());
+ UNIT_ASSERT(res);
+// Cerr << res->Record << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res->Record.GetStatus(), NKikimr::NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT_VALUES_EQUAL(res->Record.GetTabletId(), partitions[0]);
+ UNIT_ASSERT(res->Record.HasTabletCounters());
+ bool found = false;
+ for (const auto& sc : res->Record.GetTabletCounters().GetExecutorCounters().GetSimpleCounters()) {
if (sc.GetName() == "DbDataBytes") {
- found = true;
- break;
- }
- }
+ found = true;
+ break;
+ }
+ }
UNIT_ASSERT_C(found, "DbDataBytes counter not found");
- }
+ }
void LargeDatashardReplyRO(TFlatMsgBusClient& client) {
const ui32 TABLE_ROWS = 700;
@@ -3494,108 +3494,108 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
res = annoyingClient.FlatQuery(selectQuery);
UNIT_ASSERT(!res.GetValue().GetStruct(0).GetOptional().HasOptional());
}
-
+
Y_UNIT_TEST(PartBloomFilter) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- if (!true) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_DEBUG);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ if (!true) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::OPS_COMPACT, NActors::NLog::PRI_DEBUG);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
const char * table = R"(Name: "TableWithFilter"
- Columns { Name: "key1" Type: "Uint32" }
- Columns { Name: "value" Type: "Uint32" }
- KeyColumnNames: ["key1"]
- PartitionConfig {
- CompactionPolicy {
- InMemSizeToSnapshot: 100000
- InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 2
- InMemForceSizeToSnapshot: 200000
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 200000
- ReadAheadLoThreshold: 100000
- MinDataPageSize: 7168
- Generation {
- GenerationId: 0
- SizeToCompact: 10000
- CountToCompact: 200
- ForceCountToCompact: 200
- ForceSizeToCompact: 20000
- CompactionBrokerQueue: 1
- KeepInCache: false
- }
- }
+ Columns { Name: "key1" Type: "Uint32" }
+ Columns { Name: "value" Type: "Uint32" }
+ KeyColumnNames: ["key1"]
+ PartitionConfig {
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100000
+ InMemStepsToSnapshot: 1
+ InMemForceStepsToSnapshot: 2
+ InMemForceSizeToSnapshot: 200000
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 200000
+ ReadAheadLoThreshold: 100000
+ MinDataPageSize: 7168
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 10000
+ CountToCompact: 200
+ ForceCountToCompact: 200
+ ForceSizeToCompact: 20000
+ CompactionBrokerQueue: 1
+ KeepInCache: false
+ }
+ }
EnableFilterByKey: true
- }
- )";
-
- annoyingClient.InitRoot();
+ }
+ )";
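+    // EnableFilterByKey turns on the by-key part (bloom) filter for this table; the point lookups below must still return every written row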
+
+ annoyingClient.InitRoot();
auto res = annoyingClient.CreateTable("/dc-1", table);
UNIT_ASSERT_VALUES_EQUAL(res, NMsgBusProxy::MSTATUS_OK);
-
- const ui32 ROW_COUNT = 30;
-
- // Write many rows to produce some flat parts
- Cout << "INSERT key = i" << Endl;
- for (ui32 i = 0; i < ROW_COUNT; ++i) {
- annoyingClient.FlatQuery(Sprintf(R"((
- (let row_ '('('key1 (Uint32 '%u))))
- (let cols_ '('('value (Uint32 '%u))))
+
+ const ui32 ROW_COUNT = 30;
+
+ // Write many rows to produce some flat parts
+ Cout << "INSERT key = i" << Endl;
+ for (ui32 i = 0; i < ROW_COUNT; ++i) {
+ annoyingClient.FlatQuery(Sprintf(R"((
+ (let row_ '('('key1 (Uint32 '%u))))
+ (let cols_ '('('value (Uint32 '%u))))
(let insert (UpdateRow '/dc-1/TableWithFilter row_ cols_))
- (let ret_ (AsList insert))
- (return ret_)
- ))", i, i));
- }
-
+ (let ret_ (AsList insert))
+ (return ret_)
+ ))", i, i));
+ }
+
Cout << "SELECT value FROM TableWithFilter WHERE key = i" << Endl;
- for (ui32 i = 0; i < ROW_COUNT; ++i) {
- auto res = annoyingClient.FlatQuery(Sprintf(R"((
- (let row_ '('('key1 (Uint32 '%u))))
- (let cols_ '('value))
+ for (ui32 i = 0; i < ROW_COUNT; ++i) {
+ auto res = annoyingClient.FlatQuery(Sprintf(R"((
+ (let row_ '('('key1 (Uint32 '%u))))
+ (let cols_ '('value))
(let select_ (SelectRow '/dc-1/TableWithFilter row_ cols_))
- (let ret_ (AsList (SetResult 'ret0 select_)))
- (return ret_)
- ))", i));
-
- // Cout << res << Endl;
+ (let ret_ (AsList (SetResult 'ret0 select_)))
+ (return ret_)
+ ))", i));
+
+ // Cout << res << Endl;
TValue value = TValue::Create(res.GetValue(), res.GetType());
TValue ret0 = value["ret0"];
ui32 val = ret0[0];
- UNIT_ASSERT_VALUES_EQUAL(val, i);
- }
-
- // Extend the key
- Cout << "ALTER TABLE Table DROP PRIMARY KEY, ADD COLUMN key2 ADD PRIMARY KEY(key1, key2)" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(val, i);
+ }
+
+ // Extend the key
+ Cout << "ALTER TABLE Table DROP PRIMARY KEY, ADD COLUMN key2 ADD PRIMARY KEY(key1, key2)" << Endl;
res = annoyingClient.AlterTable("/dc-1", R"(
Name: "TableWithFilter"
- Columns { Name: "key2" Type: "Uint64" }
- KeyColumnNames: ["key1", "key2"]
- )");
+ Columns { Name: "key2" Type: "Uint64" }
+ KeyColumnNames: ["key1", "key2"]
+ )");
UNIT_ASSERT_VALUES_EQUAL(res, NMsgBusProxy::MSTATUS_OK);
-
- // Read old rows
- Cout << "SELECT value FROM Table WHERE key1 = i AND key2 is NULL" << Endl;
- for (ui32 i = 0; i < ROW_COUNT; ++i) {
- auto res = annoyingClient.FlatQuery(Sprintf(R"((
- (let row_ '('('key1 (Uint32 '%u)) '('key2 (Null))))
- (let cols_ '('value))
+
+ // Read old rows
+ Cout << "SELECT value FROM Table WHERE key1 = i AND key2 is NULL" << Endl;
+ for (ui32 i = 0; i < ROW_COUNT; ++i) {
+ auto res = annoyingClient.FlatQuery(Sprintf(R"((
+ (let row_ '('('key1 (Uint32 '%u)) '('key2 (Null))))
+ (let cols_ '('value))
(let select_ (SelectRow '/dc-1/TableWithFilter row_ cols_))
- (let ret_ (AsList (SetResult 'ret0 select_)))
- (return ret_)
- ))", i));
-
- // Cout << res << Endl;
+ (let ret_ (AsList (SetResult 'ret0 select_)))
+ (return ret_)
+ ))", i));
+
+ // Cout << res << Endl;
TValue value = TValue::Create(res.GetValue(), res.GetType());
TValue ret0 = value["ret0"];
ui32 val = ret0[0];
- UNIT_ASSERT_VALUES_EQUAL(val, i);
- }
+ UNIT_ASSERT_VALUES_EQUAL(val, i);
+ }
}
-
+
Y_UNIT_TEST(SelectRangeBytesLimit) {
const ui32 TABLE_ROWS = 10;
@@ -3655,7 +3655,7 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
auto key = (ui64) list[i]["Key"];
UNIT_ASSERT(key < 100);
}
- }
+ }
Y_UNIT_TEST(SelectRangeItemsLimit) {
const ui32 TABLE_ROWS = 10;
@@ -4262,6 +4262,6 @@ Y_UNIT_TEST_SUITE(TFlatTest) {
UNIT_ASSERT_VALUES_EQUAL(expected, 5); // 5 should not have been included
}
-}
-
-}}
+}
+
+}}
diff --git a/ydb/core/client/flat_ut_client.h b/ydb/core/client/flat_ut_client.h
index 96e61658f92..07f5b30cc6b 100644
--- a/ydb/core/client/flat_ut_client.h
+++ b/ydb/core/client/flat_ut_client.h
@@ -1,34 +1,34 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/base/tablet_resolver.h>
#include <ydb/core/testlib/test_client.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
#include <ydb/public/lib/deprecated/kicli/kicli.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
#include <google/protobuf/text_format.h>
-
-namespace NKikimr {
-namespace NFlatTests {
-
-class TFlatMsgBusClient : public Tests::TClient {
-public:
+
+namespace NKikimr {
+namespace NFlatTests {
+
+class TFlatMsgBusClient : public Tests::TClient {
+public:
TFlatMsgBusClient(const Tests::TServerSettings& settings)
: TClient(settings)
{}
TFlatMsgBusClient(ui16 port)
: TFlatMsgBusClient(Tests::TServerSettings(port))
- {}
-
- void InitRoot() {
+ {}
+
+ void InitRoot() {
InitRootScheme();
- }
-
+ }
+
using TClient::FlatQuery;
NKikimrMiniKQL::TResult FlatQuery(const TString& mkql) {
- NKikimrMiniKQL::TResult res;
+ NKikimrMiniKQL::TResult res;
TClient::TFlatQueryOptions opts;
bool success = TClient::FlatQuery(mkql, opts, res, NMsgBusProxy::MSTATUS_OK);
UNIT_ASSERT(success);
@@ -46,20 +46,20 @@ public:
bool success = TClient::FlatQuery(mkql, opts, res, expectedResponse);
UNIT_ASSERT(success == (expectedStatus == NMsgBusProxy::MSTATUS_OK));
return res;
- }
-
+ }
+
TAutoPtr<NMsgBusProxy::TBusResponse> LsPathId(ui64 schemeshardId, ui64 pathId) {
TAutoPtr<NMsgBusProxy::TBusSchemeDescribe> request(new NMsgBusProxy::TBusSchemeDescribe());
- request->Record.SetPathId(pathId);
- request->Record.SetSchemeshardId(schemeshardId);
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus msgStatus = SendWhenReady(request, reply);
- UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
+ request->Record.SetPathId(pathId);
+ request->Record.SetSchemeshardId(schemeshardId);
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus msgStatus = SendWhenReady(request, reply);
+ UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
Cout << PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()) << Endl;
return dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Release());
- }
+ }
- void ResetSchemeCache(Tests::TServer &server, TTableId tableId) {
+ void ResetSchemeCache(Tests::TServer &server, TTableId tableId) {
TTestActorRuntime* runtime = server.GetRuntime();
TActorId txProxy = MakeTxProxyID();
TActorId sender = runtime->AllocateEdgeActor();
@@ -69,9 +69,9 @@ public:
auto readSchemeStringResult = runtime->GrabEdgeEventRethrow<TEvTxUserProxy::TEvInvalidateTableResult>(handle);
Y_UNUSED(readSchemeStringResult);
}
-
- void KillTablet(Tests::TServer &server, ui64 tabletId) {
- TTestActorRuntime* runtime = server.GetRuntime();
+
+ void KillTablet(Tests::TServer &server, ui64 tabletId) {
+ TTestActorRuntime* runtime = server.GetRuntime();
TActorId sender = runtime->AllocateEdgeActor();
runtime->Send(new IEventHandle(MakeTabletResolverID(), sender, new TEvTabletResolver::TEvTabletProblem(tabletId, TActorId())));
@@ -82,41 +82,41 @@ public:
UNIT_ASSERT(forwardResult && forwardResult->Tablet);
runtime->Send(new IEventHandle(forwardResult->Tablet, sender, new TEvents::TEvPoisonPill()));
runtime->Send(new IEventHandle(MakeTabletResolverID(), sender, new TEvTabletResolver::TEvTabletProblem(tabletId, TActorId())));
- }
-
+ }
+
TVector<ui64> GetTablePartitions(const TString& tablePath) {
- TAutoPtr<NMsgBusProxy::TBusResponse> msg = Ls(tablePath);
+ TAutoPtr<NMsgBusProxy::TBusResponse> msg = Ls(tablePath);
const NKikimrClient::TResponse &response = msg->Record;
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- const auto& descr = response.GetPathDescription();
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ const auto& descr = response.GetPathDescription();
TVector<ui64> partitions;
- for (ui32 i = 0; i < descr.TablePartitionsSize(); ++i) {
- partitions.push_back(descr.GetTablePartitions(i).GetDatashardId());
- // Cerr << partitions.back() << Endl;
- }
- return partitions;
- }
-
- void TrySplitTablePartition(const TString& tablePath, const TString& splitDescription, NKikimrClient::TResponse& response) {
+ for (ui32 i = 0; i < descr.TablePartitionsSize(); ++i) {
+ partitions.push_back(descr.GetTablePartitions(i).GetDatashardId());
+ // Cerr << partitions.back() << Endl;
+ }
+ return partitions;
+ }
+
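+    // Sends an ESchemeOpSplitMergeTablePartitions request built from the text-format splitDescription and stores the raw reply in response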
+ void TrySplitTablePartition(const TString& tablePath, const TString& splitDescription, NKikimrClient::TResponse& response) {
TAutoPtr<NMsgBusProxy::TBusSchemeOperation> request(new NMsgBusProxy::TBusSchemeOperation());
- auto *op = request->Record.MutableTransaction()->MutableModifyScheme();
+ auto *op = request->Record.MutableTransaction()->MutableModifyScheme();
op->SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpSplitMergeTablePartitions);
- UNIT_ASSERT(::google::protobuf::TextFormat::ParseFromString(splitDescription, op->MutableSplitMergeTablePartitions()));
- op->MutableSplitMergeTablePartitions()->SetTablePath(tablePath);
-
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus status = SendAndWaitCompletion(request.Release(), reply);
- UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
+ UNIT_ASSERT(::google::protobuf::TextFormat::ParseFromString(splitDescription, op->MutableSplitMergeTablePartitions()));
+ op->MutableSplitMergeTablePartitions()->SetTablePath(tablePath);
+
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus status = SendAndWaitCompletion(request.Release(), reply);
+ UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
UNIT_ASSERT_VALUES_EQUAL(reply->GetHeader()->Type, (int)NMsgBusProxy::MTYPE_CLIENT_RESPONSE);
- response = static_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- }
-
- NMsgBusProxy::EResponseStatus SplitTablePartition(const TString& tablePath, const TString& splitDescription) {
- NKikimrClient::TResponse response;
- TrySplitTablePartition(tablePath, splitDescription, response);
+ response = static_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
+ }
+
+ NMsgBusProxy::EResponseStatus SplitTablePartition(const TString& tablePath, const TString& splitDescription) {
+ NKikimrClient::TResponse response;
+ TrySplitTablePartition(tablePath, splitDescription, response);
UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), (int)NMsgBusProxy::MSTATUS_OK);
- return (NMsgBusProxy::EResponseStatus)response.GetStatus();
- }
-};
-
-}}
+ return (NMsgBusProxy::EResponseStatus)response.GetStatus();
+ }
+};
+
+}}
diff --git a/ydb/core/client/locks_ut.cpp b/ydb/core/client/locks_ut.cpp
index 7c030184963..47b7c3dfdd5 100644
--- a/ydb/core/client/locks_ut.cpp
+++ b/ydb/core/client/locks_ut.cpp
@@ -12,7 +12,7 @@
static const bool EnableLogs = false;
namespace NKikimr {
-namespace NLocksTests {
+namespace NLocksTests {
using namespace Tests;
diff --git a/ydb/core/client/minikql_compile/mkql_compile_service.cpp b/ydb/core/client/minikql_compile/mkql_compile_service.cpp
index 52f8940f1f8..e01d61a22cc 100644
--- a/ydb/core/client/minikql_compile/mkql_compile_service.cpp
+++ b/ydb/core/client/minikql_compile/mkql_compile_service.cpp
@@ -72,7 +72,7 @@ public:
void Bootstrap(const TActorContext& ctx) {
Counters = GetServiceCounters(AppData(ctx)->Counters, "compile")->GetSubgroup("subsystem", "cache");
- AllocPoolCounters = TAlignedPagePoolCounters(AppData(ctx)->Counters, "compile");
+ AllocPoolCounters = TAlignedPagePoolCounters(AppData(ctx)->Counters, "compile");
if (!DbSchemeResolver) {
DbSchemeResolver.Reset(MakeDbSchemeResolver(ctx));
@@ -165,7 +165,7 @@ private:
TCompilingMap Compiling;
TIntrusivePtr<NMonitoring::TDynamicCounters> Counters;
- TAlignedPagePoolCounters AllocPoolCounters;
+ TAlignedPagePoolCounters AllocPoolCounters;
TActorId SchemeCache;
THolder<NYql::IDbSchemeResolver> DbSchemeResolver;
};
diff --git a/ydb/core/client/minikql_compile/yql_expr_minikql.cpp b/ydb/core/client/minikql_compile/yql_expr_minikql.cpp
index 7ea8945efc9..76f38bf35cb 100644
--- a/ydb/core/client/minikql_compile/yql_expr_minikql.cpp
+++ b/ydb/core/client/minikql_compile/yql_expr_minikql.cpp
@@ -29,7 +29,7 @@
#include <ydb/library/yql/core/yql_expr_type_annotation.h>
#include <ydb/library/yql/providers/common/mkql/yql_type_mkql.h>
#include <ydb/library/yql/providers/common/mkql/yql_provider_mkql.h>
-
+
#include <library/cpp/threading/future/async.h>
#include <util/generic/algorithm.h>
@@ -335,7 +335,7 @@ public:
private:
static bool CheckKeyColumn(const TStringBuf& columnName, ui32 keyIndex, IDbSchemeResolver::TTableResult* lookup, TExprNode& node, TExprContext& ctx) {
- auto column = lookup->Columns.FindPtr(columnName);
+ auto column = lookup->Columns.FindPtr(columnName);
if (!column) {
ctx.AddError(YqlIssue(ctx.GetPosition(node.Pos()), TIssuesIds::KIKIMR_SCHEME_MISMATCH, TStringBuilder()
<< "Unknown column '" << columnName
@@ -353,8 +353,8 @@ private:
}
return true;
- }
-
+ }
+
static bool CheckRowTuple(IDbSchemeResolver::TTableResult* lookup, TExprNode& node, TExprNode& rowTuple, TExprContext& ctx) {
if (rowTuple.ChildrenSize() != lookup->KeyColumnCount) {
ctx.AddError(YqlIssue(ctx.GetPosition(node.Pos()), TIssuesIds::KIKIMR_SCHEME_MISMATCH, TStringBuilder()
@@ -477,9 +477,9 @@ private:
}
Y_ENSURE_EX(keyCount <= lookup->KeyColumnCount, TNodeException(node)
- << "Too many key columns specified, table [" << lookup->Table.TableName
- << "] has only: " << lookup->KeyColumnCount
- << ", but got " << keyCount << ".");
+ << "Too many key columns specified, table [" << lookup->Table.TableName
+ << "] has only: " << lookup->KeyColumnCount
+ << ", but got " << keyCount << ".");
Y_ENSURE_EX(fromComponents > 0, TNodeException(node)
<< "Expected at least one component of key in the 'from' section of the range");
@@ -499,10 +499,10 @@ private:
auto selectTuple = node.Child(2);
- // Check that all selected columns are present in table schema
- ui32 selectIndex = 0;
- for (auto selectItem : selectTuple->Children()) {
- auto columnName = selectItem->Content();
+ // Check that all selected columns are present in table schema
+ ui32 selectIndex = 0;
+ for (auto selectItem : selectTuple->Children()) {
+ auto columnName = selectItem->Content();
if (!NKikimr::IsSystemColumn(columnName)) {
auto column = lookup->Columns.FindPtr(columnName);
Y_ENSURE_EX(column, TNodeException(node)
@@ -510,10 +510,10 @@ private:
<< "' for table [" << lookup->Table.TableName
<< "] at select position #" << selectIndex);
}
-
- ++selectIndex;
- }
-
+
+ ++selectIndex;
+ }
+
auto optionsNode = node.Child(3);
Y_ENSURE_EX(optionsNode->IsList(), TNodeException(optionsNode) << "Expected tuple");
for (auto optionsItem : optionsNode->Children()) {
@@ -1366,7 +1366,7 @@ ConvertToMiniKQL(TExprContainer::TPtr expr,
TConvertResult convRes;
convRes.Errors.AddIssues(expr->Context.IssueManager.GetIssues());
promise.SetValue(convRes);
- return;
+ return;
}
TRuntimeNode convertedNode = CompileNode(*expr->Root, expr->Context, ctx, compiler.Get());
@@ -1392,7 +1392,7 @@ ConvertToMiniKQL(TExprContainer::TPtr expr,
TConvertResult convRes;
convRes.Errors.AddIssues(expr->Context.IssueManager.GetIssues());
promise.SetValue(convRes);
- return promise.GetFuture();
+ return promise.GetFuture();
}
TRuntimeNode convertedNode = CompileNode(*expr->Root, expr->Context, ctx, compiler.Get());
@@ -1573,16 +1573,16 @@ private:
errors = Expr->Context.IssueManager.GetIssues();
return false;
}
- IGraphTransformer::TStatus status(IGraphTransformer::TStatus::Ok);
- do {
+ IGraphTransformer::TStatus status(IGraphTransformer::TStatus::Ok);
+ do {
status = ExpandApply(Expr->Root, Expr->Root, Expr->Context);
- } while (status.Level == IGraphTransformer::TStatus::Repeat);
+ } while (status.Level == IGraphTransformer::TStatus::Repeat);
Y_VERIFY_DEBUG(status.Level == IGraphTransformer::TStatus::Ok ||
- status.Level == IGraphTransformer::TStatus::Error);
- if (status.Level != IGraphTransformer::TStatus::Ok) {
+ status.Level == IGraphTransformer::TStatus::Error);
+ if (status.Level != IGraphTransformer::TStatus::Ok) {
errors = Expr->Context.IssueManager.GetIssues();
- return false;
- }
+ return false;
+ }
return true;
}
diff --git a/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp b/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp
index 37c0a4012b1..4fcdf735ba2 100644
--- a/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp
+++ b/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp
@@ -75,15 +75,15 @@ namespace {
table.Columns.insert(std::make_pair("key", TColumn{ 34, 0, NUdf::TDataType<ui32>::Id, 0 }));
table.Columns.insert(std::make_pair("value", TColumn{ 56, -1, NUdf::TDataType<char*>::Id, (ui32)EInplaceUpdateMode::Min }));
services.DbSchemeResolver.AddTable(table);
-
- IDbSchemeResolver::TTableResult table2(IDbSchemeResolver::TTableResult::Ok);
- table2.Table.TableName = "table2";
+
+ IDbSchemeResolver::TTableResult table2(IDbSchemeResolver::TTableResult::Ok);
+ table2.Table.TableName = "table2";
table2.Table.ColumnNames = { "key", "value" };
- table2.TableId.Reset(new TTableId(10, 20));
- table2.KeyColumnCount = 1;
+ table2.TableId.Reset(new TTableId(10, 20));
+ table2.KeyColumnCount = 1;
table2.Columns.insert(std::make_pair("key", TColumn{ 340, 0, NUdf::TDataType<ui32>::Id, 0 }));
table2.Columns.insert(std::make_pair("value", TColumn{ 560, -1, NUdf::TDataType<char*>::Id, (ui32)EInplaceUpdateMode::Min }));
- services.DbSchemeResolver.AddTable(table2);
+ services.DbSchemeResolver.AddTable(table2);
}
}
@@ -224,37 +224,37 @@ Y_UNIT_TEST_SUITE(TTestYqlToMiniKQLCompile) {
}
Y_UNIT_TEST(SimpleCrossShardTx) {
- auto programText = R"___(
- (
- (let row '('('key (Uint32 '2))))
- (let select '('value))
- (let selectRes (SelectRow 'table1 row select))
- (let val (FlatMap selectRes (lambda '(x) (Member x 'value))))
- (let row '('('key (Uint32 '2))))
- (let myUpd '(
- '('value val)
- ))
- (let pgmReturn (AsList
- (UpdateRow 'table2 row myUpd)
- ))
- (return pgmReturn)
- )
- )___";
-
- TServices services;
- RegisterSampleTables(services);
- auto pgm = ProgramText2Bin(programText, services);
- services.ExtractKeys(pgm);
+ auto programText = R"___(
+ (
+ (let row '('('key (Uint32 '2))))
+ (let select '('value))
+ (let selectRes (SelectRow 'table1 row select))
+ (let val (FlatMap selectRes (lambda '(x) (Member x 'value))))
+ (let row '('('key (Uint32 '2))))
+ (let myUpd '(
+ '('value val)
+ ))
+ (let pgmReturn (AsList
+ (UpdateRow 'table2 row myUpd)
+ ))
+ (return pgmReturn)
+ )
+ )___";
+
+ TServices services;
+ RegisterSampleTables(services);
+ auto pgm = ProgramText2Bin(programText, services);
+ services.ExtractKeys(pgm);
UNIT_ASSERT_VALUES_EQUAL(services.DescList.size(), 2);
THashMap<TKeyDesc::ERowOperation, ui32> counters;
- for (ui32 i = 0; i < services.DescList.size(); ++i) {
+ for (ui32 i = 0; i < services.DescList.size(); ++i) {
++counters[services.DescList[i]->RowOperation];
- }
+ }
UNIT_ASSERT_VALUES_EQUAL(counters.size(), 2);
UNIT_ASSERT_VALUES_EQUAL(counters[TKeyDesc::ERowOperation::Read], 1);
UNIT_ASSERT_VALUES_EQUAL(counters[TKeyDesc::ERowOperation::Update], 1);
- }
+ }
Y_UNIT_TEST(AcquireLocks) {
auto programText = R"___(
diff --git a/ydb/core/client/query_stats_ut.cpp b/ydb/core/client/query_stats_ut.cpp
index 7f37b51b470..2832399386b 100644
--- a/ydb/core/client/query_stats_ut.cpp
+++ b/ydb/core/client/query_stats_ut.cpp
@@ -1,177 +1,177 @@
-#include "flat_ut_client.h"
-
+#include "flat_ut_client.h"
+
#include <ydb/core/base/tablet_resolver.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/tx/datashard/datashard_failpoints.h>
#include <ydb/core/engine/mkql_engine_flat.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
#include <google/protobuf/text_format.h>
-
-namespace NKikimr {
-namespace NFlatTests {
-
-using namespace Tests;
-using NClient::TValue;
-
-Y_UNIT_TEST_SUITE(TQueryStats) {
-
- TServer PrepareTest() {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- if (false) {
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_NOTICE);
- }
-
- TFlatMsgBusClient annoyingClient(port);
-
- const char* table1 = R"(
- Name: "T"
- Columns { Name: "key" Type: "Uint32" }
- Columns { Name: "value" Type: "Int32" }
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 2
- )";
-
- const char* table2 = R"(
- Name: "T2"
- Columns { Name: "key" Type: "Uint32" }
- Columns { Name: "value" Type: "Int32" }
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 2
- )";
-
- annoyingClient.InitRoot();
- annoyingClient.CreateTable("/dc-1", table1);
- annoyingClient.CreateTable("/dc-1", table2);
-
- annoyingClient.FlatQuery(
- "("
- " (return (AsList"
- " (UpdateRow '/dc-1/T '('('key (Uint32 '0))) '('('value (Int32 '11111))) )"
- " (UpdateRow '/dc-1/T '('('key (Uint32 '3000000000))) '('('value (Int32 '22222))) )"
- " ))"
- ")"
- );
- return cleverServer;
- }
-
- NKikimrClient::TResponse FlatQueryWithStats(TFlatMsgBusClient& annoyingClient, const TString& mkql) {
- TClient::TFlatQueryOptions opts;
- opts.CollectStats = true;
-
- NKikimrClient::TResponse response;
- annoyingClient.FlatQueryRaw(mkql, opts, response);
-
- return response;
- }
-
- Y_UNIT_TEST(OffByDefault) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols 'head))
- (let ret (AsList
- (SetResult 'ret1 select1)
- ))
- (return ret)
- )
- )";
-
- TServer server = PrepareTest();
- TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
- NKikimrClient::TResponse res;
- TClient::TFlatQueryOptions opts;
- annoyingClient.FlatQueryRaw(query, opts, res);
- // Cerr << res << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
- UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), false);
- }
-
- Y_UNIT_TEST(ImmediateMkql) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let row2 '('('key (Uint32 '3000000000)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols 'head))
- (let select2 (SelectRow '/dc-1/T row2 cols 'head))
- (let range (SelectRange '/dc-1/T '('IncFrom '('key (Uint32 '0) (Uint32 '1) ) ) cols '() 'head))
- (let ret (AsList
- (SetResult 'ret1 select1)
- (SetResult 'ret2 select2)
- (SetResult 'range range)
- ))
- (return ret)
- )
- )";
-
- TServer server = PrepareTest();
- TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
- NKikimrClient::TResponse res = FlatQueryWithStats(annoyingClient, query);
- // Cerr << res << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
-
- UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), true);
- auto stats = res.GetTxStats();
- UNIT_ASSERT_VALUES_EQUAL(stats.HasDurationUs(), true);
- UNIT_ASSERT(stats.GetDurationUs() > 0);
- UNIT_ASSERT_VALUES_EQUAL(stats.TableAccessStatsSize(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetTableInfo().GetName(), "/dc-1/T");
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetCount(), 2);
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetRows(), 2);
- UNIT_ASSERT_VALUES_EQUAL(stats.PerShardStatsSize(), 2);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(0).GetCpuTimeUsec(), 0);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(1).GetCpuTimeUsec(), 0);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetComputeCpuTimeUsec(), 0);
- }
-
- Y_UNIT_TEST(CrossShardMkql) {
- TString query = R"(
- (
- (let row1 '('('key (Uint32 '0)) ))
- (let row2 '('('key (Uint32 '3000000000)) ))
- (let cols '('value))
- (let select1 (SelectRow '/dc-1/T row1 cols))
- (let select2 (SelectRow '/dc-1/T row2 cols))
- (let range (SelectRange '/dc-1/T '('IncFrom '('key (Uint32 '0) (Void) ) ) cols '() ))
- (let ret (AsList
- (SetResult 'ret1 select1)
- (SetResult 'ret2 select2)
- (SetResult 'range range)
- (UpdateRow '/dc-1/T '('('key (Uint32 '10))) '('('value (Int32 '10))) )
- (EraseRow '/dc-1/T '('('key (Uint32 '0))) )
- ))
- (return ret)
- )
- )";
-
- TServer server = PrepareTest();
- TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
- NKikimrClient::TResponse res = FlatQueryWithStats(annoyingClient, query);
- // Cerr << res << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
- UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
-
- UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), true);
- auto stats = res.GetTxStats();
- UNIT_ASSERT_VALUES_EQUAL(stats.HasDurationUs(), true);
- UNIT_ASSERT(stats.GetDurationUs() > 0);
- UNIT_ASSERT_VALUES_EQUAL(stats.TableAccessStatsSize(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetTableInfo().GetName(), "/dc-1/T");
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetCount(), 2);
- UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetRows(), 2);
- UNIT_ASSERT_VALUES_EQUAL(stats.PerShardStatsSize(), 2);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(0).GetCpuTimeUsec(), 0);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(1).GetCpuTimeUsec(), 0);
- UNIT_ASSERT_VALUES_UNEQUAL(stats.GetComputeCpuTimeUsec(), 0);
- }
-}
-
-}}
+
+namespace NKikimr {
+namespace NFlatTests {
+
+using namespace Tests;
+using NClient::TValue;
+
+Y_UNIT_TEST_SUITE(TQueryStats) {
+
+ TServer PrepareTest() {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ if (false) {
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_NOTICE);
+ }
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ const char* table1 = R"(
+ Name: "T"
+ Columns { Name: "key" Type: "Uint32" }
+ Columns { Name: "value" Type: "Int32" }
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 2
+ )";
+
+ const char* table2 = R"(
+ Name: "T2"
+ Columns { Name: "key" Type: "Uint32" }
+ Columns { Name: "value" Type: "Int32" }
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 2
+ )";
+
+ annoyingClient.InitRoot();
+ annoyingClient.CreateTable("/dc-1", table1);
+ annoyingClient.CreateTable("/dc-1", table2);
+
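+        // Seed one row per shard: key 0 lands in the first uniform partition, key 3000000000 (above 2^31) in the second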
+ annoyingClient.FlatQuery(
+ "("
+ " (return (AsList"
+ " (UpdateRow '/dc-1/T '('('key (Uint32 '0))) '('('value (Int32 '11111))) )"
+ " (UpdateRow '/dc-1/T '('('key (Uint32 '3000000000))) '('('value (Int32 '22222))) )"
+ " ))"
+ ")"
+ );
+ return cleverServer;
+ }
+
+ NKikimrClient::TResponse FlatQueryWithStats(TFlatMsgBusClient& annoyingClient, const TString& mkql) {
+ TClient::TFlatQueryOptions opts;
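+        // CollectStats asks the proxy to attach TxStats (per-table access and per-shard execution statistics) to the response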
+ opts.CollectStats = true;
+
+ NKikimrClient::TResponse response;
+ annoyingClient.FlatQueryRaw(mkql, opts, response);
+
+ return response;
+ }
+
+ Y_UNIT_TEST(OffByDefault) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols 'head))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ ))
+ (return ret)
+ )
+ )";
+
+ TServer server = PrepareTest();
+ TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
+ NKikimrClient::TResponse res;
+ TClient::TFlatQueryOptions opts;
+ annoyingClient.FlatQueryRaw(query, opts, res);
+ // Cerr << res << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
+ UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), false);
+ }
+
+ Y_UNIT_TEST(ImmediateMkql) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let row2 '('('key (Uint32 '3000000000)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols 'head))
+ (let select2 (SelectRow '/dc-1/T row2 cols 'head))
+ (let range (SelectRange '/dc-1/T '('IncFrom '('key (Uint32 '0) (Uint32 '1) ) ) cols '() 'head))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ (SetResult 'ret2 select2)
+ (SetResult 'range range)
+ ))
+ (return ret)
+ )
+ )";
+
+ TServer server = PrepareTest();
+ TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
+ NKikimrClient::TResponse res = FlatQueryWithStats(annoyingClient, query);
+ // Cerr << res << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
+
+ UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), true);
+ auto stats = res.GetTxStats();
+ UNIT_ASSERT_VALUES_EQUAL(stats.HasDurationUs(), true);
+ UNIT_ASSERT(stats.GetDurationUs() > 0);
+ UNIT_ASSERT_VALUES_EQUAL(stats.TableAccessStatsSize(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetTableInfo().GetName(), "/dc-1/T");
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetCount(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetRows(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(stats.PerShardStatsSize(), 2);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(0).GetCpuTimeUsec(), 0);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(1).GetCpuTimeUsec(), 0);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetComputeCpuTimeUsec(), 0);
+ }
+
+ Y_UNIT_TEST(CrossShardMkql) {
+ TString query = R"(
+ (
+ (let row1 '('('key (Uint32 '0)) ))
+ (let row2 '('('key (Uint32 '3000000000)) ))
+ (let cols '('value))
+ (let select1 (SelectRow '/dc-1/T row1 cols))
+ (let select2 (SelectRow '/dc-1/T row2 cols))
+ (let range (SelectRange '/dc-1/T '('IncFrom '('key (Uint32 '0) (Void) ) ) cols '() ))
+ (let ret (AsList
+ (SetResult 'ret1 select1)
+ (SetResult 'ret2 select2)
+ (SetResult 'range range)
+ (UpdateRow '/dc-1/T '('('key (Uint32 '10))) '('('value (Int32 '10))) )
+ (EraseRow '/dc-1/T '('('key (Uint32 '0))) )
+ ))
+ (return ret)
+ )
+ )";
+
+ TServer server = PrepareTest();
+ TFlatMsgBusClient annoyingClient(server.GetSettings().Port);
+ NKikimrClient::TResponse res = FlatQueryWithStats(annoyingClient, query);
+ // Cerr << res << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT_VALUES_EQUAL(res.GetExecutionEngineResponseStatus(), ui32(NMiniKQL::IEngineFlat::EStatus::Complete));
+
+ UNIT_ASSERT_VALUES_EQUAL(res.HasTxStats(), true);
+ auto stats = res.GetTxStats();
+ UNIT_ASSERT_VALUES_EQUAL(stats.HasDurationUs(), true);
+ UNIT_ASSERT(stats.GetDurationUs() > 0);
+ UNIT_ASSERT_VALUES_EQUAL(stats.TableAccessStatsSize(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetTableInfo().GetName(), "/dc-1/T");
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetCount(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(stats.GetTableAccessStats(0).GetSelectRow().GetRows(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(stats.PerShardStatsSize(), 2);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(0).GetCpuTimeUsec(), 0);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetPerShardStats(1).GetCpuTimeUsec(), 0);
+ UNIT_ASSERT_VALUES_UNEQUAL(stats.GetComputeCpuTimeUsec(), 0);
+ }
+}
+
+}}
diff --git a/ydb/core/client/s3_listing_ut.cpp b/ydb/core/client/s3_listing_ut.cpp
index d8079f71cb6..919c45dc467 100644
--- a/ydb/core/client/s3_listing_ut.cpp
+++ b/ydb/core/client/s3_listing_ut.cpp
@@ -1,211 +1,211 @@
-#include "flat_ut_client.h"
-
+#include "flat_ut_client.h"
+
#include <library/cpp/testing/unittest/registar.h>
-
-namespace NKikimr {
-namespace NFlatTests {
-
-using namespace Tests;
+
+namespace NKikimr {
+namespace NFlatTests {
+
+using namespace Tests;
using NClient::TValue;
-
+
Y_UNIT_TEST_SUITE(TS3ListingTest) {
-
- void S3WriteRow(TFlatMsgBusClient& annoyingClient, ui64 hash, TString name, TString path, ui64 version, ui64 ts, TString data, TString table) {
- TString insertRowQuery = R"(
- (
- (let key '(
- '('Hash (Uint64 '%llu))
- '('Name (Utf8 '"%s"))
- '('Path (Utf8 '"%s"))
- '('Version (Uint64 '%llu))
- ))
- (let value '(
- '('Timestamp (Uint64 '%llu))
- '('Data (String '"%s"))
- ))
- (let ret_ (AsList
- (UpdateRow '/dc-1/Dir/%s key value)
- ))
- (return ret_)
- )
- )";
-
+
+ void S3WriteRow(TFlatMsgBusClient& annoyingClient, ui64 hash, TString name, TString path, ui64 version, ui64 ts, TString data, TString table) {
+ TString insertRowQuery = R"(
+ (
+ (let key '(
+ '('Hash (Uint64 '%llu))
+ '('Name (Utf8 '"%s"))
+ '('Path (Utf8 '"%s"))
+ '('Version (Uint64 '%llu))
+ ))
+ (let value '(
+ '('Timestamp (Uint64 '%llu))
+ '('Data (String '"%s"))
+ ))
+ (let ret_ (AsList
+ (UpdateRow '/dc-1/Dir/%s key value)
+ ))
+ (return ret_)
+ )
+ )";
+
annoyingClient.FlatQuery(Sprintf(insertRowQuery.data(), hash, name.data(), path.data(), version, ts, data.data(), table.data()));
- }
-
- void S3DeleteRow(TFlatMsgBusClient& annoyingClient, ui64 hash, TString name, TString path, ui64 version, TString table) {
- TString eraseRowQuery = R"(
- (
- (let key '(
- '('Hash (Uint64 '%llu))
- '('Name (Utf8 '"%s"))
- '('Path (Utf8 '"%s"))
- '('Version (Uint64 '%llu))
- ))
- (let ret_ (AsList
- (EraseRow '/dc-1/Dir/%s key)
- ))
- (return ret_)
- )
- )";
-
+ }
+
+ void S3DeleteRow(TFlatMsgBusClient& annoyingClient, ui64 hash, TString name, TString path, ui64 version, TString table) {
+ TString eraseRowQuery = R"(
+ (
+ (let key '(
+ '('Hash (Uint64 '%llu))
+ '('Name (Utf8 '"%s"))
+ '('Path (Utf8 '"%s"))
+ '('Version (Uint64 '%llu))
+ ))
+ (let ret_ (AsList
+ (EraseRow '/dc-1/Dir/%s key)
+ ))
+ (return ret_)
+ )
+ )";
+
annoyingClient.FlatQuery(Sprintf(eraseRowQuery.data(), hash, name.data(), path.data(), version, table.data()));
- }
-
- void PrepareS3Data(TFlatMsgBusClient& annoyingClient) {
- annoyingClient.InitRoot();
- annoyingClient.MkDir("/dc-1", "Dir");
- annoyingClient.CreateTable("/dc-1/Dir",
- R"(Name: "Table"
- Columns { Name: "Hash" Type: "Uint64"}
- Columns { Name: "Name" Type: "Utf8"}
- Columns { Name: "Path" Type: "Utf8"}
- Columns { Name: "Version" Type: "Uint64"}
- Columns { Name: "Timestamp" Type: "Uint64"}
- Columns { Name: "Data" Type: "String"}
- Columns { Name: "ExtraData" Type: "String"}
- Columns { Name: "Unused1" Type: "Uint32"}
- KeyColumnNames: [
- "Hash",
- "Name",
- "Path",
- "Version"
- ]
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint64 : 60 }}
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint64 : 100 }}
- Tuple { Optional { Text : 'Bucket100' }}
- Tuple { Optional { Text : '/Videos/Game of Thrones/Season 1/Episode 2' }}
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint64 : 100 }}
- Tuple { Optional { Text : 'Bucket100' }}
- Tuple { Optional { Text : '/Videos/Game of Thrones/Season 1/Episode 8' }}
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint64 : 100 }}
- Tuple { Optional { Text : 'Bucket100' }}
- Tuple { Optional { Text : '/Videos/Godfather 2.avi' }}
- }}
- PartitionConfig {
- ExecutorCacheSize: 100
-
- CompactionPolicy {
- InMemSizeToSnapshot: 2000
+ }
+
+ void PrepareS3Data(TFlatMsgBusClient& annoyingClient) {
+ annoyingClient.InitRoot();
+ annoyingClient.MkDir("/dc-1", "Dir");
+ annoyingClient.CreateTable("/dc-1/Dir",
+ R"(Name: "Table"
+ Columns { Name: "Hash" Type: "Uint64"}
+ Columns { Name: "Name" Type: "Utf8"}
+ Columns { Name: "Path" Type: "Utf8"}
+ Columns { Name: "Version" Type: "Uint64"}
+ Columns { Name: "Timestamp" Type: "Uint64"}
+ Columns { Name: "Data" Type: "String"}
+ Columns { Name: "ExtraData" Type: "String"}
+ Columns { Name: "Unused1" Type: "Uint32"}
+ KeyColumnNames: [
+ "Hash",
+ "Name",
+ "Path",
+ "Version"
+ ]
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint64 : 60 }}
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint64 : 100 }}
+ Tuple { Optional { Text : 'Bucket100' }}
+ Tuple { Optional { Text : '/Videos/Game of Thrones/Season 1/Episode 2' }}
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint64 : 100 }}
+ Tuple { Optional { Text : 'Bucket100' }}
+ Tuple { Optional { Text : '/Videos/Game of Thrones/Season 1/Episode 8' }}
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint64 : 100 }}
+ Tuple { Optional { Text : 'Bucket100' }}
+ Tuple { Optional { Text : '/Videos/Godfather 2.avi' }}
+ }}
+ PartitionConfig {
+ ExecutorCacheSize: 100
+
+ CompactionPolicy {
+ InMemSizeToSnapshot: 2000
InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 50
+ InMemForceStepsToSnapshot: 50
InMemForceSizeToSnapshot: 16777216
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 1048576
- ReadAheadLoThreshold: 16384
- MinDataPageSize: 300
- SnapBrokerQueue: 0
-
- LogOverheadSizeToSnapshot: 16777216
- LogOverheadCountToSnapshot: 500
- DroppedRowsPercentToCompact: 146
-
- Generation {
- GenerationId: 0
- SizeToCompact: 0
- CountToCompact: 2000
- ForceCountToCompact: 4000
- ForceSizeToCompact: 100000000
- #CompactionBrokerQueue: 4294967295
- KeepInCache: false
- ResourceBrokerTask: "compaction_gen1"
- ExtraCompactionPercent: 100
- ExtraCompactionMinSize: 16384
- ExtraCompactionExpPercent: 110
- ExtraCompactionExpMaxSize: 0
- UpliftPartSize: 0
- }
- }
-
- }
- )");
-
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Thunderstruck.mp3", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/In Bloom.mp3", 1, 20, "", "Table");
-
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/face.jpg", 1, 10, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/facepalm.jpg", 1, 20, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/palm.jpg", 1, 30, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 1.avi", 1, 100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 10.avi", 1, 300, "", "Table");
-
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 2.avi", 1, 200, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 3.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 4.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 5.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 6.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 7.avi", 1, 300, "", "Table");
-
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 8.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 9.avi", 1, 300, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 2/Episode 1.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godfather 2.avi", 1, 500, "", "Table");
-
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godfather.avi", 1, 500, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godmother.avi", 1, 500, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 1.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 2.avi", 1, 1200, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Terminator 2.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/1.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/2.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/3.avi", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/3d.avi", 1, 1100, "", "Table");
-
- S3WriteRow(annoyingClient, 333, "Bucket333", "asdf", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 333, "Bucket333", "boo/bar", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 333, "Bucket333", "boo/baz/xyzzy", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 333, "Bucket333", "cquux/thud", 1, 1100, "", "Table");
- S3WriteRow(annoyingClient, 333, "Bucket333", "cquux/bla", 1, 1100, "", "Table");
-
- S3DeleteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/Photos/palm.jpg", 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 2.avi", 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 5.avi", 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 2.avi", 1, "Table");
- }
-
- void DoListingBySelectRange(TFlatMsgBusClient& annoyingClient,
- ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter, const TString& startAfter, ui32 maxKeys,
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 1048576
+ ReadAheadLoThreshold: 16384
+ MinDataPageSize: 300
+ SnapBrokerQueue: 0
+
+ LogOverheadSizeToSnapshot: 16777216
+ LogOverheadCountToSnapshot: 500
+ DroppedRowsPercentToCompact: 146
+
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 0
+ CountToCompact: 2000
+ ForceCountToCompact: 4000
+ ForceSizeToCompact: 100000000
+ #CompactionBrokerQueue: 4294967295
+ KeepInCache: false
+ ResourceBrokerTask: "compaction_gen1"
+ ExtraCompactionPercent: 100
+ ExtraCompactionMinSize: 16384
+ ExtraCompactionExpPercent: 110
+ ExtraCompactionExpMaxSize: 0
+ UpliftPartSize: 0
+ }
+ }
+
+ }
+ )");
+
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Thunderstruck.mp3", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/In Bloom.mp3", 1, 20, "", "Table");
+
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/face.jpg", 1, 10, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/facepalm.jpg", 1, 20, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Photos/palm.jpg", 1, 30, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 1.avi", 1, 100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 10.avi", 1, 300, "", "Table");
+
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 2.avi", 1, 200, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 3.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 4.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 5.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 6.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 7.avi", 1, 300, "", "Table");
+
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 8.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 9.avi", 1, 300, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 2/Episode 1.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godfather 2.avi", 1, 500, "", "Table");
+
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godfather.avi", 1, 500, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Godmother.avi", 1, 500, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 1.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 2.avi", 1, 1200, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/Videos/Terminator 2.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/1.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/2.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/3.avi", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/XXX/3d.avi", 1, 1100, "", "Table");
+
+ S3WriteRow(annoyingClient, 333, "Bucket333", "asdf", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 333, "Bucket333", "boo/bar", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 333, "Bucket333", "boo/baz/xyzzy", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 333, "Bucket333", "cquux/thud", 1, 1100, "", "Table");
+ S3WriteRow(annoyingClient, 333, "Bucket333", "cquux/bla", 1, 1100, "", "Table");
+
+ S3DeleteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/Photos/palm.jpg", 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 2.avi", 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/Game of Thrones/Season 1/Episode 5.avi", 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/Videos/House of Cards/Season 1/Chapter 2.avi", 1, "Table");
+ }
+
+ void DoListingBySelectRange(TFlatMsgBusClient& annoyingClient,
+ ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter, const TString& startAfter, ui32 maxKeys,
TSet<TString>& commonPrefixes, TSet<TString>& contents)
- {
- // Read all rows from the bucket
- TString table = "Table";
- ui64 hash = bucket;
- TString name = "Bucket" + ToString(bucket);
- TString selectBucketQuery = R"(
- (
- (let range '(
- '('Hash (Uint64 '%llu) (Uint64 '%llu))
- '('Name (Utf8 '"%s") (Utf8 '"%s"))
- '('Path (Nothing (OptionalType (DataType 'Utf8))) (Void))
- '('Version (Nothing (OptionalType (DataType 'Uint64))) (Void))
- ))
- (let columns '(
- 'Path
- ))
- (let res
- (SelectRange '/dc-1/Dir/%s range columns '() )
- )
- (return (AsList (SetResult 'Objects res)))
- )
- )";
-
- TClient::TFlatQueryOptions opts;
- NKikimrMiniKQL::TResult res;
+ {
+ // Read all rows from the bucket
+ TString table = "Table";
+ ui64 hash = bucket;
+ TString name = "Bucket" + ToString(bucket);
+ TString selectBucketQuery = R"(
+ (
+ (let range '(
+ '('Hash (Uint64 '%llu) (Uint64 '%llu))
+ '('Name (Utf8 '"%s") (Utf8 '"%s"))
+ '('Path (Nothing (OptionalType (DataType 'Utf8))) (Void))
+ '('Version (Nothing (OptionalType (DataType 'Uint64))) (Void))
+ ))
+ (let columns '(
+ 'Path
+ ))
+ (let res
+ (SelectRange '/dc-1/Dir/%s range columns '() )
+ )
+ (return (AsList (SetResult 'Objects res)))
+ )
+ )";
+
+ TClient::TFlatQueryOptions opts;
+ NKikimrMiniKQL::TResult res;
annoyingClient.FlatQuery(Sprintf(selectBucketQuery.data(), hash, hash, name.data(), name.data(), table.data()), opts, res);
-
- //Cout << res << Endl;
+
+ //Cout << res << Endl;
TValue value = TValue::Create(res.GetValue(), res.GetType());
TValue objects = value["Objects"];
TValue l = objects["List"];
@@ -213,480 +213,480 @@ Y_UNIT_TEST_SUITE(TS3ListingTest) {
for (ui32 i = 0; i < l.Size(); ++i) {
TValue ps = l[i];
paths.emplace_back(ps["Path"]);
- }
-
-        // Make a list of common prefixes and a list of full paths that match the request parameters
- commonPrefixes.clear();
- contents.clear();
- for (const auto& p : paths) {
- if (commonPrefixes.size() + contents.size() == maxKeys)
- break;
-
- if (!p.StartsWith(pathPrefix))
- continue;
-
- if (p <= startAfter)
- continue;
-
- size_t delimPos = p.find_first_of(pathDelimiter, pathPrefix.length());
- if (delimPos == TString::npos) {
- contents.insert(p);
- } else {
- TString prefix = p.substr(0, delimPos + pathDelimiter.length());
- if (prefix > startAfter) {
- commonPrefixes.insert(prefix);
- }
- }
- }
- }
-
- void DoS3Listing(TFlatMsgBusClient& annoyingClient,
- ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter, const TString& startAfter,
+ }
+
+        // Make a list of common prefixes and a list of full paths that match the request parameters
+ commonPrefixes.clear();
+ contents.clear();
+ for (const auto& p : paths) {
+ if (commonPrefixes.size() + contents.size() == maxKeys)
+ break;
+
+ if (!p.StartsWith(pathPrefix))
+ continue;
+
+ if (p <= startAfter)
+ continue;
+
+ size_t delimPos = p.find_first_of(pathDelimiter, pathPrefix.length());
+ if (delimPos == TString::npos) {
+ contents.insert(p);
+ } else {
+ TString prefix = p.substr(0, delimPos + pathDelimiter.length());
+ if (prefix > startAfter) {
+ commonPrefixes.insert(prefix);
+ }
+ }
+ }
+ }
+
+ void DoS3Listing(TFlatMsgBusClient& annoyingClient,
+ ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter, const TString& startAfter,
const TVector<TString>& columnsToReturn, ui32 maxKeys,
TVector<TString>& commonPrefixes, TVector<TString>& contents)
- {
- TString pbPrefixCols =
- "Type {"
- " Kind : Tuple "
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4 } } } }"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- "}"
- "Value { "
- " Tuple { Optional { Uint64 : " + ToString(bucket) +" } } "
- " Tuple { Optional { Text : 'Bucket" + ToString(bucket) + "' } } "
- "}";
-
- TString pbStartAfterSuffix;
- if (startAfter) {
- pbStartAfterSuffix =
- "Type {"
- " Kind : Tuple "
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- "}"
- "Value { "
- " Tuple { Optional { Text : '" + startAfter + "' } } "
- "}";
- }
-
- NKikimrClient::TS3ListingResponse response;
- annoyingClient.S3Listing("/dc-1/Dir/Table", pbPrefixCols, pathPrefix, pathDelimiter, pbStartAfterSuffix, columnsToReturn, maxKeys, 0, response);
-
- //Cerr << response << Endl;
-
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
-
- const NKikimrMiniKQL::TResult& res = response.GetResult();
-
- commonPrefixes.clear();
- contents.clear();
-
+ {
+ TString pbPrefixCols =
+ "Type {"
+ " Kind : Tuple "
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4 } } } }"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ "}"
+ "Value { "
+ " Tuple { Optional { Uint64 : " + ToString(bucket) +" } } "
+ " Tuple { Optional { Text : 'Bucket" + ToString(bucket) + "' } } "
+ "}";
+
+ TString pbStartAfterSuffix;
+ if (startAfter) {
+ pbStartAfterSuffix =
+ "Type {"
+ " Kind : Tuple "
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ "}"
+ "Value { "
+ " Tuple { Optional { Text : '" + startAfter + "' } } "
+ "}";
+ }
+
+ NKikimrClient::TS3ListingResponse response;
+ annoyingClient.S3Listing("/dc-1/Dir/Table", pbPrefixCols, pathPrefix, pathDelimiter, pbStartAfterSuffix, columnsToReturn, maxKeys, 0, response);
+
+ //Cerr << response << Endl;
+
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+
+ const NKikimrMiniKQL::TResult& res = response.GetResult();
+
+ commonPrefixes.clear();
+ contents.clear();
+
TValue value = TValue::Create(res.GetValue(), res.GetType());
TValue prefixes = value["CommonPrefixes"];
TValue objects = value["Contents"];
for (ui32 i = 0; i < prefixes.Size(); ++i) {
TValue ps = prefixes[i];
commonPrefixes.emplace_back(ps["Path"]);
- }
+ }
for (ui32 i = 0; i < objects.Size(); ++i) {
TValue ps = objects[i];
contents.emplace_back(ps["Path"]);
- }
- }
-
- void CompareS3Listing(TFlatMsgBusClient& annoyingClient, ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter,
- const TString& startAfter, ui32 maxKeys, const TVector<TString>& columnsToReturn)
- {
+ }
+ }
+
+ void CompareS3Listing(TFlatMsgBusClient& annoyingClient, ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter,
+ const TString& startAfter, ui32 maxKeys, const TVector<TString>& columnsToReturn)
+ {
TSet<TString> expectedCommonPrefixes;
TSet<TString> expectedContents;
- DoListingBySelectRange(annoyingClient, bucket, pathPrefix, pathDelimiter, startAfter, maxKeys, expectedCommonPrefixes, expectedContents);
-
+ DoListingBySelectRange(annoyingClient, bucket, pathPrefix, pathDelimiter, startAfter, maxKeys, expectedCommonPrefixes, expectedContents);
+
TVector<TString> commonPrefixes;
TVector<TString> contents;
- DoS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, startAfter, columnsToReturn, maxKeys, commonPrefixes, contents);
-
- UNIT_ASSERT_VALUES_EQUAL(expectedCommonPrefixes.size(), commonPrefixes.size());
- ui32 i = 0;
- for (const auto& p : expectedCommonPrefixes) {
- Cout << "CommonPrefix: " << p << Endl;
- UNIT_ASSERT_VALUES_EQUAL(p, commonPrefixes[i]);
- ++i;
- }
-
- UNIT_ASSERT_VALUES_EQUAL(expectedContents.size(), contents.size());
- i = 0;
- for (const auto& p : expectedContents) {
- Cout << "Contents: " << p << Endl;
- UNIT_ASSERT_VALUES_EQUAL(p, contents[i]);
- ++i;
- }
- }
-
- void TestS3Listing(TFlatMsgBusClient& annoyingClient, ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter,
- ui32 maxKeys, const TVector<TString>& columnsToReturn)
- {
- Cout << Endl << "---------------------------------------" << Endl
- << "Bucket" << bucket << " : " << pathPrefix << Endl;
-
- CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, "", maxKeys, columnsToReturn);
- CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, pathPrefix, maxKeys, columnsToReturn);
-
- TSet<TString> expectedCommonPrefixes;
- TSet<TString> expectedContents;
- DoListingBySelectRange(annoyingClient, bucket, pathPrefix, pathDelimiter, "", 100500, expectedCommonPrefixes, expectedContents);
-
- for (const TString& after : expectedCommonPrefixes) {
- CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, after, maxKeys, columnsToReturn);
- }
-
- for (const TString& after : expectedContents) {
- CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, after, maxKeys, columnsToReturn);
- }
- }
-
+ DoS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, startAfter, columnsToReturn, maxKeys, commonPrefixes, contents);
+
+ UNIT_ASSERT_VALUES_EQUAL(expectedCommonPrefixes.size(), commonPrefixes.size());
+ ui32 i = 0;
+ for (const auto& p : expectedCommonPrefixes) {
+ Cout << "CommonPrefix: " << p << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(p, commonPrefixes[i]);
+ ++i;
+ }
+
+ UNIT_ASSERT_VALUES_EQUAL(expectedContents.size(), contents.size());
+ i = 0;
+ for (const auto& p : expectedContents) {
+ Cout << "Contents: " << p << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(p, contents[i]);
+ ++i;
+ }
+ }
+
+ void TestS3Listing(TFlatMsgBusClient& annoyingClient, ui64 bucket, const TString& pathPrefix, const TString& pathDelimiter,
+ ui32 maxKeys, const TVector<TString>& columnsToReturn)
+ {
+ Cout << Endl << "---------------------------------------" << Endl
+ << "Bucket" << bucket << " : " << pathPrefix << Endl;
+
+ CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, "", maxKeys, columnsToReturn);
+ CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, pathPrefix, maxKeys, columnsToReturn);
+
+ TSet<TString> expectedCommonPrefixes;
+ TSet<TString> expectedContents;
+ DoListingBySelectRange(annoyingClient, bucket, pathPrefix, pathDelimiter, "", 100500, expectedCommonPrefixes, expectedContents);
+
+ for (const TString& after : expectedCommonPrefixes) {
+ CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, after, maxKeys, columnsToReturn);
+ }
+
+ for (const TString& after : expectedContents) {
+ CompareS3Listing(annoyingClient, bucket, pathPrefix, pathDelimiter, after, maxKeys, columnsToReturn);
+ }
+ }
+
Y_UNIT_TEST(Listing) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
TServer cleverServer = TServer(TServerSettings(port));
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
-
-
- TestS3Listing(annoyingClient, 50, "", "", 10, {});
- TestS3Listing(annoyingClient, 50, "", "/", 7, {});
- TestS3Listing(annoyingClient, 50, "Music/", "/", 9, {});
- TestS3Listing(annoyingClient, 50, "Music/Nirvana", "/", 11, {});
- TestS3Listing(annoyingClient, 50, "Music/Nirvana/", "/", 2, {});
- TestS3Listing(annoyingClient, 50, "Photos/", "/", 3, {});
-
- TestS3Listing(annoyingClient, 100, "", "", 4, {});
- TestS3Listing(annoyingClient, 100, "", "/", 7, {});
- TestS3Listing(annoyingClient, 100, "/", "", 3, {});
- TestS3Listing(annoyingClient, 100, "/", "/", 1, {});
- TestS3Listing(annoyingClient, 100, "/Photos/", "/", 11, {});
- TestS3Listing(annoyingClient, 100, "/Videos/", "/", 18, {});
- TestS3Listing(annoyingClient, 100, "/Videos", "/", 3, {"Path", "Timestamp"});
- TestS3Listing(annoyingClient, 100, "/Videos/Game ", "/", 5, {"Path", "Timestamp"});
- TestS3Listing(annoyingClient, 100, "/Videos/Game of Thrones/Season 1/", "/", 6, {"Path", "Timestamp"});
- TestS3Listing(annoyingClient, 100, "/Videos/Game of Thr", " ", 4, {"Path", "Timestamp"});
-
- TestS3Listing(annoyingClient, 20, "", "/", 8, {"Path", "Timestamp"});
- TestS3Listing(annoyingClient, 200, "/", "/", 3, {"Path", "Timestamp"});
-
- // Request NULL columns
- TestS3Listing(annoyingClient, 50, "Photos/", "/", 7, {"ExtraData"});
- TestS3Listing(annoyingClient, 50, "Photos/", "", 2, {"Unused1"});
- TestS3Listing(annoyingClient, 50, "Music/", "/", 11, {"ExtraData"});
- TestS3Listing(annoyingClient, 50, "/", "", 8, {"Unused1"});
-
- TestS3Listing(annoyingClient, 333, "", "", 2, {});
- TestS3Listing(annoyingClient, 333, "", "/", 2, {});
- TestS3Listing(annoyingClient, 333, "", "", 3, {});
- TestS3Listing(annoyingClient, 333, "", "/", 3, {});
- }
-
- Y_UNIT_TEST(MaxKeysAndSharding) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
-
- for (auto commonPrefix: {"/", "/Videos", "/Videos/", "/W", "/X",
- "/Videos/Game of", "/Videos/Game of Thrones/",
- "/Videos/Game of Thrones/Season 1",
- "/Videos/Game of Thrones/Season 1/"})
- {
- for (ui32 maxKeys = 1; maxKeys < 20; ++maxKeys) {
- TestS3Listing(annoyingClient, 100, commonPrefix, "/", maxKeys, {});
- }
- }
- }
-
- TString MakeTuplePb(const TVector<TString>& values) {
- TStringStream pbPrefixCols;
- pbPrefixCols <<
- "Type {"
- " Kind : Tuple "
- " Tuple {";
- for (size_t i = 0; i < values.size(); ++i) {
- pbPrefixCols <<
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }";
- }
- pbPrefixCols <<
- " }"
- "}"
- "Value { ";
- for (const auto& pc : values) {
- pbPrefixCols <<
- " Tuple { Optional { Text : '" << pc << "' } } ";
- }
- pbPrefixCols << "}";
-
- return pbPrefixCols.Str();
- }
-
- void TestS3GenericListingRequest(TFlatMsgBusClient& annoyingClient,
- const TVector<TString>& prefixColumns, const TString& pathPrefix, const TString& pathDelimiter,
- const TVector<TString>& startAfterSuffixColumns,
- const TVector<TString>& columnsToReturn, ui32 maxKeys, ui32 timeoutMillisec,
- NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK,
- const TString& expectedErrMessage = "")
- {
- TString pbPrefixCols = MakeTuplePb(prefixColumns);
- // Cerr << pbPrefixCols << Endl;
-
- TString pbStartAfterSuffixCols = MakeTuplePb(startAfterSuffixColumns);
- // Cerr << pbStartAfterSuffixCols << Endl;
-
- NKikimrClient::TS3ListingResponse response;
- annoyingClient.S3Listing("/dc-1/Dir/Table", pbPrefixCols, pathPrefix, pathDelimiter,
- pbStartAfterSuffixCols, columnsToReturn, maxKeys, timeoutMillisec, response);
-
-        // Dump the full response for debugging
- Cerr << response << Endl;
-
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), expectedStatus);
- UNIT_ASSERT_VALUES_EQUAL(response.GetDescription(), expectedErrMessage);
- }
-
- void TestS3ListingRequest(TFlatMsgBusClient& annoyingClient,
- const TVector<TString>& prefixColumns, const TString& pathPrefix, const TString& pathDelimiter,
- const TString& startAfter, const TVector<TString>& columnsToReturn, ui32 maxKeys,
- NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK,
- const TString& expectedErrMessage = "")
- {
- TVector<TString> startAfterSuffix;
- if (!startAfter.empty())
- startAfterSuffix.push_back(startAfter);
- return TestS3GenericListingRequest(annoyingClient, prefixColumns, pathPrefix, pathDelimiter,
- startAfterSuffix,
- columnsToReturn, maxKeys, 0,
- expectedStatus, expectedErrMessage);
- }
-
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
+
+
+ TestS3Listing(annoyingClient, 50, "", "", 10, {});
+ TestS3Listing(annoyingClient, 50, "", "/", 7, {});
+ TestS3Listing(annoyingClient, 50, "Music/", "/", 9, {});
+ TestS3Listing(annoyingClient, 50, "Music/Nirvana", "/", 11, {});
+ TestS3Listing(annoyingClient, 50, "Music/Nirvana/", "/", 2, {});
+ TestS3Listing(annoyingClient, 50, "Photos/", "/", 3, {});
+
+ TestS3Listing(annoyingClient, 100, "", "", 4, {});
+ TestS3Listing(annoyingClient, 100, "", "/", 7, {});
+ TestS3Listing(annoyingClient, 100, "/", "", 3, {});
+ TestS3Listing(annoyingClient, 100, "/", "/", 1, {});
+ TestS3Listing(annoyingClient, 100, "/Photos/", "/", 11, {});
+ TestS3Listing(annoyingClient, 100, "/Videos/", "/", 18, {});
+ TestS3Listing(annoyingClient, 100, "/Videos", "/", 3, {"Path", "Timestamp"});
+ TestS3Listing(annoyingClient, 100, "/Videos/Game ", "/", 5, {"Path", "Timestamp"});
+ TestS3Listing(annoyingClient, 100, "/Videos/Game of Thrones/Season 1/", "/", 6, {"Path", "Timestamp"});
+ TestS3Listing(annoyingClient, 100, "/Videos/Game of Thr", " ", 4, {"Path", "Timestamp"});
+
+ TestS3Listing(annoyingClient, 20, "", "/", 8, {"Path", "Timestamp"});
+ TestS3Listing(annoyingClient, 200, "/", "/", 3, {"Path", "Timestamp"});
+
+ // Request NULL columns
+ TestS3Listing(annoyingClient, 50, "Photos/", "/", 7, {"ExtraData"});
+ TestS3Listing(annoyingClient, 50, "Photos/", "", 2, {"Unused1"});
+ TestS3Listing(annoyingClient, 50, "Music/", "/", 11, {"ExtraData"});
+ TestS3Listing(annoyingClient, 50, "/", "", 8, {"Unused1"});
+
+ TestS3Listing(annoyingClient, 333, "", "", 2, {});
+ TestS3Listing(annoyingClient, 333, "", "/", 2, {});
+ TestS3Listing(annoyingClient, 333, "", "", 3, {});
+ TestS3Listing(annoyingClient, 333, "", "/", 3, {});
+ }
+
+ Y_UNIT_TEST(MaxKeysAndSharding) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
+
+ for (auto commonPrefix: {"/", "/Videos", "/Videos/", "/W", "/X",
+ "/Videos/Game of", "/Videos/Game of Thrones/",
+ "/Videos/Game of Thrones/Season 1",
+ "/Videos/Game of Thrones/Season 1/"})
+ {
+ for (ui32 maxKeys = 1; maxKeys < 20; ++maxKeys) {
+ TestS3Listing(annoyingClient, 100, commonPrefix, "/", maxKeys, {});
+ }
+ }
+ }
+
+ TString MakeTuplePb(const TVector<TString>& values) {
+ TStringStream pbPrefixCols;
+ pbPrefixCols <<
+ "Type {"
+ " Kind : Tuple "
+ " Tuple {";
+ for (size_t i = 0; i < values.size(); ++i) {
+ pbPrefixCols <<
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }";
+ }
+ pbPrefixCols <<
+ " }"
+ "}"
+ "Value { ";
+ for (const auto& pc : values) {
+ pbPrefixCols <<
+ " Tuple { Optional { Text : '" << pc << "' } } ";
+ }
+ pbPrefixCols << "}";
+
+ return pbPrefixCols.Str();
+ }
+
+ void TestS3GenericListingRequest(TFlatMsgBusClient& annoyingClient,
+ const TVector<TString>& prefixColumns, const TString& pathPrefix, const TString& pathDelimiter,
+ const TVector<TString>& startAfterSuffixColumns,
+ const TVector<TString>& columnsToReturn, ui32 maxKeys, ui32 timeoutMillisec,
+ NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK,
+ const TString& expectedErrMessage = "")
+ {
+ TString pbPrefixCols = MakeTuplePb(prefixColumns);
+ // Cerr << pbPrefixCols << Endl;
+
+ TString pbStartAfterSuffixCols = MakeTuplePb(startAfterSuffixColumns);
+ // Cerr << pbStartAfterSuffixCols << Endl;
+
+ NKikimrClient::TS3ListingResponse response;
+ annoyingClient.S3Listing("/dc-1/Dir/Table", pbPrefixCols, pathPrefix, pathDelimiter,
+ pbStartAfterSuffixCols, columnsToReturn, maxKeys, timeoutMillisec, response);
+
+        // Dump the full response for debugging
+ Cerr << response << Endl;
+
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), expectedStatus);
+ UNIT_ASSERT_VALUES_EQUAL(response.GetDescription(), expectedErrMessage);
+ }
+
+ void TestS3ListingRequest(TFlatMsgBusClient& annoyingClient,
+ const TVector<TString>& prefixColumns, const TString& pathPrefix, const TString& pathDelimiter,
+ const TString& startAfter, const TVector<TString>& columnsToReturn, ui32 maxKeys,
+ NMsgBusProxy::EResponseStatus expectedStatus = NMsgBusProxy::MSTATUS_OK,
+ const TString& expectedErrMessage = "")
+ {
+ TVector<TString> startAfterSuffix;
+ if (!startAfter.empty())
+ startAfterSuffix.push_back(startAfter);
+ return TestS3GenericListingRequest(annoyingClient, prefixColumns, pathPrefix, pathDelimiter,
+ startAfterSuffix,
+ columnsToReturn, maxKeys, 0,
+ expectedStatus, expectedErrMessage);
+ }
+
Y_UNIT_TEST(SchemaChecks) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
-
- TestS3ListingRequest(annoyingClient, {}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Value for path column 'Hash' has type Uint64, expected Utf8");
-
- TestS3ListingRequest(annoyingClient, {""}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Cannot parse value of type Uint64 from text '' in tuple at position 0");
-
- TestS3ListingRequest(annoyingClient, {"AAA"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Cannot parse value of type Uint64 from text 'AAA' in tuple at position 0");
-
- TestS3ListingRequest(annoyingClient, {"-1"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Cannot parse value of type Uint64 from text '-1' in tuple at position 0");
-
- TestS3ListingRequest(annoyingClient, {"1"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_OK,
- "");
-
- TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Value for path column 'Version' has type Uint64, expected Utf8");
-
- TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/Photos", "1"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
-
- TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/Photos", "/"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
-
- TestS3ListingRequest(annoyingClient, {"1", "2", "3"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Value for path column 'Version' has type Uint64, expected Utf8");
-
- TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
-
- TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4", "5"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Tuple size 5 is greater that expected size 3");
-
- TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid KeyPrefix: Tuple size 10 is greater that expected size 3");
-
- TestS3ListingRequest(annoyingClient, {"1"}, "/", "/", "", {"NonExistingColumn"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Unknown column 'NonExistingColumn'");
-
- TestS3ListingRequest(annoyingClient, {"1", "Bucket1"}, "/", "/", "abc", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid StartAfterKeySuffix: StartAfter parameter doesn't match PathPrefix");
- }
-
- Y_UNIT_TEST(Split) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
- SetSplitMergePartCountLimit(cleverServer.GetRuntime(), -1);
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
-// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- TestS3ListingRequest(annoyingClient, {"100", "Bucket100"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_OK,
- "");
-
- // Split shard #1 (where Bucket100 is stored)
- TVector<ui64> shards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- annoyingClient.SplitTablePartition("/dc-1/Dir/Table",
- "SourceTabletId: " + ToString(shards[1]) + " "
- "SplitBoundary { KeyPrefix { "
- " Tuple { Optional { Uint64: 100 } } "
- " Tuple { Optional { Text: 'Bucket100' } } "
- " Tuple { Optional { Text: '/Vid' } } "
- "} }");
-
- TVector<ui64> shardsAfter = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
- UNIT_ASSERT_VALUES_EQUAL(shards.size() + 1, shardsAfter.size());
-
- TestS3ListingRequest(annoyingClient, {"100", "Bucket100"}, "/", "/", "", {"Path"}, 10,
- NMsgBusProxy::MSTATUS_OK,
- "");
-
- CompareS3Listing(annoyingClient, 100, "/", "/", "", 100500, {"Path"});
- }
-
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
+
+ TestS3ListingRequest(annoyingClient, {}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Value for path column 'Hash' has type Uint64, expected Utf8");
+
+ TestS3ListingRequest(annoyingClient, {""}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Cannot parse value of type Uint64 from text '' in tuple at position 0");
+
+ TestS3ListingRequest(annoyingClient, {"AAA"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Cannot parse value of type Uint64 from text 'AAA' in tuple at position 0");
+
+ TestS3ListingRequest(annoyingClient, {"-1"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Cannot parse value of type Uint64 from text '-1' in tuple at position 0");
+
+ TestS3ListingRequest(annoyingClient, {"1"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_OK,
+ "");
+
+ TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Value for path column 'Version' has type Uint64, expected Utf8");
+
+ TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/Photos", "1"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
+
+ TestS3ListingRequest(annoyingClient, {"1", "Bucket1", "/Photos", "/"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
+
+ TestS3ListingRequest(annoyingClient, {"1", "2", "3"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Value for path column 'Version' has type Uint64, expected Utf8");
+
+ TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Tuple size 4 is greater that expected size 3");
+
+ TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4", "5"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Tuple size 5 is greater that expected size 3");
+
+ TestS3ListingRequest(annoyingClient, {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid KeyPrefix: Tuple size 10 is greater that expected size 3");
+
+ TestS3ListingRequest(annoyingClient, {"1"}, "/", "/", "", {"NonExistingColumn"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Unknown column 'NonExistingColumn'");
+
+ TestS3ListingRequest(annoyingClient, {"1", "Bucket1"}, "/", "/", "abc", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid StartAfterKeySuffix: StartAfter parameter doesn't match PathPrefix");
+ }
+
+ Y_UNIT_TEST(Split) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+ SetSplitMergePartCountLimit(cleverServer.GetRuntime(), -1);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+// cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ TestS3ListingRequest(annoyingClient, {"100", "Bucket100"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_OK,
+ "");
+
+ // Split shard #1 (where Bucket100 is stored)
+ TVector<ui64> shards = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ annoyingClient.SplitTablePartition("/dc-1/Dir/Table",
+ "SourceTabletId: " + ToString(shards[1]) + " "
+ "SplitBoundary { KeyPrefix { "
+ " Tuple { Optional { Uint64: 100 } } "
+ " Tuple { Optional { Text: 'Bucket100' } } "
+ " Tuple { Optional { Text: '/Vid' } } "
+ "} }");
+
+ TVector<ui64> shardsAfter = annoyingClient.GetTablePartitions("/dc-1/Dir/Table");
+ UNIT_ASSERT_VALUES_EQUAL(shards.size() + 1, shardsAfter.size());
+
+ TestS3ListingRequest(annoyingClient, {"100", "Bucket100"}, "/", "/", "", {"Path"}, 10,
+ NMsgBusProxy::MSTATUS_OK,
+ "");
+
+ CompareS3Listing(annoyingClient, 100, "/", "/", "", 100500, {"Path"});
+ }
+
Y_UNIT_TEST(SuffixColumns) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServer cleverServer = TServer(TServerSettings(port));
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 55, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 66, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 77, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 88, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 666, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Thunderstruck.mp3", 66, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 111, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 222, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 333, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana", 112, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 100, 10, "", "Table");
- S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/In Bloom.mp3", 120, 20, "", "Table");
-
-        // Enable verbose datashard logging for this test
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
-
- TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3", "66"}, {"Path", "Version", "Data"}, 10, 10000,
- NMsgBusProxy::MSTATUS_OK,
- "");
-
- TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3"}, {"Path", "Version", "Timestamp"}, 10, 10000,
- NMsgBusProxy::MSTATUS_OK,
- "");
-
- TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3", "66", "abcd"}, {"Path", "Version"}, 10, 10000,
- NMsgBusProxy::MSTATUS_ERROR,
- "Invalid StartAfterKeySuffix: Tuple size 3 is greater that expected size 2");
- }
-
- Y_UNIT_TEST(ManyDeletes) {
- TPortManager pm;
- ui16 port = pm.GetPort(2134);
- TServerSettings settings(port);
- settings.NodeCount = 1;
-        TServer cleverServer = TServer(settings);
-
- // Disable shared cache to trigger restarts
- TAtomic unused = 42;
- cleverServer.GetRuntime()->GetAppData().Icb->SetValue("SharedPageCache_Size", 10, unused);
- cleverServer.GetRuntime()->GetAppData().Icb->SetValue("SharedPageCache_Size", 10, unused);
- UNIT_ASSERT_VALUES_EQUAL(unused, 10);
-
- TFlatMsgBusClient annoyingClient(port);
-
- PrepareS3Data(annoyingClient);
-
-#ifdef NDEBUG
- const int N_ROWS = 10000;
-#else
- const int N_ROWS = 5000;
-#endif
-
- TString bigData(300, 'a');
-
- for (int i = 0; i < N_ROWS; ++i) {
- S3WriteRow(annoyingClient, 100, "Bucket100", "/A/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/B/Santa Barbara " + ToString(i%4000), 1, 1100, bigData, "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/C/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
- S3WriteRow(annoyingClient, 100, "Bucket100", "/D/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
- if (i % 100 == 0)
- Cerr << ".";
- }
- Cerr << "\n";
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- CompareS3Listing(annoyingClient, 100, "/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/A/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/B/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/P/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/Photos/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/Videos/", "/", "", 1000, {});
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_ERROR);
-
- for (int i = 0; i < N_ROWS/2; ++i) {
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/A/Santa Barbara " + ToString(i), 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/B/Santa Barbara " + ToString(i), 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/C/Santa Barbara " + ToString(i), 1, "Table");
- S3DeleteRow(annoyingClient, 100, "Bucket100", "/D/Santa Barbara " + ToString(i), 1, "Table");
- if (i % 100 == 0)
- Cerr << ".";
- }
- Cerr << "\n";
-
- cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
-
- CompareS3Listing(annoyingClient, 100, "/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/A/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/B/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/P/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/Photos/", "/", "", 1000, {});
- CompareS3Listing(annoyingClient, 100, "/Videos/", "/", "", 1000, {});
- }
-}
-
-}}
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServer cleverServer = TServer(TServerSettings(port));
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 55, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 66, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 77, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 88, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Shoot to Thrill.mp3", 666, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/AC DC/Thunderstruck.mp3", 66, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 111, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 222, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/rock.m3u", 333, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana", 112, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/Smeels Like Teen Spirit.mp3", 100, 10, "", "Table");
+ S3WriteRow(annoyingClient, 50, "Bucket50", "Music/Nirvana/In Bloom.mp3", 120, 20, "", "Table");
+
+        // Enable verbose datashard logging for this test
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_TRACE);
+
+ TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3", "66"}, {"Path", "Version", "Data"}, 10, 10000,
+ NMsgBusProxy::MSTATUS_OK,
+ "");
+
+ TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3"}, {"Path", "Version", "Timestamp"}, 10, 10000,
+ NMsgBusProxy::MSTATUS_OK,
+ "");
+
+ TestS3GenericListingRequest(annoyingClient, {"50", "Bucket50"}, "Music/AC DC/", "/", {"Music/AC DC/Shoot to Thrill.mp3", "66", "abcd"}, {"Path", "Version"}, 10, 10000,
+ NMsgBusProxy::MSTATUS_ERROR,
+ "Invalid StartAfterKeySuffix: Tuple size 3 is greater that expected size 2");
+ }
+
+ Y_UNIT_TEST(ManyDeletes) {
+ TPortManager pm;
+ ui16 port = pm.GetPort(2134);
+ TServerSettings settings(port);
+ settings.NodeCount = 1;
+        TServer cleverServer = TServer(settings);
+
+ // Disable shared cache to trigger restarts
+ TAtomic unused = 42;
+ cleverServer.GetRuntime()->GetAppData().Icb->SetValue("SharedPageCache_Size", 10, unused);
+ cleverServer.GetRuntime()->GetAppData().Icb->SetValue("SharedPageCache_Size", 10, unused);
+ UNIT_ASSERT_VALUES_EQUAL(unused, 10);
+
+ TFlatMsgBusClient annoyingClient(port);
+
+ PrepareS3Data(annoyingClient);
+
+#ifdef NDEBUG
+ const int N_ROWS = 10000;
+#else
+ const int N_ROWS = 5000;
+#endif
+
+ TString bigData(300, 'a');
+
+ for (int i = 0; i < N_ROWS; ++i) {
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/A/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/B/Santa Barbara " + ToString(i%4000), 1, 1100, bigData, "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/C/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
+ S3WriteRow(annoyingClient, 100, "Bucket100", "/D/Santa Barbara " + ToString(i), 1, 1100, bigData, "Table");
+ if (i % 100 == 0)
+ Cerr << ".";
+ }
+ Cerr << "\n";
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ CompareS3Listing(annoyingClient, 100, "/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/A/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/B/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/P/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/Photos/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/Videos/", "/", "", 1000, {});
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_ERROR);
+
+ for (int i = 0; i < N_ROWS/2; ++i) {
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/A/Santa Barbara " + ToString(i), 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/B/Santa Barbara " + ToString(i), 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/C/Santa Barbara " + ToString(i), 1, "Table");
+ S3DeleteRow(annoyingClient, 100, "Bucket100", "/D/Santa Barbara " + ToString(i), 1, "Table");
+ if (i % 100 == 0)
+ Cerr << ".";
+ }
+ Cerr << "\n";
+
+ cleverServer.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+
+ CompareS3Listing(annoyingClient, 100, "/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/A/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/B/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/P/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/Photos/", "/", "", 1000, {});
+ CompareS3Listing(annoyingClient, 100, "/Videos/", "/", "", 1000, {});
+ }
+}
+
+}}
diff --git a/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp b/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp
index 770b0753848..eb74fa36e94 100644
--- a/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp
+++ b/ydb/core/client/scheme_cache_lib/yql_db_scheme_resolver.cpp
@@ -159,7 +159,7 @@ public:
virtual void ResolveTables(const TVector<TTable>& tables, NActors::TActorId responseTo) override {
TAutoPtr<NActors::IActor> proxyActor(new TTableProxyActor(SchemeCacheActor, responseTo, tables));
- HostActorSystem->Register(proxyActor.Release(), TMailboxType::HTSwap, HostActorSystem->AppData<TAppData>()->UserPoolId);
+ HostActorSystem->Register(proxyActor.Release(), TMailboxType::HTSwap, HostActorSystem->AppData<TAppData>()->UserPoolId);
}
private:
diff --git a/ydb/core/client/server/grpc_server.cpp b/ydb/core/client/server/grpc_server.cpp
index c0a1502cd32..aa122da36a1 100644
--- a/ydb/core/client/server/grpc_server.cpp
+++ b/ydb/core/client/server/grpc_server.cpp
@@ -177,14 +177,14 @@ public:
}
}
- void Reply(const NKikimrClient::TS3ListingResponse& resp) override {
- try {
+ void Reply(const NKikimrClient::TS3ListingResponse& resp) override {
+ try {
Finish(dynamic_cast<const TOut&>(resp), 0);
- } catch (const std::bad_cast&) {
- Y_FAIL("unexpected response type generated");
- }
- }
-
+ } catch (const std::bad_cast&) {
+ Y_FAIL("unexpected response type generated");
+ }
+ }
+
void Reply(const NKikimrClient::TConsoleResponse& resp) override {
try {
Finish(dynamic_cast<const TOut&>(resp), 0);
@@ -226,24 +226,24 @@ public:
static void GenerateErrorResponse(NKikimrClient::TSqsResponse&, const TString&)
{ }
- static void GenerateErrorResponse(NKikimrClient::TS3ListingResponse& resp, const TString& reason) {
- resp.SetStatus(NMsgBusProxy::MSTATUS_ERROR);
- resp.SetDescription(reason);
- }
+ static void GenerateErrorResponse(NKikimrClient::TS3ListingResponse& resp, const TString& reason) {
+ resp.SetStatus(NMsgBusProxy::MSTATUS_ERROR);
+ resp.SetDescription(reason);
+ }
static void GenerateErrorResponse(NKikimrClient::TConsoleResponse& resp, const TString& reason) {
resp.MutableStatus()->SetCode(Ydb::StatusIds::GENERIC_ERROR);
resp.MutableStatus()->SetReason(reason);
}
-
+
NMsgBusProxy::TBusMessageContext BindBusContext(int type) override {
return BusContext.ConstructInPlace(this, type);
}
- TString GetPeer() const override {
- return GetPeerName();
- }
-
+ TString GetPeer() const override {
+ return GetPeerName();
+ }
+
private:
void* GetGRpcTag() {
return static_cast<IQueueEvent*>(this);
@@ -422,27 +422,27 @@ void TGRpcService::Start() {
SetupIncomingRequests();
}
-void TGRpcService::RegisterRequestActor(NActors::IActor* req) {
- ActorSystem->Register(req, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
-}
-
+void TGRpcService::RegisterRequestActor(NActors::IActor* req) {
+ ActorSystem->Register(req, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
+}
+
void TGRpcService::SetupIncomingRequests() {
auto getCounterBlock = NGRpcService::CreateCounterCb(Counters, ActorSystem);
#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
(new TSimpleRequest<NKikimrClient::IN, NKikimrClient::OUT>(this, &Service_, CQ, \
- [this](IRequestContext *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem, ctx->GetPeer()); \
- ACTION; \
- }, &NKikimrClient::TGRpcServer::AsyncService::Request ## NAME, \
+ [this](IRequestContext *ctx) { \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem, ctx->GetPeer()); \
+ ACTION; \
+ }, &NKikimrClient::TGRpcServer::AsyncService::Request ## NAME, \
*ActorSystem, #NAME, getCounterBlock("legacy", #NAME)))->Start();
#define ADD_ACTOR_REQUEST(NAME, TYPE, MTYPE) \
ADD_REQUEST(NAME, TYPE, TResponse, { \
NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE)); \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem, ctx->GetPeer()); \
- RegisterRequestActor(CreateMessageBus ## NAME(msg)); \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem, ctx->GetPeer()); \
+ RegisterRequestActor(CreateMessageBus ## NAME(msg)); \
})
@@ -470,33 +470,33 @@ void TGRpcService::SetupIncomingRequests() {
// dynamic node registration
ADD_REQUEST(RegisterNode, TNodeRegistrationRequest, TNodeRegistrationResponse, {
NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_NODE_REGISTRATION_REQUEST));
- RegisterRequestActor(CreateMessageBusRegisterNode(msg));
+ RegisterRequestActor(CreateMessageBusRegisterNode(msg));
})
// CMS request
ADD_REQUEST(CmsRequest, TCmsRequest, TCmsResponse, {
NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_CMS_REQUEST));
- RegisterRequestActor(CreateMessageBusCmsRequest(msg));
+ RegisterRequestActor(CreateMessageBusCmsRequest(msg));
})
// SQS request
ADD_REQUEST(SqsRequest, TSqsRequest, TSqsResponse, {
NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_SQS_REQUEST));
- RegisterRequestActor(CreateMessageBusSqsRequest(msg));
+ RegisterRequestActor(CreateMessageBusSqsRequest(msg));
})
- // S3 listing request
- ADD_REQUEST(S3Listing, TS3ListingRequest, TS3ListingResponse, {
- NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_S3_LISTING_REQUEST));
- RegisterRequestActor(CreateMessageBusS3ListingRequest(msg));
- })
+ // S3 listing request
+ ADD_REQUEST(S3Listing, TS3ListingRequest, TS3ListingResponse, {
+ NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_S3_LISTING_REQUEST));
+ RegisterRequestActor(CreateMessageBusS3ListingRequest(msg));
+ })
// Console request
ADD_REQUEST(ConsoleRequest, TConsoleRequest, TConsoleResponse, {
NMsgBusProxy::TBusMessageContext msg(ctx->BindBusContext(NMsgBusProxy::MTYPE_CLIENT_CONSOLE_REQUEST));
- RegisterRequestActor(CreateMessageBusConsoleRequest(msg));
+ RegisterRequestActor(CreateMessageBusConsoleRequest(msg));
})
-
+
#define ADD_PROXY_REQUEST_BASE(NAME, TYPE, RES_TYPE, EVENT_TYPE, MTYPE) \
ADD_REQUEST(NAME, TYPE, RES_TYPE, { \
if (MsgBusProxy) { \
diff --git a/ydb/core/client/server/grpc_server.h b/ydb/core/client/server/grpc_server.h
index fb11219c1d6..2f2ae1da6b6 100644
--- a/ydb/core/client/server/grpc_server.h
+++ b/ydb/core/client/server/grpc_server.h
@@ -37,7 +37,7 @@ public:
virtual void Reply(const NKikimrClient::TNodeRegistrationResponse& resp) = 0;
virtual void Reply(const NKikimrClient::TCmsResponse& resp) = 0;
virtual void Reply(const NKikimrClient::TSqsResponse& resp) = 0;
- virtual void Reply(const NKikimrClient::TS3ListingResponse& resp) = 0;
+ virtual void Reply(const NKikimrClient::TS3ListingResponse& resp) = 0;
virtual void Reply(const NKikimrClient::TConsoleResponse& resp) = 0;
//! Send error reply when request wasn't handled properly.
@@ -45,9 +45,9 @@ public:
//! Bind MessageBus context to the request.
virtual NMsgBusProxy::TBusMessageContext BindBusContext(int type) = 0;
-
- //! Returns peer address
- virtual TString GetPeer() const = 0;
+
+ //! Returns peer address
+ virtual TString GetPeer() const = 0;
};
//! Implements interaction with Kikimr via the gRPC protocol.
@@ -68,8 +68,8 @@ public:
i64 GetCurrentInFlight() const;
private:
- void RegisterRequestActor(NActors::IActor* req);
-
+ void RegisterRequestActor(NActors::IActor* req);
+
//! Setup handlers for incoming requests.
void SetupIncomingRequests();
diff --git a/ydb/core/client/server/msgbus_server.cpp b/ydb/core/client/server/msgbus_server.cpp
index 2e020d9ebd3..8a4d8a3bbd5 100644
--- a/ydb/core/client/server/msgbus_server.cpp
+++ b/ydb/core/client/server/msgbus_server.cpp
@@ -102,7 +102,7 @@ public:
MTYPE(TBusMessageBusTraceStatus)
MTYPE(TBusTabletKillRequest)
MTYPE(TBusTabletStateRequest)
- MTYPE(TBusTabletCountersRequest)
+ MTYPE(TBusTabletCountersRequest)
MTYPE(TBusTabletLocalMKQL)
MTYPE(TBusTabletLocalSchemeTx)
MTYPE(TBusSchemeOperation)
@@ -125,8 +125,8 @@ public:
MTYPE(TBusSqsRequest)
MTYPE(TBusWhoAmI)
MTYPE(TBusStreamRequest)
- MTYPE(TBusS3ListingRequest)
- MTYPE(TBusS3ListingResponse)
+ MTYPE(TBusS3ListingRequest)
+ MTYPE(TBusS3ListingResponse)
MTYPE(TBusInterconnectDebug)
MTYPE(TBusConsoleRequest)
MTYPE(TBusResolveNode)
@@ -175,7 +175,7 @@ public:
REPLY_OPTION(TBusNodeRegistrationResponse)
REPLY_OPTION(TBusCmsResponse)
REPLY_OPTION(TBusSqsResponse)
- REPLY_OPTION(TBusS3ListingResponse)
+ REPLY_OPTION(TBusS3ListingResponse)
REPLY_OPTION(TBusConsoleResponse)
default:
@@ -456,8 +456,8 @@ void TMessageBusServer::InitSession(TActorSystem *actorSystem, const TActorId &p
Proxy = proxy;
Session = NBus::TBusServerSession::Create(&Protocol, this, SessionConfig, BusQueue);
HttpServer.Reset(CreateMessageBusHttpServer(actorSystem, this, Protocol, SessionConfig));
- Monitor = ActorSystem->Register(new TMessageBusMonitorActor(Session, SessionConfig), TMailboxType::HTSwap,
- actorSystem->AppData<TAppData>()->UserPoolId);
+ Monitor = ActorSystem->Register(new TMessageBusMonitorActor(Session, SessionConfig), TMailboxType::HTSwap,
+ actorSystem->AppData<TAppData>()->UserPoolId);
}
void TMessageBusServer::ShutdownSession() {
@@ -505,11 +505,11 @@ void TMessageBusServer::OnMessage(TBusMessageContext &msg) {
return ClientActorRequest(CreateMessageBusChooseProxy, msg);
case MTYPE_CLIENT_TABLET_STATE_REQUEST:
return ClientActorRequest(CreateMessageBusTabletStateRequest, msg);
- case MTYPE_CLIENT_TABLET_COUNTERS_REQUEST:
- return ClientActorRequest(CreateMessageBusTabletCountersRequest, msg);
+ case MTYPE_CLIENT_TABLET_COUNTERS_REQUEST:
+ return ClientActorRequest(CreateMessageBusTabletCountersRequest, msg);
case MTYPE_CLIENT_LOCAL_MINIKQL:
return ClientActorRequest(CreateMessageBusLocalMKQL, msg);
- case MTYPE_CLIENT_LOCAL_SCHEME_TX:
+ case MTYPE_CLIENT_LOCAL_SCHEME_TX:
return ClientActorRequest(CreateMessageBusLocalSchemeTx, msg);
case MTYPE_CLIENT_TABLET_KILL_REQUEST:
return ClientActorRequest(CreateMessageBusTabletKillRequest, msg);
@@ -540,12 +540,12 @@ void TMessageBusServer::OnMessage(TBusMessageContext &msg) {
return ClientActorRequest(CreateMessageBusResolveNode, msg);
case MTYPE_CLIENT_CMS_REQUEST:
return ClientActorRequest(CreateMessageBusCmsRequest, msg);
- case MTYPE_CLIENT_SQS_REQUEST:
- return ClientActorRequest(CreateMessageBusSqsRequest, msg);
+ case MTYPE_CLIENT_SQS_REQUEST:
+ return ClientActorRequest(CreateMessageBusSqsRequest, msg);
case MTYPE_CLIENT_WHOAMI:
return ClientActorRequest(CreateMessageBusWhoAmI, msg);
- case MTYPE_CLIENT_S3_LISTING_REQUEST:
- return ClientActorRequest(CreateMessageBusS3ListingRequest, msg);
+ case MTYPE_CLIENT_S3_LISTING_REQUEST:
+ return ClientActorRequest(CreateMessageBusS3ListingRequest, msg);
case MTYPE_CLIENT_INTERCONNECT_DEBUG:
return ClientActorRequest(CreateMessageBusInterconnectDebug, msg);
case MTYPE_CLIENT_CONSOLE_REQUEST:
@@ -558,15 +558,15 @@ void TMessageBusServer::OnMessage(TBusMessageContext &msg) {
}
void TMessageBusServer::OnError(TAutoPtr<NBus::TBusMessage> msg, NBus::EMessageStatus status) {
- if (ActorSystem) {
- if (status == NBus::MESSAGE_SHUTDOWN) {
- LOG_DEBUG_S(*ActorSystem, NKikimrServices::MSGBUS_REQUEST, "Msgbus client disconnected before reply was sent"
- << " msg# " << msg->Describe());
- } else {
- LOG_ERROR_S(*ActorSystem, NKikimrServices::MSGBUS_REQUEST, "Failed to send reply over msgbus status# " << status
- << " msg# " << msg->Describe());
- }
- }
+ if (ActorSystem) {
+ if (status == NBus::MESSAGE_SHUTDOWN) {
+ LOG_DEBUG_S(*ActorSystem, NKikimrServices::MSGBUS_REQUEST, "Msgbus client disconnected before reply was sent"
+ << " msg# " << msg->Describe());
+ } else {
+ LOG_ERROR_S(*ActorSystem, NKikimrServices::MSGBUS_REQUEST, "Failed to send reply over msgbus status# " << status
+ << " msg# " << msg->Describe());
+ }
+ }
}
template<typename TEv>
@@ -579,14 +579,14 @@ void TMessageBusServer::ClientProxyRequest(TBusMessageContext &msg) {
void TMessageBusServer::ClientActorRequest(ActorCreationFunc func, TBusMessageContext &msg) {
if (IActor *x = func(msg))
- ActorSystem->Register(x, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
+ ActorSystem->Register(x, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
else
msg.SendReplyMove(new TBusResponseStatus(MSTATUS_ERROR));
}
void TMessageBusServer::GetTypes(TBusMessageContext &msg) {
if (IActor *x = CreateMessageBusGetTypes(msg)) {
- ActorSystem->Register(x, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
+ ActorSystem->Register(x, TMailboxType::HTSwap, ActorSystem->AppData<TAppData>()->UserPoolId);
} else {
auto reply = new TBusTypesResponse();
reply->Record.SetStatus(MSTATUS_ERROR);
diff --git a/ydb/core/client/server/msgbus_server.h b/ydb/core/client/server/msgbus_server.h
index a5711311316..3d40fc90017 100644
--- a/ydb/core/client/server/msgbus_server.h
+++ b/ydb/core/client/server/msgbus_server.h
@@ -4,7 +4,7 @@
#include <ydb/public/lib/base/defs.h>
#include <ydb/public/lib/base/msgbus.h>
#include "msgbus_http_server.h"
-#include "msgbus_server_pq_metacache.h"
+#include "msgbus_server_pq_metacache.h"
namespace NMonitoring {
class TBusNgMonPage;
@@ -280,9 +280,9 @@ IActor* CreateMessageBusServerProxy(
);
//IActor* CreateMessageBusDatashardSetConfig(TBusMessageContext &msg);
-IActor* CreateMessageBusTabletCountersRequest(TBusMessageContext &msg);
+IActor* CreateMessageBusTabletCountersRequest(TBusMessageContext &msg);
IActor* CreateMessageBusLocalMKQL(TBusMessageContext &msg);
-IActor* CreateMessageBusLocalSchemeTx(TBusMessageContext &msg);
+IActor* CreateMessageBusLocalSchemeTx(TBusMessageContext &msg);
IActor* CreateMessageBusSchemeInitRoot(TBusMessageContext &msg);
IActor* CreateMessageBusBSAdm(TBusMessageContext &msg);
IActor* CreateMessageBusGetTypes(TBusMessageContext &msg);
@@ -305,7 +305,7 @@ IActor* CreateMessageBusRegisterNode(TBusMessageContext &msg);
IActor* CreateMessageBusCmsRequest(TBusMessageContext &msg);
IActor* CreateMessageBusSqsRequest(TBusMessageContext &msg);
IActor* CreateMessageBusWhoAmI(TBusMessageContext &msg);
-IActor* CreateMessageBusS3ListingRequest(TBusMessageContext& msg);
+IActor* CreateMessageBusS3ListingRequest(TBusMessageContext& msg);
IActor* CreateMessageBusInterconnectDebug(TBusMessageContext& msg);
IActor* CreateMessageBusConsoleRequest(TBusMessageContext &msg);
IActor* CreateMessageBusTestShardControl(TBusMessageContext &msg);
diff --git a/ydb/core/client/server/msgbus_server_cms.cpp b/ydb/core/client/server/msgbus_server_cms.cpp
index 013b56a6435..53ada2a3fad 100644
--- a/ydb/core/client/server/msgbus_server_cms.cpp
+++ b/ydb/core/client/server/msgbus_server_cms.cpp
@@ -24,8 +24,8 @@ class TCmsRequestActor : public TMessageBusSecureRequest<TMessageBusServerReques
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_COMMON;
- }
-
+ }
+
TCmsRequestActor(NKikimrClient::TCmsRequest &request, NMsgBusProxy::TBusMessageContext &msg)
: TBase(msg)
, Request(request)
diff --git a/ydb/core/client/server/msgbus_server_console.cpp b/ydb/core/client/server/msgbus_server_console.cpp
index 70a90107276..66bc0b02ebe 100644
--- a/ydb/core/client/server/msgbus_server_console.cpp
+++ b/ydb/core/client/server/msgbus_server_console.cpp
@@ -24,8 +24,8 @@ class TConsoleRequestActor : public TMessageBusSecureRequest<TMessageBusServerRe
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_COMMON;
- }
-
+ }
+
TConsoleRequestActor(NKikimrClient::TConsoleRequest &request, NMsgBusProxy::TBusMessageContext &msg)
: TBase(msg)
, Request(request)
diff --git a/ydb/core/client/server/msgbus_server_db.cpp b/ydb/core/client/server/msgbus_server_db.cpp
index 4324e843338..08f3d717864 100644
--- a/ydb/core/client/server/msgbus_server_db.cpp
+++ b/ydb/core/client/server/msgbus_server_db.cpp
@@ -491,7 +491,7 @@ public:
void BuildAndRunProgram(const NActors::TActorContext& ctx) {
try {
const NMiniKQL::IFunctionRegistry& functionRegistry = *AppData(ctx)->FunctionRegistry;
- TAlignedPagePoolCounters counters(AppData(ctx)->Counters, "build");
+ TAlignedPagePoolCounters counters(AppData(ctx)->Counters, "build");
NMiniKQL::TScopedAlloc alloc(counters, functionRegistry.SupportsSizedAllocators());
NMiniKQL::TTypeEnvironment env(alloc);
NMiniKQL::TKikimrProgramBuilder pgmBuilder(env, functionRegistry);
@@ -729,8 +729,8 @@ protected:
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_COMMON;
- }
-
+ }
+
TServerDbSchema(TBusMessageContext &msg, TActorId txProxyId, const TIntrusivePtr<TMessageBusDbOpsCounters>& dbOperationsCounters);
TServerDbSchema(
@@ -983,8 +983,8 @@ protected:
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_COMMON;
- }
-
+ }
+
TServerDbBatch(TBusMessageContext &msg, const TActorId txProxyId, const TActorId& schemeCache, const TIntrusivePtr<TMessageBusDbOpsCounters>& dbOperationsCounters);
void HandleTimeout(const TActorContext& ctx) {
diff --git a/ydb/core/client/server/msgbus_server_local_enumerate_tablets.cpp b/ydb/core/client/server/msgbus_server_local_enumerate_tablets.cpp
index c0d23e53b05..9f867dddc7c 100644
--- a/ydb/core/client/server/msgbus_server_local_enumerate_tablets.cpp
+++ b/ydb/core/client/server/msgbus_server_local_enumerate_tablets.cpp
@@ -1,13 +1,13 @@
-#include "msgbus_servicereq.h"
+#include "msgbus_servicereq.h"
#include <ydb/core/mind/local.h>
#include <ydb/core/protos/local.pb.h>
-namespace NKikimr {
-namespace NMsgBusProxy {
-
-namespace {
+namespace NKikimr {
+namespace NMsgBusProxy {
+
+namespace {
const ui32 DefaultTimeout = 90000;
-}
-
+}
+
template <typename ResponseType>
class TMessageBusLocalEnumerateTablets: public TMessageBusLocalServiceRequest<TMessageBusLocalEnumerateTablets<ResponseType>, NKikimrServices::TActivity::FRONT_ENUMERATE_TABLETS> {
using TBase = TMessageBusLocalServiceRequest<TMessageBusLocalEnumerateTablets<ResponseType>, NKikimrServices::TActivity::FRONT_ENUMERATE_TABLETS>;
@@ -17,7 +17,7 @@ class TMessageBusLocalEnumerateTablets: public TMessageBusLocalServiceRequest<TM
bool IsFiltered;
bool IsOk;
bool IsNodeIdPresent;
-public:
+public:
TMessageBusLocalEnumerateTablets(TBusMessageContext &msg, TDuration timeout)
: TBase(msg, timeout)
, DomainUid(0)
@@ -26,7 +26,7 @@ public:
, IsFiltered(false)
, IsOk(true)
, IsNodeIdPresent(false)
- {
+ {
const auto &record = static_cast<TBusLocalEnumerateTablets*>(msg.GetMessage())->Record;
IsOk = IsOk && record.HasDomainUid();
if (record.HasNodeId()) {
@@ -40,8 +40,8 @@ public:
TabletType = record.GetTabletType();
}
}
- }
-
+ }
+
void Handle(TEvLocal::TEvEnumerateTabletsResult::TPtr &ev, const TActorContext &ctx) {
const NKikimrLocal::TEvEnumerateTabletsResult &record = ev->Get()->Record;
Y_VERIFY(record.HasStatus());
@@ -65,8 +65,8 @@ public:
}
TBase::SendReplyAndDie(response.Release(), ctx);
}
- }
-
+ }
+
TActorId MakeServiceID(const TActorContext &ctx) {
auto &domainsInfo = *AppData(ctx)->DomainsInfo;
auto domainIt = domainsInfo.Domains.find(DomainUid);
@@ -79,17 +79,17 @@ public:
ui32 hiveUid = domainsInfo.GetDefaultHiveUid(DomainUid);
ui64 hiveId = domainsInfo.GetHive(hiveUid);
return MakeLocalRegistrarID(nodeId, hiveId);
- }
-
+ }
+
TEvLocal::TEvEnumerateTablets* MakeReq(const TActorContext &ctx) {
Y_UNUSED(ctx);
if (IsFiltered) {
return new TEvLocal::TEvEnumerateTablets(TabletType);
}
return new TEvLocal::TEvEnumerateTablets();
- }
-
- NBus::TBusMessage* CreateErrorReply(EResponseStatus status, const TActorContext &ctx) {
+ }
+
+ NBus::TBusMessage* CreateErrorReply(EResponseStatus status, const TActorContext &ctx) {
Y_UNUSED(ctx);
Y_UNUSED(status);
ui64 nodeId = IsNodeIdPresent ? NodeId : ctx.SelfID.NodeId();
@@ -97,16 +97,16 @@ public:
response->Record.SetStatus(MSTATUS_ERROR);
response->Record.SetErrorReason(Sprintf("Invalid DomainUid# %" PRIu64 ", NodeId# %" PRIu64
" or kikimr hive/domain/node configuration, Marker# LE3", (ui64)DomainUid, (ui64)nodeId));
- return response.Release();
- }
-
- void HandleTimeout(const TActorContext &ctx) {
+ return response.Release();
+ }
+
+ void HandleTimeout(const TActorContext &ctx) {
Y_UNUSED(ctx);
TAutoPtr<TBusResponse> response(new TBusResponseStatus(MSTATUS_TIMEOUT, ""));
TBase::SendReplyAndDie(response.Release(), ctx);
- }
-
- void HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev, const TActorContext& ctx) {
+ }
+
+ void HandleUndelivered(TEvents::TEvUndelivered::TPtr& ev, const TActorContext& ctx) {
Y_UNUSED(ev);
THolder<ResponseType> response(new ResponseType());
ui64 nodeId = IsNodeIdPresent ? NodeId : ctx.SelfID.NodeId();
@@ -115,28 +115,28 @@ public:
", Marker# LE2", (ui64)nodeId));
TBase::SendReplyAndDie(response.Release(), ctx);
- }
-
- STFUNC(StateFunc) {
- switch (ev->GetTypeRewrite()) {
+ }
+
+ STFUNC(StateFunc) {
+ switch (ev->GetTypeRewrite()) {
HFunc(TEvLocal::TEvEnumerateTabletsResult, Handle);
- HFunc(TEvents::TEvUndelivered, HandleUndelivered);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
- }
- }
-};
-
+ HFunc(TEvents::TEvUndelivered, HandleUndelivered);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+ }
+ }
+};
+
IActor* CreateMessageBusLocalEnumerateTablets(TBusMessageContext &msg) {
//const auto &record = static_cast<TBusLocalEnumerateTablets*>(msg.GetMessage())->Record;
//const TDuration timeout = TDuration::MilliSeconds(record.HasTimeout() ? record.GetTimeout() : DefaultTimeout);
const TDuration timeout = TDuration::MilliSeconds(DefaultTimeout);
-
+
if (msg.GetMessage()->GetHeader()->Type == MTYPE_CLIENT_OLD_LOCAL_ENUMERATE_TABLETS) {
return new TMessageBusLocalEnumerateTablets<TBusLocalEnumerateTabletsResult>(msg, timeout);
} else {
return new TMessageBusLocalEnumerateTablets<TBusResponse>(msg, timeout);
}
-}
-
-}
-}
+}
+
+}
+}
diff --git a/ydb/core/client/server/msgbus_server_local_scheme_tx.cpp b/ydb/core/client/server/msgbus_server_local_scheme_tx.cpp
index 1537e39917a..c1eed46ef99 100644
--- a/ydb/core/client/server/msgbus_server_local_scheme_tx.cpp
+++ b/ydb/core/client/server/msgbus_server_local_scheme_tx.cpp
@@ -1,63 +1,63 @@
-#include "msgbus_tabletreq.h"
+#include "msgbus_tabletreq.h"
#include "msgbus_securereq.h"
-
-namespace NKikimr {
-namespace NMsgBusProxy {
-
-namespace {
- const ui64 DefaultTimeout = 90000;
-}
-
+
+namespace NKikimr {
+namespace NMsgBusProxy {
+
+namespace {
+ const ui64 DefaultTimeout = 90000;
+}
+
class TMessageBusLocalSchemeTx : public TMessageBusSecureRequest<TMessageBusSimpleTabletRequest<TMessageBusLocalSchemeTx,
TEvTablet::TEvLocalSchemeTxResponse, NKikimrServices::TActivity::FRONT_LOCAL_TXRQ>> {
using TBase = TMessageBusSecureRequest<TMessageBusSimpleTabletRequest<TMessageBusLocalSchemeTx,
TEvTablet::TEvLocalSchemeTxResponse, NKikimrServices::TActivity::FRONT_LOCAL_TXRQ>>;
NKikimrClient::TLocalSchemeTx Request;
-public:
+public:
TMessageBusLocalSchemeTx(TBusMessageContext &msg, NKikimrClient::TLocalSchemeTx &request, ui64 tabletId, bool withRetry, TDuration timeout, bool connectToFollower)
: TMessageBusSecureRequest(msg, tabletId, withRetry, timeout, connectToFollower)
- , Request()
- {
- Request.Swap(&request);
+ , Request()
+ {
+ Request.Swap(&request);
TBase::SetSecurityToken(Request.GetSecurityToken());
TBase::SetRequireAdminAccess(true);
- }
-
- void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
- auto &record = ev->Get()->Record;
-
- const auto replyStatus = (record.GetStatus() == NKikimrProto::OK) ? MSTATUS_OK : MSTATUS_ERROR;
- TAutoPtr<TBusResponse> response(new TBusResponseStatus(replyStatus));
-
- response->Record.SetTabletId(TabletID);
- response->Record.MutableLocalDbScheme()->Swap(record.MutableFullScheme());
-
- if (record.HasErrorReason())
- response->Record.SetErrorReason(record.GetErrorReason());
-
- return SendReplyAndDie(response.Release(), ctx);
- }
-
- TEvTablet::TEvLocalSchemeTx* MakeReq(const TActorContext &ctx) {
- Y_UNUSED(ctx);
-
- TAutoPtr<TEvTablet::TEvLocalSchemeTx> req = new TEvTablet::TEvLocalSchemeTx();
- req->Record.MutableSchemeChanges()->Swap(Request.MutableSchemeChanges());
- req->Record.SetDryRun(Request.GetDryRun());
-
- return req.Release();
- }
-};
-
-IActor* CreateMessageBusLocalSchemeTx(TBusMessageContext &msg) {
- auto &record = static_cast<TBusTabletLocalSchemeTx *>(msg.GetMessage())->Record;
-
+ }
+
+ void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
+ auto &record = ev->Get()->Record;
+
+ const auto replyStatus = (record.GetStatus() == NKikimrProto::OK) ? MSTATUS_OK : MSTATUS_ERROR;
+ TAutoPtr<TBusResponse> response(new TBusResponseStatus(replyStatus));
+
+ response->Record.SetTabletId(TabletID);
+ response->Record.MutableLocalDbScheme()->Swap(record.MutableFullScheme());
+
+ if (record.HasErrorReason())
+ response->Record.SetErrorReason(record.GetErrorReason());
+
+ return SendReplyAndDie(response.Release(), ctx);
+ }
+
+ TEvTablet::TEvLocalSchemeTx* MakeReq(const TActorContext &ctx) {
+ Y_UNUSED(ctx);
+
+ TAutoPtr<TEvTablet::TEvLocalSchemeTx> req = new TEvTablet::TEvLocalSchemeTx();
+ req->Record.MutableSchemeChanges()->Swap(Request.MutableSchemeChanges());
+ req->Record.SetDryRun(Request.GetDryRun());
+
+ return req.Release();
+ }
+};
+
+IActor* CreateMessageBusLocalSchemeTx(TBusMessageContext &msg) {
+ auto &record = static_cast<TBusTabletLocalSchemeTx *>(msg.GetMessage())->Record;
+
const bool connectToFollower = record.HasConnectToFollower() ? record.GetConnectToFollower() : false;
- const ui64 tabletId = record.GetTabletID();
- const bool withRetry = record.HasWithRetry() ? record.GetWithRetry() : false;
- const TDuration timeout = TDuration::MilliSeconds(record.HasTimeout() ? record.GetTimeout() : DefaultTimeout);
-
+ const ui64 tabletId = record.GetTabletID();
+ const bool withRetry = record.HasWithRetry() ? record.GetWithRetry() : false;
+ const TDuration timeout = TDuration::MilliSeconds(record.HasTimeout() ? record.GetTimeout() : DefaultTimeout);
+
return new TMessageBusLocalSchemeTx(msg, record, tabletId, withRetry, timeout, connectToFollower);
-}
-
-}}
+}
+
+}}
diff --git a/ydb/core/client/server/msgbus_server_node_registration.cpp b/ydb/core/client/server/msgbus_server_node_registration.cpp
index e1aae31025d..a3dda6092d0 100644
--- a/ydb/core/client/server/msgbus_server_node_registration.cpp
+++ b/ydb/core/client/server/msgbus_server_node_registration.cpp
@@ -23,8 +23,8 @@ class TNodeRegistrationActor : public TActorBootstrapped<TNodeRegistrationActor>
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_COMMON;
- }
-
+ }
+
TNodeRegistrationActor(NKikimrClient::TNodeRegistrationRequest &request, NMsgBusProxy::TBusMessageContext &msg)
: TMessageBusSessionIdentHolder(msg)
, Request(request)
diff --git a/ydb/core/client/server/msgbus_server_persqueue.cpp b/ydb/core/client/server/msgbus_server_persqueue.cpp
index 3ca0f16a0fb..78b83ef3a3b 100644
--- a/ydb/core/client/server/msgbus_server_persqueue.cpp
+++ b/ydb/core/client/server/msgbus_server_persqueue.cpp
@@ -1,7 +1,7 @@
#include "msgbus_tabletreq.h"
#include "msgbus_server_persqueue.h"
-#include "msgbus_server_pq_metacache.h"
+#include "msgbus_server_pq_metacache.h"
#include "msgbus_server_pq_metarequest.h"
#include <library/cpp/actors/core/interconnect.h>
#include <library/cpp/actors/interconnect/interconnect.h>
@@ -438,7 +438,7 @@ class TMessageBusServerPersQueueImpl : public TActorBootstrapped<TMessageBusServ
protected:
NKikimrClient::TPersQueueRequest RequestProto;
const TString RequestId;
- const bool IsMetaRequest;
+ const bool IsMetaRequest;
const bool IsFetchRequest;
bool CanProcessFetchRequest; //any partitions answered that it has data or WaitMs timeout occured
@@ -497,7 +497,7 @@ public:
return;
}
if (record.HasMetaRequest()) {
- Y_VERIFY(IsMetaRequest);
+ Y_VERIFY(IsMetaRequest);
auto& meta = record.GetMetaRequest();
ui32 count = meta.HasCmdGetPartitionLocations() + meta.HasCmdGetPartitionOffsets() +
meta.HasCmdGetTopicMetadata() + meta.HasCmdGetPartitionStatus() + meta.HasCmdGetReadSessionsInfo();
@@ -777,7 +777,7 @@ public:
bool AnswerIfCanForMeta(const TActorContext& ctx) {
- Y_VERIFY(IsMetaRequest);
+ Y_VERIFY(IsMetaRequest);
Y_VERIFY(RequestProto.HasMetaRequest());
if (AclRequests)
return false;
@@ -980,7 +980,7 @@ public:
break;
}
}
-
+
if (!tabletId) {
ErrorReason = Sprintf("no partition %u in topic '%s', Marker# PQ4", partition, topic.c_str());
return SendReplyAndDie(CreateErrorReply(MSTATUS_ERROR, NPersQueue::NErrorCode::UNKNOWN_TOPIC, ctx), ctx);
@@ -1050,8 +1050,8 @@ public:
bool res = it->second.PartitionToTablet.insert({part, tabletId}).second;
Y_VERIFY(res);
if (TabletInfo.find(tabletId) == TabletInfo.end()) {
- auto& tabletInfo = TabletInfo[tabletId];
- tabletInfo.Topic = topic;
+ auto& tabletInfo = TabletInfo[tabletId];
+ tabletInfo.Topic = topic;
it->second.Tablets.push_back(tabletId);
// Tablet node resolution relies on opening a pipe
@@ -1135,27 +1135,27 @@ public:
TEvTabletPipe::TEvClientConnected *msg = ev->Get();
const ui64 tabletId = ev->Get()->TabletId;
if (msg->Status != NKikimrProto::OK) {
-
+
if (HandlePipeError(tabletId, ctx))
return;
-
- ErrorReason = Sprintf("Client pipe to %" PRIu64 " connection error, Status# %s, Marker# PQ6",
+
+ ErrorReason = Sprintf("Client pipe to %" PRIu64 " connection error, Status# %s, Marker# PQ6",
tabletId, NKikimrProto::EReplyStatus_Name(msg->Status).data());
return SendReplyAndDie(CreateErrorReply(MSTATUS_ERROR, NPersQueue::NErrorCode::ERROR, ctx), ctx);
}
-
- // Update node resolution info for GetPartitionLocations request
+
+ // Update node resolution info for GetPartitionLocations request
if (RequestProto.HasMetaRequest() && (RequestProto.GetMetaRequest().HasCmdGetPartitionLocations()
|| RequestProto.GetMetaRequest().HasCmdGetReadSessionsInfo())) {
- auto it = TabletInfo.find(ev->Get()->TabletId);
- if (it != TabletInfo.end()) {
- ui32 nodeId = ev->Get()->ServerId.NodeId();
- it->second.NodeId = nodeId;
+ auto it = TabletInfo.find(ev->Get()->TabletId);
+ if (it != TabletInfo.end()) {
+ ui32 nodeId = ev->Get()->ServerId.NodeId();
+ it->second.NodeId = nodeId;
TabletsDiscovered.insert(tabletId);
- AnswerIfCanForMeta(ctx);
- }
- }
+ AnswerIfCanForMeta(ctx);
+ }
+ }
}
void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
diff --git a/ydb/core/client/server/msgbus_server_pq_metacache.cpp b/ydb/core/client/server/msgbus_server_pq_metacache.cpp
index 4bbbf94c508..f44ab6f480f 100644
--- a/ydb/core/client/server/msgbus_server_pq_metacache.cpp
+++ b/ydb/core/client/server/msgbus_server_pq_metacache.cpp
@@ -1,4 +1,4 @@
-#include "msgbus_server_persqueue.h"
+#include "msgbus_server_persqueue.h"
#include "msgbus_server_pq_metacache.h"
#include <ydb/public/api/protos/draft/persqueue_error_codes.pb.h>
@@ -9,13 +9,13 @@
#include <ydb/core/base/counters.h>
#include <ydb/core/base/appdata.h>
-
+
namespace NKikimr::NMsgBusProxy {
-
+
using namespace NYdb::NTable;
-
+
namespace NPqMetaCacheV2 {
-
+
IActor* CreateSchemeCache(NActors::TActorSystem* ActorSystem, TIntrusivePtr<NMonitoring::TDynamicCounters> counters) {
auto appData = ActorSystem->AppData<TAppData>();
auto cacheCounters = GetServiceCounters(counters, "pqproxy|schemecache");
@@ -45,8 +45,8 @@ public:
, VersionCheckInterval(versionCheckInterval)
, Generation(std::make_shared<TAtomicCounter>())
{
- }
-
+ }
+
void Bootstrap(const TActorContext& ctx) {
if (ClientWrapper == nullptr) {
auto* driver = AppData(ctx)->YdbDriver;
diff --git a/ydb/core/client/server/msgbus_server_pq_metacache.h b/ydb/core/client/server/msgbus_server_pq_metacache.h
index 7786db9178e..6c12b76b041 100644
--- a/ydb/core/client/server/msgbus_server_pq_metacache.h
+++ b/ydb/core/client/server/msgbus_server_pq_metacache.h
@@ -1,5 +1,5 @@
-#pragma once
-
+#pragma once
+
#include <library/cpp/actors/core/events.h>
#include <library/cpp/actors/core/event_local.h>
#include <ydb/core/tx/schemeshard/schemeshard.h>
@@ -8,10 +8,10 @@
#include <ydb/public/api/protos/draft/persqueue_error_codes.pb.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <util/generic/string.h>
-#include <util/generic/vector.h>
-
+#include <util/generic/vector.h>
+
namespace NKikimr::NMsgBusProxy {
-
+
static const ui32 PQ_METACACHE_TIMEOUT_SECONDS = 120;
static const ui32 PQ_METACACHE_REFRESH_INTERVAL_SECONDS = 10;
diff --git a/ydb/core/client/server/msgbus_server_proxy.cpp b/ydb/core/client/server/msgbus_server_proxy.cpp
index d7d5ae8f7d7..7f3146d21a7 100644
--- a/ydb/core/client/server/msgbus_server_proxy.cpp
+++ b/ydb/core/client/server/msgbus_server_proxy.cpp
@@ -28,16 +28,16 @@ class TMessageBusServerFlatDescribeRequest : public TMessageBusSecureRequest<TMe
void Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev, const TActorContext& ctx) {
auto &mutableRecord = *ev->Get()->MutableRecord();
TAutoPtr<ResponseType> response(new ResponseType());
- response->Record.SetSchemeStatus(mutableRecord.GetStatus());
- const auto status = mutableRecord.GetStatus();
+ response->Record.SetSchemeStatus(mutableRecord.GetStatus());
+ const auto status = mutableRecord.GetStatus();
if (status == NKikimrScheme::StatusSuccess) {
- response->Record.SetStatus(MSTATUS_OK);
- response->Record.SetPath(mutableRecord.GetPath());
+ response->Record.SetStatus(MSTATUS_OK);
+ response->Record.SetPath(mutableRecord.GetPath());
response->Record.MutablePathDescription()->Swap(mutableRecord.MutablePathDescription());
response->Record.SetStatusCode(NKikimrIssues::TStatusIds::SUCCESS);
- } else {
- response->Record.SetStatus(MSTATUS_ERROR);
- response->Record.SetErrorReason(mutableRecord.GetReason());
+ } else {
+ response->Record.SetStatus(MSTATUS_ERROR);
+ response->Record.SetErrorReason(mutableRecord.GetReason());
switch (status) {
case NKikimrScheme::StatusPathDoesNotExist:
@@ -49,7 +49,7 @@ class TMessageBusServerFlatDescribeRequest : public TMessageBusSecureRequest<TMe
IssueManager.RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::DEFAULT_ERROR));
break;
}
- }
+ }
if (IssueManager.GetIssues())
IssuesToMessage(IssueManager.GetIssues(), response->Record.MutableIssues());
diff --git a/ydb/core/client/server/msgbus_server_proxy.h b/ydb/core/client/server/msgbus_server_proxy.h
index 622f82dcc15..cb9f9c19d38 100644
--- a/ydb/core/client/server/msgbus_server_proxy.h
+++ b/ydb/core/client/server/msgbus_server_proxy.h
@@ -59,8 +59,8 @@ private:
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::MSGBUS_PROXY_ACTOR;
- }
-
+ }
+
TMessageBusServerProxy(
TMessageBusServer* server,
std::shared_ptr<IPersQueueGetReadSessionsInfoWorkerFactory> pqReadSessionsInfoWorkerFactory
diff --git a/ydb/core/client/server/msgbus_server_request.cpp b/ydb/core/client/server/msgbus_server_request.cpp
index 4fed8d68839..9223bea9f1a 100644
--- a/ydb/core/client/server/msgbus_server_request.cpp
+++ b/ydb/core/client/server/msgbus_server_request.cpp
@@ -72,13 +72,13 @@ public:
Proposal.Reset(new TEvTxUserProxy::TEvProposeTransaction());
NKikimrTxUserProxy::TEvProposeTransaction &record = Proposal->Record;
- // Transaction protobuf structure might be very heavy (if it has a batch of parameters)
- // so we don't want to copy it, just move its contents
+ // Transaction protobuf structure might be very heavy (if it has a batch of parameters)
+ // so we don't want to copy it, just move its contents
record.MutableTransaction()->Swap(Request->Record.MutableTransaction());
if (Request->Record.HasProxyFlags())
record.SetProxyFlags(Request->Record.GetProxyFlags());
-
+
if (Request->Record.HasExecTimeoutPeriod())
record.SetExecTimeoutPeriod(Request->Record.GetExecTimeoutPeriod());
else {
@@ -114,7 +114,7 @@ public:
} else
if (mkqlTx.GetParams().HasProto()) {
try {
- TAlignedPagePoolCounters counters(AppData(ctx)->Counters, "params");
+ TAlignedPagePoolCounters counters(AppData(ctx)->Counters, "params");
NMiniKQL::TScopedAlloc alloc(counters, AppData(ctx)->FunctionRegistry->SupportsSizedAllocators());
NMiniKQL::TTypeEnvironment env(alloc);
NMiniKQL::TRuntimeNode node = NMiniKQL::ImportValueFromProto(mkqlTx.GetParams().GetProto(), env);
@@ -162,7 +162,7 @@ void TMessageBusServerRequest::ReplyWithResult(EResponseStatus status,
TAutoPtr<TBusResponse> response(ProposeTransactionStatusToResponse(status, result));
if (result.HasExecutionEngineEvaluatedResponse()) {
- response->Record.MutableExecutionEngineEvaluatedResponse()->Swap(result.MutableExecutionEngineEvaluatedResponse());
+ response->Record.MutableExecutionEngineEvaluatedResponse()->Swap(result.MutableExecutionEngineEvaluatedResponse());
}
if (result.HasSerializedReadTableResponse()) {
response->Record.SetSerializedReadTableResponse(result.GetSerializedReadTableResponse());
@@ -171,20 +171,20 @@ void TMessageBusServerRequest::ReplyWithResult(EResponseStatus status,
response->Record.SetProxyErrorCode(result.GetStatus());
}
- if (result.HasTxStats()) {
- response->Record.MutableTxStats()->Swap(result.MutableTxStats());
- }
-
+ if (result.HasTxStats()) {
+ response->Record.MutableTxStats()->Swap(result.MutableTxStats());
+ }
+
SendReplyAutoPtr(response);
-
+
FinishReply(ctx);
}
void TMessageBusServerRequest::FinishReply(const TActorContext &ctx)
{
- if (Proposal)
- AsyncDestroy(Proposal, ctx, AppData(ctx)->UserPoolId);
-
+ if (Proposal)
+ AsyncDestroy(Proposal, ctx, AppData(ctx)->UserPoolId);
+
Die(ctx);
}
@@ -309,7 +309,7 @@ void TMessageBusServerRequest::Handle(TEvTxUserProxy::TEvProposeTransactionStatu
ReplyWithResult(MSTATUS_ERROR, msg->Record, ctx);
return;
case TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardNotAvailable:
- if (!RetryResolve(ctx)) // TODO: retry if partitioning changed due to split/merge
+ if (!RetryResolve(ctx)) // TODO: retry if partitioning changed due to split/merge
ReplyWithResult(MSTATUS_REJECTED, msg->Record, ctx);
return;
case TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardUnknown:
diff --git a/ydb/core/client/server/msgbus_server_s3_listing.cpp b/ydb/core/client/server/msgbus_server_s3_listing.cpp
index 480ad45a79b..3626866322b 100644
--- a/ydb/core/client/server/msgbus_server_s3_listing.cpp
+++ b/ydb/core/client/server/msgbus_server_s3_listing.cpp
@@ -1,4 +1,4 @@
-#include "msgbus_server_request.h"
+#include "msgbus_server_request.h"
#include <ydb/public/lib/base/msgbus.h>
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
#include <ydb/core/tx/datashard/datashard.h>
@@ -9,332 +9,332 @@
#include <ydb/core/grpc_services/rpc_calls.h>
#include <ydb/core/ydb_convert/ydb_convert.h>
#include <util/system/unaligned_mem.h>
-
-namespace NKikimr {
-namespace NMsgBusProxy {
-
-
+
+namespace NKikimr {
+namespace NMsgBusProxy {
+
+
template <NKikimrServices::TActivity::EType acitvityType>
-class TS3ListingRequestBase : public TActorBootstrapped<TS3ListingRequestBase<acitvityType>> {
-private:
- typedef TS3ListingRequestBase<acitvityType> TSelf;
- typedef TActorBootstrapped<TSelf> TBase;
-
- static constexpr ui32 DEFAULT_MAX_KEYS = 1001;
- static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5*60;
-
- const NKikimrClient::TS3ListingRequest* Request;
- THolder<const NACLib::TUserToken> UserToken;
- ui32 MaxKeys;
+class TS3ListingRequestBase : public TActorBootstrapped<TS3ListingRequestBase<acitvityType>> {
+private:
+ typedef TS3ListingRequestBase<acitvityType> TSelf;
+ typedef TActorBootstrapped<TSelf> TBase;
+
+ static constexpr ui32 DEFAULT_MAX_KEYS = 1001;
+ static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5*60;
+
+ const NKikimrClient::TS3ListingRequest* Request;
+ THolder<const NACLib::TUserToken> UserToken;
+ ui32 MaxKeys;
TActorId SchemeCache;
TActorId LeaderPipeCache;
- TDuration Timeout;
+ TDuration Timeout;
TActorId TimeoutTimerActorId;
- TAutoPtr<TKeyDesc> KeyRange;
- bool WaitingResolveReply;
- bool Finished;
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
+ TAutoPtr<TKeyDesc> KeyRange;
+ bool WaitingResolveReply;
+ bool Finished;
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
TVector<NScheme::TTypeId> KeyColumnTypes;
- TSysTables::TTableColumnInfo PathColumnInfo;
+ TSysTables::TTableColumnInfo PathColumnInfo;
TVector<TSysTables::TTableColumnInfo> CommonPrefixesColumns;
TVector<TSysTables::TTableColumnInfo> ContentsColumns;
- TSerializedCellVec PrefixColumns;
- TSerializedCellVec StartAfterSuffixColumns;
- TSerializedCellVec KeyRangeFrom;
- TSerializedCellVec KeyRangeTo;
- ui32 CurrentShardIdx;
+ TSerializedCellVec PrefixColumns;
+ TSerializedCellVec StartAfterSuffixColumns;
+ TSerializedCellVec KeyRangeFrom;
+ TSerializedCellVec KeyRangeTo;
+ ui32 CurrentShardIdx;
TVector<TSerializedCellVec> CommonPrefixesRows;
TVector<TSerializedCellVec> ContentsRows;
-
-public:
+
+public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return acitvityType;
- }
-
+ return acitvityType;
+ }
+
TS3ListingRequestBase(TActorId schemeCache, THolder<const NACLib::TUserToken>&& userToken)
- : Request(nullptr)
- , UserToken(std::move(userToken))
- , MaxKeys(DEFAULT_MAX_KEYS)
- , SchemeCache(schemeCache)
+ : Request(nullptr)
+ , UserToken(std::move(userToken))
+ , MaxKeys(DEFAULT_MAX_KEYS)
+ , SchemeCache(schemeCache)
, LeaderPipeCache(MakePipePeNodeCacheID(false))
- , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
- , WaitingResolveReply(false)
- , Finished(false)
- , CurrentShardIdx(0)
- {
- }
-
- virtual const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) = 0;
-
- void Bootstrap(const NActors::TActorContext& ctx) {
- TString errDescr;
- Request = ExtractRequest(errDescr);
- if (!Request) {
- return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest, errDescr, ctx);
- }
-
- if (Request->GetMaxKeys() > 0 && Request->GetMaxKeys() <= DEFAULT_MAX_KEYS) {
- MaxKeys = Request->GetMaxKeys();
- }
-
- ui32 userTimeoutMillisec = Request->GetTimeout();
- if (userTimeoutMillisec > 0 && TDuration::MilliSeconds(userTimeoutMillisec) < Timeout) {
- Timeout = TDuration::MilliSeconds(userTimeoutMillisec);
- }
-
- ResolveTable(Request->GetTableName(), ctx);
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- Y_VERIFY(Finished);
- Y_VERIFY(!WaitingResolveReply);
+ , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
+ , WaitingResolveReply(false)
+ , Finished(false)
+ , CurrentShardIdx(0)
+ {
+ }
+
+ virtual const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) = 0;
+
+ void Bootstrap(const NActors::TActorContext& ctx) {
+ TString errDescr;
+ Request = ExtractRequest(errDescr);
+ if (!Request) {
+ return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest, errDescr, ctx);
+ }
+
+ if (Request->GetMaxKeys() > 0 && Request->GetMaxKeys() <= DEFAULT_MAX_KEYS) {
+ MaxKeys = Request->GetMaxKeys();
+ }
+
+ ui32 userTimeoutMillisec = Request->GetTimeout();
+ if (userTimeoutMillisec > 0 && TDuration::MilliSeconds(userTimeoutMillisec) < Timeout) {
+ Timeout = TDuration::MilliSeconds(userTimeoutMillisec);
+ }
+
+ ResolveTable(Request->GetTableName(), ctx);
+ }
+
+ void Die(const NActors::TActorContext& ctx) override {
+ Y_VERIFY(Finished);
+ Y_VERIFY(!WaitingResolveReply);
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
- if (TimeoutTimerActorId) {
- ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
- }
- TBase::Die(ctx);
- }
-
-protected:
- virtual void SendReplyMessage(NKikimrClient::TS3ListingResponse&& response) = 0;
-
-private:
- STFUNC(StateWaitResolveTable) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
- // TODO: check all params;
-
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
- NSchemeCache::TSchemeCacheNavigate::TEntry entry;
+ if (TimeoutTimerActorId) {
+ ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
+ }
+ TBase::Die(ctx);
+ }
+
+protected:
+ virtual void SendReplyMessage(NKikimrClient::TS3ListingResponse&& response) = 0;
+
+private:
+ STFUNC(StateWaitResolveTable) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
+ // TODO: check all params;
+
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
+ NSchemeCache::TSchemeCacheNavigate::TEntry entry;
entry.Path = SplitPath(table);
- if (entry.Path.empty()) {
- return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError, "Invalid table path specified", ctx);
- }
- entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
- request->ResultSet.emplace_back(entry);
- ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
-
- TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
- new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
-
- TBase::Become(&TSelf::StateWaitResolveTable);
- WaitingResolveReply = true;
- }
-
- void ReplyWithError(EResponseStatus status, NTxProxy::TResultStatus::EStatus errorCode, const TString& message, const TActorContext& ctx) {
- NKikimrClient::TS3ListingResponse response;
- response.SetStatus(status);
- response.SetDescription(message);
- response.SetErrorCode(errorCode);
- SendReplyMessage(std::move(response));
- Finished = true;
-
- // We cannot Die() while scheme cache request is in flight because that request has pointer to
- // KeyRange member so we must not destroy it before we get the response
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-
- void HandleTimeout(const TActorContext& ctx) {
- return ReplyWithError(MSTATUS_TIMEOUT, NTxProxy::TResultStatus::EStatus::ExecTimeout, "Request timed out", ctx);
- }
-
- void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- const NSchemeCache::TSchemeCacheNavigate& request = *ev->Get()->Request;
- Y_VERIFY(request.ResultSet.size() == 1);
- if (request.ResultSet.front().Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
- return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError,
+ if (entry.Path.empty()) {
+ return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError, "Invalid table path specified", ctx);
+ }
+ entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
+ request->ResultSet.emplace_back(entry);
+ ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
+
+ TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
+ new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
+
+ TBase::Become(&TSelf::StateWaitResolveTable);
+ WaitingResolveReply = true;
+ }
+
+ void ReplyWithError(EResponseStatus status, NTxProxy::TResultStatus::EStatus errorCode, const TString& message, const TActorContext& ctx) {
+ NKikimrClient::TS3ListingResponse response;
+ response.SetStatus(status);
+ response.SetDescription(message);
+ response.SetErrorCode(errorCode);
+ SendReplyMessage(std::move(response));
+ Finished = true;
+
+ // We cannot Die() while scheme cache request is in flight because that request has pointer to
+ // KeyRange member so we must not destroy it before we get the response
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+
+ void HandleTimeout(const TActorContext& ctx) {
+ return ReplyWithError(MSTATUS_TIMEOUT, NTxProxy::TResultStatus::EStatus::ExecTimeout, "Request timed out", ctx);
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ const NSchemeCache::TSchemeCacheNavigate& request = *ev->Get()->Request;
+ Y_VERIFY(request.ResultSet.size() == 1);
+ if (request.ResultSet.front().Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
+ return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError,
ToString(request.ResultSet.front().Status), ctx);
- }
- ResolveNamesResult = ev->Get()->Request;
-
- if (!BuildSchema(ctx)) {
- return;
- }
-
- if (!BuildKeyRange(ctx)) {
- return;
- }
-
- ResolveShards(ctx);
- }
-
- bool BuildSchema(const NActors::TActorContext& ctx) {
- Y_UNUSED(ctx);
-
- auto& entry = ResolveNamesResult->ResultSet.front();
-
+ }
+ ResolveNamesResult = ev->Get()->Request;
+
+ if (!BuildSchema(ctx)) {
+ return;
+ }
+
+ if (!BuildKeyRange(ctx)) {
+ return;
+ }
+
+ ResolveShards(ctx);
+ }
+
+ bool BuildSchema(const NActors::TActorContext& ctx) {
+ Y_UNUSED(ctx);
+
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
TVector<ui32> keyColumnIds;
THashMap<TString, ui32> columnByName;
- for (const auto& ci : entry.Columns) {
- columnByName[ci.second.Name] = ci.second.Id;
- i32 keyOrder = ci.second.KeyOrder;
- if (keyOrder != -1) {
- Y_VERIFY(keyOrder >= 0);
- KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
- KeyColumnTypes[keyOrder] = ci.second.PType;
- keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
- keyColumnIds[keyOrder] = ci.second.Id;
- }
- }
-
- TString errStr;
+ for (const auto& ci : entry.Columns) {
+ columnByName[ci.second.Name] = ci.second.Id;
+ i32 keyOrder = ci.second.KeyOrder;
+ if (keyOrder != -1) {
+ Y_VERIFY(keyOrder >= 0);
+ KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
+ KeyColumnTypes[keyOrder] = ci.second.PType;
+ keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
+ keyColumnIds[keyOrder] = ci.second.Id;
+ }
+ }
+
+ TString errStr;
TVector<TCell> prefixCells;
- TConstArrayRef<NScheme::TTypeId> prefixTypes(KeyColumnTypes.data(), KeyColumnTypes.size() - 1); // -1 for path column
- NMiniKQL::CellsFromTuple(&Request->GetKeyPrefix().GetType(), Request->GetKeyPrefix().GetValue(),
- prefixTypes, true, prefixCells, errStr);
- if (!errStr.empty()) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest, "Invalid KeyPrefix: " + errStr, ctx);
- return false;
- }
-
- PrefixColumns.Parse(TSerializedCellVec::Serialize(prefixCells));
-
- // Check path column
- ui32 pathColPos = prefixCells.size();
- Y_VERIFY(pathColPos < KeyColumnTypes.size());
- PathColumnInfo = entry.Columns[keyColumnIds[pathColPos]];
- if (PathColumnInfo.PType != NScheme::NTypeIds::Utf8) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
- Sprintf("Value for path column '%s' has type %s, expected Utf8",
- PathColumnInfo.Name.data(), NScheme::TypeName(PathColumnInfo.PType)), ctx);
- return false;
- }
-
- CommonPrefixesColumns.push_back(PathColumnInfo);
-
- TVector<TCell> suffixCells;
- TConstArrayRef<NScheme::TTypeId> suffixTypes(KeyColumnTypes.data() + pathColPos, KeyColumnTypes.size() - pathColPos); // starts at path column
- NMiniKQL::CellsFromTuple(&Request->GetStartAfterKeySuffix().GetType(), Request->GetStartAfterKeySuffix().GetValue(),
- suffixTypes, true, suffixCells, errStr);
- if (!errStr.empty()) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
- "Invalid StartAfterKeySuffix: " + errStr, ctx);
- return false;
- }
-
- StartAfterSuffixColumns.Parse(TSerializedCellVec::Serialize(suffixCells));
-
- if (!StartAfterSuffixColumns.GetCells().empty()) {
- TString startAfterPath = TString(StartAfterSuffixColumns.GetCells()[0].Data(), StartAfterSuffixColumns.GetCells()[0].Size());
- if (!startAfterPath.StartsWith(Request->GetPathColumnPrefix())) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
- "Invalid StartAfterKeySuffix: StartAfter parameter doesn't match PathPrefix", ctx);
- return false;
- }
- }
-
- // Check ColumsToReturn
- TSet<TString> requestedColumns(Request->GetColumnsToReturn().begin(), Request->GetColumnsToReturn().end());
-
- // Always request all suffix columns starting from path column
- for (size_t i = pathColPos; i < keyColumnIds.size(); ++i) {
- ui32 colId = keyColumnIds[i];
- requestedColumns.erase(entry.Columns[colId].Name);
- ContentsColumns.push_back(entry.Columns[colId]);
- }
-
- for (const auto& name : requestedColumns) {
+ TConstArrayRef<NScheme::TTypeId> prefixTypes(KeyColumnTypes.data(), KeyColumnTypes.size() - 1); // -1 for path column
+ NMiniKQL::CellsFromTuple(&Request->GetKeyPrefix().GetType(), Request->GetKeyPrefix().GetValue(),
+ prefixTypes, true, prefixCells, errStr);
+ if (!errStr.empty()) {
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest, "Invalid KeyPrefix: " + errStr, ctx);
+ return false;
+ }
+
+ PrefixColumns.Parse(TSerializedCellVec::Serialize(prefixCells));
+
+ // Check path column
+ ui32 pathColPos = prefixCells.size();
+ Y_VERIFY(pathColPos < KeyColumnTypes.size());
+ PathColumnInfo = entry.Columns[keyColumnIds[pathColPos]];
+ if (PathColumnInfo.PType != NScheme::NTypeIds::Utf8) {
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
+ Sprintf("Value for path column '%s' has type %s, expected Utf8",
+ PathColumnInfo.Name.data(), NScheme::TypeName(PathColumnInfo.PType)), ctx);
+ return false;
+ }
+
+ CommonPrefixesColumns.push_back(PathColumnInfo);
+
+ TVector<TCell> suffixCells;
+ TConstArrayRef<NScheme::TTypeId> suffixTypes(KeyColumnTypes.data() + pathColPos, KeyColumnTypes.size() - pathColPos); // starts at path column
+ NMiniKQL::CellsFromTuple(&Request->GetStartAfterKeySuffix().GetType(), Request->GetStartAfterKeySuffix().GetValue(),
+ suffixTypes, true, suffixCells, errStr);
+ if (!errStr.empty()) {
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
+ "Invalid StartAfterKeySuffix: " + errStr, ctx);
+ return false;
+ }
+
+ StartAfterSuffixColumns.Parse(TSerializedCellVec::Serialize(suffixCells));
+
+ if (!StartAfterSuffixColumns.GetCells().empty()) {
+ TString startAfterPath = TString(StartAfterSuffixColumns.GetCells()[0].Data(), StartAfterSuffixColumns.GetCells()[0].Size());
+ if (!startAfterPath.StartsWith(Request->GetPathColumnPrefix())) {
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
+ "Invalid StartAfterKeySuffix: StartAfter parameter doesn't match PathPrefix", ctx);
+ return false;
+ }
+ }
+
+ // Check ColumsToReturn
+ TSet<TString> requestedColumns(Request->GetColumnsToReturn().begin(), Request->GetColumnsToReturn().end());
+
+ // Always request all suffix columns starting from path column
+ for (size_t i = pathColPos; i < keyColumnIds.size(); ++i) {
+ ui32 colId = keyColumnIds[i];
+ requestedColumns.erase(entry.Columns[colId].Name);
+ ContentsColumns.push_back(entry.Columns[colId]);
+ }
+
+ for (const auto& name : requestedColumns) {
if (!columnByName.contains(name)) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
- Sprintf("Unknown column '%s'", name.data()), ctx);
- return false;
- }
- ContentsColumns.push_back(entry.Columns[columnByName[name]]);
- }
-
- return true;
- }
-
- bool BuildKeyRange(const NActors::TActorContext& ctx) {
- Y_UNUSED(ctx);
-
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::WrongRequest,
+ Sprintf("Unknown column '%s'", name.data()), ctx);
+ return false;
+ }
+ ContentsColumns.push_back(entry.Columns[columnByName[name]]);
+ }
+
+ return true;
+ }
+
+ bool BuildKeyRange(const NActors::TActorContext& ctx) {
+ Y_UNUSED(ctx);
+
TVector<TCell> fromValues(PrefixColumns.GetCells().begin(), PrefixColumns.GetCells().end());
TVector<TCell> toValues(PrefixColumns.GetCells().begin(), PrefixColumns.GetCells().end());
-
- TString pathPrefix = Request->GetPathColumnPrefix();
- TString endPathPrefix;
-
- if (pathPrefix.empty()) {
- fromValues.resize(KeyColumnTypes.size());
- } else {
- // TODO: check for valid UTF-8
-
- fromValues.push_back(TCell(pathPrefix.data(), pathPrefix.size()));
- fromValues.resize(KeyColumnTypes.size());
-
- endPathPrefix = pathPrefix;
- // pathPrefix must be a valid Utf8 string, so it cannot contain 0xff byte and its safe to add 1
- // to make end of range key
- endPathPrefix.back() = endPathPrefix.back() + 1;
- toValues.push_back(TCell(endPathPrefix.data(), endPathPrefix.size()));
- toValues.resize(KeyColumnTypes.size());
- }
-
- if (!StartAfterSuffixColumns.GetCells().empty()) {
- // TODO: check for valid UTF-8
- for (size_t i = 0; i < StartAfterSuffixColumns.GetCells().size(); ++i) {
- fromValues[PathColumnInfo.KeyOrder + i] = StartAfterSuffixColumns.GetCells()[i];
- }
- }
-
- KeyRangeFrom.Parse(TSerializedCellVec::Serialize(fromValues));
- KeyRangeTo.Parse(TSerializedCellVec::Serialize(toValues));
-
- TTableRange range(KeyRangeFrom.GetCells(), true,
- KeyRangeTo.GetCells(), false,
- false);
-
+
+ TString pathPrefix = Request->GetPathColumnPrefix();
+ TString endPathPrefix;
+
+ if (pathPrefix.empty()) {
+ fromValues.resize(KeyColumnTypes.size());
+ } else {
+ // TODO: check for valid UTF-8
+
+ fromValues.push_back(TCell(pathPrefix.data(), pathPrefix.size()));
+ fromValues.resize(KeyColumnTypes.size());
+
+ endPathPrefix = pathPrefix;
+ // pathPrefix must be a valid Utf8 string, so it cannot contain 0xff byte and its safe to add 1
+ // to make end of range key
+ endPathPrefix.back() = endPathPrefix.back() + 1;
+ toValues.push_back(TCell(endPathPrefix.data(), endPathPrefix.size()));
+ toValues.resize(KeyColumnTypes.size());
+ }
+
+ if (!StartAfterSuffixColumns.GetCells().empty()) {
+ // TODO: check for valid UTF-8
+ for (size_t i = 0; i < StartAfterSuffixColumns.GetCells().size(); ++i) {
+ fromValues[PathColumnInfo.KeyOrder + i] = StartAfterSuffixColumns.GetCells()[i];
+ }
+ }
+
+ KeyRangeFrom.Parse(TSerializedCellVec::Serialize(fromValues));
+ KeyRangeTo.Parse(TSerializedCellVec::Serialize(toValues));
+
+ TTableRange range(KeyRangeFrom.GetCells(), true,
+ KeyRangeTo.GetCells(), false,
+ false);
+
TVector<TKeyDesc::TColumnOp> columns;
- for (const auto& ci : ContentsColumns) {
+ for (const auto& ci : ContentsColumns) {
TKeyDesc::TColumnOp op = { ci.Id, TKeyDesc::EColumnOperation::Read, ci.PType, 0, 0 };
- columns.push_back(op);
- }
-
- auto& entry = ResolveNamesResult->ResultSet.front();
-
- KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
- return true;
- }
-
- void ResolveShards(const NActors::TActorContext& ctx) {
- TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
-
+ columns.push_back(op);
+ }
+
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
+ KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
+ return true;
+ }
+
+ void ResolveShards(const NActors::TActorContext& ctx) {
+ TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
+
request->ResultSet.emplace_back(std::move(KeyRange));
-
- TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
- ctx.Send(SchemeCache, resolveReq.Release());
-
- TBase::Become(&TSelf::StateWaitResolveShards);
- WaitingResolveReply = true;
- }
-
- STFUNC(StateWaitResolveShards) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- bool CheckAccess(TString& errorMessage) {
- const ui32 access = NACLib::EAccessRights::SelectRow;
+
+ TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
+ ctx.Send(SchemeCache, resolveReq.Release());
+
+ TBase::Become(&TSelf::StateWaitResolveShards);
+ WaitingResolveReply = true;
+ }
+
+ STFUNC(StateWaitResolveShards) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ bool CheckAccess(TString& errorMessage) {
+ const ui32 access = NACLib::EAccessRights::SelectRow;
if (access != 0
&& UserToken != nullptr
&& KeyRange->Status == TKeyDesc::EStatus::Ok
@@ -345,418 +345,418 @@ private:
explanation << "Access denied for " << UserToken->GetUserSID()
<< " with access " << NACLib::AccessRightsToString(access)
<< " to table [" << Request->GetTableName() << "]";
-
+
errorMessage = explanation.Str();
return false;
- }
- return true;
- }
-
- void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
+ }
+ return true;
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
Y_VERIFY(msg->Request->ResultSet.size() == 1);
KeyRange = std::move(msg->Request->ResultSet[0].KeyDescription);
-
+
if (msg->Request->ErrorCount > 0) {
- return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError,
- Sprintf("Unknown table '%s'", Request->GetTableName().data()), ctx);
- }
-
- TString accessCheckError;
- if (!CheckAccess(accessCheckError)) {
- return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::AccessDenied, accessCheckError, ctx);
- }
-
+ return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ResolveError,
+ Sprintf("Unknown table '%s'", Request->GetTableName().data()), ctx);
+ }
+
+ TString accessCheckError;
+ if (!CheckAccess(accessCheckError)) {
+ return ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::AccessDenied, accessCheckError, ctx);
+ }
+
auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
TVector<ui64> shards;
shards.reserve(partitions.size());
for (auto& partition : partitions) {
shards.push_back(partition.ShardId);
}
-
+
return JoinVectorIntoString(shards, ", ");
};
LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Range shards: " << getShardsString(KeyRange->Partitions));
if (KeyRange->Partitions.size() > 0) {
- CurrentShardIdx = 0;
- MakeShardRequest(CurrentShardIdx, ctx);
- } else {
- ReplySuccess(ctx);
- }
- }
-
- void MakeShardRequest(ui32 idx, const NActors::TActorContext& ctx) {
+ CurrentShardIdx = 0;
+ MakeShardRequest(CurrentShardIdx, ctx);
+ } else {
+ ReplySuccess(ctx);
+ }
+ }
+
+ void MakeShardRequest(ui32 idx, const NActors::TActorContext& ctx) {
ui64 shardId = KeyRange->Partitions[idx].ShardId;
-
- THolder<TEvDataShard::TEvS3ListingRequest> ev(new TEvDataShard::TEvS3ListingRequest());
+
+ THolder<TEvDataShard::TEvS3ListingRequest> ev(new TEvDataShard::TEvS3ListingRequest());
ev->Record.SetTableId(KeyRange->TableId.PathId.LocalPathId);
- ev->Record.SetSerializedKeyPrefix(PrefixColumns.GetBuffer());
- ev->Record.SetPathColumnPrefix(Request->GetPathColumnPrefix());
- ev->Record.SetPathColumnDelimiter(Request->GetPathColumnDelimiter());
- ev->Record.SetSerializedStartAfterKeySuffix(StartAfterSuffixColumns.GetBuffer());
- ev->Record.SetMaxKeys(MaxKeys - ContentsRows.size() - CommonPrefixesRows.size());
- if (!CommonPrefixesRows.empty()) {
- // Next shard might have the same common prefix, need to skip it
- ev->Record.SetLastCommonPrefix(CommonPrefixesRows.back().GetBuffer());
- }
-
- for (const auto& ci : ContentsColumns) {
- ev->Record.AddColumnsToReturn(ci.Id);
- }
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shards " << shardId);
-
+ ev->Record.SetSerializedKeyPrefix(PrefixColumns.GetBuffer());
+ ev->Record.SetPathColumnPrefix(Request->GetPathColumnPrefix());
+ ev->Record.SetPathColumnDelimiter(Request->GetPathColumnDelimiter());
+ ev->Record.SetSerializedStartAfterKeySuffix(StartAfterSuffixColumns.GetBuffer());
+ ev->Record.SetMaxKeys(MaxKeys - ContentsRows.size() - CommonPrefixesRows.size());
+ if (!CommonPrefixesRows.empty()) {
+ // Next shard might have the same common prefix, need to skip it
+ ev->Record.SetLastCommonPrefix(CommonPrefixesRows.back().GetBuffer());
+ }
+
+ for (const auto& ci : ContentsColumns) {
+ ev->Record.AddColumnsToReturn(ci.Id);
+ }
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shards " << shardId);
+
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvForward(ev.Release(), shardId, true), IEventHandle::FlagTrackDelivery);
-
- TBase::Become(&TSelf::StateWaitResults);
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
- Y_UNUSED(ev);
- ReplyWithError(MSTATUS_INTERNALERROR, NTxProxy::TResultStatus::EStatus::Unknown,
- "Internal error: pipe cache is not available, the cluster might not be configured properly", ctx);
- }
-
- void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
- Y_UNUSED(ev);
- // Invalidate scheme cache in case of partitioning change
+
+ TBase::Become(&TSelf::StateWaitResults);
+ }
+
+ void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
+ Y_UNUSED(ev);
+ ReplyWithError(MSTATUS_INTERNALERROR, NTxProxy::TResultStatus::EStatus::Unknown,
+ "Internal error: pipe cache is not available, the cluster might not be configured properly", ctx);
+ }
+
+ void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
+ Y_UNUSED(ev);
+ // Invalidate scheme cache in case of partitioning change
ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvInvalidateTable(KeyRange->TableId, TActorId()));
- ReplyWithError(MSTATUS_NOTREADY, NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable, "Failed to connect to shard", ctx);
- }
-
- STFUNC(StateWaitResults) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvDataShard::TEvS3ListingResponse, Handle);
- HFunc(TEvents::TEvUndelivered, Handle);
- HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void Handle(TEvDataShard::TEvS3ListingResponse::TPtr& ev, const NActors::TActorContext& ctx) {
- const auto& shardResponse = ev->Get()->Record;
-
- // Notify the cache that we are done with the pipe
+ ReplyWithError(MSTATUS_NOTREADY, NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable, "Failed to connect to shard", ctx);
+ }
+
+ STFUNC(StateWaitResults) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvDataShard::TEvS3ListingResponse, Handle);
+ HFunc(TEvents::TEvUndelivered, Handle);
+ HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void Handle(TEvDataShard::TEvS3ListingResponse::TPtr& ev, const NActors::TActorContext& ctx) {
+ const auto& shardResponse = ev->Get()->Record;
+
+ // Notify the cache that we are done with the pipe
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(shardResponse.GetTabletID()));
-
- if (shardResponse.GetStatus() == NKikimrTxDataShard::TError::WRONG_SHARD_STATE) {
- // Invalidate scheme cache in case of partitioning change
+
+ if (shardResponse.GetStatus() == NKikimrTxDataShard::TError::WRONG_SHARD_STATE) {
+ // Invalidate scheme cache in case of partitioning change
ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvInvalidateTable(KeyRange->TableId, TActorId()));
- ReplyWithError(MSTATUS_NOTREADY, NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable, shardResponse.GetErrorDescription(), ctx);
- return;
- }
-
- if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
- ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ExecError, shardResponse.GetErrorDescription(), ctx);
- return;
- }
-
- for (size_t i = 0; i < shardResponse.CommonPrefixesRowsSize(); ++i) {
- if (!CommonPrefixesRows.empty() && CommonPrefixesRows.back().GetBuffer() == shardResponse.GetCommonPrefixesRows(i)) {
- LOG_ERROR_S(ctx, NKikimrServices::MSGBUS_REQUEST, "S3 listing got duplicate common prefix from shard " << shardResponse.GetTabletID());
- }
- CommonPrefixesRows.emplace_back(shardResponse.GetCommonPrefixesRows(i));
- }
-
- for (size_t i = 0; i < shardResponse.ContentsRowsSize(); ++i) {
- ContentsRows.emplace_back(shardResponse.GetContentsRows(i));
- }
-
+ ReplyWithError(MSTATUS_NOTREADY, NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable, shardResponse.GetErrorDescription(), ctx);
+ return;
+ }
+
+ if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
+ ReplyWithError(MSTATUS_ERROR, NTxProxy::TResultStatus::EStatus::ExecError, shardResponse.GetErrorDescription(), ctx);
+ return;
+ }
+
+ for (size_t i = 0; i < shardResponse.CommonPrefixesRowsSize(); ++i) {
+ if (!CommonPrefixesRows.empty() && CommonPrefixesRows.back().GetBuffer() == shardResponse.GetCommonPrefixesRows(i)) {
+ LOG_ERROR_S(ctx, NKikimrServices::MSGBUS_REQUEST, "S3 listing got duplicate common prefix from shard " << shardResponse.GetTabletID());
+ }
+ CommonPrefixesRows.emplace_back(shardResponse.GetCommonPrefixesRows(i));
+ }
+
+ for (size_t i = 0; i < shardResponse.ContentsRowsSize(); ++i) {
+ ContentsRows.emplace_back(shardResponse.GetContentsRows(i));
+ }
+
if (CurrentShardIdx+1 < KeyRange->Partitions.size() &&
- MaxKeys > ContentsRows.size() + CommonPrefixesRows.size() &&
- shardResponse.GetMoreRows())
- {
- ++CurrentShardIdx;
- MakeShardRequest(CurrentShardIdx, ctx);
- } else {
- ReplySuccess(ctx);
- }
- }
-
- void AddResultColumn(NKikimrMiniKQL::TStructType& row, const TSysTables::TTableColumnInfo& colInfo) const {
- auto* col = row.AddMember();
- col->SetName(colInfo.Name);
- col->MutableType()->SetKind(NKikimrMiniKQL::Optional);
- auto* item = col->MutableType()->MutableOptional()->MutableItem();
- item->SetKind(NKikimrMiniKQL::Data);
- item->MutableData()->SetScheme(colInfo.PType);
- }
-
- void BuildResultType(NKikimrMiniKQL::TType& type) const {
- // CommonPrefixes: list struct { Path : String }
- type.SetKind(NKikimrMiniKQL::Struct);
- auto* st = type.MutableStruct();
- {
- auto* prefixes = st->AddMember();
- prefixes->SetName("CommonPrefixes");
- auto* rowList = prefixes->MutableType();
- rowList->SetKind(NKikimrMiniKQL::List);
- auto* row = rowList->MutableList()->MutableItem();
- row->SetKind(NKikimrMiniKQL::Struct);
- auto* rowSt = row->MutableStruct();
- for (const auto& ci : CommonPrefixesColumns) {
- AddResultColumn(*rowSt, ci);
- }
- }
-
- // Contents: list of struct { Path : String Col1 : Type1 ... }
- {
- auto* contents = st->AddMember();
- contents->SetName("Contents");
- auto* rowList = contents->MutableType();
- rowList->SetKind(NKikimrMiniKQL::List);
- auto* row = rowList->MutableList()->MutableItem();
- row->SetKind(NKikimrMiniKQL::Struct);
- auto* rowSt = row->MutableStruct();
-
- for (const auto& ci : ContentsColumns) {
- AddResultColumn(*rowSt, ci);
- }
- }
- }
-
+ MaxKeys > ContentsRows.size() + CommonPrefixesRows.size() &&
+ shardResponse.GetMoreRows())
+ {
+ ++CurrentShardIdx;
+ MakeShardRequest(CurrentShardIdx, ctx);
+ } else {
+ ReplySuccess(ctx);
+ }
+ }
+
+ void AddResultColumn(NKikimrMiniKQL::TStructType& row, const TSysTables::TTableColumnInfo& colInfo) const {
+ auto* col = row.AddMember();
+ col->SetName(colInfo.Name);
+ col->MutableType()->SetKind(NKikimrMiniKQL::Optional);
+ auto* item = col->MutableType()->MutableOptional()->MutableItem();
+ item->SetKind(NKikimrMiniKQL::Data);
+ item->MutableData()->SetScheme(colInfo.PType);
+ }
+
+ void BuildResultType(NKikimrMiniKQL::TType& type) const {
+ // CommonPrefixes: list struct { Path : String }
+ type.SetKind(NKikimrMiniKQL::Struct);
+ auto* st = type.MutableStruct();
+ {
+ auto* prefixes = st->AddMember();
+ prefixes->SetName("CommonPrefixes");
+ auto* rowList = prefixes->MutableType();
+ rowList->SetKind(NKikimrMiniKQL::List);
+ auto* row = rowList->MutableList()->MutableItem();
+ row->SetKind(NKikimrMiniKQL::Struct);
+ auto* rowSt = row->MutableStruct();
+ for (const auto& ci : CommonPrefixesColumns) {
+ AddResultColumn(*rowSt, ci);
+ }
+ }
+
+ // Contents: list of struct { Path : String Col1 : Type1 ... }
+ {
+ auto* contents = st->AddMember();
+ contents->SetName("Contents");
+ auto* rowList = contents->MutableType();
+ rowList->SetKind(NKikimrMiniKQL::List);
+ auto* row = rowList->MutableList()->MutableItem();
+ row->SetKind(NKikimrMiniKQL::Struct);
+ auto* rowSt = row->MutableStruct();
+
+ for (const auto& ci : ContentsColumns) {
+ AddResultColumn(*rowSt, ci);
+ }
+ }
+ }
+
void AddResultRow(NKikimrMiniKQL::TValue& listOfRows, const TVector<TSysTables::TTableColumnInfo>& rowScheme, const TSerializedCellVec& cells) const {
- Y_VERIFY(rowScheme.size() >= cells.GetCells().size());
- TString errStr;
-
- auto& mkqlRow = *listOfRows.AddList();
- for (ui32 i = 0; i < cells.GetCells().size(); ++i) {
- const TCell& c = cells.GetCells()[i];
- auto* val = mkqlRow.AddStruct();
-
- bool ok = NMiniKQL::CellToValue(rowScheme[i].PType, c, *val, errStr);
+ Y_VERIFY(rowScheme.size() >= cells.GetCells().size());
+ TString errStr;
+
+ auto& mkqlRow = *listOfRows.AddList();
+ for (ui32 i = 0; i < cells.GetCells().size(); ++i) {
+ const TCell& c = cells.GetCells()[i];
+ auto* val = mkqlRow.AddStruct();
+
+ bool ok = NMiniKQL::CellToValue(rowScheme[i].PType, c, *val, errStr);
Y_VERIFY(ok, "Failed to build result position %" PRIu32 " error: %s", i, errStr.data());
- }
- }
-
- void ReplySuccess(const NActors::TActorContext& ctx) {
- NKikimrClient::TS3ListingResponse resp;
- resp.SetStatus(MSTATUS_OK);
- resp.SetKeySuffixSize(KeyColumnTypes.size() - PathColumnInfo.KeyOrder);
-
- NKikimrMiniKQL::TResult& result = *resp.MutableResult();
-
- BuildResultType(*result.MutableType());
-
- auto* prefixes = result.MutableValue()->AddStruct();
- for (size_t i = 0; i < CommonPrefixesRows.size(); ++i) {
- AddResultRow(*prefixes, CommonPrefixesColumns, CommonPrefixesRows[i]);
- }
-
- auto* contents = result.MutableValue()->AddStruct();
- for (size_t i = 0; i < ContentsRows.size(); ++i) {
- AddResultRow(*contents, ContentsColumns, ContentsRows[i]);
- }
-
- SendReplyMessage(std::move(resp));
- Finished = true;
- Die(ctx);
- }
-};
-
-
-//////////////////////////////////////////////////////
-// MsgBus and old GRPC API implementation
-
+ }
+ }
+
+ void ReplySuccess(const NActors::TActorContext& ctx) {
+ NKikimrClient::TS3ListingResponse resp;
+ resp.SetStatus(MSTATUS_OK);
+ resp.SetKeySuffixSize(KeyColumnTypes.size() - PathColumnInfo.KeyOrder);
+
+ NKikimrMiniKQL::TResult& result = *resp.MutableResult();
+
+ BuildResultType(*result.MutableType());
+
+ auto* prefixes = result.MutableValue()->AddStruct();
+ for (size_t i = 0; i < CommonPrefixesRows.size(); ++i) {
+ AddResultRow(*prefixes, CommonPrefixesColumns, CommonPrefixesRows[i]);
+ }
+
+ auto* contents = result.MutableValue()->AddStruct();
+ for (size_t i = 0; i < ContentsRows.size(); ++i) {
+ AddResultRow(*contents, ContentsColumns, ContentsRows[i]);
+ }
+
+ SendReplyMessage(std::move(resp));
+ Finished = true;
+ Die(ctx);
+ }
+};
+
+
+//////////////////////////////////////////////////////
+// MsgBus and old GRPC API implementation
+
class TS3ListingRequestMsgbus : public TMessageBusSessionIdentHolder, public TS3ListingRequestBase<NKikimrServices::TActivity::MSGBUS_COMMON> {
-private:
- TAutoPtr<TBusS3ListingRequest> RequestHolder;
-
-public:
+private:
+ TAutoPtr<TBusS3ListingRequest> RequestHolder;
+
+public:
TS3ListingRequestMsgbus(NMsgBusProxy::TBusMessageContext& msgCtx, TActorId schemeCache)
- : TMessageBusSessionIdentHolder(msgCtx)
- , TS3ListingRequestBase(schemeCache, nullptr)
- , RequestHolder(static_cast<TBusS3ListingRequest*>(msgCtx.ReleaseMessage()))
- {}
-
- const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) override {
- errDescr.clear();
- return &RequestHolder->Record;
- }
-
-protected:
- void SendReplyMessage(NKikimrClient::TS3ListingResponse&& response) override {
- TAutoPtr<TBusS3ListingResponse> responseMsg(new TBusS3ListingResponse());
- responseMsg->Record = std::move(response);
- TMessageBusSessionIdentHolder::SendReplyAutoPtr(responseMsg);
- }
-};
-
-IActor* CreateMessageBusS3ListingRequest(TBusMessageContext& msg) {
+ : TMessageBusSessionIdentHolder(msgCtx)
+ , TS3ListingRequestBase(schemeCache, nullptr)
+ , RequestHolder(static_cast<TBusS3ListingRequest*>(msgCtx.ReleaseMessage()))
+ {}
+
+ const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) override {
+ errDescr.clear();
+ return &RequestHolder->Record;
+ }
+
+protected:
+ void SendReplyMessage(NKikimrClient::TS3ListingResponse&& response) override {
+ TAutoPtr<TBusS3ListingResponse> responseMsg(new TBusS3ListingResponse());
+ responseMsg->Record = std::move(response);
+ TMessageBusSessionIdentHolder::SendReplyAutoPtr(responseMsg);
+ }
+};
+
+IActor* CreateMessageBusS3ListingRequest(TBusMessageContext& msg) {
TActorId schemeCache = MakeSchemeCacheID();
- return new TS3ListingRequestMsgbus(msg, schemeCache);
-}
-
-} // namespace NMsgBusProxy
-
-
-//////////////////////////////////////////////////////
-// new GRPC API implementation
-
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
-class TMessageConverter {
-protected:
- static Ydb::S3Internal::S3ListingResult ConvertResult(NKikimrClient::TS3ListingResponse& msgbusResponse) {
- Ydb::S3Internal::S3ListingResult grpcResult;
- Y_VERIFY(msgbusResponse.GetStatus() == NMsgBusProxy::MSTATUS_OK);
- //Cerr << msgbusResponse << Endl;
-
- Y_VERIFY(msgbusResponse.GetResult().GetType().GetStruct().GetMember(0).GetName() == "CommonPrefixes");
- ConvertMiniKQLRowsToResultSet(
- msgbusResponse.GetResult().GetType().GetStruct().GetMember(0).GetType(),
- msgbusResponse.GetResult().GetValue().GetStruct(0),
- *grpcResult.Mutablecommon_prefixes());
-
- Y_VERIFY(msgbusResponse.GetResult().GetType().GetStruct().GetMember(1).GetName() == "Contents");
- ConvertMiniKQLRowsToResultSet(
- msgbusResponse.GetResult().GetType().GetStruct().GetMember(1).GetType(),
- msgbusResponse.GetResult().GetValue().GetStruct(1),
- *grpcResult.Mutablecontents());
-
- grpcResult.Setkey_suffix_size(msgbusResponse.GetKeySuffixSize());
-
- return grpcResult;
- }
-
- static Ydb::StatusIds::StatusCode ConvertMsgBusProxyStatusToYdb(ui32 msgBusStatus, ui32 errorCode) {
- switch (msgBusStatus) {
- case NMsgBusProxy::MSTATUS_OK:
- return Ydb::StatusIds::SUCCESS;
- case NMsgBusProxy::MSTATUS_TIMEOUT:
- return Ydb::StatusIds::TIMEOUT;
- case NMsgBusProxy::MSTATUS_INTERNALERROR:
- return Ydb::StatusIds::INTERNAL_ERROR;
- case NMsgBusProxy::MSTATUS_NOTREADY:
- return Ydb::StatusIds::UNAVAILABLE;
-
- case NMsgBusProxy::MSTATUS_ERROR: {
- switch (errorCode) {
- case NTxProxy::TResultStatus::EStatus::ResolveError:
- return Ydb::StatusIds::SCHEME_ERROR;
- case NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable:
- return Ydb::StatusIds::UNAVAILABLE;
- case NTxProxy::TResultStatus::EStatus::WrongRequest:
- return Ydb::StatusIds::BAD_REQUEST;
- case NTxProxy::TResultStatus::AccessDenied:
- return Ydb::StatusIds::UNAUTHORIZED;
- default:
- return Ydb::StatusIds::GENERIC_ERROR;
- }
- }
-
- default:
- return Ydb::StatusIds::GENERIC_ERROR;
- }
- }
-
- static NKikimrClient::TS3ListingRequest ConvertRequest(const Ydb::S3Internal::S3ListingRequest* proto) {
- NKikimrClient::TS3ListingRequest msgbusRequest;
- msgbusRequest.SetTableName(proto->Gettable_name());
- if (proto->Haskey_prefix()) {
- ValueToParams(proto->Getkey_prefix(), *msgbusRequest.MutableKeyPrefix());
- }
- msgbusRequest.SetPathColumnPrefix(proto->Getpath_column_prefix());
- msgbusRequest.SetPathColumnDelimiter(proto->Getpath_column_delimiter());
- if (proto->Hasstart_after_key_suffix()) {
- ValueToParams(proto->Getstart_after_key_suffix(), *msgbusRequest.MutableStartAfterKeySuffix());
- }
- msgbusRequest.SetMaxKeys(proto->Getmax_keys());
- msgbusRequest.MutableColumnsToReturn()->CopyFrom(proto->Getcolumns_to_return());
-
- // TODO: operation params
- return msgbusRequest;
- }
-
- static void ValueToParams(const Ydb::TypedValue& tv, NKikimrMiniKQL::TParams& params) {
- ConvertYdbTypeToMiniKQLType(tv.Gettype(), *params.MutableType());
- ConvertYdbValueToMiniKQLValue(tv.Gettype(), tv.Getvalue(), *params.MutableValue());
- }
-
+ return new TS3ListingRequestMsgbus(msg, schemeCache);
+}
+
+} // namespace NMsgBusProxy
+
+
+//////////////////////////////////////////////////////
+// new GRPC API implementation
+
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
+class TMessageConverter {
+protected:
+ static Ydb::S3Internal::S3ListingResult ConvertResult(NKikimrClient::TS3ListingResponse& msgbusResponse) {
+ Ydb::S3Internal::S3ListingResult grpcResult;
+ Y_VERIFY(msgbusResponse.GetStatus() == NMsgBusProxy::MSTATUS_OK);
+ //Cerr << msgbusResponse << Endl;
+
+ Y_VERIFY(msgbusResponse.GetResult().GetType().GetStruct().GetMember(0).GetName() == "CommonPrefixes");
+ ConvertMiniKQLRowsToResultSet(
+ msgbusResponse.GetResult().GetType().GetStruct().GetMember(0).GetType(),
+ msgbusResponse.GetResult().GetValue().GetStruct(0),
+ *grpcResult.Mutablecommon_prefixes());
+
+ Y_VERIFY(msgbusResponse.GetResult().GetType().GetStruct().GetMember(1).GetName() == "Contents");
+ ConvertMiniKQLRowsToResultSet(
+ msgbusResponse.GetResult().GetType().GetStruct().GetMember(1).GetType(),
+ msgbusResponse.GetResult().GetValue().GetStruct(1),
+ *grpcResult.Mutablecontents());
+
+ grpcResult.Setkey_suffix_size(msgbusResponse.GetKeySuffixSize());
+
+ return grpcResult;
+ }
+
+ static Ydb::StatusIds::StatusCode ConvertMsgBusProxyStatusToYdb(ui32 msgBusStatus, ui32 errorCode) {
+ switch (msgBusStatus) {
+ case NMsgBusProxy::MSTATUS_OK:
+ return Ydb::StatusIds::SUCCESS;
+ case NMsgBusProxy::MSTATUS_TIMEOUT:
+ return Ydb::StatusIds::TIMEOUT;
+ case NMsgBusProxy::MSTATUS_INTERNALERROR:
+ return Ydb::StatusIds::INTERNAL_ERROR;
+ case NMsgBusProxy::MSTATUS_NOTREADY:
+ return Ydb::StatusIds::UNAVAILABLE;
+
+ case NMsgBusProxy::MSTATUS_ERROR: {
+ switch (errorCode) {
+ case NTxProxy::TResultStatus::EStatus::ResolveError:
+ return Ydb::StatusIds::SCHEME_ERROR;
+ case NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable:
+ return Ydb::StatusIds::UNAVAILABLE;
+ case NTxProxy::TResultStatus::EStatus::WrongRequest:
+ return Ydb::StatusIds::BAD_REQUEST;
+ case NTxProxy::TResultStatus::AccessDenied:
+ return Ydb::StatusIds::UNAUTHORIZED;
+ default:
+ return Ydb::StatusIds::GENERIC_ERROR;
+ }
+ }
+
+ default:
+ return Ydb::StatusIds::GENERIC_ERROR;
+ }
+ }
+
+ static NKikimrClient::TS3ListingRequest ConvertRequest(const Ydb::S3Internal::S3ListingRequest* proto) {
+ NKikimrClient::TS3ListingRequest msgbusRequest;
+ msgbusRequest.SetTableName(proto->Gettable_name());
+ if (proto->Haskey_prefix()) {
+ ValueToParams(proto->Getkey_prefix(), *msgbusRequest.MutableKeyPrefix());
+ }
+ msgbusRequest.SetPathColumnPrefix(proto->Getpath_column_prefix());
+ msgbusRequest.SetPathColumnDelimiter(proto->Getpath_column_delimiter());
+ if (proto->Hasstart_after_key_suffix()) {
+ ValueToParams(proto->Getstart_after_key_suffix(), *msgbusRequest.MutableStartAfterKeySuffix());
+ }
+ msgbusRequest.SetMaxKeys(proto->Getmax_keys());
+ msgbusRequest.MutableColumnsToReturn()->CopyFrom(proto->Getcolumns_to_return());
+
+ // TODO: operation params
+ return msgbusRequest;
+ }
+
+ static void ValueToParams(const Ydb::TypedValue& tv, NKikimrMiniKQL::TParams& params) {
+ ConvertYdbTypeToMiniKQLType(tv.Gettype(), *params.MutableType());
+ ConvertYdbValueToMiniKQLValue(tv.Gettype(), tv.Getvalue(), *params.MutableValue());
+ }
+
static void ConvertMiniKQLRowsToResultSet(const NKikimrMiniKQL::TType& rowsListType, const NKikimrMiniKQL::TValue& rowsList, Ydb::ResultSet& resultSet) {
- TStackVec<NKikimrMiniKQL::TType> columnTypes;
- Y_VERIFY(rowsListType.GetKind() == NKikimrMiniKQL::ETypeKind::List);
- for (const auto& column : rowsListType.GetList().GetItem().GetStruct().GetMember()) {
+ TStackVec<NKikimrMiniKQL::TType> columnTypes;
+ Y_VERIFY(rowsListType.GetKind() == NKikimrMiniKQL::ETypeKind::List);
+ for (const auto& column : rowsListType.GetList().GetItem().GetStruct().GetMember()) {
auto columnMeta = resultSet.add_columns();
- columnMeta->set_name(column.GetName());
- columnTypes.push_back(column.GetType());
- ConvertMiniKQLTypeToYdbType(column.GetType(), *columnMeta->mutable_type());
- }
-
- for (const auto& row : rowsList.GetList()) {
- auto newRow = resultSet.add_rows();
- ui32 columnCount = static_cast<ui32>(row.StructSize());
- Y_VERIFY(columnCount == columnTypes.size());
- for (ui32 i = 0; i < columnCount; i++) {
- const auto& column = row.GetStruct(i);
- ConvertMiniKQLValueToYdbValue(columnTypes[i], column, *newRow->add_items());
- }
- }
- resultSet.set_truncated(false);
- }
-};
-
-
+ columnMeta->set_name(column.GetName());
+ columnTypes.push_back(column.GetType());
+ ConvertMiniKQLTypeToYdbType(column.GetType(), *columnMeta->mutable_type());
+ }
+
+ for (const auto& row : rowsList.GetList()) {
+ auto newRow = resultSet.add_rows();
+ ui32 columnCount = static_cast<ui32>(row.StructSize());
+ Y_VERIFY(columnCount == columnTypes.size());
+ for (ui32 i = 0; i < columnCount; i++) {
+ const auto& column = row.GetStruct(i);
+ ConvertMiniKQLValueToYdbValue(columnTypes[i], column, *newRow->add_items());
+ }
+ }
+ resultSet.set_truncated(false);
+ }
+};
+
+
class TS3ListingRequestGrpc : protected TMessageConverter, public NMsgBusProxy::TS3ListingRequestBase<NKikimrServices::TActivity::GRPC_REQ> {
-private:
- TAutoPtr<TEvS3ListingRequest> GrpcRequest;
- NKikimrClient::TS3ListingRequest MsgbusRequest;
-
-public:
+private:
+ TAutoPtr<TEvS3ListingRequest> GrpcRequest;
+ NKikimrClient::TS3ListingRequest MsgbusRequest;
+
+public:
TS3ListingRequestGrpc(TAutoPtr<TEvS3ListingRequest> request, TActorId schemeCache)
- : TS3ListingRequestBase(schemeCache,
+ : TS3ListingRequestBase(schemeCache,
THolder<const NACLib::TUserToken>(request->GetInternalToken() ? new NACLib::TUserToken(request->GetInternalToken()) : nullptr))
- , GrpcRequest(request)
- {}
-
-
- const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) override {
- try {
- MsgbusRequest = TMessageConverter::ConvertRequest(GrpcRequest->GetProtoRequest());
- } catch (std::exception& e) {
- errDescr = e.what();
- return nullptr;
- }
-
- return &MsgbusRequest;
- }
-
-protected:
- void SendReplyMessage(NKikimrClient::TS3ListingResponse&& msgbusResponse) override {
- if (msgbusResponse.GetStatus() != NMsgBusProxy::MSTATUS_OK) {
- Ydb::StatusIds::StatusCode grpcStatus =
- TMessageConverter::ConvertMsgBusProxyStatusToYdb(msgbusResponse.GetStatus(), msgbusResponse.GetErrorCode());
- TString description = msgbusResponse.GetDescription();
-
- if (!description.empty()) {
- GrpcRequest->RaiseIssue(NYql::TIssue(description));
- }
+ , GrpcRequest(request)
+ {}
+
+
+ const NKikimrClient::TS3ListingRequest* ExtractRequest(TString& errDescr) override {
+ try {
+ MsgbusRequest = TMessageConverter::ConvertRequest(GrpcRequest->GetProtoRequest());
+ } catch (std::exception& e) {
+ errDescr = e.what();
+ return nullptr;
+ }
+
+ return &MsgbusRequest;
+ }
+
+protected:
+ void SendReplyMessage(NKikimrClient::TS3ListingResponse&& msgbusResponse) override {
+ if (msgbusResponse.GetStatus() != NMsgBusProxy::MSTATUS_OK) {
+ Ydb::StatusIds::StatusCode grpcStatus =
+ TMessageConverter::ConvertMsgBusProxyStatusToYdb(msgbusResponse.GetStatus(), msgbusResponse.GetErrorCode());
+ TString description = msgbusResponse.GetDescription();
+
+ if (!description.empty()) {
+ GrpcRequest->RaiseIssue(NYql::TIssue(description));
+ }
GrpcRequest->ReplyWithYdbStatus(grpcStatus);
- } else {
- Ydb::S3Internal::S3ListingResult grpcResult = TMessageConverter::ConvertResult(msgbusResponse);
- GrpcRequest->SendResult(grpcResult, Ydb::StatusIds::SUCCESS);
- }
- }
-};
-
-
-IActor* CreateGrpcS3ListingRequest(TAutoPtr<TEvS3ListingRequest> request) {
+ } else {
+ Ydb::S3Internal::S3ListingResult grpcResult = TMessageConverter::ConvertResult(msgbusResponse);
+ GrpcRequest->SendResult(grpcResult, Ydb::StatusIds::SUCCESS);
+ }
+ }
+};
+
+
+IActor* CreateGrpcS3ListingRequest(TAutoPtr<TEvS3ListingRequest> request) {
TActorId schemeCache = MakeSchemeCacheID();
- return new TS3ListingRequestGrpc(request, schemeCache);
-}
-
-} // namespace NGRpcService
-} // namespace NKikimr
+ return new TS3ListingRequestGrpc(request, schemeCache);
+}
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/core/client/server/msgbus_server_scheme_initroot.cpp b/ydb/core/client/server/msgbus_server_scheme_initroot.cpp
index ef3864017fb..a8538eea1cb 100644
--- a/ydb/core/client/server/msgbus_server_scheme_initroot.cpp
+++ b/ydb/core/client/server/msgbus_server_scheme_initroot.cpp
@@ -12,21 +12,21 @@ namespace NKikimr {
namespace NMsgBusProxy {
using namespace NSchemeShard;
-
+
class TMessageBusSchemeInitRoot : public TMessageBusSecureRequest<TMessageBusServerRequestBase<TMessageBusSchemeInitRoot>> {
using TBase = TMessageBusSecureRequest<TMessageBusServerRequestBase<TMessageBusSchemeInitRoot>>;
THolder<TBusSchemeInitRoot> Request;
const bool WithRetry = true;
TActorId PipeClient;
-
+
void ReplyWithResult(EResponseStatus status, TEvSchemeShard::TEvInitRootShardResult::EStatus ssStatus, const TActorContext &ctx) {
TAutoPtr<TBusResponseStatus> response(new TBusResponseStatus(status));
response->Record.SetSchemeStatus(ssStatus);
SendReplyAutoPtr(response);
Request.Destroy();
Die(ctx);
- }
-
+ }
+
void Handle(TEvSchemeShard::TEvInitRootShardResult::TPtr& ev, const TActorContext& ctx) {
const NKikimrTxScheme::TEvInitRootShardResult &record = ev->Get()->Record;
const auto status = (TEvSchemeShard::TEvInitRootShardResult::EStatus)record.GetStatus();
diff --git a/ydb/core/client/server/msgbus_server_scheme_request.cpp b/ydb/core/client/server/msgbus_server_scheme_request.cpp
index 5dd3ad78f38..7df5e262d07 100644
--- a/ydb/core/client/server/msgbus_server_scheme_request.cpp
+++ b/ydb/core/client/server/msgbus_server_scheme_request.cpp
@@ -121,15 +121,15 @@ void TMessageBusServerSchemeRequest<TBusPersQueue>::SendProposeRequest(const TAc
pqgroup->MutablePQTabletConfig()->MergeFrom(cmd.GetConfig());
}
- if (Request->Record.HasMetaRequest() && Request->Record.GetMetaRequest().HasCmdDeleteTopic()) {
- const auto& cmd = Request->Record.GetMetaRequest().GetCmdDeleteTopic();
- auto *transaction = record.MutableTransaction()->MutableModifyScheme();
+ if (Request->Record.HasMetaRequest() && Request->Record.GetMetaRequest().HasCmdDeleteTopic()) {
+ const auto& cmd = Request->Record.GetMetaRequest().GetCmdDeleteTopic();
+ auto *transaction = record.MutableTransaction()->MutableModifyScheme();
transaction->SetWorkingDir(TopicPrefix(ctx));
transaction->SetOperationType(NKikimrSchemeOp::ESchemeOpDropPersQueueGroup);
- auto *pqgroup = transaction->MutableDrop();
- pqgroup->SetName(cmd.GetTopic());
- }
-
+ auto *pqgroup = transaction->MutableDrop();
+ pqgroup->SetName(cmd.GetTopic());
+ }
+
req->Record.SetUserToken(TBase::GetSerializedToken());
ctx.Send(MakeTxProxyID(), req.Release());
@@ -212,10 +212,10 @@ void TMessageBusServerSchemeRequest<TBusSchemeOperation>::ReplyWithResult(ERespo
void TMessageBusServerProxy::Handle(TEvBusProxy::TEvPersQueue::TPtr& ev, const TActorContext& ctx) {
TEvBusProxy::TEvPersQueue *msg = ev->Get();
const auto& rec = static_cast<TBusPersQueue *>(msg->MsgContext.GetMessage())->Record;
- if (rec.HasMetaRequest() && (rec.GetMetaRequest().HasCmdCreateTopic()
- || rec.GetMetaRequest().HasCmdChangeTopic()
- || rec.GetMetaRequest().HasCmdDeleteTopic())) {
- ctx.Register(new TMessageBusServerSchemeRequest<TBusPersQueue>(ev->Get()), TMailboxType::HTSwap, AppData()->UserPoolId);
+ if (rec.HasMetaRequest() && (rec.GetMetaRequest().HasCmdCreateTopic()
+ || rec.GetMetaRequest().HasCmdChangeTopic()
+ || rec.GetMetaRequest().HasCmdDeleteTopic())) {
+ ctx.Register(new TMessageBusServerSchemeRequest<TBusPersQueue>(ev->Get()), TMailboxType::HTSwap, AppData()->UserPoolId);
return;
}
ctx.Register(CreateMessageBusServerPersQueue(msg->MsgContext, PqMetaCache, PQReadSessionsInfoWorkerFactory));
diff --git a/ydb/core/client/server/msgbus_server_tablet_counters.cpp b/ydb/core/client/server/msgbus_server_tablet_counters.cpp
index 8c915805c22..4cae1322582 100644
--- a/ydb/core/client/server/msgbus_server_tablet_counters.cpp
+++ b/ydb/core/client/server/msgbus_server_tablet_counters.cpp
@@ -1,48 +1,48 @@
-#include "msgbus_tabletreq.h"
-
-namespace NKikimr {
-namespace NMsgBusProxy {
-
-namespace {
- const ui64 DefaultTimeout = 90000;
-}
-
+#include "msgbus_tabletreq.h"
+
+namespace NKikimr {
+namespace NMsgBusProxy {
+
+namespace {
+ const ui64 DefaultTimeout = 90000;
+}
+
class TMessageBusTabletCounters : public TMessageBusSimpleTabletRequest<TMessageBusTabletCounters, TEvTablet::TEvGetCountersResponse, NKikimrServices::TActivity::FRONT_GETCOUNTERS> {
- NKikimrClient::TTabletCountersRequest Request;
-public:
- TMessageBusTabletCounters(TBusMessageContext &msg, NKikimrClient::TTabletCountersRequest &request, ui64 tabletId,
+ NKikimrClient::TTabletCountersRequest Request;
+public:
+ TMessageBusTabletCounters(TBusMessageContext &msg, NKikimrClient::TTabletCountersRequest &request, ui64 tabletId,
bool withRetry, TDuration timeout, bool connectToFollower)
: TMessageBusSimpleTabletRequest(msg, tabletId, withRetry, timeout, connectToFollower)
- , Request()
- {
- Request.Swap(&request);
- }
-
- void Handle(TEvTablet::TEvGetCountersResponse::TPtr &ev, const TActorContext &ctx) {
- auto &record = ev->Get()->Record;
-
- TAutoPtr<TBusResponse> response(new TBusResponseStatus(MSTATUS_OK));
- response->Record.SetTabletId(TabletID);
- response->Record.MutableTabletCounters()->Swap(record.MutableTabletCounters());
-
- return SendReplyAndDie(response.Release(), ctx);
- }
-
- TEvTablet::TEvGetCounters* MakeReq(const TActorContext &ctx) {
- Y_UNUSED(ctx);
- return new TEvTablet::TEvGetCounters();
- }
-};
-
-IActor* CreateMessageBusTabletCountersRequest(TBusMessageContext &msg) {
- auto &record = static_cast<TBusTabletCountersRequest*>(msg.GetMessage())->Record;
-
+ , Request()
+ {
+ Request.Swap(&request);
+ }
+
+ void Handle(TEvTablet::TEvGetCountersResponse::TPtr &ev, const TActorContext &ctx) {
+ auto &record = ev->Get()->Record;
+
+ TAutoPtr<TBusResponse> response(new TBusResponseStatus(MSTATUS_OK));
+ response->Record.SetTabletId(TabletID);
+ response->Record.MutableTabletCounters()->Swap(record.MutableTabletCounters());
+
+ return SendReplyAndDie(response.Release(), ctx);
+ }
+
+ TEvTablet::TEvGetCounters* MakeReq(const TActorContext &ctx) {
+ Y_UNUSED(ctx);
+ return new TEvTablet::TEvGetCounters();
+ }
+};
+
+IActor* CreateMessageBusTabletCountersRequest(TBusMessageContext &msg) {
+ auto &record = static_cast<TBusTabletCountersRequest*>(msg.GetMessage())->Record;
+
const bool connectToFollower = record.HasConnectToFollower() ? record.GetConnectToFollower() : false;
- const ui64 tabletId = record.GetTabletID();
- const bool withRetry = record.HasWithRetry() ? record.GetWithRetry() : false;
- const TDuration timeout = TDuration::MilliSeconds(record.HasTimeout() ? record.GetTimeout() : DefaultTimeout);
-
+ const ui64 tabletId = record.GetTabletID();
+ const bool withRetry = record.HasWithRetry() ? record.GetWithRetry() : false;
+ const TDuration timeout = TDuration::MilliSeconds(record.HasTimeout() ? record.GetTimeout() : DefaultTimeout);
+
return new TMessageBusTabletCounters(msg, record, tabletId, withRetry, timeout, connectToFollower);
-}
-
-}}
+}
+
+}}
diff --git a/ydb/core/client/server/msgbus_server_tx_request.cpp b/ydb/core/client/server/msgbus_server_tx_request.cpp
index 4fe136810ac..a0d73fa9d69 100644
--- a/ydb/core/client/server/msgbus_server_tx_request.cpp
+++ b/ydb/core/client/server/msgbus_server_tx_request.cpp
@@ -8,14 +8,14 @@ namespace NMsgBusProxy {
class TMessageBusTxStatusRequestActor : public TMessageBusSimpleTabletRequest<TMessageBusTxStatusRequestActor, NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletionResult, NKikimrServices::TActivity::FRONT_SCHEME_TXSTATUS> {
const ui64 TxId;
const ui64 PathId;
- bool InProgress;
+ bool InProgress;
public:
TMessageBusTxStatusRequestActor(NMsgBusProxy::TBusMessageContext& msg, const TBusSchemeOperationStatus* casted)
: TMessageBusSimpleTabletRequest(msg, casted->Record.GetFlatTxId().GetSchemeShardTabletId(), false,
TDuration::MilliSeconds(casted->Record.GetPollOptions().GetTimeout()), false)
, TxId(casted->Record.GetFlatTxId().GetTxId())
, PathId(casted->Record.GetFlatTxId().GetPathId())
- , InProgress(false)
+ , InProgress(false)
{}
TMessageBusTxStatusRequestActor(NMsgBusProxy::TBusMessageContext& msg)
@@ -32,12 +32,12 @@ public:
}
void Handle(NSchemeShard::TEvSchemeShard::TEvNotifyTxCompletionRegistered::TPtr&, const TActorContext&) {
- InProgress = true;
- }
-
+ InProgress = true;
+ }
+
void HandleTimeout(const TActorContext& ctx) {
TAutoPtr<NMsgBusProxy::TBusResponse> response = new NMsgBusProxy::TBusResponse();
- response->Record.SetStatus(InProgress ? NMsgBusProxy::MSTATUS_INPROGRESS : NMsgBusProxy::MSTATUS_TIMEOUT);
+ response->Record.SetStatus(InProgress ? NMsgBusProxy::MSTATUS_INPROGRESS : NMsgBusProxy::MSTATUS_TIMEOUT);
response->Record.MutableFlatTxId()->SetTxId(TxId);
response->Record.MutableFlatTxId()->SetPathId(PathId);
response->Record.MutableFlatTxId()->SetSchemeShardTabletId(TabletID);
diff --git a/ydb/core/client/server/ya.make b/ydb/core/client/server/ya.make
index 25d6c7cf944..8cbeec21b77 100644
--- a/ydb/core/client/server/ya.make
+++ b/ydb/core/client/server/ya.make
@@ -27,8 +27,8 @@ SRCS(
msgbus_server_keyvalue.cpp
msgbus_server_persqueue.cpp
msgbus_server_persqueue.h
- msgbus_server_pq_metacache.h
- msgbus_server_pq_metacache.cpp
+ msgbus_server_pq_metacache.h
+ msgbus_server_pq_metacache.cpp
msgbus_server_pq_metarequest.h
msgbus_server_pq_metarequest.cpp
msgbus_server_pq_read_session_info.cpp
@@ -37,17 +37,17 @@ SRCS(
msgbus_server_load.cpp
msgbus_server_local_enumerate_tablets.cpp
msgbus_server_local_minikql.cpp
- msgbus_server_local_scheme_tx.cpp
+ msgbus_server_local_scheme_tx.cpp
msgbus_server_node_registration.cpp
msgbus_server_proxy.cpp
msgbus_server_proxy.h
msgbus_server_request.cpp
msgbus_server_request.h
- msgbus_server_s3_listing.cpp
+ msgbus_server_s3_listing.cpp
msgbus_server_scheme_initroot.cpp
msgbus_server_scheme_request.cpp
msgbus_server_sqs.cpp
- msgbus_server_tablet_counters.cpp
+ msgbus_server_tablet_counters.cpp
msgbus_server_tablet_kill.cpp
msgbus_server_tablet_state.cpp
msgbus_server_test_shard_request.cpp
diff --git a/ydb/core/client/ut/ya.make b/ydb/core/client/ut/ya.make
index 5af0467ce57..5d839f47c85 100644
--- a/ydb/core/client/ut/ya.make
+++ b/ydb/core/client/ut/ya.make
@@ -40,12 +40,12 @@ YQL_LAST_ABI_VERSION()
INCLUDE(${ARCADIA_ROOT}/ydb/tests/supp/ubsan_supp.inc)
SRCS(
- cancel_tx_ut.cpp
+ cancel_tx_ut.cpp
client_ut.cpp
- flat_ut.cpp
+ flat_ut.cpp
locks_ut.cpp
- query_stats_ut.cpp
- s3_listing_ut.cpp
+ query_stats_ut.cpp
+ s3_listing_ut.cpp
)
END()
diff --git a/ydb/core/cms/console/console_ut_configs.cpp b/ydb/core/cms/console/console_ut_configs.cpp
index af2cc957d2c..0cb1655281d 100644
--- a/ydb/core/cms/console/console_ut_configs.cpp
+++ b/ydb/core/cms/console/console_ut_configs.cpp
@@ -1329,12 +1329,12 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
CheckGetItems(runtime, TVector<ui32>(),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
- CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2);
- CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::TenantPoolConfigItem}),
+ CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::TenantPoolConfigItem}),
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
- CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem,
- (ui32)NKikimrConsole::TConfigItem::TenantPoolConfigItem}),
+ CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem,
+ (ui32)NKikimrConsole::TConfigItem::TenantPoolConfigItem}),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
CheckGetItems(runtime, TVector<ui32>(NKikimrConsole::TConfigItem::ActorSystemConfigItem));
@@ -1366,113 +1366,113 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsById(runtime, TVector<ui64>({id2[0]}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1);
CheckGetItemsById(runtime, TVector<ui64>({id2[1]}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE23_LOG_1);
CheckGetItemsById(runtime, TVector<ui64>({id2[0], id2[0]}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1);
CheckGetItemsById(runtime, TVector<ui64>({id2[0], id2[1]}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1);
CheckGetItemsById(runtime, TVector<ui64>({id2[1]}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsById(runtime, TVector<ui64>({987654321}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByNodeId(runtime, TVector<ui32>({1}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({2}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({3}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE23_LOG_1, ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({4}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({1, 1}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({1, 2}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({1, 2, 3}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1, ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({1, 2, 3, 4}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1, ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({2, 4, 5}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1, ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1, ITEM_NODE34_LOG_1);
CheckGetItemsByNodeId(runtime, TVector<ui32>({4}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByNodeId(runtime, TVector<ui32>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByNodeId(runtime, TVector<ui32>({5}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByHost(runtime, TVector<TString>({"host1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host3"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST23_LOG_1, ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host4"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host1", "host1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host1", "host2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host1", "host2", "host3"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1, ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host1", "host2", "host3", "host4"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1, ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host2", "host3", "host5"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1, ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_HOST12_LOG_1, ITEM_HOST23_LOG_1, ITEM_HOST34_LOG_1);
CheckGetItemsByHost(runtime, TVector<TString>({"host4"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByHost(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByHost(runtime, TVector<TString>({"host5"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_LOG_1, ITEM_TENANT1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2);
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT2_LOG_1, ITEM_TENANT2_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant1", "tenant1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_LOG_1, ITEM_TENANT1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2);
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant1", "tenant2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_LOG_1, ITEM_TENANT1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
@@ -1480,7 +1480,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenant(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_LOG_1, ITEM_TENANT1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
@@ -1488,29 +1488,29 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByTenant(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByTenant(runtime, TVector<TString>({"tenant3"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByNodeType(runtime, TVector<TString>({"type1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2);
CheckGetItemsByNodeType(runtime, TVector<TString>({"type2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TYPE2_LOG_1, ITEM_TYPE2_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByNodeType(runtime, TVector<TString>({"type1", "type1"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2);
CheckGetItemsByNodeType(runtime, TVector<TString>({"type1", "type2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
@@ -1518,7 +1518,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByNodeType(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
@@ -1526,52 +1526,52 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByNodeType(runtime, TVector<TString>({"type2"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByNodeType(runtime, TVector<TString>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByNodeType(runtime, TVector<TString>({"type3"}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type1")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant2", "type1")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant2", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type1"),
std::make_pair("tenant1", "type1")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type1"),
std::make_pair("tenant1", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type1"),
std::make_pair("tenant2", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2,
ITEM_TENANT1_TYPE2_LOG_1, ITEM_TENANT1_TYPE2_LOG_2,
ITEM_TENANT2_TYPE1_LOG_1, ITEM_TENANT2_TYPE1_LOG_2,
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant2", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant1", "type3")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByTenantAndNodeType(runtime, TVector<std::pair<TString, TString>>({std::make_pair("tenant3", "type2")}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({1, 2})), TVector<ui32>(),
ITEM_NODE12_LOG_1);
@@ -1580,7 +1580,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({3, 4})), TVector<ui32>(),
ITEM_NODE34_LOG_1);
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({3, 4})),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({})), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({1})), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<ui32>({2})), TVector<ui32>());
@@ -1597,7 +1597,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
TVector<ui32>(),
ITEM_HOST34_LOG_1);
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<TString>({TString("host3"), TString("host4")})),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<TString>()), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<TString>({TString("host1")})), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope(TVector<TString>({TString("host2")})), TVector<ui32>());
@@ -1610,14 +1610,14 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant2", ""), TVector<ui32>(),
ITEM_TENANT2_LOG_1, ITEM_TENANT2_LOG_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant2", ""),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant3", ""), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", "type1"), TVector<ui32>(),
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", "type2"), TVector<ui32>(),
ITEM_TYPE2_LOG_1, ITEM_TYPE2_LOG_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", "type2"),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", "type3"), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant1", "type1"), TVector<ui32>(),
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2);
@@ -1628,7 +1628,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant2", "type2"), TVector<ui32>(),
ITEM_TENANT2_TYPE2_LOG_1, ITEM_TENANT2_TYPE2_LOG_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant2", "type2"),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant3", "type1"), TVector<ui32>());
CheckGetItemsByUsageScope(runtime, MakeUsageScope("tenant1", "type3"), TVector<ui32>());
@@ -1636,18 +1636,18 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", ""),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2);
CheckGetItemsByUsageScope(runtime, MakeUsageScope("", ""),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetItemsByUsageScope(runtime, NKikimrConsole::TUsageScope(), TVector<ui32>(),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
CheckGetItemsByUsageScope(runtime, NKikimrConsole::TUsageScope(),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2);
CheckGetItemsByUsageScope(runtime, NKikimrConsole::TUsageScope(),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
auto scope1 = MakeUsageScope(TVector<ui32>({1, 2}));
auto scope2 = MakeUsageScope(TVector<TString>({TString("host1"), TString("host2")}));
@@ -1664,7 +1664,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_DOMAIN_TENANT_POOL_1, ITEM_DOMAIN_TENANT_POOL_2);
CheckGetItemsByUsageScope(runtime, TVector<NKikimrConsole::TUsageScope>({scope1, scope2, scope3, scope4, scope5, scope6}),
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_NODE12_LOG_1, ITEM_HOST12_LOG_1,
ITEM_TENANT1_LOG_1, ITEM_TENANT1_LOG_2,
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
@@ -1692,7 +1692,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
CheckGetItemsByCookie(runtime, TVector<TString>({"cookie3"}),
TVector<ui32>());
- CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
+ CheckGetItems(runtime, TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::LogConfigItem}),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1,
ITEM_NODE34_LOG_1, ITEM_HOST12_LOG_1,
@@ -1760,7 +1760,7 @@ Y_UNIT_TEST_SUITE(TConsoleConfigTests) {
ITEM_TYPE1_LOG_1, ITEM_TYPE1_LOG_2,
ITEM_TENANT1_TYPE1_LOG_1, ITEM_TENANT1_TYPE1_LOG_2);
CheckGetNodeItems(runtime, 1, "host1", "tenant1", "type1",
- TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
+ TVector<ui32>({(ui32)NKikimrConsole::TConfigItem::ActorSystemConfigItem}));
CheckGetNodeItems(runtime, 2, "host1", "tenant1", "type1", TVector<ui32>(),
ITEM_DOMAIN_LOG_1, ITEM_DOMAIN_LOG_2,
ITEM_NODE12_LOG_1, ITEM_NODE23_LOG_1, ITEM_HOST12_LOG_1,
diff --git a/ydb/core/cms/console/immediate_controls_configurator_ut.cpp b/ydb/core/cms/console/immediate_controls_configurator_ut.cpp
index b70e9c3e8b3..8ad8f6ed1b8 100644
--- a/ydb/core/cms/console/immediate_controls_configurator_ut.cpp
+++ b/ydb/core/cms/console/immediate_controls_configurator_ut.cpp
@@ -129,7 +129,7 @@ void InitImmediateControlsConfigurator(TTenantTestRuntime &runtime)
cfg.MutableDataShardControls()->SetDisableByKeyFilter(1);
cfg.MutableDataShardControls()->SetMaxTxLagMilliseconds(2592000000ULL);
cfg.MutableDataShardControls()->SetCanCancelROWithReadSets(1);
- cfg.MutableTxLimitControls()->SetPerRequestDataSizeLimit(256000000000000ULL);
+ cfg.MutableTxLimitControls()->SetPerRequestDataSizeLimit(256000000000000ULL);
cfg.MutableTxLimitControls()->SetPerShardReadSizeLimit(107374182400ULL);
cfg.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(5368709120ULL);
}
@@ -144,7 +144,7 @@ void InitImmediateControlsConfigurator(TTenantTestRuntime &runtime)
cfg.MutableDataShardControls()->SetDisableByKeyFilter(10);
cfg.MutableDataShardControls()->SetMaxTxLagMilliseconds(25920000000ULL);
cfg.MutableDataShardControls()->SetCanCancelROWithReadSets(10);
- cfg.MutableTxLimitControls()->SetPerRequestDataSizeLimit(300000000000000ULL);
+ cfg.MutableTxLimitControls()->SetPerRequestDataSizeLimit(300000000000000ULL);
cfg.MutableTxLimitControls()->SetPerShardReadSizeLimit(1073741824000ULL);
cfg.MutableTxLimitControls()->SetPerShardIncomingReadSetSizeLimit(53687091200ULL);
}
diff --git a/ydb/core/cms/ui/datashard_hist.js b/ydb/core/cms/ui/datashard_hist.js
index c690aeae618..54de6f2d001 100644
--- a/ydb/core/cms/ui/datashard_hist.js
+++ b/ydb/core/cms/ui/datashard_hist.js
@@ -40,9 +40,9 @@ function addTableHistograms(data) {
if (data.CountHistogram)
addHistogram(tableName, keys, 'Rows count', data.CountHistogram)
-
- if (data.KeyAccessSample)
- addHistogram(tableName, keys, 'Key access sample', data.KeyAccessSample)
+
+ if (data.KeyAccessSample)
+ addHistogram(tableName, keys, 'Key access sample', data.KeyAccessSample)
}
function addHistogram(tableName, keys, histName, hist) {
diff --git a/ydb/core/cms/ui/datashard_info.js b/ydb/core/cms/ui/datashard_info.js
index a8b1235fe35..f1c8bad40f5 100644
--- a/ydb/core/cms/ui/datashard_info.js
+++ b/ydb/core/cms/ui/datashard_info.js
@@ -47,16 +47,16 @@ function onDataShardInfoLoaded(data) {
if (table.Stats) {
tableInfoHTML += `
<table class="ds-info">
- <caption class="ds-info-table-stats">Metrics</caption>
- <tbody class="ds-info">
- <tr class="ds-info">
- <td class="ds-info">CPU</td>
- <td class="ds-info">` + table.Metrics.CPU/10000 + `%</td>
- </tr>
- </tbody>
- </table>
-
- <table class="ds-info">
+ <caption class="ds-info-table-stats">Metrics</caption>
+ <tbody class="ds-info">
+ <tr class="ds-info">
+ <td class="ds-info">CPU</td>
+ <td class="ds-info">` + table.Metrics.CPU/10000 + `%</td>
+ </tr>
+ </tbody>
+ </table>
+
+ <table class="ds-info">
<caption class="ds-info-table-stats">Statistics</caption>
<tbody class="ds-info">
<tr class="ds-info">
diff --git a/ydb/core/cms/walle_check_task_adapter.cpp b/ydb/core/cms/walle_check_task_adapter.cpp
index 13b70ec8eda..e5570c9af91 100644
--- a/ydb/core/cms/walle_check_task_adapter.cpp
+++ b/ydb/core/cms/walle_check_task_adapter.cpp
@@ -13,8 +13,8 @@ class TWalleCheckTaskAdapter : public TActorBootstrapped<TWalleCheckTaskAdapter>
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::CMS_WALLE_REQ;
- }
-
+ }
+
TWalleCheckTaskAdapter(TEvCms::TEvWalleCheckTaskRequest::TPtr &event,
const TCmsStatePtr state, TActorId cms)
: RequestEvent(event)
diff --git a/ydb/core/cms/walle_create_task_adapter.cpp b/ydb/core/cms/walle_create_task_adapter.cpp
index dc90baf57b8..56989e2db03 100644
--- a/ydb/core/cms/walle_create_task_adapter.cpp
+++ b/ydb/core/cms/walle_create_task_adapter.cpp
@@ -14,8 +14,8 @@ class TWalleCreateTaskAdapter : public TActorBootstrapped<TWalleCreateTaskAdapte
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::CMS_WALLE_REQ;
- }
-
+ }
+
TWalleCreateTaskAdapter(TEvCms::TEvWalleCreateTaskRequest::TPtr &event, TActorId cms)
: RequestEvent(event)
, Cms(cms)
diff --git a/ydb/core/cms/walle_list_tasks_adapter.cpp b/ydb/core/cms/walle_list_tasks_adapter.cpp
index 43063fcc8b2..d256294cc18 100644
--- a/ydb/core/cms/walle_list_tasks_adapter.cpp
+++ b/ydb/core/cms/walle_list_tasks_adapter.cpp
@@ -13,8 +13,8 @@ class TWalleListTasksAdapter : public TActorBootstrapped<TWalleListTasksAdapter>
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::CMS_WALLE_REQ;
- }
-
+ }
+
TWalleListTasksAdapter(TEvCms::TEvWalleListTasksRequest::TPtr &event, const TCmsStatePtr state)
: RequestEvent(event)
, State(state)
diff --git a/ydb/core/cms/walle_remove_task_adapter.cpp b/ydb/core/cms/walle_remove_task_adapter.cpp
index 51ce2035c1b..899d4848250 100644
--- a/ydb/core/cms/walle_remove_task_adapter.cpp
+++ b/ydb/core/cms/walle_remove_task_adapter.cpp
@@ -13,8 +13,8 @@ class TWalleRemoveTaskAdapter : public TActorBootstrapped<TWalleRemoveTaskAdapte
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::CMS_WALLE_REQ;
- }
-
+ }
+
TWalleRemoveTaskAdapter(TEvCms::TEvWalleRemoveTaskRequest::TPtr &event, const TCmsStatePtr state, TActorId cms)
: RequestEvent(event)
, State(state)
diff --git a/ydb/core/control/immediate_control_board_actor.cpp b/ydb/core/control/immediate_control_board_actor.cpp
index 2d22882fc58..bcb4a3344c3 100644
--- a/ydb/core/control/immediate_control_board_actor.cpp
+++ b/ydb/core/control/immediate_control_board_actor.cpp
@@ -42,8 +42,8 @@ class TImmediateControlActor : public TActorBootstrapped<TImmediateControlActor>
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::IMMEDIATE_CONTROL_BOARD;
- }
-
+ }
+
TImmediateControlActor(TIntrusivePtr<TControlBoard> board,
const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters)
: Board(board)
diff --git a/ydb/core/control/immediate_control_board_actor_ut.cpp b/ydb/core/control/immediate_control_board_actor_ut.cpp
index f008a315752..2753077d936 100644
--- a/ydb/core/control/immediate_control_board_actor_ut.cpp
+++ b/ydb/core/control/immediate_control_board_actor_ut.cpp
@@ -74,7 +74,7 @@ template <class T>
static void Run(i64 instances = 1) {
TVector<TActorId> testIds;
TAppData appData(0, 0, 0, 0, TMap<TString, ui32>(),
- nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr);
try {
Counters = TIntrusivePtr<NMonitoring::TDynamicCounters>(new NMonitoring::TDynamicCounters());
diff --git a/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp b/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp
index 341f1144c74..450d4135c6c 100644
--- a/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp
+++ b/ydb/core/driver_lib/cli_base/cli_cmds_db.cpp
@@ -187,8 +187,8 @@ public:
bool AccessRightsEffective;
bool BackupInfo;
bool Protobuf;
- bool PartitionStats;
- bool Boundaries;
+ bool PartitionStats;
+ bool Boundaries;
virtual void Config(TConfig& config) override {
TClientCommand::Config(config);
@@ -198,8 +198,8 @@ public:
AccessRightsEffective = false;
BackupInfo = false;
Protobuf = false;
- PartitionStats = false;
- Boundaries = false;
+ PartitionStats = false;
+ Boundaries = false;
config.SetFreeArgsNum(1);
SetFreeArgTitle(0, "<PATH>", "Schema path or pathId (e.g. 72075186232623600/1225)");
config.Opts->AddLongOption('t', "tree", "Show schema path tree").NoArgument().SetFlag(&Tree);
@@ -208,37 +208,37 @@ public:
config.Opts->AddLongOption('e', "effacl", "Show effective acl information").NoArgument().SetFlag(&AccessRightsEffective);
config.Opts->AddLongOption('b', "backup", "Show backup information").NoArgument().SetFlag(&BackupInfo);
config.Opts->AddLongOption('P', "protobuf", "Debug print all info as is").NoArgument().SetFlag(&Protobuf);
- config.Opts->AddLongOption('s', "stats", "Return partition stats").NoArgument().SetFlag(&PartitionStats);
- config.Opts->AddLongOption("boundaries", "Return boundaries").NoArgument().SetFlag(&Boundaries);
+ config.Opts->AddLongOption('s', "stats", "Return partition stats").NoArgument().SetFlag(&PartitionStats);
+ config.Opts->AddLongOption("boundaries", "Return boundaries").NoArgument().SetFlag(&Boundaries);
}
virtual void Parse(TConfig& config) override {
TClientCommand::Parse(config);
Request = new NKikimrClient::TSchemeDescribe();
Path = config.ParseResult->GetFreeArgs()[0];
- if (Path.StartsWith('/')) {
- Request->SetPath(Path);
- } else {
- // treat path as PathId like '72075186232623600/1225'
+ if (Path.StartsWith('/')) {
+ Request->SetPath(Path);
+ } else {
+ // treat path as PathId like '72075186232623600/1225'
TVector<TString> fields;
- Split(Path, "/", fields);
- if (fields.size() != 2) {
- Cerr << "Invaid path or pathId: " << Path << Endl;
- exit(1);
- }
- ui64 schemeshardId = FromString<ui64>(fields[0]);
- ui32 pathId = FromString<ui32>(fields[1]);
- Request->SetSchemeshardId(schemeshardId);
- Request->SetPathId(pathId);
- }
- auto options = Request->MutableOptions();
- options->SetBackupInfo(BackupInfo);
- options->SetReturnPartitionStats(PartitionStats);
- options->SetReturnBoundaries(Boundaries);
- options->SetReturnPartitioningInfo(!Boundaries);
- options->SetShowPrivateTable(true);
-
- Protobuf = Protobuf || PartitionStats || Boundaries;
+ Split(Path, "/", fields);
+ if (fields.size() != 2) {
+ Cerr << "Invaid path or pathId: " << Path << Endl;
+ exit(1);
+ }
+ ui64 schemeshardId = FromString<ui64>(fields[0]);
+ ui32 pathId = FromString<ui32>(fields[1]);
+ Request->SetSchemeshardId(schemeshardId);
+ Request->SetPathId(pathId);
+ }
+ auto options = Request->MutableOptions();
+ options->SetBackupInfo(BackupInfo);
+ options->SetReturnPartitionStats(PartitionStats);
+ options->SetReturnBoundaries(Boundaries);
+ options->SetReturnPartitioningInfo(!Boundaries);
+ options->SetShowPrivateTable(true);
+
+ Protobuf = Protobuf || PartitionStats || Boundaries;
}
void PadString(TString& str, size_t size) {
@@ -261,10 +261,10 @@ public:
break;
case NKikimrSchemeOp::EPathTypeColumnStore:
type = "<column store>";
- break;
+ break;
case NKikimrSchemeOp::EPathTypeColumnTable:
type = "<column table>";
- break;
+ break;
case NKikimrSchemeOp::EPathTypeSequence:
type = "<sequence>";
break;
@@ -341,13 +341,13 @@ public:
}
case NKikimrSchemeOp::EPathTypePersQueueGroup: {
const NKikimrSchemeOp::TPersQueueGroupDescription& pqGroup(path.GetPersQueueGroup());
- for (ui32 pi = 0; pi < pqGroup.PartitionsSize(); ++pi) {
- const auto& partition = pqGroup.GetPartitions(pi);
+ for (ui32 pi = 0; pi < pqGroup.PartitionsSize(); ++pi) {
+ const auto& partition = pqGroup.GetPartitions(pi);
TString partitionId = Sprintf(" %6" PRIu32 " ", partition.GetPartitionId());
- Cout << partitionId << "│" << partition.GetTabletId() << Endl;
- }
+ Cout << partitionId << "│" << partition.GetTabletId() << Endl;
+ }
break;
- }
+ }
default:
break;
}
@@ -476,10 +476,10 @@ public:
break;
case NKikimrSchemeOp::EPathTypeColumnStore:
type = "<column store>";
- break;
+ break;
case NKikimrSchemeOp::EPathTypeColumnTable:
type = "<column table>";
- break;
+ break;
case NKikimrSchemeOp::EPathTypeSequence:
type = "<sequence>";
break;
@@ -1330,124 +1330,124 @@ public:
};
-class TClientCommandS3Listing: public TClientCommand {
-public:
- TClientCommandS3Listing()
- : TClientCommand("s3-listing", {}, "S3 bucket listing")
- {}
-
- virtual void Config(TConfig& config) override {
- TClientCommand::Config(config);
- config.Opts->AddLongOption("prefix-columns", "Key prefix with all columns that preceed path-column")
- .RequiredArgument("VALUE").StoreResult(&PrefixColumns);
- config.Opts->AddLongOption("path-prefix", "Path prefix")
- .RequiredArgument("STR").StoreResult(&PathPrefix).DefaultValue("");
- config.Opts->AddLongOption("path-delimiter", "Path delimiter")
- .RequiredArgument("STR").StoreResult(&PathDelimiter).DefaultValue("");
- config.Opts->AddLongOption("start-after", "Start after the specified suffix starting with path column")
- .RequiredArgument("STR").StoreResult(&StartAfter).DefaultValue("");
- config.Opts->AddLongOption("max-keys", "Maximum number of key to return")
- .RequiredArgument("NUM").StoreResult(&MaxKeys).DefaultValue("1000");
- config.Opts->AddLongOption("columns", "Comma separated list of columns to read")
- .RequiredArgument("NAMES").StoreResult(&Columns);
+class TClientCommandS3Listing: public TClientCommand {
+public:
+ TClientCommandS3Listing()
+ : TClientCommand("s3-listing", {}, "S3 bucket listing")
+ {}
+
+ virtual void Config(TConfig& config) override {
+ TClientCommand::Config(config);
+ config.Opts->AddLongOption("prefix-columns", "Key prefix with all columns that preceed path-column")
+ .RequiredArgument("VALUE").StoreResult(&PrefixColumns);
+ config.Opts->AddLongOption("path-prefix", "Path prefix")
+ .RequiredArgument("STR").StoreResult(&PathPrefix).DefaultValue("");
+ config.Opts->AddLongOption("path-delimiter", "Path delimiter")
+ .RequiredArgument("STR").StoreResult(&PathDelimiter).DefaultValue("");
+ config.Opts->AddLongOption("start-after", "Start after the specified suffix starting with path column")
+ .RequiredArgument("STR").StoreResult(&StartAfter).DefaultValue("");
+ config.Opts->AddLongOption("max-keys", "Maximum number of key to return")
+ .RequiredArgument("NUM").StoreResult(&MaxKeys).DefaultValue("1000");
+ config.Opts->AddLongOption("columns", "Comma separated list of columns to read")
+ .RequiredArgument("NAMES").StoreResult(&Columns);
config.SetFreeArgsNum(1);
SetFreeArgTitle(0, "<PATH>", "path to table");
- }
-
- virtual void Parse(TConfig& config) override {
- TClientCommand::Parse(config);
- Table = config.ParseResult->GetFreeArgs()[0];
- }
-
- virtual int Run(TConfig& config) override {
- auto handler = [this](NClient::TKikimr& kikimr) {
- TAutoPtr<NMsgBusProxy::TBusS3ListingRequest> req(new NMsgBusProxy::TBusS3ListingRequest());
-
- req->Record.SetTableName(Table);
- req->Record.SetPathColumnPrefix(PathPrefix);
- req->Record.SetPathColumnDelimiter(PathDelimiter);
- req->Record.SetMaxKeys(MaxKeys);
-
- if (PrefixColumns) {
- auto &value = *req->Record.MutableKeyPrefix()->MutableValue();
- auto &type = *req->Record.MutableKeyPrefix()->MutableType();
- type.SetKind(NKikimrMiniKQL::Tuple);
- for (auto &s : StringSplitter(PrefixColumns).Split(',').ToList<TString>()) {
- *value.AddTuple()->MutableOptional()->MutableText() = s;
- auto &elem = *type.MutableTuple()->AddElement();
- elem.SetKind(NKikimrMiniKQL::Optional);
- auto &item = *elem.MutableOptional()->MutableItem();
- item.SetKind(NKikimrMiniKQL::Data);
- item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
- }
- }
-
- if (StartAfter) {
- auto &value = *req->Record.MutableStartAfterKeySuffix()->MutableValue();
- auto &type = *req->Record.MutableStartAfterKeySuffix()->MutableType();
- type.SetKind(NKikimrMiniKQL::Tuple);
- for (auto &s : StringSplitter(StartAfter).Split(',').ToList<TString>()) {
- *value.AddTuple()->MutableOptional()->MutableText() = s;
- auto &elem = *type.MutableTuple()->AddElement();
- elem.SetKind(NKikimrMiniKQL::Optional);
- auto &item = *elem.MutableOptional()->MutableItem();
- item.SetKind(NKikimrMiniKQL::Data);
- item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
- }
- }
-
- if (Columns) {
- for (const auto& c : StringSplitter(Columns).Split(',').ToList<TString>()) {
- req->Record.AddColumnsToReturn(c);
- }
- }
-
- NThreading::TFuture<NClient::TResult> future = kikimr.ExecuteRequest(req.Release());
-
- return HandleResponse<NClient::TResult>(future, [](const NClient::TResult& result) -> int {
- PrintResponse(result);
- return 0;
- });
- };
- return InvokeThroughKikimr(config, std::move(handler));
- }
-
- static int PrintResponse(const NClient::TResult &result) {
- auto error = result.GetError();
- if (!result.GetError().Success()) {
- Cerr << error.GetCode() << Endl
- << error.GetMessage() << Endl;
- return 1;
- }
-
- const NKikimrClient::TS3ListingResponse& response = result.GetResponse<NMsgBusProxy::TBusS3ListingResponse>().Record;
-
- ui32 status = response.GetStatus();
- if (status != NMsgBusProxy::MSTATUS_OK) {
- TString name;
- TString descr;
- NMsgBusProxy::ExplainResponseStatus(status, name, descr);
- Cerr << name << Endl
- << descr << Endl
- << response.GetDescription() << Endl;
- return 1;
- }
-
- const NKikimrMiniKQL::TResult& res = response.GetResult();
- auto resVal = NClient::TValue::Create(res.GetValue(), res.GetType());
- Cout << resVal.GetValueText<NClient::TFormatJSON>() << Endl;
- return 0;
- }
-
- TString Table;
- TString PrefixColumns;
- TString PathPrefix;
- TString PathDelimiter;
- TString StartAfter;
- ui32 MaxKeys;
- TString Columns;
-};
-
+ }
+
+ virtual void Parse(TConfig& config) override {
+ TClientCommand::Parse(config);
+ Table = config.ParseResult->GetFreeArgs()[0];
+ }
+
+ virtual int Run(TConfig& config) override {
+ auto handler = [this](NClient::TKikimr& kikimr) {
+ TAutoPtr<NMsgBusProxy::TBusS3ListingRequest> req(new NMsgBusProxy::TBusS3ListingRequest());
+
+ req->Record.SetTableName(Table);
+ req->Record.SetPathColumnPrefix(PathPrefix);
+ req->Record.SetPathColumnDelimiter(PathDelimiter);
+ req->Record.SetMaxKeys(MaxKeys);
+
+ if (PrefixColumns) {
+ auto &value = *req->Record.MutableKeyPrefix()->MutableValue();
+ auto &type = *req->Record.MutableKeyPrefix()->MutableType();
+ type.SetKind(NKikimrMiniKQL::Tuple);
+ for (auto &s : StringSplitter(PrefixColumns).Split(',').ToList<TString>()) {
+ *value.AddTuple()->MutableOptional()->MutableText() = s;
+ auto &elem = *type.MutableTuple()->AddElement();
+ elem.SetKind(NKikimrMiniKQL::Optional);
+ auto &item = *elem.MutableOptional()->MutableItem();
+ item.SetKind(NKikimrMiniKQL::Data);
+ item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
+ }
+ }
+
+ if (StartAfter) {
+ auto &value = *req->Record.MutableStartAfterKeySuffix()->MutableValue();
+ auto &type = *req->Record.MutableStartAfterKeySuffix()->MutableType();
+ type.SetKind(NKikimrMiniKQL::Tuple);
+ for (auto &s : StringSplitter(StartAfter).Split(',').ToList<TString>()) {
+ *value.AddTuple()->MutableOptional()->MutableText() = s;
+ auto &elem = *type.MutableTuple()->AddElement();
+ elem.SetKind(NKikimrMiniKQL::Optional);
+ auto &item = *elem.MutableOptional()->MutableItem();
+ item.SetKind(NKikimrMiniKQL::Data);
+ item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
+ }
+ }
+
+ if (Columns) {
+ for (const auto& c : StringSplitter(Columns).Split(',').ToList<TString>()) {
+ req->Record.AddColumnsToReturn(c);
+ }
+ }
+
+ NThreading::TFuture<NClient::TResult> future = kikimr.ExecuteRequest(req.Release());
+
+ return HandleResponse<NClient::TResult>(future, [](const NClient::TResult& result) -> int {
+ PrintResponse(result);
+ return 0;
+ });
+ };
+ return InvokeThroughKikimr(config, std::move(handler));
+ }
+
+ static int PrintResponse(const NClient::TResult &result) {
+ auto error = result.GetError();
+ if (!result.GetError().Success()) {
+ Cerr << error.GetCode() << Endl
+ << error.GetMessage() << Endl;
+ return 1;
+ }
+
+ const NKikimrClient::TS3ListingResponse& response = result.GetResponse<NMsgBusProxy::TBusS3ListingResponse>().Record;
+
+ ui32 status = response.GetStatus();
+ if (status != NMsgBusProxy::MSTATUS_OK) {
+ TString name;
+ TString descr;
+ NMsgBusProxy::ExplainResponseStatus(status, name, descr);
+ Cerr << name << Endl
+ << descr << Endl
+ << response.GetDescription() << Endl;
+ return 1;
+ }
+
+ const NKikimrMiniKQL::TResult& res = response.GetResult();
+ auto resVal = NClient::TValue::Create(res.GetValue(), res.GetType());
+ Cout << resVal.GetValueText<NClient::TFormatJSON>() << Endl;
+ return 0;
+ }
+
+ TString Table;
+ TString PrefixColumns;
+ TString PathPrefix;
+ TString PathDelimiter;
+ TString StartAfter;
+ ui32 MaxKeys;
+ TString Columns;
+};
+
TClientCommandDb::TClientCommandDb()
: TClientCommandTree("db", {}, "KiKiMR DB operations")
{
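
For reference, the restored TClientCommandS3Listing code above converts the comma-separated --prefix-columns argument into a MiniKQL Tuple of Optional<Utf8> cells before sending TBusS3ListingRequest. Below is a minimal sketch of just that conversion, reusing only calls that already appear in the diff (StringSplitter and the NKikimrMiniKQL value/type protobufs); the required headers and surrounding plumbing are assumed rather than shown.

// Sketch only: build a Tuple-of-Optional<Utf8> key prefix from "a,b,c".
// Assumes the NKikimrMiniKQL protobuf headers and util's StringSplitter, as used above.
static void FillUtf8TuplePrefix(const TString& prefixColumns,
                                NKikimrMiniKQL::TValue& value,
                                NKikimrMiniKQL::TType& type) {
    type.SetKind(NKikimrMiniKQL::Tuple);
    for (const auto& s : StringSplitter(prefixColumns).Split(',').ToList<TString>()) {
        // Each element carries the text payload and is typed as Optional<Data<Utf8>>.
        *value.AddTuple()->MutableOptional()->MutableText() = s;
        auto& elem = *type.MutableTuple()->AddElement();
        elem.SetKind(NKikimrMiniKQL::Optional);
        auto& item = *elem.MutableOptional()->MutableItem();
        item.SetKind(NKikimrMiniKQL::Data);
        item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
    }
}

The same shape is used for --start-after in the command above, so the full implementation simply applies this conversion to both KeyPrefix and StartAfterKeySuffix.
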
diff --git a/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp b/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp
index d8740582ae5..361e757ca1d 100644
--- a/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp
+++ b/ydb/core/driver_lib/cli_utils/cli_cmds_tablet.cpp
@@ -161,46 +161,46 @@ public:
}
};
-class TClientCommandTabletSchemeTx : public TClientCommand {
-public:
- TClientCommandTabletSchemeTx()
- : TClientCommand("scheme-tx", { "scheme" })
- {
- }
-
- TAutoPtr<NMsgBusProxy::TBusTabletLocalSchemeTx> Request;
+class TClientCommandTabletSchemeTx : public TClientCommand {
+public:
+ TClientCommandTabletSchemeTx()
+ : TClientCommand("scheme-tx", { "scheme" })
+ {
+ }
+
+ TAutoPtr<NMsgBusProxy::TBusTabletLocalSchemeTx> Request;
TString SchemeChanges;
-
- virtual void Config(TConfig& config) override {
- TClientCommand::Config(config);
+
+ virtual void Config(TConfig& config) override {
+ TClientCommand::Config(config);
config.Opts->AddLongOption("follower", "connect to follower");
- config.Opts->AddLongOption("dry-run", "test changes without applying");
+ config.Opts->AddLongOption("dry-run", "test changes without applying");
config.SetFreeArgsNum(1, 1);
SetFreeArgTitle(0, "<SCHEME CHANGES>", "Scheme changes to apply");
- }
-
- virtual void Parse(TConfig& config) override {
- TClientCommand::Parse(config);
-
- SchemeChanges = config.ParseResult->GetFreeArgs().at(0);
-
- Request = new NMsgBusProxy::TBusTabletLocalSchemeTx;
- Request->Record.SetTabletID(config.TabletId);
- auto* schemeChanges = Request->Record.MutableSchemeChanges();
- if (!google::protobuf::TextFormat::ParseFromString(SchemeChanges, schemeChanges)) {
- ythrow TWithBackTrace<yexception>() << "Invalid scheme changes protobuf passed";
- }
-
+ }
+
+ virtual void Parse(TConfig& config) override {
+ TClientCommand::Parse(config);
+
+ SchemeChanges = config.ParseResult->GetFreeArgs().at(0);
+
+ Request = new NMsgBusProxy::TBusTabletLocalSchemeTx;
+ Request->Record.SetTabletID(config.TabletId);
+ auto* schemeChanges = Request->Record.MutableSchemeChanges();
+ if (!google::protobuf::TextFormat::ParseFromString(SchemeChanges, schemeChanges)) {
+ ythrow TWithBackTrace<yexception>() << "Invalid scheme changes protobuf passed";
+ }
+
if (config.ParseResult->Has("follower"))
Request->Record.SetConnectToFollower(true);
- Request->Record.SetDryRun(config.ParseResult->Has("dry-run"));
- }
-
- virtual int Run(TConfig& config) override {
- return MessageBusCall(config, Request);
- }
-};
-
+ Request->Record.SetDryRun(config.ParseResult->Has("dry-run"));
+ }
+
+ virtual int Run(TConfig& config) override {
+ return MessageBusCall(config, Request);
+ }
+};
+
class TClientCommandDrainNode : public TClientCommand {
public:
TClientCommandDrainNode()
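
The scheme-tx command restored above parses its single free argument as a text-format protobuf and throws if parsing fails. A short sketch of that step under the same assumptions (google::protobuf::TextFormat is the standard protobuf API; ythrow and TWithBackTrace are the util helpers already used in the diff):

#include <google/protobuf/text_format.h>

// Sketch only: parse user-supplied text-format scheme changes into the request protobuf,
// failing with a backtraced exception on malformed input, as the command above does.
template <class TProto>
void ParseSchemeChangesOrThrow(const TString& schemeChangesText, TProto* out) {
    if (!google::protobuf::TextFormat::ParseFromString(schemeChangesText, out)) {
        ythrow TWithBackTrace<yexception>() << "Invalid scheme changes protobuf passed";
    }
}

In the command itself the target message is Request->Record.MutableSchemeChanges(), and the optional --follower and --dry-run flags are copied into the same record afterwards.
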
diff --git a/ydb/core/driver_lib/cli_utils/cli_minikql_compile_and_exec.cpp b/ydb/core/driver_lib/cli_utils/cli_minikql_compile_and_exec.cpp
index 23e9ce0a2ab..b4429d5721b 100644
--- a/ydb/core/driver_lib/cli_utils/cli_minikql_compile_and_exec.cpp
+++ b/ydb/core/driver_lib/cli_utils/cli_minikql_compile_and_exec.cpp
@@ -39,8 +39,8 @@ int CompileAndExecMiniKQL(TCommandConfig &cmdConf, int argc, char **argv) {
config.Parse(argc, argv);
auto functionRegistry = NMiniKQL::CreateFunctionRegistry(NMiniKQL::CreateBuiltinRegistry());
- TAlignedPagePoolCounters countersStub;
- NMiniKQL::TScopedAlloc alloc(countersStub);
+ TAlignedPagePoolCounters countersStub;
+ NMiniKQL::TScopedAlloc alloc(countersStub);
NMiniKQL::TTypeEnvironment TypeEnv(alloc);
TAutoPtr<NMsgBusProxy::TBusRequest> request(new NMsgBusProxy::TBusRequest());
@@ -63,7 +63,7 @@ int CompileAndExecMiniKQL(TCommandConfig &cmdConf, int argc, char **argv) {
}
mkqlTx->SetFlatMKQL(true);
-
+
TAutoPtr<NBus::TBusMessage> reply;
NBus::EMessageStatus msgStatus = config.SyncCall(request, reply);
diff --git a/ydb/core/driver_lib/run/config.h b/ydb/core/driver_lib/run/config.h
index 58ef40dba62..faf1797413e 100644
--- a/ydb/core/driver_lib/run/config.h
+++ b/ydb/core/driver_lib/run/config.h
@@ -19,7 +19,7 @@ union TBasicKikimrServicesMask {
bool EnableStateStorageService:1;
bool EnableLocalService:1;
bool EnableSharedCache:1;
- bool EnableBlobCache:1;
+ bool EnableBlobCache:1;
bool EnableLogger:1;
bool EnableSchedulerActor:1;
bool EnableProfiler:1;
diff --git a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
index fa4e2037bfb..819c1478d19 100644
--- a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
+++ b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
@@ -347,7 +347,7 @@ static TSchedulerConfig CreateSchedulerConfig(const NKikimrConfig::TActorSystemC
Y_VERIFY_DEBUG((resolution & (resolution - 1)) == 0); // resolution must be power of 2
const ui64 spinThreshold = config.HasSpinThreshold() ? config.GetSpinThreshold() : 0;
const ui64 progressThreshold = config.HasProgressThreshold() ? config.GetProgressThreshold() : 10000;
- const bool useSchedulerActor = config.HasUseSchedulerActor() ? config.GetUseSchedulerActor() : false;
+ const bool useSchedulerActor = config.HasUseSchedulerActor() ? config.GetUseSchedulerActor() : false;
return TSchedulerConfig(resolution, spinThreshold, progressThreshold, useSchedulerActor);
}
@@ -467,7 +467,7 @@ static TInterconnectSettings GetInterconnectSettings(const NKikimrConfig::TInter
if (config.HasBatchPeriodDuration()) {
result.BatchPeriod = DurationFromProto(config.GetBatchPeriodDuration());
} else {
- result.BatchPeriod = TDuration();
+ result.BatchPeriod = TDuration();
}
result.BindOnAllAddresses = config.GetBindOnAllAddresses();
@@ -772,7 +772,7 @@ void TImmediateControlBoardInitializer::InitializeServices(NActors::TActorSystem
const NKikimr::TAppData* appData) {
setup->LocalServices.push_back(std::pair<TActorId, TActorSetupCmd>(
MakeIcbId(NodeId),
- TActorSetupCmd(CreateImmediateControlActor(appData->Icb, appData->Counters), TMailboxType::ReadAsFilled, appData->UserPoolId)
+ TActorSetupCmd(CreateImmediateControlActor(appData->Icb, appData->Counters), TMailboxType::ReadAsFilled, appData->UserPoolId)
));
setup->LocalServices.push_back(std::pair<TActorId, TActorSetupCmd>(
TActorId(),
@@ -922,8 +922,8 @@ void TLocalServiceInitializer::InitializeServices(
new TTabletSetupInfo(&NSysView::CreateSysViewProcessor, TMailboxType::ReadAsFilled, appData->UserPoolId, TMailboxType::ReadAsFilled, appData->SystemPoolId));
localConfig->TabletClassInfo[appData->DefaultTabletTypes.TestShard] = TLocalConfig::TTabletClassInfo(
new TTabletSetupInfo(&NTestShard::CreateTestShard, TMailboxType::ReadAsFilled, appData->UserPoolId, TMailboxType::ReadAsFilled, appData->SystemPoolId));
- localConfig->TabletClassInfo[appData->DefaultTabletTypes.ColumnShard] = TLocalConfig::TTabletClassInfo(
- new TTabletSetupInfo(&CreateColumnShard, TMailboxType::ReadAsFilled, appData->UserPoolId, TMailboxType::ReadAsFilled, appData->SystemPoolId));
+ localConfig->TabletClassInfo[appData->DefaultTabletTypes.ColumnShard] = TLocalConfig::TTabletClassInfo(
+ new TTabletSetupInfo(&CreateColumnShard, TMailboxType::ReadAsFilled, appData->UserPoolId, TMailboxType::ReadAsFilled, appData->SystemPoolId));
localConfig->TabletClassInfo[appData->DefaultTabletTypes.SequenceShard] = TLocalConfig::TTabletClassInfo(
new TTabletSetupInfo(&NSequenceShard::CreateSequenceShard, TMailboxType::ReadAsFilled, appData->UserPoolId, TMailboxType::ReadAsFilled, appData->SystemPoolId));
localConfig->TabletClassInfo[appData->DefaultTabletTypes.ReplicationController] = TLocalConfig::TTabletClassInfo(
@@ -995,24 +995,24 @@ void TSharedCacheInitializer::InitializeServices(
TActorSetupCmd(configurator, TMailboxType::HTSwap, appData->UserPoolId));
}
-// TBlobCacheInitializer
-
-TBlobCacheInitializer::TBlobCacheInitializer(const TKikimrRunConfig& runConfig)
- : IKikimrServicesInitializer(runConfig)
-{}
-
-void TBlobCacheInitializer::InitializeServices(
- NActors::TActorSystemSetup* setup,
- const NKikimr::TAppData* appData) {
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> tabletGroup = GetServiceCounters(appData->Counters, "tablets");
- TIntrusivePtr<NMonitoring::TDynamicCounters> blobCacheGroup = tabletGroup->GetSubgroup("type", "BLOB_CACHE");
-
- static const constexpr ui64 DEFAULT_CACHE_SIZE_BYTES = 1000ull << 20;
+// TBlobCacheInitializer
+
+TBlobCacheInitializer::TBlobCacheInitializer(const TKikimrRunConfig& runConfig)
+ : IKikimrServicesInitializer(runConfig)
+{}
+
+void TBlobCacheInitializer::InitializeServices(
+ NActors::TActorSystemSetup* setup,
+ const NKikimr::TAppData* appData) {
+
+ TIntrusivePtr<NMonitoring::TDynamicCounters> tabletGroup = GetServiceCounters(appData->Counters, "tablets");
+ TIntrusivePtr<NMonitoring::TDynamicCounters> blobCacheGroup = tabletGroup->GetSubgroup("type", "BLOB_CACHE");
+
+ static const constexpr ui64 DEFAULT_CACHE_SIZE_BYTES = 1000ull << 20;
setup->LocalServices.push_back(std::pair<TActorId, TActorSetupCmd>(NBlobCache::MakeBlobCacheServiceId(),
TActorSetupCmd(NBlobCache::CreateBlobCache(DEFAULT_CACHE_SIZE_BYTES, blobCacheGroup), TMailboxType::ReadAsFilled, appData->UserPoolId)));
-}
-
+}
+
// TLoggerInitializer
TLoggerInitializer::TLoggerInitializer(const TKikimrRunConfig& runConfig,
@@ -1059,7 +1059,7 @@ void TSchedulerActorInitializer::InitializeServices(
auto& systemConfig = Config.GetActorSystemConfig();
NActors::IActor *schedulerActor = CreateSchedulerActor(CreateSchedulerConfig(systemConfig.GetScheduler()));
if (schedulerActor) {
- NActors::TActorSetupCmd schedulerActorCmd(schedulerActor, NActors::TMailboxType::ReadAsFilled, appData->SystemPoolId);
+ NActors::TActorSetupCmd schedulerActorCmd(schedulerActor, NActors::TMailboxType::ReadAsFilled, appData->SystemPoolId);
setup->LocalServices.emplace_back(MakeSchedulerActorId(), schedulerActorCmd);
}
}
@@ -1401,7 +1401,7 @@ void TMiniKQLCompileServiceInitializer::InitializeServices(NActors::TActorSystem
setup->LocalServices.push_back(std::pair<TActorId, TActorSetupCmd>(MakeMiniKQLCompileServiceID(),
TActorSetupCmd(compileService,
TMailboxType::ReadAsFilled,
- appData->UserPoolId)));
+ appData->UserPoolId)));
}
static bool IsServiceInitialized(NActors::TActorSystemSetup* setup, TActorId service)
@@ -1464,7 +1464,7 @@ void TSecurityServicesInitializer::InitializeServices(NActors::TActorSystemSetup
}
if (ticketParser) {
setup->LocalServices.push_back(std::pair<TActorId, TActorSetupCmd>(MakeTicketParserID(),
- TActorSetupCmd(ticketParser, TMailboxType::HTSwap, appData->UserPoolId)));
+ TActorSetupCmd(ticketParser, TMailboxType::HTSwap, appData->UserPoolId)));
}
}
}
@@ -1511,13 +1511,13 @@ void TGRpcServicesInitializer::InitializeServices(NActors::TActorSystemSetup* se
setup->LocalServices.push_back(std::pair<TActorId,
TActorSetupCmd>(NGRpcService::CreateGRpcRequestProxyId(),
TActorSetupCmd(grpcReqProxy, TMailboxType::ReadAsFilled,
- appData->UserPoolId)));
+ appData->UserPoolId)));
}
if (!IsServiceInitialized(setup, NKesus::MakeKesusProxyServiceId())) {
if (IActor* proxy = NKesus::CreateKesusProxyService()) {
setup->LocalServices.emplace_back(NKesus::MakeKesusProxyServiceId(),
- TActorSetupCmd(proxy, TMailboxType::ReadAsFilled, appData->UserPoolId));
+ TActorSetupCmd(proxy, TMailboxType::ReadAsFilled, appData->UserPoolId));
}
}
@@ -1525,12 +1525,12 @@ void TGRpcServicesInitializer::InitializeServices(NActors::TActorSystemSetup* se
// logical copy from TKikimrRunner::InitializeGrpc
const auto &config = Config.GetGRpcConfig();
- if (appData->Mon) {
- setup->LocalServices.emplace_back(NGRpcService::GrpcMonServiceId(),
- TActorSetupCmd(NGRpcService::CreateGrpcMonService(), TMailboxType::ReadAsFilled, appData->UserPoolId)
- );
- }
-
+ if (appData->Mon) {
+ setup->LocalServices.emplace_back(NGRpcService::GrpcMonServiceId(),
+ TActorSetupCmd(NGRpcService::CreateGrpcMonService(), TMailboxType::ReadAsFilled, appData->UserPoolId)
+ );
+ }
+
TVector<TString> rootDomains;
for (auto &domain : appData->DomainsInfo->Domains)
rootDomains.emplace_back("/" + domain.second->Name);
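
CreateSchedulerConfig in the first hunk of this file checks that the scheduler resolution is a power of two with the classic (x & (x - 1)) == 0 bit trick before deriving the spin and progress thresholds. A tiny standalone illustration of that check (plain C++, slightly stricter than the Y_VERIFY_DEBUG above in that it also rejects zero):

#include <cstdint>

// Clearing the lowest set bit leaves zero only when exactly one bit was set,
// so this is true for 1, 2, 4, 8, ... and false for 0 and all non-powers of two.
constexpr bool IsPowerOfTwo(std::uint64_t x) {
    return x != 0 && (x & (x - 1)) == 0;
}

static_assert(IsPowerOfTwo(512) && !IsPowerOfTwo(10000), "resolution sanity check");
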
diff --git a/ydb/core/driver_lib/run/kikimr_services_initializers.h b/ydb/core/driver_lib/run/kikimr_services_initializers.h
index c3ce79914c3..407ce1bb7ba 100644
--- a/ydb/core/driver_lib/run/kikimr_services_initializers.h
+++ b/ydb/core/driver_lib/run/kikimr_services_initializers.h
@@ -43,9 +43,9 @@ public:
// base, nameservice, interconnect
class TBasicServicesInitializer : public IKikimrServicesInitializer {
static IExecutorPool*
- CreateExecutorPool(const NKikimrConfig::TActorSystemConfig::TExecutor& poolConfig,
+ CreateExecutorPool(const NKikimrConfig::TActorSystemConfig::TExecutor& poolConfig,
const NKikimrConfig::TActorSystemConfig& systemConfig,
- ui32 poolId, ui32 maxActivityType);
+ ui32 poolId, ui32 maxActivityType);
static ISchedulerThread* CreateScheduler(const NKikimrConfig::TActorSystemConfig::TScheduler &config);
@@ -91,13 +91,13 @@ public:
void InitializeServices(NActors::TActorSystemSetup *setup, const NKikimr::TAppData *appData) override;
};
-class TBlobCacheInitializer : public IKikimrServicesInitializer {
-public:
- TBlobCacheInitializer(const TKikimrRunConfig& runConfig);
-
- void InitializeServices(NActors::TActorSystemSetup *setup, const NKikimr::TAppData *appData) override;
-};
-
+class TBlobCacheInitializer : public IKikimrServicesInitializer {
+public:
+ TBlobCacheInitializer(const TKikimrRunConfig& runConfig);
+
+ void InitializeServices(NActors::TActorSystemSetup *setup, const NKikimr::TAppData *appData) override;
+};
+
class TLoggerInitializer : public IKikimrServicesInitializer {
TIntrusivePtr<NActors::NLog::TSettings> LogSettings;
std::shared_ptr<TLogBackend> LogBackend;
diff --git a/ydb/core/driver_lib/run/main.cpp b/ydb/core/driver_lib/run/main.cpp
index 730f5c1c21e..f0a9315685b 100644
--- a/ydb/core/driver_lib/run/main.cpp
+++ b/ydb/core/driver_lib/run/main.cpp
@@ -14,9 +14,9 @@
#include <ydb/core/driver_lib/run/config_parser.h>
#include <ydb/core/driver_lib/run/run.h>
-// allocator info
+// allocator info
#include <library/cpp/malloc/api/malloc.h>
-
+
#ifndef _win_
#include <sys/mman.h>
#endif
@@ -24,33 +24,33 @@
namespace NKikimr {
int MainRun(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories> factories) {
-#ifdef _win32_
- WSADATA dummy;
- WSAStartup(MAKEWORD(2, 2), &dummy);
-#endif
-
- TKikimrRunner::SetSignalHandlers();
+#ifdef _win32_
+ WSADATA dummy;
+ WSAStartup(MAKEWORD(2, 2), &dummy);
+#endif
+
+ TKikimrRunner::SetSignalHandlers();
Cout << "Starting Kikimr r" << GetArcadiaLastChange()
- << " built by " << GetProgramBuildUser() << Endl;
-
+ << " built by " << GetProgramBuildUser() << Endl;
+
TIntrusivePtr<TKikimrRunner> runner = TKikimrRunner::CreateKikimrRunner(runConfig, std::move(factories));
- if (runner) {
- runner->KikimrStart();
- runner->BusyLoop();
- // exit busy loop by a signal
- Cout << "Shutting Kikimr down" << Endl;
- runner->KikimrStop(false);
- }
-
- return 0;
-}
-
-
- void PrintAllocatorInfoAndExit() {
- Cout << "linked with malloc: " << NMalloc::MallocInfo().Name << Endl;
- exit(0);
- }
-
+ if (runner) {
+ runner->KikimrStart();
+ runner->BusyLoop();
+ // exit busy loop by a signal
+ Cout << "Shutting Kikimr down" << Endl;
+ runner->KikimrStop(false);
+ }
+
+ return 0;
+}
+
+
+ void PrintAllocatorInfoAndExit() {
+ Cout << "linked with malloc: " << NMalloc::MallocInfo().Name << Endl;
+ exit(0);
+ }
+
int Main(int argc, char **argv, std::shared_ptr<TModuleFactories> factories) {
#ifndef _win_
mlockall(MCL_CURRENT);
@@ -59,7 +59,7 @@ int MainRun(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories>
using TDriverModeParser = TCliCommands<EDriverMode>;
NKikimrConfig::TAppConfig appConfig;
- TCommandConfig cmdConf;
+ TCommandConfig cmdConf;
TKikimrRunConfig runConfig(appConfig);
TRunCommandConfigParser configParser(runConfig);
@@ -78,7 +78,7 @@ int MainRun(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories>
opts.AddLongOption('t', "time", "Show request execution time").NoArgument();
opts.AddLongOption('o', "progress", "Show progress of long requests").NoArgument();
opts.AddLongOption(0, "allocator-info", "Print the name of allocator linked to the binary and exit")
- .NoArgument().Handler(&PrintAllocatorInfoAndExit);
+ .NoArgument().Handler(&PrintAllocatorInfoAndExit);
opts.SetFreeArgsMin(1);
opts.SetFreeArgTitle(0, "<command>", TDriverModeParser::CommandsCsv());
opts.SetCmdLineDescr(NDriverClient::NewClientCommandsDescription(factories));
@@ -150,27 +150,27 @@ int MainRun(const TKikimrRunConfig& runConfig, std::shared_ptr<TModuleFactories>
}
} // NKikimr
-namespace {
-std::terminate_handler defaultTerminateHandler;
-}
-
-void KikimrTerminateHandler() {
- Cerr << "======= terminate() call stack ========\n";
- FormatBackTrace(&Cerr);
- Cerr << "=======================================\n";
-
- auto oldHandler = defaultTerminateHandler;
- if (oldHandler)
- oldHandler();
- else
- abort();
-}
-
-void SetupTerminateHandler() {
- defaultTerminateHandler = std::get_terminate();
- std::set_terminate(KikimrTerminateHandler);
-}
-
+namespace {
+std::terminate_handler defaultTerminateHandler;
+}
+
+void KikimrTerminateHandler() {
+ Cerr << "======= terminate() call stack ========\n";
+ FormatBackTrace(&Cerr);
+ Cerr << "=======================================\n";
+
+ auto oldHandler = defaultTerminateHandler;
+ if (oldHandler)
+ oldHandler();
+ else
+ abort();
+}
+
+void SetupTerminateHandler() {
+ defaultTerminateHandler = std::get_terminate();
+ std::set_terminate(KikimrTerminateHandler);
+}
+
int ParameterizedMain(int argc, char **argv, std::shared_ptr<NKikimr::TModuleFactories> factories) {
try {
return NKikimr::Main(argc, argv, std::move(factories));
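
main.cpp above restores a custom std::terminate handler that prints a backtrace and then chains to whatever handler was previously installed, falling back to abort(). A compact standard-C++ sketch of the same chaining pattern (std::get_terminate and std::set_terminate are standard; the util FormatBackTrace call from the diff is replaced by a plain marker here):

#include <cstdlib>
#include <exception>
#include <iostream>

namespace {
std::terminate_handler previousHandler = nullptr;
}

// The real handler dumps a backtrace between the two separators; this sketch
// only prints them, then defers to the previously installed handler.
void TerminateWithTrace() {
    std::cerr << "======= terminate() call stack ========\n";
    std::cerr << "=======================================\n";
    if (previousHandler)
        previousHandler();
    else
        std::abort();
}

void SetupTerminateHandlerSketch() {
    previousHandler = std::get_terminate();
    std::set_terminate(TerminateWithTrace);
}

Calling SetupTerminateHandlerSketch() early in main() is enough; any later uncaught exception or direct std::terminate() call then goes through TerminateWithTrace.
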
diff --git a/ydb/core/driver_lib/run/run.cpp b/ydb/core/driver_lib/run/run.cpp
index cd45d11bd57..a4f74aa4e0f 100644
--- a/ydb/core/driver_lib/run/run.cpp
+++ b/ydb/core/driver_lib/run/run.cpp
@@ -183,10 +183,10 @@ public:
for (const NKikimrConfig::TDomainsConfig::THiveConfig &hiveConfig : Config.GetDomainsConfig().GetHiveConfig()) {
appData->DomainsInfo->AddHive(hiveConfig.GetHiveUid(), hiveConfig.GetHive());
}
-
- for (const NKikimrConfig::TDomainsConfig::TNamedCompactionPolicy &policy : Config.GetDomainsConfig().GetNamedCompactionPolicy()) {
- appData->DomainsInfo->AddCompactionPolicy(policy.GetName(), new NLocalDb::TCompactionPolicy(policy.GetPolicy()));
- }
+
+ for (const NKikimrConfig::TDomainsConfig::TNamedCompactionPolicy &policy : Config.GetDomainsConfig().GetNamedCompactionPolicy()) {
+ appData->DomainsInfo->AddCompactionPolicy(policy.GetName(), new NLocalDb::TCompactionPolicy(policy.GetPolicy()));
+ }
const auto& securityConfig(Config.GetDomainsConfig().GetSecurityConfig());
@@ -515,11 +515,11 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
names["pqv1"] = &hasPQv1;
bool hasPQCD = false;
names["pqcd"] = &hasPQCD;
- bool hasS3Internal = false;
+ bool hasS3Internal = false;
names["s3_internal"] = &hasS3Internal;
- bool hasExperimental = false;
+ bool hasExperimental = false;
names["experimental"] = &hasExperimental;
- bool hasClickhouseInternal = services.empty();
+ bool hasClickhouseInternal = services.empty();
names["clickhouse_internal"] = &hasClickhouseInternal;
bool hasRateLimiter = false;
names["rate_limiter"] = &hasRateLimiter;
@@ -535,7 +535,7 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
names["datastreams"] = &hasDataStreams;
bool hasYandexQuery = false;
names["yq"] = &hasYandexQuery;
- bool hasLogStore = false;
+ bool hasLogStore = false;
names["logstore"] = &hasLogStore;
bool hasAuth = services.empty();
names["auth"] = &hasAuth;
@@ -629,19 +629,19 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
server.AddService(new NGRpcService::TGRpcYdbTableService(ActorSystem.Get(), Counters, grpcRequestProxyId));
}
- if (hasExperimental) {
- server.AddService(new NGRpcService::TGRpcYdbExperimentalService(ActorSystem.Get(), Counters,
- grpcRequestProxyId));
- }
-
- if (hasClickhouseInternal) {
- server.AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(ActorSystem.Get(), Counters,
- AppData->InFlightLimiterRegistry, grpcRequestProxyId));
- }
-
- if (hasS3Internal) {
- server.AddService(new NGRpcService::TGRpcYdbS3InternalService(ActorSystem.Get(), Counters,
- grpcRequestProxyId));
+ if (hasExperimental) {
+ server.AddService(new NGRpcService::TGRpcYdbExperimentalService(ActorSystem.Get(), Counters,
+ grpcRequestProxyId));
+ }
+
+ if (hasClickhouseInternal) {
+ server.AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(ActorSystem.Get(), Counters,
+ AppData->InFlightLimiterRegistry, grpcRequestProxyId));
+ }
+
+ if (hasS3Internal) {
+ server.AddService(new NGRpcService::TGRpcYdbS3InternalService(ActorSystem.Get(), Counters,
+ grpcRequestProxyId));
}
if (hasScripting) {
@@ -717,10 +717,10 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
server.AddService(new NGRpcService::TGRpcYandexQueryService(ActorSystem.Get(), Counters, grpcRequestProxyId));
server.AddService(new NGRpcService::TGRpcYqPrivateTaskService(ActorSystem.Get(), Counters, grpcRequestProxyId));
}
-
- if (hasLogStore) {
- server.AddService(new NGRpcService::TGRpcYdbLogStoreService(ActorSystem.Get(), Counters, grpcRequestProxyId));
- }
+
+ if (hasLogStore) {
+ server.AddService(new NGRpcService::TGRpcYdbLogStoreService(ActorSystem.Get(), Counters, grpcRequestProxyId));
+ }
if (ModuleFactories) {
for (const auto& service : ModuleFactories->GrpcServiceFactory.Create(enabled, disabled, ActorSystem.Get(), Counters, grpcRequestProxyId)) {
@@ -738,7 +738,7 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
opts.SetPort(grpcConfig.GetPort());
opts.SetWorkerThreads(grpcConfig.GetWorkerThreads());
opts.SetGRpcMemoryQuotaBytes(grpcConfig.GetGRpcMemoryQuotaBytes());
- opts.SetMaxMessageSize(grpcConfig.HasMaxMessageSize() ? grpcConfig.GetMaxMessageSize() : DEFAULT_GRPC_MESSAGE_SIZE_LIMIT);
+ opts.SetMaxMessageSize(grpcConfig.HasMaxMessageSize() ? grpcConfig.GetMaxMessageSize() : DEFAULT_GRPC_MESSAGE_SIZE_LIMIT);
opts.SetMaxGlobalRequestInFlight(grpcConfig.GetMaxInFlight());
opts.SetLogger(NGrpc::CreateActorSystemLogger(*ActorSystem.Get(), NKikimrServices::GRPC_SERVER));
@@ -848,7 +848,7 @@ void TKikimrRunner::InitializeAppData(const TKikimrRunConfig& runConfig)
servicePools,
TypeRegistry.Get(),
FunctionRegistry.Get(),
- FormatFactory.Get(),
+ FormatFactory.Get(),
&KikimrShouldContinue));
AppData->DataShardExportFactory = ModuleFactories ? ModuleFactories->DataShardExportFactory.get() : nullptr;
AppData->SqsEventsWriterFactory = ModuleFactories ? ModuleFactories->SqsEventsWriterFactory.get() : nullptr;
@@ -981,10 +981,10 @@ void TKikimrRunner::InitializeLogSettings(const TKikimrRunConfig& runConfig)
Y_FAIL("Unknown log format: \"%s\"", logConfig.GetFormat().data());
}
- if (logConfig.HasAllowDropEntries()) {
- LogSettings->SetAllowDrop(logConfig.GetAllowDropEntries());
- }
-
+ if (logConfig.HasAllowDropEntries()) {
+ LogSettings->SetAllowDrop(logConfig.GetAllowDropEntries());
+ }
+
if (logConfig.HasUseLocalTimestamps()) {
LogSettings->SetUseLocalTimestamps(logConfig.GetUseLocalTimestamps());
}
@@ -1097,20 +1097,20 @@ void TKikimrRunner::InitializeActorSystem(
false,
ActorSystem.Get(),
NInterconnect::MakeInterconnectMonActorId(runConfig.NodeId));
-
- if (servicesMask.EnableGRpcService) {
- Monitoring->RegisterActorPage(nullptr, "grpc", "GRPC", false, ActorSystem.Get(), NGRpcService::GrpcMonServiceId());
- }
- }
+
+ if (servicesMask.EnableGRpcService) {
+ Monitoring->RegisterActorPage(nullptr, "grpc", "GRPC", false, ActorSystem.Get(), NGRpcService::GrpcMonServiceId());
+ }
+ }
if (servicesMask.EnableSqs && AppData->SqsConfig.GetEnableSqs()) {
if (AppData->SqsConfig.GetHttpServerConfig().GetPort()) {
SqsHttp.Reset(new NSQS::TAsyncHttpServer(AppData->SqsConfig));
- SqsHttp->Initialize(ActorSystem.Get(),
+ SqsHttp->Initialize(ActorSystem.Get(),
GetServiceCounters(AppData->Counters, "sqs"),
GetServiceCounters(AppData->Counters, "ymq_public"),
- AppData->UserPoolId);
+ AppData->UserPoolId);
}
}
@@ -1165,9 +1165,9 @@ TIntrusivePtr<TServiceInitializersList> TKikimrRunner::CreateServiceInitializers
if (serviceMask.EnableSharedCache) {
sil->AddServiceInitializer(new TSharedCacheInitializer(runConfig));
}
- if (serviceMask.EnableBlobCache) {
- sil->AddServiceInitializer(new TBlobCacheInitializer(runConfig));
- }
+ if (serviceMask.EnableBlobCache) {
+ sil->AddServiceInitializer(new TBlobCacheInitializer(runConfig));
+ }
if (serviceMask.EnableLogger) {
sil->AddServiceInitializer(new TLoggerInitializer(runConfig, LogSettings, LogBackend));
}
@@ -1377,10 +1377,10 @@ void TKikimrRunner::KikimrStart() {
}
}
- if (SqsHttp) {
- SqsHttp->Start();
- }
-
+ if (SqsHttp) {
+ SqsHttp->Start();
+ }
+
EnableActorCallstack();
ThreadSigmask(SIG_UNBLOCK);
}
@@ -1433,10 +1433,10 @@ void TKikimrRunner::KikimrStop(bool graceful) {
ActorSystem->Send(new IEventHandle(MakeInterconnectListenerActorId(true), {}, new TEvents::TEvPoisonPill));
}
- if (SqsHttp) {
- SqsHttp->Shutdown();
- }
-
+ if (SqsHttp) {
+ SqsHttp->Shutdown();
+ }
+
if (YdbDriver) {
YdbDriver->Stop(true);
}
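
The run.cpp hunks above restore several optional gRPC services (s3_internal, experimental, clickhouse_internal, logstore) that all follow one pattern: a bool flag per service, a names map from service name to the flag's address, and a later if (hasX) server.AddService(...) registration. A minimal standalone sketch of that gating pattern (plain C++/STL; the service names come from the diff, everything else is an illustrative stand-in):

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
    // Requested service names, e.g. parsed from the gRPC config.
    std::vector<std::string> services = {"s3_internal", "logstore"};

    bool hasS3Internal = false;
    bool hasExperimental = false;
    bool hasLogStore = false;
    // Mirrors `bool hasClickhouseInternal = services.empty();` above:
    // an empty service list enables a few services by default.
    bool hasClickhouseInternal = services.empty();

    std::unordered_map<std::string, bool*> names = {
        {"s3_internal", &hasS3Internal},
        {"experimental", &hasExperimental},
        {"logstore", &hasLogStore},
        {"clickhouse_internal", &hasClickhouseInternal},
    };

    for (const auto& name : services) {
        auto it = names.find(name);
        if (it != names.end())
            *it->second = true;   // flip the matching flag
    }

    // Registration is then gated on the flags, e.g. `if (hasLogStore) AddService(...)`.
    std::cout << "s3_internal=" << hasS3Internal << " logstore=" << hasLogStore << "\n";
}
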
diff --git a/ydb/core/driver_lib/run/run.h b/ydb/core/driver_lib/run/run.h
index ae081d4a08b..dfd93a24e71 100644
--- a/ydb/core/driver_lib/run/run.h
+++ b/ydb/core/driver_lib/run/run.h
@@ -32,7 +32,7 @@ protected:
std::shared_ptr<TModuleFactories> ModuleFactories;
TIntrusivePtr<NScheme::TTypeRegistry> TypeRegistry;
TIntrusivePtr<NMiniKQL::IMutableFunctionRegistry> FunctionRegistry;
- TIntrusivePtr<TFormatFactory> FormatFactory;
+ TIntrusivePtr<TFormatFactory> FormatFactory;
NYq::IYqSharedResources::TPtr YqSharedResources;
TAutoPtr<TMon> Monitoring;
@@ -67,7 +67,7 @@ protected:
virtual ~TKikimrRunner();
- virtual void InitializeRegistries(const TKikimrRunConfig& runConfig);
+ virtual void InitializeRegistries(const TKikimrRunConfig& runConfig);
void InitializeAllocator(const TKikimrRunConfig& runConfig);
diff --git a/ydb/core/driver_lib/run/version.cpp b/ydb/core/driver_lib/run/version.cpp
index 94cc430e699..b8f2ef8cafd 100644
--- a/ydb/core/driver_lib/run/version.cpp
+++ b/ydb/core/driver_lib/run/version.cpp
@@ -14,7 +14,7 @@ TMaybe<NActors::TInterconnectProxyCommon::TVersionInfo> VERSION = NActors::TInte
TString GetBranchName(TString url) {
bool found = false;
- for (const char *prefix : {"arcadia.yandex.ru/arc/", "arcadia/arc/", "arcadia.arc.yandex.ru/arc/"}) {
+ for (const char *prefix : {"arcadia.yandex.ru/arc/", "arcadia/arc/", "arcadia.arc.yandex.ru/arc/"}) {
const char *base = url.data();
const char *p = strstr(base, prefix);
if (p) {
diff --git a/ydb/core/engine/kikimr_program_builder.cpp b/ydb/core/engine/kikimr_program_builder.cpp
index 2f0599bacd4..fd32a772ee9 100644
--- a/ydb/core/engine/kikimr_program_builder.cpp
+++ b/ydb/core/engine/kikimr_program_builder.cpp
@@ -571,7 +571,7 @@ TRuntimeNode TKikimrProgramBuilder::Bind(TRuntimeNode program, TRuntimeNode para
auto itemValue = list->GetItems()[i];
bool wereChanges;
auto newValue = SinglePassVisitCallables(lambdaRootNode, explorer,
- [&](TInternName name) {
+ [&](TInternName name) {
Y_UNUSED(name);
return [&](TCallable& callable, const TTypeEnvironment& env) {
Y_UNUSED(env);
@@ -580,7 +580,7 @@ TRuntimeNode TKikimrProgramBuilder::Bind(TRuntimeNode program, TRuntimeNode para
}
if (callable.GetType()->GetNameStr() == arg) {
- TCallableBuilder itemCallableBuilder(Env, arg.Str(), callable.GetType()->GetReturnType(), true);
+ TCallableBuilder itemCallableBuilder(Env, arg.Str(), callable.GetType()->GetReturnType(), true);
TRuntimeNode itemArg(itemCallableBuilder.Build(), false);
return itemArg;
}
diff --git a/ydb/core/engine/minikql/flat_local_minikql_host.h b/ydb/core/engine/minikql/flat_local_minikql_host.h
index 8f9aecc2c97..edfd1822869 100644
--- a/ydb/core/engine/minikql/flat_local_minikql_host.h
+++ b/ydb/core/engine/minikql/flat_local_minikql_host.h
@@ -13,7 +13,7 @@ public:
TEngineHostCounters& counters,
const TEngineHostSettings& settings,
const TMiniKQLFactory* factory)
- : TEngineHost(db, counters, settings)
+ : TEngineHost(db, counters, settings)
, Factory(factory)
{}
diff --git a/ydb/core/engine/minikql/flat_local_tx_factory.cpp b/ydb/core/engine/minikql/flat_local_tx_factory.cpp
index 3b780ce8f23..d23f4d8bc73 100644
--- a/ydb/core/engine/minikql/flat_local_tx_factory.cpp
+++ b/ydb/core/engine/minikql/flat_local_tx_factory.cpp
@@ -2,7 +2,7 @@
#include "flat_local_minikql_program.h"
#include "flat_local_tx_minikql.h"
-#include "flat_local_tx_read_columns.h"
+#include "flat_local_tx_read_columns.h"
#include "flat_local_tx_scheme.h"
namespace NKikimr {
@@ -23,10 +23,10 @@ TAutoPtr<ITransaction> TMiniKQLFactory::Make(TEvTablet::TEvLocalSchemeTx::TPtr &
}
TAutoPtr<ITransaction> TMiniKQLFactory::Make(TEvTablet::TEvLocalReadColumns::TPtr &ev)
-{
- return new TFlatLocalReadColumns(ev->Sender, ev);
-}
-
+{
+ return new TFlatLocalReadColumns(ev->Sender, ev);
+}
+
TRowVersion TMiniKQLFactory::GetWriteVersion(const TTableId& tableId) const
{
Y_UNUSED(tableId);
diff --git a/ydb/core/engine/minikql/flat_local_tx_minikql.h b/ydb/core/engine/minikql/flat_local_tx_minikql.h
index 5f8fd104f4a..0feabf20e68 100644
--- a/ydb/core/engine/minikql/flat_local_tx_minikql.h
+++ b/ydb/core/engine/minikql/flat_local_tx_minikql.h
@@ -25,33 +25,33 @@ public:
results.reserve(tables.size());
for (auto& table : tables) {
- TTableResult result(TTableResult::Ok);
-
+ TTableResult result(TTableResult::Ok);
+
const ui32* tableId = Scheme.TableNames.FindPtr(table.TableName);
- if (!tableId) {
- result = TTableResult(TTableResult::Error, "Unknown table " + table.TableName);
- } else {
+ if (!tableId) {
+ result = TTableResult(TTableResult::Error, "Unknown table " + table.TableName);
+ } else {
const auto *tableInfo = Scheme.Tables.FindPtr(*tableId);
- Y_VERIFY(tableInfo);
+ Y_VERIFY(tableInfo);
- result.KeyColumnCount = tableInfo->KeyColumns.size();
- result.Table = table;
- result.TableId = new TTableId(TabletId, *tableId);
+ result.KeyColumnCount = tableInfo->KeyColumns.size();
+ result.Table = table;
+ result.TableId = new TTableId(TabletId, *tableId);
- for (const auto& column : table.ColumnNames) {
- const ui32* columnId = tableInfo->ColumnNames.FindPtr(column);
- if (!columnId) {
- result = TTableResult(TTableResult::Error, "Unknown column " + table.TableName + ":" + column);
- break;
- }
+ for (const auto& column : table.ColumnNames) {
+ const ui32* columnId = tableInfo->ColumnNames.FindPtr(column);
+ if (!columnId) {
+ result = TTableResult(TTableResult::Error, "Unknown column " + table.TableName + ":" + column);
+ break;
+ }
const auto *columnInfo = tableInfo->Columns.FindPtr(*columnId);
- Y_VERIFY(columnInfo);
+ Y_VERIFY(columnInfo);
- auto insertResult = result.Columns.insert(std::make_pair(column, IDbSchemeResolver::TTableResult::TColumn
+ auto insertResult = result.Columns.insert(std::make_pair(column, IDbSchemeResolver::TTableResult::TColumn
{*columnId, (i32)columnInfo->KeyOrder, columnInfo->PType, 0}));
- Y_VERIFY(insertResult.second);
- }
+ Y_VERIFY(insertResult.second);
+ }
}
results.push_back(result);
@@ -102,7 +102,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
return true;
}
- bool PrepareParams(TTransactionContext &txc, const TAppData *appData) {
+ bool PrepareParams(TTransactionContext &txc, const TAppData *appData) {
Y_UNUSED(txc);
if (SourceProgram.Params.Binary) {
SerializedMiniKQLParams = SourceProgram.Program.Binary;
@@ -119,10 +119,10 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
return false;
}
- TAlignedPagePoolCounters counters(appData->Counters, "local_tx");
+ TAlignedPagePoolCounters counters(appData->Counters, "local_tx");
TScopedAlloc alloc(counters, appData->FunctionRegistry->SupportsSizedAllocators());
TTypeEnvironment typeEnv(alloc);
- auto future = ConvertToMiniKQL(expr, appData->FunctionRegistry, &typeEnv, nullptr);
+ auto future = ConvertToMiniKQL(expr, appData->FunctionRegistry, &typeEnv, nullptr);
future.Wait();
NYql::TConvertResult compileResult = future.GetValue();
@@ -135,7 +135,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
return true;
}
- bool PrepareProgram(TTransactionContext &txc, const TAppData *appData) {
+ bool PrepareProgram(TTransactionContext &txc, const TAppData *appData) {
// simple case - everything prepared for us and no params
if (SourceProgram.Program.Binary) {
SerializedMiniKQLProgram = SourceProgram.Program.Binary;
@@ -152,18 +152,18 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
return false;
}
- TAlignedPagePoolCounters counters(appData->Counters, "local_tx");
+ TAlignedPagePoolCounters counters(appData->Counters, "local_tx");
TScopedAlloc alloc(counters, appData->FunctionRegistry->SupportsSizedAllocators());
TTypeEnvironment typeEnv(alloc);
TLocalDbSchemeResolver dbResolver(txc.DB.GetScheme(), TabletId);
const auto unguard = Unguard(alloc);
- auto future = ConvertToMiniKQL(expr, appData->FunctionRegistry, &typeEnv, &dbResolver);
+ auto future = ConvertToMiniKQL(expr, appData->FunctionRegistry, &typeEnv, &dbResolver);
future.Wait();
NYql::TConvertResult compileResult = future.GetValue();
- if (!compileResult.Errors.Empty()) {
+ if (!compileResult.Errors.Empty()) {
ProgramCompileResult->Errors.AddIssues(compileResult.Errors);
- return false;
- }
+ return false;
+ }
ProgramCompileResult->CompiledProgram = SerializeRuntimeNode(compileResult.Node, typeEnv);
SerializedMiniKQLProgram = ProgramCompileResult->CompiledProgram;
@@ -179,7 +179,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
bool MakeCompileResponse(const TActorContext &ctx) {
TAutoPtr<TEvTablet::TEvLocalMKQLResponse> response = new TEvTablet::TEvLocalMKQLResponse();
auto &record = response->Record;
- record.SetOrigin(TabletId);
+ record.SetOrigin(TabletId);
if (ProgramCompileResult) {
auto *compileResults = record.MutableCompileResults();
@@ -188,7 +188,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
compileResults->SetCompiledProgram(ProgramCompileResult->CompiledProgram);
} else {
NYql::IssuesToMessage(ProgramCompileResult->Errors, compileResults->MutableProgramCompileErrors());
- record.SetStatus(NKikimrProto::ERROR);
+ record.SetStatus(NKikimrProto::ERROR);
}
} else {
record.SetStatus(NKikimrProto::ERROR);
@@ -201,7 +201,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
bool MakeResponse(IEngineFlat *engine, const TActorContext &ctx) {
TAutoPtr<TEvTablet::TEvLocalMKQLResponse> response = new TEvTablet::TEvLocalMKQLResponse();
auto &record = response->Record;
- record.SetOrigin(TabletId);
+ record.SetOrigin(TabletId);
record.SetStatus((EngineResultStatusCode == IEngineFlat::EResult::Ok && EngineResponseStatus != IEngineFlat::EStatus::Error) ? NKikimrProto::OK : NKikimrProto::ERROR);
if (EngineResultStatusCode != IEngineFlat::EResult::Unknown)
@@ -230,7 +230,7 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
const auto functionRegistry = appData->FunctionRegistry;
if (!SerializedMiniKQLProgram) {
- if (!PrepareProgram(txc, appData))
+ if (!PrepareProgram(txc, appData))
return MakeCompileResponse(ctx);
if (SourceProgram.CompileOnly)
@@ -238,18 +238,18 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
}
if (!SerializedMiniKQLParams) {
- if (!PrepareParams(txc, appData))
+ if (!PrepareParams(txc, appData))
return MakeResponse(nullptr, ctx);
}
try {
- TAlignedPagePoolCounters poolCounters(appData->Counters, "local_tx");
-
+ TAlignedPagePoolCounters poolCounters(appData->Counters, "local_tx");
+
TEngineFlatSettings proxySettings(
IEngineFlat::EProtocol::V1,
functionRegistry,
- *TAppData::RandomProvider, *TAppData::TimeProvider,
- nullptr, poolCounters
+ *TAppData::RandomProvider, *TAppData::TimeProvider,
+ nullptr, poolCounters
);
proxySettings.EvaluateResultType = true;
proxySettings.EvaluateResultValue = true;
@@ -287,21 +287,21 @@ class TFlatLocalMiniKQL : public NTabletFlatExecutor::ITransaction {
const TString shardProgram = shardData.Program;
proxyEngine->AfterShardProgramsExtracted();
- TEngineHostCounters hostCounters;
+ TEngineHostCounters hostCounters;
TLocalMiniKQLHost host(txc.DB, hostCounters, TEngineHostSettings(TabletId, false), Factory);
TEngineFlatSettings engineSettings(
IEngineFlat::EProtocol::V1,
functionRegistry,
- *TAppData::RandomProvider, *TAppData::TimeProvider,
- &host, poolCounters
+ *TAppData::RandomProvider, *TAppData::TimeProvider,
+ &host, poolCounters
);
TAutoPtr<IEngineFlat> engine = CreateEngineFlat(engineSettings);
EngineResultStatusCode = engine->AddProgram(TabletId, shardProgram);
if (EngineResultStatusCode != IEngineFlat::EResult::Ok)
return MakeResponse(engine.Get(), ctx);
- IEngineFlat::TValidationInfo validationInfo;
- EngineResultStatusCode = engine->Validate(validationInfo);
+ IEngineFlat::TValidationInfo validationInfo;
+ EngineResultStatusCode = engine->Validate(validationInfo);
if (EngineResultStatusCode != IEngineFlat::EResult::Ok)
return MakeResponse(engine.Get(), ctx);
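
The first hunk of flat_local_tx_minikql.h above restores the local scheme resolution loop: each requested table name is looked up in Scheme.TableNames, each requested column in the table's ColumnNames, and the first unknown name turns the per-table result into an Error. A simplified stand-in for that lookup shape over plain STL maps (the types and field names here are illustrative, not the real IDbSchemeResolver::TTableResult):

#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in: resolve a table name and its column names to ids,
// reporting the first unknown name as an error, as the loop above does.
struct TResolveSketchResult {
    bool Ok = true;
    std::string Error;
    std::uint32_t TableId = 0;
    std::map<std::string, std::uint32_t> ColumnIds;
};

TResolveSketchResult ResolveTableSketch(
        const std::map<std::string, std::uint32_t>& tableNames,
        const std::map<std::uint32_t, std::map<std::string, std::uint32_t>>& tableColumns,
        const std::string& table,
        const std::vector<std::string>& columns) {
    TResolveSketchResult result;
    auto tableIt = tableNames.find(table);
    if (tableIt == tableNames.end())
        return {false, "Unknown table " + table, 0, {}};
    result.TableId = tableIt->second;
    const auto& columnNames = tableColumns.at(result.TableId);
    for (const auto& column : columns) {
        auto columnIt = columnNames.find(column);
        if (columnIt == columnNames.end())
            return {false, "Unknown column " + table + ":" + column, 0, {}};
        result.ColumnIds.emplace(column, columnIt->second);
    }
    return result;
}
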
diff --git a/ydb/core/engine/minikql/flat_local_tx_read_columns.h b/ydb/core/engine/minikql/flat_local_tx_read_columns.h
index 4d5e177c8df..57d3c7bef9d 100644
--- a/ydb/core/engine/minikql/flat_local_tx_read_columns.h
+++ b/ydb/core/engine/minikql/flat_local_tx_read_columns.h
@@ -1,5 +1,5 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/tablet_flat/flat_dbase_apply.h>
#include <ydb/core/tablet_flat/flat_database.h>
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
@@ -7,202 +7,202 @@
#include <ydb/core/protos/issue_id.pb.h>
#include <ydb/core/formats/factory.h>
#include <ydb/core/base/appdata.h>
-
-namespace NKikimr {
-namespace NMiniKQL {
-
-class TFlatLocalReadColumns : public NTabletFlatExecutor::ITransaction {
-public:
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+class TFlatLocalReadColumns : public NTabletFlatExecutor::ITransaction {
+public:
TFlatLocalReadColumns(TActorId sender, TEvTablet::TEvLocalReadColumns::TPtr &ev)
- : Sender(sender)
- , Ev(ev)
- {}
-
- bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
- Y_UNUSED(ctx);
-
- Response.Reset(new TEvTablet::TEvLocalReadColumnsResponse);
-
- if (Ev->Get()->Record.GetMaxRows()) {
- RowsLimit = Ev->Get()->Record.GetMaxRows();
- }
-
- if (Ev->Get()->Record.GetMaxBytes()) {
- BytesLimit = Ev->Get()->Record.GetMaxBytes();
- }
-
- TString tableName = Ev->Get()->Record.GetTableName();
- auto nameIt = txc.DB.GetScheme().TableNames.find(tableName);
-
- if (nameIt == txc.DB.GetScheme().TableNames.end()) {
- SetError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table %s", tableName.c_str()));
- return true;
- }
-
- const ui64 tableId = nameIt->second;
- const auto* tableInfo = txc.DB.GetScheme().GetTableInfo(tableId);
-
- TString format = "clickhouse_native";
- if (Ev->Get()->Record.HasFormat()) {
- format = Ev->Get()->Record.GetFormat();
- }
- std::unique_ptr<IBlockBuilder> blockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
- if (!blockBuilder) {
- SetError(Ydb::StatusIds::BAD_REQUEST,
- Sprintf("Unsupported block format \"%s\"", format.data()));
- return true;
- }
-
- TVector<NScheme::TTypeId> keyColumnTypes;
- TSmallVec<TRawTypeValue> keyFrom;
- TSmallVec<TRawTypeValue> keyTo;
- bool inclusiveFrom = Ev->Get()->Record.GetFromKeyInclusive();;
- bool inclusiveTo = Ev->Get()->Record.GetToKeyInclusive();;
-
- // TODO: check schemas
- for (ui32 keyColId : tableInfo->KeyColumns) {
- keyColumnTypes.push_back(tableInfo->Columns.FindPtr(keyColId)->PType);
- }
-
- TSerializedCellVec fromKeyCells(Ev->Get()->Record.GetFromKey());
- keyFrom.clear();
- for (ui32 i = 0; i < fromKeyCells.GetCells().size(); ++i) {
- keyFrom.push_back(TRawTypeValue(fromKeyCells.GetCells()[i].AsRef(), keyColumnTypes[i]));
- }
- keyFrom.resize(tableInfo->KeyColumns.size());
-
- TSerializedCellVec toKeyCells(Ev->Get()->Record.GetToKey());
- keyTo.clear();
- for (ui32 i = 0; i < toKeyCells.GetCells().size(); ++i) {
- keyTo.push_back(TRawTypeValue(toKeyCells.GetCells()[i].AsRef(), keyColumnTypes[i]));
- }
-
- TVector<NTable::TTag> valueColumns;
- TVector<std::pair<TString, NScheme::TTypeId>> columns;
+ : Sender(sender)
+ , Ev(ev)
+ {}
+
+ bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
+ Y_UNUSED(ctx);
+
+ Response.Reset(new TEvTablet::TEvLocalReadColumnsResponse);
+
+ if (Ev->Get()->Record.GetMaxRows()) {
+ RowsLimit = Ev->Get()->Record.GetMaxRows();
+ }
+
+ if (Ev->Get()->Record.GetMaxBytes()) {
+ BytesLimit = Ev->Get()->Record.GetMaxBytes();
+ }
+
+ TString tableName = Ev->Get()->Record.GetTableName();
+ auto nameIt = txc.DB.GetScheme().TableNames.find(tableName);
+
+ if (nameIt == txc.DB.GetScheme().TableNames.end()) {
+ SetError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table %s", tableName.c_str()));
+ return true;
+ }
+
+ const ui64 tableId = nameIt->second;
+ const auto* tableInfo = txc.DB.GetScheme().GetTableInfo(tableId);
+
+ TString format = "clickhouse_native";
+ if (Ev->Get()->Record.HasFormat()) {
+ format = Ev->Get()->Record.GetFormat();
+ }
+ std::unique_ptr<IBlockBuilder> blockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
+ if (!blockBuilder) {
+ SetError(Ydb::StatusIds::BAD_REQUEST,
+ Sprintf("Unsupported block format \"%s\"", format.data()));
+ return true;
+ }
+
+ TVector<NScheme::TTypeId> keyColumnTypes;
+ TSmallVec<TRawTypeValue> keyFrom;
+ TSmallVec<TRawTypeValue> keyTo;
+ bool inclusiveFrom = Ev->Get()->Record.GetFromKeyInclusive();;
+ bool inclusiveTo = Ev->Get()->Record.GetToKeyInclusive();;
+
+ // TODO: check schemas
+ for (ui32 keyColId : tableInfo->KeyColumns) {
+ keyColumnTypes.push_back(tableInfo->Columns.FindPtr(keyColId)->PType);
+ }
+
+ TSerializedCellVec fromKeyCells(Ev->Get()->Record.GetFromKey());
+ keyFrom.clear();
+ for (ui32 i = 0; i < fromKeyCells.GetCells().size(); ++i) {
+ keyFrom.push_back(TRawTypeValue(fromKeyCells.GetCells()[i].AsRef(), keyColumnTypes[i]));
+ }
+ keyFrom.resize(tableInfo->KeyColumns.size());
+
+ TSerializedCellVec toKeyCells(Ev->Get()->Record.GetToKey());
+ keyTo.clear();
+ for (ui32 i = 0; i < toKeyCells.GetCells().size(); ++i) {
+ keyTo.push_back(TRawTypeValue(toKeyCells.GetCells()[i].AsRef(), keyColumnTypes[i]));
+ }
+
+ TVector<NTable::TTag> valueColumns;
+ TVector<std::pair<TString, NScheme::TTypeId>> columns;
for (const auto& col : Ev->Get()->Record.GetColumns()) {
- const auto* colNameInfo = tableInfo->ColumnNames.FindPtr(col);
- if (!colNameInfo) {
- SetError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown column %s", col.c_str()));
- return true;
- }
-
- NTable::TTag colId = *colNameInfo;
- valueColumns.push_back(colId);
- columns.push_back({col, tableInfo->Columns.FindPtr(colId)->PType});
- }
-
- // TODO: make sure KeyFrom and KeyTo properly reference non-inline cells data
-
- if (!Precharge(txc.DB, tableId, keyFrom, keyTo, valueColumns))
- return false;
-
- size_t rows = 0;
- size_t bytes = 0;
-
- ui64 rowsPerBlock = Ev->Get()->Record.GetMaxRows() ? Ev->Get()->Record.GetMaxRows() : 64000;
- ui64 bytesPerBlock = 64000;
-
- bool shardFinished = false;
-
- {
- TString err;
- if (!blockBuilder->Start(columns, rowsPerBlock, bytesPerBlock, err)) {
- SetError(Ydb::StatusIds::BAD_REQUEST,
- Sprintf("Failed to create block builder \"%s\"", err.data()));
- return true;
- }
-
- NTable::TKeyRange iterRange;
- iterRange.MinKey = keyFrom;
- iterRange.MinInclusive = inclusiveFrom;
- iterRange.MaxKey = keyTo;
- iterRange.MaxInclusive = inclusiveTo;
-
- auto iter = txc.DB.IterateRange(tableId, iterRange, valueColumns);
-
- TString lastKeySerialized;
- bool lastKeyInclusive = true;
- while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
- TDbTupleRef rowKey = iter->GetKey();
- lastKeySerialized = TSerializedCellVec::Serialize(rowKey.Cells());
-
- // Skip erased row
+ const auto* colNameInfo = tableInfo->ColumnNames.FindPtr(col);
+ if (!colNameInfo) {
+ SetError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown column %s", col.c_str()));
+ return true;
+ }
+
+ NTable::TTag colId = *colNameInfo;
+ valueColumns.push_back(colId);
+ columns.push_back({col, tableInfo->Columns.FindPtr(colId)->PType});
+ }
+
+ // TODO: make sure KeyFrom and KeyTo properly reference non-inline cells data
+
+ if (!Precharge(txc.DB, tableId, keyFrom, keyTo, valueColumns))
+ return false;
+
+ size_t rows = 0;
+ size_t bytes = 0;
+
+ ui64 rowsPerBlock = Ev->Get()->Record.GetMaxRows() ? Ev->Get()->Record.GetMaxRows() : 64000;
+ ui64 bytesPerBlock = 64000;
+
+ bool shardFinished = false;
+
+ {
+ TString err;
+ if (!blockBuilder->Start(columns, rowsPerBlock, bytesPerBlock, err)) {
+ SetError(Ydb::StatusIds::BAD_REQUEST,
+ Sprintf("Failed to create block builder \"%s\"", err.data()));
+ return true;
+ }
+
+ NTable::TKeyRange iterRange;
+ iterRange.MinKey = keyFrom;
+ iterRange.MinInclusive = inclusiveFrom;
+ iterRange.MaxKey = keyTo;
+ iterRange.MaxInclusive = inclusiveTo;
+
+ auto iter = txc.DB.IterateRange(tableId, iterRange, valueColumns);
+
+ TString lastKeySerialized;
+ bool lastKeyInclusive = true;
+ while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
+ TDbTupleRef rowKey = iter->GetKey();
+ lastKeySerialized = TSerializedCellVec::Serialize(rowKey.Cells());
+
+ // Skip erased row
if (iter->Row().GetRowState() == NTable::ERowOp::Erase) {
- continue;
- }
-
- TDbTupleRef rowValues = iter->GetValues();
-
- blockBuilder->AddRow(rowKey, rowValues);
-
- rows++;
- bytes = blockBuilder->Bytes();
-
- if (rows >= RowsLimit || bytes >= BytesLimit)
- break;
- }
-
- // We don't want to do many restarts if pages weren't precharged
- // So we just return whatever we read so far and the client can request more rows
- if (iter->Last() == NTable::EReady::Page && rows < 1000 && bytes < 100000 && Restarts < 1) {
- ++Restarts;
- return false;
- }
-
- if (iter->Last() == NTable::EReady::Gone) {
- shardFinished = true;
- lastKeySerialized.clear();
- lastKeyInclusive = false;
- }
-
- TString buffer = blockBuilder->Finish();
- buffer.resize(blockBuilder->Bytes());
-
- Response->Record.SetStatus(Ydb::StatusIds::SUCCESS);
- Response->Record.SetBlocks(buffer);
- Response->Record.SetLastKey(lastKeySerialized);
- Response->Record.SetLastKeyInclusive(lastKeyInclusive);
- Response->Record.SetEndOfShard(shardFinished);
- }
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- MakeResponse(ctx);
- }
-
-private:
- bool Precharge(NTable::TDatabase& db, ui32 localTid,
- const TSmallVec<TRawTypeValue>& keyFrom,
- const TSmallVec<TRawTypeValue>& keyTo,
- const TVector<NTable::TTag>& valueColumns) {
- bool ready = db.Precharge(localTid,
- keyFrom,
- keyTo,
- valueColumns,
- 0,
- RowsLimit, BytesLimit);
- return ready;
- }
-
- void SetError(ui32 status, TString descr) {
- Response->Record.SetStatus(status);
- Response->Record.SetErrorDescription(descr);
- }
-
- void MakeResponse(const TActorContext &ctx) {
- ctx.Send(Sender, Response.Release());
- }
-
-private:
+ continue;
+ }
+
+ TDbTupleRef rowValues = iter->GetValues();
+
+ blockBuilder->AddRow(rowKey, rowValues);
+
+ rows++;
+ bytes = blockBuilder->Bytes();
+
+ if (rows >= RowsLimit || bytes >= BytesLimit)
+ break;
+ }
+
+ // We don't want to do many restarts if pages weren't precharged,
+ // so we just return whatever we've read so far and the client can request more rows
+ if (iter->Last() == NTable::EReady::Page && rows < 1000 && bytes < 100000 && Restarts < 1) {
+ ++Restarts;
+ return false;
+ }
+
+ if (iter->Last() == NTable::EReady::Gone) {
+ shardFinished = true;
+ lastKeySerialized.clear();
+ lastKeyInclusive = false;
+ }
+
+ TString buffer = blockBuilder->Finish();
+ buffer.resize(blockBuilder->Bytes());
+
+ Response->Record.SetStatus(Ydb::StatusIds::SUCCESS);
+ Response->Record.SetBlocks(buffer);
+ Response->Record.SetLastKey(lastKeySerialized);
+ Response->Record.SetLastKeyInclusive(lastKeyInclusive);
+ Response->Record.SetEndOfShard(shardFinished);
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ MakeResponse(ctx);
+ }
+
+private:
+ bool Precharge(NTable::TDatabase& db, ui32 localTid,
+ const TSmallVec<TRawTypeValue>& keyFrom,
+ const TSmallVec<TRawTypeValue>& keyTo,
+ const TVector<NTable::TTag>& valueColumns) {
+ bool ready = db.Precharge(localTid,
+ keyFrom,
+ keyTo,
+ valueColumns,
+ 0,
+ RowsLimit, BytesLimit);
+ return ready;
+ }
+
+ void SetError(ui32 status, TString descr) {
+ Response->Record.SetStatus(status);
+ Response->Record.SetErrorDescription(descr);
+ }
+
+ void MakeResponse(const TActorContext &ctx) {
+ ctx.Send(Sender, Response.Release());
+ }
+
+private:
const TActorId Sender;
- TEvTablet::TEvLocalReadColumns::TPtr Ev;
- TAutoPtr<TEvTablet::TEvLocalReadColumnsResponse> Response;
-
- ui64 RowsLimit = 10000000;
- ui64 BytesLimit = 30*1024*1024;
- ui64 Restarts = 0;
-};
-
-}}
+ TEvTablet::TEvLocalReadColumns::TPtr Ev;
+ TAutoPtr<TEvTablet::TEvLocalReadColumnsResponse> Response;
+
+ ui64 RowsLimit = 10000000;
+ ui64 BytesLimit = 30*1024*1024;
+ ui64 Restarts = 0;
+};
+
+}}
diff --git a/ydb/core/engine/minikql/flat_local_tx_scheme.h b/ydb/core/engine/minikql/flat_local_tx_scheme.h
index 17219baa483..c70476a0cdb 100644
--- a/ydb/core/engine/minikql/flat_local_tx_scheme.h
+++ b/ydb/core/engine/minikql/flat_local_tx_scheme.h
@@ -1,6 +1,6 @@
#pragma once
-#include "flat_local_minikql_host.h"
+#include "flat_local_minikql_host.h"
#include <ydb/core/tablet_flat/flat_dbase_apply.h>
#include <ydb/core/tablet_flat/flat_database.h>
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
@@ -10,51 +10,51 @@
#include <ydb/core/client/minikql_compile/compile_context.h>
#include <ydb/library/yql/minikql/mkql_node_serialization.h>
#include <ydb/core/base/appdata.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NMiniKQL {
-
+
class TFlatLocalSchemeTx : public NTabletFlatExecutor::ITransaction {
public:
TFlatLocalSchemeTx(TActorId sender, TEvTablet::TEvLocalSchemeTx::TPtr &ev)
: Sender(sender)
, Ev(ev)
{}
-
- bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
- Y_UNUSED(ctx);
-
- Response.Reset(new TEvTablet::TEvLocalSchemeTxResponse);
-
+
+ bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
+ Y_UNUSED(ctx);
+
+ Response.Reset(new TEvTablet::TEvLocalSchemeTxResponse);
+
auto &delta = Ev->Get()->Record.GetSchemeChanges();
auto currentScheme = txc.DB.GetScheme();
NTable::TSchemeModifier(currentScheme).Apply(delta);
- // TODO: Validate scheme change
-
+ // TODO: Validate scheme change
+
if (!Ev->Get()->Record.GetDryRun())
txc.DB.Alter().Merge(delta);
-
+
auto schemeSnapshot = currentScheme.GetSnapshot();
- Response->Record.MutableFullScheme()->Swap(schemeSnapshot.Get());
- Response->Record.SetStatus(NKikimrProto::OK);
+ Response->Record.MutableFullScheme()->Swap(schemeSnapshot.Get());
+ Response->Record.SetStatus(NKikimrProto::OK);
Response->Record.SetOrigin(txc.Tablet);
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- MakeResponse(ctx);
- }
-
- void MakeResponse(const TActorContext &ctx) {
- ctx.Send(Sender, Response.Release());
- }
-
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ MakeResponse(ctx);
+ }
+
+ void MakeResponse(const TActorContext &ctx) {
+ ctx.Send(Sender, Response.Release());
+ }
+
private:
const TActorId Sender;
TEvTablet::TEvLocalSchemeTx::TPtr Ev;
TAutoPtr<TEvTablet::TEvLocalSchemeTxResponse> Response;
-};
-
-}}
+};
+
+}}
diff --git a/ydb/core/engine/minikql/minikql_engine_host.cpp b/ydb/core/engine/minikql/minikql_engine_host.cpp
index f77582498ac..36c35f32cc4 100644
--- a/ydb/core/engine/minikql/minikql_engine_host.cpp
+++ b/ydb/core/engine/minikql/minikql_engine_host.cpp
@@ -8,16 +8,16 @@
#include <ydb/core/tx/datashard/sys_tables.h>
#include <library/cpp/containers/stack_vector/stack_vec.h>
-
+
namespace NKikimr {
namespace NMiniKQL {
using TScheme = NTable::TScheme;
void ConvertTableKeys(const TScheme& scheme, const TScheme::TTableInfo* tableInfo,
- const TArrayRef<const TCell>& row, TSmallVec<TRawTypeValue>& key, ui64* keyDataBytes)
+ const TArrayRef<const TCell>& row, TSmallVec<TRawTypeValue>& key, ui64* keyDataBytes)
{
- ui64 bytes = 0;
+ ui64 bytes = 0;
key.reserve(row.size());
for (size_t keyIdx = 0; keyIdx < row.size(); keyIdx++) {
const TCell& cell = row[keyIdx];
@@ -25,25 +25,25 @@ void ConvertTableKeys(const TScheme& scheme, const TScheme::TTableInfo* tableInf
NScheme::TTypeId vtype = scheme.GetColumnInfo(tableInfo, keyCol)->PType;
if (cell.IsNull()) {
key.emplace_back();
- bytes += 1;
+ bytes += 1;
} else {
key.emplace_back(cell.Data(), cell.Size(), vtype);
- bytes += cell.Size();
+ bytes += cell.Size();
}
}
- if (keyDataBytes)
- *keyDataBytes = bytes;
+ if (keyDataBytes)
+ *keyDataBytes = bytes;
}
-TEngineHost::TEngineHost(NTable::TDatabase& db, TEngineHostCounters& counters, const TEngineHostSettings& settings)
+TEngineHost::TEngineHost(NTable::TDatabase& db, TEngineHostCounters& counters, const TEngineHostSettings& settings)
: Db(db)
, Scheme(db.GetScheme())
- , Settings(settings)
- , Counters(counters)
+ , Settings(settings)
+ , Counters(counters)
{}
ui64 TEngineHost::GetShardId() const {
- return Settings.ShardId;
+ return Settings.ShardId;
}
const TScheme::TTableInfo* TEngineHost::GetTableInfo(const TTableId& tableId) const {
@@ -51,7 +51,7 @@ const TScheme::TTableInfo* TEngineHost::GetTableInfo(const TTableId& tableId) co
}
bool TEngineHost::IsReadonly() const {
- return Settings.IsReadonly;
+ return Settings.IsReadonly;
}
@@ -121,27 +121,27 @@ bool TEngineHost::IsValidKey(TKeyDesc& key, std::pair<ui64, ui64>& maxSnapshotTi
ui64 TEngineHost::CalculateReadSize(const TVector<const TKeyDesc*>& keys) const {
NTable::TSizeEnv env;
-
+
for (const TKeyDesc* ki : keys) {
- DoCalculateReadSize(*ki, env);
- }
-
- return env.GetSize();
+ DoCalculateReadSize(*ki, env);
+ }
+
+ return env.GetSize();
}
void TEngineHost::DoCalculateReadSize(const TKeyDesc& key, NTable::TSizeEnv& env) const {
- if (TSysTables::IsSystemTable(key.TableId))
- return;
- if (key.RowOperation != TKeyDesc::ERowOperation::Read)
- return;
- ui64 localTid = LocalTableId(key.TableId);
- Y_VERIFY(localTid, "table not exist");
- const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
- TSmallVec<TRawTypeValue> keyFrom;
- TSmallVec<TRawTypeValue> keyTo;
- ConvertKeys(tableInfo, key.Range.From, keyFrom);
- ConvertKeys(tableInfo, key.Range.To, keyTo);
-
+ if (TSysTables::IsSystemTable(key.TableId))
+ return;
+ if (key.RowOperation != TKeyDesc::ERowOperation::Read)
+ return;
+ ui64 localTid = LocalTableId(key.TableId);
+ Y_VERIFY(localTid, "table not exist");
+ const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
+ TSmallVec<TRawTypeValue> keyFrom;
+ TSmallVec<TRawTypeValue> keyTo;
+ ConvertKeys(tableInfo, key.Range.From, keyFrom);
+ ConvertKeys(tableInfo, key.Range.To, keyTo);
+
TSmallVec<NTable::TTag> tags;
for (const auto& column : key.Columns) {
if (Y_LIKELY(column.Operation == TKeyDesc::EColumnOperation::Read)) {
@@ -149,39 +149,39 @@ void TEngineHost::DoCalculateReadSize(const TKeyDesc& key, NTable::TSizeEnv& env
}
}
- Db.CalculateReadSize(env, localTid,
- keyFrom,
- keyTo,
+ Db.CalculateReadSize(env, localTid,
+ keyFrom,
+ keyTo,
tags,
- 0,
+ 0,
key.RangeLimits.ItemsLimit, key.RangeLimits.BytesLimit,
key.Reverse ? NTable::EDirection::Reverse : NTable::EDirection::Forward);
-}
-
-ui64 TEngineHost::CalculateResultSize(const TKeyDesc& key) const {
- if (TSysTables::IsSystemTable(key.TableId))
- return 0;
-
- ui64 localTid = LocalTableId(key.TableId);
- Y_VERIFY(localTid, "table not exist");
- if (key.Range.Point) {
- return Db.EstimateRowSize(localTid);
- } else {
+}
+
+ui64 TEngineHost::CalculateResultSize(const TKeyDesc& key) const {
+ if (TSysTables::IsSystemTable(key.TableId))
+ return 0;
+
+ ui64 localTid = LocalTableId(key.TableId);
+ Y_VERIFY(localTid, "table not exist");
+ if (key.Range.Point) {
+ return Db.EstimateRowSize(localTid);
+ } else {
NTable::TSizeEnv env;
- DoCalculateReadSize(key, env);
- ui64 size = env.GetSize();
-
- if (key.RangeLimits.ItemsLimit != 0) {
- ui64 rowSize = Db.EstimateRowSize(localTid);
- size = Min(size, rowSize*key.RangeLimits.ItemsLimit);
- }
- if (key.RangeLimits.BytesLimit != 0) {
- size = Min(size, key.RangeLimits.BytesLimit);
- }
- return size;
- }
-}
-
+ DoCalculateReadSize(key, env);
+ ui64 size = env.GetSize();
+
+ if (key.RangeLimits.ItemsLimit != 0) {
+ ui64 rowSize = Db.EstimateRowSize(localTid);
+ size = Min(size, rowSize*key.RangeLimits.ItemsLimit);
+ }
+ if (key.RangeLimits.BytesLimit != 0) {
+ size = Min(size, key.RangeLimits.BytesLimit);
+ }
+ return size;
+ }
+}
+
void TEngineHost::PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaultCount) {
ui64 limitMultiplier = 1;
if (pageFaultCount >= 2) {
@@ -200,9 +200,9 @@ void TEngineHost::PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaul
}
};
- bool ret = true;
- for (const auto& ki : keys) {
- const TKeyDesc& key = *ki;
+ bool ret = true;
+ for (const auto& ki : keys) {
+ const TKeyDesc& key = *ki;
if (TSysTables::IsSystemTable(key.TableId))
continue;
@@ -225,17 +225,17 @@ void TEngineHost::PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaul
}
if (columnOpFilter.empty()) {
- continue;
+ continue;
}
- ui64 localTid = LocalTableId(key.TableId);
- Y_VERIFY(localTid, "table not exist");
- const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
- TSmallVec<TRawTypeValue> keyFrom;
- TSmallVec<TRawTypeValue> keyTo;
- ConvertKeys(tableInfo, key.Range.From, keyFrom);
- ConvertKeys(tableInfo, key.Range.To, keyTo);
-
+ ui64 localTid = LocalTableId(key.TableId);
+ Y_VERIFY(localTid, "table not exist");
+ const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
+ TSmallVec<TRawTypeValue> keyFrom;
+ TSmallVec<TRawTypeValue> keyTo;
+ ConvertKeys(tableInfo, key.Range.From, keyFrom);
+ ConvertKeys(tableInfo, key.Range.To, keyTo);
+
TSmallVec<NTable::TTag> tags;
for (const auto& column : key.Columns) {
if (columnOpFilter.contains(column.Operation)) {
@@ -243,22 +243,22 @@ void TEngineHost::PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaul
}
}
- bool ready = Db.Precharge(localTid,
+ bool ready = Db.Precharge(localTid,
keyFrom,
- key.Range.Point ? keyFrom : keyTo,
+ key.Range.Point ? keyFrom : keyTo,
tags,
- Settings.DisableByKeyFilter ? (ui64)NTable::NoByKey : 0,
+ Settings.DisableByKeyFilter ? (ui64)NTable::NoByKey : 0,
adjustLimit(key.RangeLimits.ItemsLimit),
adjustLimit(key.RangeLimits.BytesLimit),
key.Reverse ? NTable::EDirection::Reverse : NTable::EDirection::Forward,
GetReadVersion(key.TableId));
- ret &= ready;
- }
-
- if (!ret)
- throw TNotReadyTabletException();
-}
-
+ ret &= ready;
+ }
+
+ if (!ret)
+ throw TNotReadyTabletException();
+}
+
NUdf::TUnboxedValue TEngineHost::SelectRow(const TTableId& tableId, const TArrayRef<const TCell>& row,
TStructLiteral* columnIds, TOptionalType* returnType, const TReadTarget& readTarget,
const THolderFactory& holderFactory)
@@ -275,7 +275,7 @@ NUdf::TUnboxedValue TEngineHost::SelectRow(const TTableId& tableId, const TArray
ui64 localTid = LocalTableId(tableId);
Y_VERIFY(localTid, "table not exist");
const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
- TSmallVec<TRawTypeValue> key;
+ TSmallVec<TRawTypeValue> key;
ConvertKeys(tableInfo, row, key);
TSmallVec<NTable::TTag> tags;
@@ -292,9 +292,9 @@ NUdf::TUnboxedValue TEngineHost::SelectRow(const TTableId& tableId, const TArray
if (key.size() != Db.GetScheme().GetTableInfo(localTid)->KeyColumns.size())
throw TSchemeErrorTabletException();
- Counters.NSelectRow++;
- Settings.KeyAccessSampler->AddSample(tableId, row);
-
+ Counters.NSelectRow++;
+ Settings.KeyAccessSampler->AddSample(tableId, row);
+
NTable::TSelectStats stats;
ui64 flags = Settings.DisableByKeyFilter ? (ui64)NTable::NoByKey : 0;
const auto ready = Db.Select(localTid, key, tags, dbRow, stats, flags, GetReadVersion(tableId));
@@ -315,10 +315,10 @@ NUdf::TUnboxedValue TEngineHost::SelectRow(const TTableId& tableId, const TArray
NUdf::TUnboxedValue* rowItems = nullptr;
auto rowResult = holderFactory.CreateDirectArrayHolder(tags.size(), rowItems);
- ui64 rowBytes = 0;
+ ui64 rowBytes = 0;
for (ui32 i = 0; i < tags.size(); ++i) {
rowItems[i] = GetCellValue(dbRow.Get(i), cellTypes[i]);
- rowBytes += dbRow.Get(i).IsNull() ? 1 : dbRow.Get(i).Size();
+ rowBytes += dbRow.Get(i).IsNull() ? 1 : dbRow.Get(i).Size();
}
for (ui32 i = 0; i < systemColumnTags.size(); ++i) {
switch (systemColumnTags[i]) {
@@ -329,11 +329,11 @@ NUdf::TUnboxedValue TEngineHost::SelectRow(const TTableId& tableId, const TArray
ythrow yexception() << "Unknown system column tag: " << systemColumnTags[i];
}
}
- rowBytes = std::max(rowBytes, (ui64)8);
+ rowBytes = std::max(rowBytes, (ui64)8);
- Counters.SelectRowBytes += rowBytes;
+ Counters.SelectRowBytes += rowBytes;
Counters.SelectRowRows++;
-
+
return std::move(rowResult);
}
@@ -487,7 +487,7 @@ public:
, Bytes(0)
, SystemColumnTags(systemColumnTags)
, ShardId(shardId) {}
-
+
bool Next(NUdf::TUnboxedValue& value) override {
bool truncated = false;
@@ -546,7 +546,7 @@ public:
TDbTupleRef rowValues = Iter->GetValues();
ui64 rowSize = 0;
for (ui32 i = 0; i < rowValues.ColumnCount; ++i) {
- rowSize += rowValues.Columns[i].IsNull() ? 1 : rowValues.Columns[i].Size();
+ rowSize += rowValues.Columns[i].IsNull() ? 1 : rowValues.Columns[i].Size();
}
// Some per-row overhead to deal with the case when no columns were requested
rowSize = std::max(rowSize, (ui64)8);
@@ -555,7 +555,7 @@ public:
Bytes += rowSize;
List.EngineHost.GetCounters().SelectRangeRows++;
List.EngineHost.GetCounters().SelectRangeBytes += rowSize;
- List.KeyAccessSampler->AddSample(List.TableId, tuple.Cells());
+ List.KeyAccessSampler->AddSample(List.TableId, tuple.Cells());
if (HasCurrent && CurrentRowValue.UniqueBoxed()) {
CurrentRow()->Reuse(rowValues);
@@ -625,14 +625,14 @@ public:
public:
TSelectRangeLazyRowsList(NTable::TDatabase& db, const TScheme& scheme, const THolderFactory& holderFactory,
- const TTableId& tableId, ui64 localTid, const TSmallVec<NTable::TTag>& tags, const TSmallVec<bool>& skipNullKeys, const TTableRange& range,
+ const TTableId& tableId, ui64 localTid, const TSmallVec<NTable::TTag>& tags, const TSmallVec<bool>& skipNullKeys, const TTableRange& range,
ui64 itemsLimit, ui64 bytesLimit, bool reverse, TEngineHost& engineHost
- , const TSmallVec<NTable::TTag>& systemColumnTags, ui64 shardId, IKeyAccessSampler::TPtr keyAccessSampler)
+ , const TSmallVec<NTable::TTag>& systemColumnTags, ui64 shardId, IKeyAccessSampler::TPtr keyAccessSampler)
: TCustomListValue(&holderFactory.GetMemInfo())
, Db(db)
, Scheme(scheme)
, HolderFactory(holderFactory)
- , TableId(tableId)
+ , TableId(tableId)
, LocalTid(localTid)
, Tags(tags)
, SystemColumnTags(systemColumnTags)
@@ -643,8 +643,8 @@ public:
, RangeHolder(range)
, Reverse(reverse)
, EngineHost(engineHost)
- , KeyAccessSampler(keyAccessSampler)
- {}
+ , KeyAccessSampler(keyAccessSampler)
+ {}
NUdf::TUnboxedValue GetListIterator() const override {
const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(LocalTid);
@@ -708,7 +708,7 @@ private:
NTable::TDatabase& Db;
const TScheme& Scheme;
const THolderFactory& HolderFactory;
- TTableId TableId;
+ TTableId TableId;
ui64 LocalTid;
TSmallVec<NTable::TTag> Tags;
TSmallVec<NTable::TTag> SystemColumnTags;
@@ -723,17 +723,17 @@ private:
mutable TMaybe<TString> FirstKey;
mutable TMaybe<ui64> SizeBytes;
TEngineHost& EngineHost;
- IKeyAccessSampler::TPtr KeyAccessSampler;
+ IKeyAccessSampler::TPtr KeyAccessSampler;
};
class TSelectRangeResult : public TComputationValue<TSelectRangeResult> {
public:
- TSelectRangeResult(NTable::TDatabase& db, const TScheme& scheme, const THolderFactory& holderFactory, const TTableId& tableId, ui64 localTid,
+ TSelectRangeResult(NTable::TDatabase& db, const TScheme& scheme, const THolderFactory& holderFactory, const TTableId& tableId, ui64 localTid,
const TSmallVec<NTable::TTag>& tags, const TSmallVec<bool>& skipNullKeys, const TTableRange& range,
ui64 itemsLimit, ui64 bytesLimit, bool reverse, TEngineHost& engineHost,
- const TSmallVec<NTable::TTag>& systemColumnTags, ui64 shardId, IKeyAccessSampler::TPtr keyAccessSampler)
+ const TSmallVec<NTable::TTag>& systemColumnTags, ui64 shardId, IKeyAccessSampler::TPtr keyAccessSampler)
: TComputationValue(&holderFactory.GetMemInfo())
- , List(NUdf::TUnboxedValuePod(new TSelectRangeLazyRowsList(db, scheme, holderFactory, tableId, localTid, tags,
+ , List(NUdf::TUnboxedValuePod(new TSelectRangeLazyRowsList(db, scheme, holderFactory, tableId, localTid, tags,
skipNullKeys, range, itemsLimit, bytesLimit, reverse, engineHost, systemColumnTags, shardId, keyAccessSampler))) {}
private:
@@ -842,9 +842,9 @@ NUdf::TUnboxedValue TEngineHost::SelectRange(const TTableId& tableId, const TTab
}
}
- Counters.NSelectRange++;
-
- return NUdf::TUnboxedValuePod(new TSelectRangeResult(Db, Scheme, holderFactory, tableId, localTid, tags,
+ Counters.NSelectRange++;
+
+ return NUdf::TUnboxedValuePod(new TSelectRangeResult(Db, Scheme, holderFactory, tableId, localTid, tags,
skipNullKeysFlags, range, itemsLimit, bytesLimit, reverse, *this, systemColumnTags, GetShardId(),
Settings.KeyAccessSampler));
}
@@ -854,11 +854,11 @@ void TEngineHost::UpdateRow(const TTableId& tableId, const TArrayRef<const TCell
ui64 localTid = LocalTableId(tableId);
Y_VERIFY(localTid, "table not exist");
const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
- TSmallVec<TRawTypeValue> key;
- ui64 keyBytes = 0;
+ TSmallVec<TRawTypeValue> key;
+ ui64 keyBytes = 0;
ConvertTableKeys(Scheme, tableInfo, row, key, &keyBytes);
- ui64 valueBytes = 0;
+ ui64 valueBytes = 0;
TSmallVec<NTable::TUpdateOp> ops;
for (size_t i = 0; i < commands.size(); i++) {
const TUpdateCommand& upd = commands[i];
@@ -866,7 +866,7 @@ void TEngineHost::UpdateRow(const TTableId& tableId, const TArrayRef<const TCell
NScheme::TTypeId vtype = Scheme.GetColumnInfo(tableInfo, upd.Column)->PType;
ops.emplace_back(upd.Column, NTable::ECellOp::Set,
upd.Value.IsNull() ? TRawTypeValue() : TRawTypeValue(upd.Value.Data(), upd.Value.Size(), vtype));
- valueBytes += upd.Value.IsNull() ? 1 : upd.Value.Size();
+ valueBytes += upd.Value.IsNull() ? 1 : upd.Value.Size();
}
if (auto collector = GetChangeCollector(tableId)) {
@@ -882,10 +882,10 @@ void TEngineHost::UpdateRow(const TTableId& tableId, const TArrayRef<const TCell
}
Db.Update(localTid, NTable::ERowOp::Upsert, key, ops, GetWriteVersion(tableId));
-
- Settings.KeyAccessSampler->AddSample(tableId, row);
- Counters.NUpdateRow++;
- Counters.UpdateRowBytes += keyBytes + valueBytes;
+
+ Settings.KeyAccessSampler->AddSample(tableId, row);
+ Counters.NUpdateRow++;
+ Counters.UpdateRowBytes += keyBytes + valueBytes;
}
// Erases the single row.
@@ -893,10 +893,10 @@ void TEngineHost::EraseRow(const TTableId& tableId, const TArrayRef<const TCell>
ui64 localTid = LocalTableId(tableId);
Y_VERIFY(localTid, "table not exist");
const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(localTid);
- TSmallVec<TRawTypeValue> key;
- ui64 keyBytes = 0;
+ TSmallVec<TRawTypeValue> key;
+ ui64 keyBytes = 0;
ConvertTableKeys(Scheme, tableInfo, row, key, &keyBytes);
-
+
if (auto collector = GetChangeCollector(tableId)) {
collector->SetWriteVersion(GetWriteVersion(tableId));
if (collector->NeedToReadKeys()) {
@@ -911,9 +911,9 @@ void TEngineHost::EraseRow(const TTableId& tableId, const TArrayRef<const TCell>
Db.Update(localTid, NTable::ERowOp::Erase, key, { }, GetWriteVersion(tableId));
- Settings.KeyAccessSampler->AddSample(tableId, row);
- Counters.NEraseRow++;
- Counters.EraseRowBytes += keyBytes + 8;
+ Settings.KeyAccessSampler->AddSample(tableId, row);
+ Counters.NEraseRow++;
+ Counters.EraseRowBytes += keyBytes + 8;
}
// Check that table is erased
@@ -936,8 +936,8 @@ ui64 TEngineHost::LocalTableId(const TTableId& tableId) const {
return tableId.PathId.LocalPathId;
}
-void TEngineHost::ConvertKeys(const TScheme::TTableInfo* tableInfo, const TArrayRef<const TCell>& row,
- TSmallVec<TRawTypeValue>& key) const
+void TEngineHost::ConvertKeys(const TScheme::TTableInfo* tableInfo, const TArrayRef<const TCell>& row,
+ TSmallVec<TRawTypeValue>& key) const
{
ConvertTableKeys(Scheme, tableInfo, row, key, nullptr);
}
diff --git a/ydb/core/engine/minikql/minikql_engine_host.h b/ydb/core/engine/minikql/minikql_engine_host.h
index 662b7edca29..012ee6891bc 100644
--- a/ydb/core/engine/minikql/minikql_engine_host.h
+++ b/ydb/core/engine/minikql/minikql_engine_host.h
@@ -12,20 +12,20 @@
namespace NKikimr {
namespace NMiniKQL {
-struct TEngineHostCounters {
- ui64 NSelectRow = 0;
- ui64 NSelectRange = 0;
- ui64 NUpdateRow = 0;
- ui64 NEraseRow = 0;
-
+struct TEngineHostCounters {
+ ui64 NSelectRow = 0;
+ ui64 NSelectRange = 0;
+ ui64 NUpdateRow = 0;
+ ui64 NEraseRow = 0;
+
ui64 SelectRowRows = 0;
- ui64 SelectRowBytes = 0;
- ui64 SelectRangeRows = 0;
- ui64 SelectRangeBytes = 0;
- ui64 SelectRangeDeletedRowSkips = 0;
- ui64 UpdateRowBytes = 0;
- ui64 EraseRowBytes = 0;
-
+ ui64 SelectRowBytes = 0;
+ ui64 SelectRangeRows = 0;
+ ui64 SelectRangeBytes = 0;
+ ui64 SelectRangeDeletedRowSkips = 0;
+ ui64 UpdateRowBytes = 0;
+ ui64 EraseRowBytes = 0;
+
ui64 InvisibleRowSkips = 0;
TEngineHostCounters& operator+=(const TEngineHostCounters& other) {
@@ -44,63 +44,63 @@ struct TEngineHostCounters {
return *this;
}
- TString ToString() const {
- return TStringBuilder()
- << "{NSelectRow: " << NSelectRow
- << ", NSelectRange: " << NSelectRange
- << ", NUpdateRow: " << NUpdateRow
- << ", NEraseRow: " << NEraseRow
+ TString ToString() const {
+ return TStringBuilder()
+ << "{NSelectRow: " << NSelectRow
+ << ", NSelectRange: " << NSelectRange
+ << ", NUpdateRow: " << NUpdateRow
+ << ", NEraseRow: " << NEraseRow
<< ", SelectRowRows: " << SelectRowRows
- << ", SelectRowBytes: " << SelectRowBytes
- << ", SelectRangeRows: " << SelectRangeRows
- << ", SelectRangeBytes: " << SelectRangeBytes
- << ", UpdateRowBytes: " << UpdateRowBytes
- << ", EraseRowBytes: " << EraseRowBytes
- << ", SelectRangeDeletedRowSkips: " << SelectRangeDeletedRowSkips
+ << ", SelectRowBytes: " << SelectRowBytes
+ << ", SelectRangeRows: " << SelectRangeRows
+ << ", SelectRangeBytes: " << SelectRangeBytes
+ << ", UpdateRowBytes: " << UpdateRowBytes
+ << ", EraseRowBytes: " << EraseRowBytes
+ << ", SelectRangeDeletedRowSkips: " << SelectRangeDeletedRowSkips
<< ", InvisibleRowSkips: " << InvisibleRowSkips
- << "}";
- }
-};
-
-struct IKeyAccessSampler : public TThrRefBase {
- using TPtr = TIntrusivePtr<IKeyAccessSampler>;
- virtual void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) = 0;
-};
-
-struct TNoopKeySampler : public IKeyAccessSampler {
- void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) override {
- Y_UNUSED(tableId);
- Y_UNUSED(key);
- }
-};
-
-struct TEngineHostSettings {
- ui64 ShardId;
- bool IsReadonly;
- bool DisableByKeyFilter;
- IKeyAccessSampler::TPtr KeyAccessSampler;
-
- explicit TEngineHostSettings(ui64 shardId = 0, bool IsReadonly = false, bool disableByKeyFilter = false,
- IKeyAccessSampler::TPtr keyAccessSampler = new TNoopKeySampler())
- : ShardId(shardId)
- , IsReadonly(IsReadonly)
- , DisableByKeyFilter(disableByKeyFilter)
- , KeyAccessSampler(keyAccessSampler)
- {}
-};
-
+ << "}";
+ }
+};
+
+struct IKeyAccessSampler : public TThrRefBase {
+ using TPtr = TIntrusivePtr<IKeyAccessSampler>;
+ virtual void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) = 0;
+};
+
+struct TNoopKeySampler : public IKeyAccessSampler {
+ void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) override {
+ Y_UNUSED(tableId);
+ Y_UNUSED(key);
+ }
+};
+
+struct TEngineHostSettings {
+ ui64 ShardId;
+ bool IsReadonly;
+ bool DisableByKeyFilter;
+ IKeyAccessSampler::TPtr KeyAccessSampler;
+
+ explicit TEngineHostSettings(ui64 shardId = 0, bool IsReadonly = false, bool disableByKeyFilter = false,
+ IKeyAccessSampler::TPtr keyAccessSampler = new TNoopKeySampler())
+ : ShardId(shardId)
+ , IsReadonly(IsReadonly)
+ , DisableByKeyFilter(disableByKeyFilter)
+ , KeyAccessSampler(keyAccessSampler)
+ {}
+};
+
class TEngineHost : public IEngineFlatHost {
public:
using TScheme = NTable::TScheme;
- explicit TEngineHost(NTable::TDatabase& db, TEngineHostCounters& counters,
- const TEngineHostSettings& settings = TEngineHostSettings());
+ explicit TEngineHost(NTable::TDatabase& db, TEngineHostCounters& counters,
+ const TEngineHostSettings& settings = TEngineHostSettings());
ui64 GetShardId() const override;
const TScheme::TTableInfo* GetTableInfo(const TTableId& tableId) const override;
bool IsReadonly() const override;
bool IsValidKey(TKeyDesc& key, std::pair<ui64, ui64>& maxSnapshotTime) const override;
ui64 CalculateReadSize(const TVector<const TKeyDesc*>& keys) const override;
- ui64 CalculateResultSize(const TKeyDesc& key) const override;
+ ui64 CalculateResultSize(const TKeyDesc& key) const override;
void PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaultCount) override;
NUdf::TUnboxedValue SelectRow(const TTableId& tableId, const TArrayRef<const TCell>& row,
@@ -138,8 +138,8 @@ protected:
protected:
NTable::TDatabase& Db;
const TScheme& Scheme;
- const TEngineHostSettings Settings;
- TEngineHostCounters& Counters;
+ const TEngineHostSettings Settings;
+ TEngineHostCounters& Counters;
TPeriodicCallback PeriodicCallback;
};
@@ -166,7 +166,7 @@ public:
void AnalyzeRowType(TStructLiteral* columnIds, TSmallVec<NTable::TTag>& tags, TSmallVec<NTable::TTag>& systemColumnTags);
NUdf::TUnboxedValue GetCellValue(const TCell& cell, NScheme::TTypeId type);
NUdf::TUnboxedValue CreateSelectRangeLazyRowsList(NTable::TDatabase& db, const NTable::TScheme& scheme,
- const THolderFactory& holderFactory, const TTableId& tableId, ui64 localTid, const TSmallVec<NTable::TTag>& tags,
+ const THolderFactory& holderFactory, const TTableId& tableId, ui64 localTid, const TSmallVec<NTable::TTag>& tags,
const TSmallVec<bool>& skipNullKeys, const TTableRange& range, ui64 itemsLimit, ui64 bytesLimit,
bool reverse, TEngineHostCounters& counters, const TSmallVec<NTable::TTag>& systemColumnTags, ui64 shardId);
diff --git a/ydb/core/engine/mkql_engine_flat.cpp b/ydb/core/engine/mkql_engine_flat.cpp
index 4a2efa180e6..c7fe388eef8 100644
--- a/ydb/core/engine/mkql_engine_flat.cpp
+++ b/ydb/core/engine/mkql_engine_flat.cpp
@@ -57,7 +57,7 @@ TStructLiteral& GetPgmMyReadsStruct(TStructLiteral& pgmStruct) {
return static_cast<TStructLiteral&>(*myReadsNode.GetNode());
}
-const TStructLiteral& GetPgmShardsForReadStruct(const TStructLiteral& pgmStruct) {
+const TStructLiteral& GetPgmShardsForReadStruct(const TStructLiteral& pgmStruct) {
TRuntimeNode shardsForReadNode = pgmStruct.GetValue(3);
MKQL_ENSURE(shardsForReadNode.IsImmediate() && shardsForReadNode.GetNode()->GetType()->IsStruct(),
"ShardsForRead: Expected immediate struct");
@@ -254,11 +254,11 @@ public:
, AreIncomingReadsetsPrepared(false)
, IsExecuted(false)
, ReadOnlyOriginPrograms(true)
- , IsCancelled(false)
+ , IsCancelled(false)
{
Ui64Type = TDataType::Create(NUdf::TDataType<ui64>::Id, Env);
ResultType = Env.GetEmptyStruct()->GetType();
- Alloc.DisableStrictAllocationCheck();
+ Alloc.DisableStrictAllocationCheck();
Alloc.Release();
}
@@ -392,10 +392,10 @@ public:
Y_VERIFY(key->Status == TKeyDesc::EStatus::Ok, "Some DB keys are not resolved correctly");
AddShards(affectedShardSet, *key);
if (affectedShardSet.size() > limits.ShardCount) {
- AddError("PrepareShardPrograms", __LINE__,
+ AddError("PrepareShardPrograms", __LINE__,
Sprintf("too many affected shards: %u (max allowed %u)", (ui32)affectedShardSet.size(), limits.ShardCount).data());
return EResult::TooManyShards;
- }
+ }
if (key->RowOperation == TKeyDesc::ERowOperation::Update || key->RowOperation == TKeyDesc::ERowOperation::Erase) {
AddShards(writeSet, *key);
@@ -559,33 +559,33 @@ public:
Status = EStatus::Error;
}
- // Temporary check for KIKIMR-7112
- bool CheckValidUint8InKey(TKeyDesc& desc) const {
- if (!desc.Range.Point) {
- for (NScheme::TTypeId typeId : desc.KeyColumnTypes) {
- if (typeId == NScheme::NTypeIds::Uint8) {
- AddError("Validate", __LINE__, "Bad shard program: dynamic keys with Uint8 columns are currently prohibited");
- return false;
- }
- }
- } else {
- if (desc.Range.From.size() > desc.KeyColumnTypes.size()) {
- AddError("Validate", __LINE__, "Bad shard program: key size is greater that specified in schema");
- return false;
- }
- for (size_t i = 0; i < desc.Range.From.size(); ++i) {
- if (desc.KeyColumnTypes[i] != NScheme::NTypeIds::Uint8)
- continue;
- const TCell& c = desc.Range.From[i];
- if (!c.IsNull() && c.AsValue<ui8>() > 127) {
- AddError("Validate", __LINE__, "Bad shard program: keys with Uint8 column values >127 are currently prohibited");
- return false;
- }
- }
- }
- return true;
- }
-
+ // Temporary check for KIKIMR-7112
+ bool CheckValidUint8InKey(TKeyDesc& desc) const {
+ if (!desc.Range.Point) {
+ for (NScheme::TTypeId typeId : desc.KeyColumnTypes) {
+ if (typeId == NScheme::NTypeIds::Uint8) {
+ AddError("Validate", __LINE__, "Bad shard program: dynamic keys with Uint8 columns are currently prohibited");
+ return false;
+ }
+ }
+ } else {
+ if (desc.Range.From.size() > desc.KeyColumnTypes.size()) {
+ AddError("Validate", __LINE__, "Bad shard program: key size is greater that specified in schema");
+ return false;
+ }
+ for (size_t i = 0; i < desc.Range.From.size(); ++i) {
+ if (desc.KeyColumnTypes[i] != NScheme::NTypeIds::Uint8)
+ continue;
+ const TCell& c = desc.Range.From[i];
+ if (!c.IsNull() && c.AsValue<ui8>() > 127) {
+ AddError("Validate", __LINE__, "Bad shard program: keys with Uint8 column values >127 are currently prohibited");
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
void BuildResult() noexcept override {
Y_VERIFY(AreShardProgramsExtracted, "AfterShardProgramsExtracted must be called first");
Y_VERIFY(!IsResultBuilt, "BuildResult is already called");
@@ -795,7 +795,7 @@ public:
return result;
}
- EResult Validate(TValidationInfo& validationInfo) override {
+ EResult Validate(TValidationInfo& validationInfo) override {
Y_VERIFY(!IsProgramValidated, "Validate is already called");
Y_VERIFY(ProgramPerOrigin.size() == 1, "One program must be added to engine");
Y_VERIFY(Settings.Host, "Host is not set");
@@ -873,15 +873,15 @@ public:
return EResult::ProgramError;
}
- // Extract reads that are included in the reply
+ // Extract reads that are included in the reply
THashSet<TStringBuf> replyIds;
- const auto& replyStruct = static_cast<const TStructLiteral&>(*replyPgm.GetNode());
- for (ui32 j = 0, f = replyStruct.GetValuesCount(); j < f; ++j) {
- TStringBuf uniqId(replyStruct.GetType()->GetMemberName(j));
- // Save the id of read operation that is included in reply
- replyIds.insert(uniqId);
- }
-
+ const auto& replyStruct = static_cast<const TStructLiteral&>(*replyPgm.GetNode());
+ for (ui32 j = 0, f = replyStruct.GetValuesCount(); j < f; ++j) {
+ TStringBuf uniqId(replyStruct.GetType()->GetMemberName(j));
+ // Save the id of the read operation that is included in the reply
+ replyIds.insert(uniqId);
+ }
+
auto writePgm = runPgmStruct.GetValue(1);
auto listType = writePgm.GetStaticType();
if (listType->GetKind() != TType::EKind::List) {
@@ -895,28 +895,28 @@ public:
return EResult::ProgramError;
}
- // Extract reads that are included in out readsets
+ // Extract reads that are included in out readsets
THashMap<TStringBuf, THashSet<ui64>> readTargets;
- const ui64 myShardId = Settings.Host->GetShardId();
- auto shardsToWriteNode = pgmStruct.GetValue(4);
- MKQL_ENSURE(shardsToWriteNode.IsImmediate() && shardsToWriteNode.GetNode()->GetType()->IsStruct(),
- "Expected immediate struct");
- const auto& shardsToWriteStruct = static_cast<const TStructLiteral&>(*shardsToWriteNode.GetNode());
- for (ui32 i = 0, e = shardsToWriteStruct.GetValuesCount(); i < e; ++i) {
- TStringBuf uniqId(shardsToWriteStruct.GetType()->GetMemberName(i));
- auto shardsList = AS_VALUE(TListLiteral, shardsToWriteStruct.GetValue(i));
- auto itemType = shardsList->GetType()->GetItemType();
- MKQL_ENSURE(itemType->IsData() && static_cast<TDataType*>(itemType)->GetSchemeType()
+ const ui64 myShardId = Settings.Host->GetShardId();
+ auto shardsToWriteNode = pgmStruct.GetValue(4);
+ MKQL_ENSURE(shardsToWriteNode.IsImmediate() && shardsToWriteNode.GetNode()->GetType()->IsStruct(),
+ "Expected immediate struct");
+ const auto& shardsToWriteStruct = static_cast<const TStructLiteral&>(*shardsToWriteNode.GetNode());
+ for (ui32 i = 0, e = shardsToWriteStruct.GetValuesCount(); i < e; ++i) {
+ TStringBuf uniqId(shardsToWriteStruct.GetType()->GetMemberName(i));
+ auto shardsList = AS_VALUE(TListLiteral, shardsToWriteStruct.GetValue(i));
+ auto itemType = shardsList->GetType()->GetItemType();
+ MKQL_ENSURE(itemType->IsData() && static_cast<TDataType*>(itemType)->GetSchemeType()
== NUdf::TDataType<ui64>::Id, "Bad shard list");
for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
ui64 shard = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
- if (shard != myShardId) {
- // Save the target shard id for the read operation
- readTargets[uniqId].insert(shard);
- }
- }
- }
-
+ if (shard != myShardId) {
+ // Save the target shard id for the read operation
+ readTargets[uniqId].insert(shard);
+ }
+ }
+ }
+
validationInfo.Clear();
EResult result;
{
@@ -930,7 +930,7 @@ public:
{
for (ui32 i = 0; i < validationInfo.ReadsCount; ++i) {
- TStringBuf uniqId(myReadsStruct->GetType()->GetMemberName(i));
+ TStringBuf uniqId(myReadsStruct->GetType()->GetMemberName(i));
TRuntimeNode item = myReadsStruct->GetValue(i);
if (!item.GetNode()->GetType()->IsCallable()) {
AddError("Validate", __LINE__, "Bad shard program");
@@ -941,23 +941,23 @@ public:
Y_VERIFY(desc);
Y_VERIFY(desc->RowOperation == TKeyDesc::ERowOperation::Read);
TValidatedKey validKey(std::move(desc), false);
-
- auto targetIt = readTargets.find(uniqId);
+
+ auto targetIt = readTargets.find(uniqId);
if (replyIds.contains(uniqId) || targetIt != readTargets.end()) {
- // Is this read result included in the reply?
+ // Is this read result included in the reply?
if (replyIds.contains(uniqId)) {
validKey.IsResultPart = true;
- }
- // Is this read result included into outgoing read sets?
- if (targetIt != readTargets.end()) {
+ }
+ // Is this read result included in outgoing read sets?
+ if (targetIt != readTargets.end()) {
// TODO: can't we move them?
for (ui64 shard : targetIt->second) {
validKey.TargetShards.insert(shard);
- validationInfo.HasOutReadsets = true;
- }
- }
- }
-
+ validationInfo.HasOutReadsets = true;
+ }
+ }
+ }
+
validationInfo.Keys.emplace_back(std::move(validKey));
}
}
@@ -974,12 +974,12 @@ public:
Y_VERIFY(desc);
Y_VERIFY(desc->RowOperation == TKeyDesc::ERowOperation::Update ||
desc->RowOperation == TKeyDesc::ERowOperation::Erase);
- if (!desc->Range.Point) {
+ if (!desc->Range.Point) {
++validationInfo.DynKeysCount;
- }
- if (!CheckValidUint8InKey(*desc)) {
- return EResult::ProgramError;
- }
+ }
+ if (!CheckValidUint8InKey(*desc)) {
+ return EResult::ProgramError;
+ }
validationInfo.Keys.emplace_back(TValidatedKey(std::move(desc), true));
}
@@ -996,23 +996,23 @@ public:
default:
return result;
}
-
- // Check if we expect incoming readsets
- auto& shardForRead = GetPgmShardsForReadStruct(pgmStruct);
- for (ui32 i = 0; i < shardForRead.GetValuesCount() && !validationInfo.HasInReadsets; ++i) {
- auto shardsList = AS_VALUE(TListLiteral, shardForRead.GetValue(i));
- auto itemType = shardsList->GetType()->GetItemType();
- MKQL_ENSURE(itemType->IsData() && static_cast<TDataType*>(itemType)->GetSchemeType()
- == NUdf::TDataType<ui64>::Id, "Bad shard list");
-
- for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
- ui64 shard = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
- if (shard != myShardId) {
- validationInfo.HasInReadsets = true;
- break;
- }
- }
- }
+
+ // Check if we expect incoming readsets
+ auto& shardForRead = GetPgmShardsForReadStruct(pgmStruct);
+ for (ui32 i = 0; i < shardForRead.GetValuesCount() && !validationInfo.HasInReadsets; ++i) {
+ auto shardsList = AS_VALUE(TListLiteral, shardForRead.GetValue(i));
+ auto itemType = shardsList->GetType()->GetItemType();
+ MKQL_ENSURE(itemType->IsData() && static_cast<TDataType*>(itemType)->GetSchemeType()
+ == NUdf::TDataType<ui64>::Id, "Bad shard list");
+
+ for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
+ ui64 shard = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
+ if (shard != myShardId) {
+ validationInfo.HasInReadsets = true;
+ break;
+ }
+ }
+ }
}
try {
@@ -1046,29 +1046,29 @@ public:
}
EResult PinPages(ui64 pageFaultCount) override {
- Y_VERIFY(ProgramPerOrigin.size() == 1, "One program must be added to engine");
- Y_VERIFY(Settings.Host, "Host is not set");
-
- if (IsCancelled) {
- // Do nothing and quickly proceed
- return EResult::Ok;
- }
-
+ Y_VERIFY(ProgramPerOrigin.size() == 1, "One program must be added to engine");
+ Y_VERIFY(Settings.Host, "Host is not set");
+
+ if (IsCancelled) {
+ // Do nothing and quickly proceed
+ return EResult::Ok;
+ }
+
TGuard<TScopedAlloc> allocGuard(Alloc);
-
+
TVector<THolder<TKeyDesc>> prechargeKeys;
- // iterate over all ProgramPerOrigin (for merged datashards)
- for (const auto& pi : ProgramPerOrigin) {
- auto pgm = pi.second;
- MKQL_ENSURE(pgm.IsImmediate() && pgm.GetStaticType()->IsStruct(), "Expected immediate struct");
- const auto& pgmStruct = static_cast<const TStructLiteral&>(*pgm.GetNode());
- MKQL_ENSURE(pgmStruct.GetValuesCount() == 5, "Expected 5 members"); // AllReads, MyKeys, Run, ShardsForRead, ShardsToWrite
-
- auto myKeys = pgmStruct.GetValue(1);
- MKQL_ENSURE(myKeys.IsImmediate() && myKeys.GetNode()->GetType()->IsStruct(), "Expected immediate struct");
- const auto& myKeysStruct = static_cast<const TStructLiteral&>(*myKeys.GetNode());
- MKQL_ENSURE(myKeysStruct.GetValuesCount() == 2, "Expected 2 members");
-
+ // iterate over all ProgramPerOrigin (for merged datashards)
+ for (const auto& pi : ProgramPerOrigin) {
+ auto pgm = pi.second;
+ MKQL_ENSURE(pgm.IsImmediate() && pgm.GetStaticType()->IsStruct(), "Expected immediate struct");
+ const auto& pgmStruct = static_cast<const TStructLiteral&>(*pgm.GetNode());
+ MKQL_ENSURE(pgmStruct.GetValuesCount() == 5, "Expected 5 members"); // AllReads, MyKeys, Run, ShardsForRead, ShardsToWrite
+
+ auto myKeys = pgmStruct.GetValue(1);
+ MKQL_ENSURE(myKeys.IsImmediate() && myKeys.GetNode()->GetType()->IsStruct(), "Expected immediate struct");
+ const auto& myKeysStruct = static_cast<const TStructLiteral&>(*myKeys.GetNode());
+ MKQL_ENSURE(myKeysStruct.GetValuesCount() == 2, "Expected 2 members");
+
// 0 - reads, 1 - writes
for (ui32 opId = 0; opId <= 1; ++opId) {
auto myOps = myKeysStruct.GetValue(opId);
@@ -1077,18 +1077,18 @@ public:
auto myOpsStruct = static_cast<TStructLiteral*>(myOps.GetNode());
for (ui32 i = 0, e = myOpsStruct->GetValuesCount(); i < e; ++i) {
TRuntimeNode item = myOpsStruct->GetValue(i);
- Y_VERIFY(item.GetNode()->GetType()->IsCallable(), "Bad shard program");
- THolder<TKeyDesc> desc = ExtractTableKey(*static_cast<TCallable*>(item.GetNode()), Strings, Env);
- Y_VERIFY(desc);
- prechargeKeys.emplace_back(std::move(desc));
- }
- }
- }
-
+ Y_VERIFY(item.GetNode()->GetType()->IsCallable(), "Bad shard program");
+ THolder<TKeyDesc> desc = ExtractTableKey(*static_cast<TCallable*>(item.GetNode()), Strings, Env);
+ Y_VERIFY(desc);
+ prechargeKeys.emplace_back(std::move(desc));
+ }
+ }
+ }
+
Settings.Host->PinPages(prechargeKeys, pageFaultCount);
- return EResult::Ok;
- }
-
+ return EResult::Ok;
+ }
+
EResult PrepareOutgoingReadsets() override {
Y_VERIFY(!AreOutgoingReadSetsPrepared, "PrepareOutgoingReadsets is already called");
Y_VERIFY(Settings.Host, "Host is not set");
@@ -1122,48 +1122,48 @@ public:
== NUdf::TDataType<ui64>::Id, "Bad shard list type.");
if (shardsList->GetItemsCount() > 0) {
- if(!IsCancelled) {
- TRuntimeNode item = myReads.GetValue(readIdx);
- MKQL_ENSURE(item.GetNode()->GetType()->IsCallable(), "Expected callable");
- auto callable = static_cast<TCallable*>(item.GetNode());
-
- NUdf::TUnboxedValue readValue;
- auto name = callable->GetType()->GetNameStr();
- if (name == Strings.SelectRow) {
- readValue = PerformLocalSelectRow(*callable, *Settings.Host, holderFactory, Env);
- }
- else if (name == Strings.SelectRange) {
- readValue = PerformLocalSelectRange(*callable, *Settings.Host, holderFactory, Env);
- }
- else {
- THROW TWithBackTrace<yexception>() << "Unknown callable: "
- << callable->GetType()->GetName();
- }
-
- ui32 readCallableId = FromString<ui32>(readName);
- MKQL_ENSURE(readCallableId == callable->GetUniqueId(),
- "Invalid struct member name:" << myReads.GetType()->GetMemberName(readIdx));
-
- auto returnType = GetActualReturnType(*callable, Env, Strings);
- TValuePacker packer(false, returnType);
- readResults.emplace_back(TString(packer.Pack(readValue)));
- const TStringBuf& readValueStr = readResults.back();
-
- for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
- ui64 shardId = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
- if (shardId != myShardId) {
- auto& results = resultsPerTarget[shardId];
- results.AddResult(callable->GetUniqueId(), readValueStr, Env);
- }
+ if(!IsCancelled) {
+ TRuntimeNode item = myReads.GetValue(readIdx);
+ MKQL_ENSURE(item.GetNode()->GetType()->IsCallable(), "Expected callable");
+ auto callable = static_cast<TCallable*>(item.GetNode());
+
+ NUdf::TUnboxedValue readValue;
+ auto name = callable->GetType()->GetNameStr();
+ if (name == Strings.SelectRow) {
+ readValue = PerformLocalSelectRow(*callable, *Settings.Host, holderFactory, Env);
+ }
+ else if (name == Strings.SelectRange) {
+ readValue = PerformLocalSelectRange(*callable, *Settings.Host, holderFactory, Env);
+ }
+ else {
+ THROW TWithBackTrace<yexception>() << "Unknown callable: "
+ << callable->GetType()->GetName();
+ }
+
+ ui32 readCallableId = FromString<ui32>(readName);
+ MKQL_ENSURE(readCallableId == callable->GetUniqueId(),
+ "Invalid struct member name:" << myReads.GetType()->GetMemberName(readIdx));
+
+ auto returnType = GetActualReturnType(*callable, Env, Strings);
+ TValuePacker packer(false, returnType);
+ readResults.emplace_back(TString(packer.Pack(readValue)));
+ const TStringBuf& readValueStr = readResults.back();
+
+ for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
+ ui64 shardId = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
+ if (shardId != myShardId) {
+ auto& results = resultsPerTarget[shardId];
+ results.AddResult(callable->GetUniqueId(), readValueStr, Env);
+ }
+ }
+ } else {
+ for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
+ ui64 shardId = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
+ if (shardId != myShardId) {
+ // TODO: Pass a special 'Tx was Cancelled' flag in the readset so that the target shard can cancel this Tx as well
+ resultsPerTarget[shardId];
+ }
}
- } else {
- for (ui32 shardIndex = 0; shardIndex < shardsList->GetItemsCount(); ++shardIndex) {
- ui64 shardId = AS_VALUE(TDataLiteral, shardsList->GetItems()[shardIndex])->AsValue().Get<ui64>();
- if (shardId != myShardId) {
- // TODO: Pass special 'Tx was Cancelled' flag in the readset so that target shard can cancel this Tx as well
- resultsPerTarget[shardId];
- }
- }
}
}
@@ -1279,23 +1279,23 @@ public:
IncomingReadsets.push_back(TString(readset));
}
- EResult Cancel() override {
- IsCancelled = true;
- Errors = "Tx was cancelled";
- return EResult::Ok;
- }
-
+ EResult Cancel() override {
+ IsCancelled = true;
+ Errors = "Tx was cancelled";
+ return EResult::Ok;
+ }
+
EResult Execute() override {
Y_VERIFY(!IsExecuted, "Execute is already called");
Y_VERIFY(Settings.Host, "Host is not set");
TGuard<TScopedAlloc> allocGuard(Alloc);
- if (IsCancelled) {
- IsExecuted = true;
- Status = EStatus::Error;
- return EResult::Cancelled;
- }
-
+ if (IsCancelled) {
+ IsExecuted = true;
+ Status = EStatus::Error;
+ return EResult::Cancelled;
+ }
+
if (Status == EStatus::Error) {
IsExecuted = true;
return EResult::ProgramError;
@@ -1642,7 +1642,7 @@ private:
TRuntimeNode specializedProgram = TRuntimeNode(builder.Build(), true);
auto lpoProvider = GetLiteralPropagationOptimizationFuncProvider();
- auto funcProvider = [&](TInternName name) {
+ auto funcProvider = [&](TInternName name) {
auto lpoFunc = lpoProvider(name);
if (lpoFunc)
return lpoFunc;
@@ -1879,7 +1879,7 @@ private:
// Walk through Member callable, required as SelectRange returns a struct
if (input->GetType()->GetNameStr() == Strings.Builtins.Member) {
- input = getCallableForPushdown(input->GetInput(0), TInternName());
+ input = getCallableForPushdown(input->GetInput(0), TInternName());
if (!input) {
return;
}
@@ -1917,7 +1917,7 @@ private:
name == Strings.Builtins.FlatMap)
{
// Push computation of map callables down to datashards
- auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
+ auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
if (!input) {
continue;
}
@@ -1953,7 +1953,7 @@ private:
continue;
}
- auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
+ auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
if (!input) {
continue;
}
@@ -1985,7 +1985,7 @@ private:
continue;
}
- auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
+ auto input = getCallableForPushdown(callable->GetInput(0), TInternName());
if (!input) {
continue;
}
@@ -1994,7 +1994,7 @@ private:
}
}
- auto secondPass = [&](TInternName name) {
+ auto secondPass = [&](TInternName name) {
if (name == Strings.CombineByKeyMerge) {
return TCallableVisitFunc([this](TCallable& callable, const TTypeEnvironment& env) {
Y_UNUSED(env);
@@ -2062,7 +2062,7 @@ private:
};
auto lpoProvider = GetLiteralPropagationOptimizationFuncProvider();
- auto funcProvider = [&](TInternName name) {
+ auto funcProvider = [&](TInternName name) {
auto lpoFunc = lpoProvider(name);
if (lpoFunc)
return lpoFunc;
@@ -2127,7 +2127,7 @@ private:
THolder<IComputationGraph> ResultGraph;
THashMap<TString, NUdf::TUnboxedValue> ResultValues;
bool ReadOnlyOriginPrograms;
- bool IsCancelled;
+ bool IsCancelled;
};
}
@@ -2139,14 +2139,14 @@ TAutoPtr<IEngineFlat> CreateEngineFlat(const TEngineFlatSettings& settings) {
} // namespace NMiniKQL
} // namespace NKikimr
-
-template<>
+
+template<>
void Out<NKikimr::NMiniKQL::IEngineFlat::EStatus>(IOutputStream& o, NKikimr::NMiniKQL::IEngineFlat::EStatus status) {
- using namespace NKikimr::NMiniKQL;
- switch (status) {
- case IEngineFlat::EStatus::Unknown: o << "Unknown"; break;
- case IEngineFlat::EStatus::Error: o << "Error"; break;
- case IEngineFlat::EStatus::Complete: o << "Complete"; break;
- case IEngineFlat::EStatus::Aborted: o << "Aborted"; break;
- }
-}
+ using namespace NKikimr::NMiniKQL;
+ switch (status) {
+ case IEngineFlat::EStatus::Unknown: o << "Unknown"; break;
+ case IEngineFlat::EStatus::Error: o << "Error"; break;
+ case IEngineFlat::EStatus::Complete: o << "Complete"; break;
+ case IEngineFlat::EStatus::Aborted: o << "Aborted"; break;
+ }
+}
diff --git a/ydb/core/engine/mkql_engine_flat.h b/ydb/core/engine/mkql_engine_flat.h
index 40664a739ec..7bee67584b9 100644
--- a/ydb/core/engine/mkql_engine_flat.h
+++ b/ydb/core/engine/mkql_engine_flat.h
@@ -157,18 +157,18 @@ public:
bool NeedSizeCalculation() const { return !IsWrite && (IsResultPart || TargetShards); }
};
- struct TValidationInfo {
+ struct TValidationInfo {
TVector<TValidatedKey> Keys;
ui32 ReadsCount;
ui32 WritesCount;
ui32 DynKeysCount;
- bool HasOutReadsets;
- bool HasInReadsets;
+ bool HasOutReadsets;
+ bool HasInReadsets;
bool Loaded;
- TValidationInfo() {
- Clear();
- }
+ TValidationInfo() {
+ Clear();
+ }
TValidationInfo(TValidationInfo&&) = default;
TValidationInfo(const TValidationInfo&) = delete;
@@ -181,12 +181,12 @@ public:
ReadsCount = 0;
WritesCount = 0;
DynKeysCount = 0;
- HasOutReadsets = false;
- HasInReadsets = false;
+ HasOutReadsets = false;
+ HasInReadsets = false;
Loaded = false;
}
- };
-
+ };
+
//-- error reporting
virtual TString GetErrors() const noexcept = 0;
@@ -214,7 +214,7 @@ public:
//-- datashard interface
virtual EResult AddProgram(ui64 origin, const TStringBuf& program, bool readOnly = false) noexcept = 0;
virtual EResult ValidateKeys(TValidationInfo& validationInfo) = 0;
- virtual EResult Validate(TValidationInfo& validationInfo) = 0;
+ virtual EResult Validate(TValidationInfo& validationInfo) = 0;
virtual EResult PrepareOutgoingReadsets() = 0;
virtual ui32 GetOutgoingReadsetsCount() const noexcept = 0;
@@ -227,7 +227,7 @@ public:
virtual ui64 GetExpectedIncomingReadsetOriginShard(ui32 index) const noexcept = 0;
virtual void AddIncomingReadset(const TStringBuf& readset) noexcept = 0;
- virtual EResult Cancel() = 0;
+ virtual EResult Cancel() = 0;
virtual EResult PinPages(ui64 pageFaultCount = 0) = 0;
virtual EResult Execute() = 0;
virtual TString GetShardReply(ui64 origin) const noexcept = 0;
@@ -255,7 +255,7 @@ namespace NMiniKQL {
IRandomProvider& RandomProvider;
ITimeProvider& TimeProvider;
IEngineFlatHost* Host;
- TAlignedPagePoolCounters AllocCounters;
+ TAlignedPagePoolCounters AllocCounters;
std::function<void(const char* operation, ui32 line, const TBackTrace*)> BacktraceWriter;
std::function<void(const TString& message)> LogErrorWriter;
bool ForceOnline;
@@ -268,15 +268,15 @@ namespace NMiniKQL {
const IFunctionRegistry* functionRegistry,
IRandomProvider& randomProvider,
ITimeProvider& timeProvider,
- IEngineFlatHost* host = nullptr,
- const TAlignedPagePoolCounters& allocCounters = TAlignedPagePoolCounters()
- )
+ IEngineFlatHost* host = nullptr,
+ const TAlignedPagePoolCounters& allocCounters = TAlignedPagePoolCounters()
+ )
: Protocol(protocol)
, FunctionRegistry(functionRegistry)
, RandomProvider(randomProvider)
, TimeProvider(timeProvider)
, Host(host)
- , AllocCounters(allocCounters)
+ , AllocCounters(allocCounters)
, ForceOnline(false)
{
Y_VERIFY(FunctionRegistry);
diff --git a/ydb/core/engine/mkql_engine_flat_extfunc.cpp b/ydb/core/engine/mkql_engine_flat_extfunc.cpp
index 90faad3f12c..414d3aeeb45 100644
--- a/ydb/core/engine/mkql_engine_flat_extfunc.cpp
+++ b/ydb/core/engine/mkql_engine_flat_extfunc.cpp
@@ -694,11 +694,11 @@ namespace {
resultLists.emplace_back(std::move(list));
totalSize += size;
totalItems += items;
-
- if (bytesLimit && bytesLimit < totalSize) {
+
+ if (bytesLimit && bytesLimit < totalSize) {
resultTruncated = true;
- break;
- }
+ break;
+ }
if (truncated) {
if (!bytesLimit) {
diff --git a/ydb/core/engine/mkql_engine_flat_host.h b/ydb/core/engine/mkql_engine_flat_host.h
index 71df4832495..0593fb3a9b8 100644
--- a/ydb/core/engine/mkql_engine_flat_host.h
+++ b/ydb/core/engine/mkql_engine_flat_host.h
@@ -32,15 +32,15 @@ public:
// Validate key and fill status into it.
virtual bool IsValidKey(TKeyDesc& key, std::pair<ui64, ui64>& maxSnapshotTime) const = 0;
- // Calculate the whole size of data that needs to be read into memory
+ // Calculate the whole size of data that needs to be read into memory
virtual ui64 CalculateReadSize(const TVector<const TKeyDesc*>& keys) const = 0;
- // Exstimate size of the merged result of reading the data
- virtual ui64 CalculateResultSize(const TKeyDesc& key) const = 0;
-
- // At Tx execution make sure that all pages are loaded
+ // Estimate the size of the merged result of reading the data
+ virtual ui64 CalculateResultSize(const TKeyDesc& key) const = 0;
+
+ // At Tx execution make sure that all pages are loaded
virtual void PinPages(const TVector<THolder<TKeyDesc>>& keys, ui64 pageFaultCount = 0) = 0;
-
+
// Returns empty optional with type 'returnType' or the filled one.
virtual NUdf::TUnboxedValue SelectRow(const TTableId& tableId, const TArrayRef<const TCell>& row,
TStructLiteral* columnIds, TOptionalType* returnType, const TReadTarget& readTarget,
diff --git a/ydb/core/engine/mkql_engine_flat_host_ut.cpp b/ydb/core/engine/mkql_engine_flat_host_ut.cpp
index 58ec1cc8319..68679f168df 100644
--- a/ydb/core/engine/mkql_engine_flat_host_ut.cpp
+++ b/ydb/core/engine/mkql_engine_flat_host_ut.cpp
@@ -41,7 +41,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLEngineFlatHostTest) {
Y_UNIT_TEST(ShardId) {
NTable::TDatabase DB;
- TEngineHostCounters hostCounters;
+ TEngineHostCounters hostCounters;
TUnversionedEngineHost host(DB, hostCounters, TEngineHostSettings(100));
UNIT_ASSERT_VALUES_EQUAL(host.GetShardId(), 100);
}
@@ -71,7 +71,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLEngineFlatHostTest) {
{ // Execute some minikql
NTable::TDummyEnv env;
DB.Begin(3, env);
- TEngineHostCounters hostCounters;
+ TEngineHostCounters hostCounters;
TUnversionedEngineHost host(DB, hostCounters);
// TODO: ... MINIKQL ...
diff --git a/ydb/core/engine/mkql_engine_flat_impl.h b/ydb/core/engine/mkql_engine_flat_impl.h
index 27fa96d17fb..bb232a52aae 100644
--- a/ydb/core/engine/mkql_engine_flat_impl.h
+++ b/ydb/core/engine/mkql_engine_flat_impl.h
@@ -20,7 +20,7 @@ namespace NMiniKQL {
, Length(env.InternName(TStringBuf("Length")))
, Arg(env.InternName(TStringBuf("Arg")))
{
- All.reserve(20);
+ All.reserve(20);
All.insert(Filter);
All.insert(FilterNullMembers);
All.insert(SkipNullMembers);
@@ -34,16 +34,16 @@ namespace NMiniKQL {
All.insert(Arg);
}
- const TInternName Filter;
- const TInternName FilterNullMembers;
+ const TInternName Filter;
+ const TInternName FilterNullMembers;
const TInternName SkipNullMembers;
- const TInternName FlatMap;
- const TInternName Map;
- const TInternName Member;
- const TInternName ToHashedDict;
- const TInternName DictItems;
- const TInternName Take;
- const TInternName Length;
+ const TInternName FlatMap;
+ const TInternName Map;
+ const TInternName Member;
+ const TInternName ToHashedDict;
+ const TInternName DictItems;
+ const TInternName Take;
+ const TInternName Length;
const TInternName Arg;
THashSet<TInternName> All;
@@ -72,11 +72,11 @@ namespace NMiniKQL {
TBuiltinStrings Builtins;
- const TInternName SetResult;
- const TInternName Abort;
- const TInternName StepTxId;
- const TInternName AcquireLocks;
- const TInternName CombineByKeyMerge;
+ const TInternName SetResult;
+ const TInternName Abort;
+ const TInternName StepTxId;
+ const TInternName AcquireLocks;
+ const TInternName CombineByKeyMerge;
const TInternName Diagnostics;
const TInternName PartialTake;
const TInternName PartialSort;
diff --git a/ydb/core/engine/mkql_engine_flat_ut.cpp b/ydb/core/engine/mkql_engine_flat_ut.cpp
index 3d2f7056d97..e2b12e2d7a5 100644
--- a/ydb/core/engine/mkql_engine_flat_ut.cpp
+++ b/ydb/core/engine/mkql_engine_flat_ut.cpp
@@ -175,13 +175,13 @@ namespace {
proxyEngine->AfterShardProgramsExtracted();
- THashMap<ui64, TAutoPtr<TEngineHostCounters>> hostCounters;
+ THashMap<ui64, TAutoPtr<TEngineHostCounters>> hostCounters;
THashMap<ui64, TAutoPtr<IEngineFlatHost>> hosts;
for (const auto& shardPgm : shardPrograms) {
- auto& counters = hostCounters[shardPgm.first];
- counters.Reset(new TEngineHostCounters());
+ auto& counters = hostCounters[shardPgm.first];
+ counters.Reset(new TEngineHostCounters());
hosts[shardPgm.first].Reset(new TUnversionedEngineHost(
- *ShardDbState.Dbs[shardPgm.first], *counters, TEngineHostSettings(shardPgm.first, false)));
+ *ShardDbState.Dbs[shardPgm.first], *counters, TEngineHostSettings(shardPgm.first, false)));
}
for (const auto& shardPgm : shardPrograms) {
@@ -189,14 +189,14 @@ namespace {
auto dataEngine = CreateEngineFlat(TEngineFlatSettings(IEngineFlat::EProtocol::V1, FunctionRegistry.Get(),
*RandomProvider, *TimeProvider, hosts[shardPgm.first].Get()));
UNIT_ASSERT(dataEngine->AddProgram(shardPgm.first, shardPgm.second) == IEngineFlat::EResult::Ok);
- IEngineFlat::TValidationInfo validationInfo;
- IEngineFlat::EResult result = dataEngine->Validate(validationInfo);
+ IEngineFlat::TValidationInfo validationInfo;
+ IEngineFlat::EResult result = dataEngine->Validate(validationInfo);
if (result != IEngineFlat::EResult::Ok) {
Cerr << dataEngine->GetErrors() << Endl;
return IEngineFlat::EStatus::Error;
}
- UNIT_ASSERT(dataEngine->PinPages() == IEngineFlat::EResult::Ok);
+ UNIT_ASSERT(dataEngine->PinPages() == IEngineFlat::EResult::Ok);
ShardDbState.CommitTransaction(shardPgm.first);
}
@@ -255,7 +255,7 @@ namespace {
}
}
- dataEngine->PinPages();
+ dataEngine->PinPages();
auto result = dataEngine->Execute();
if (result != IEngineFlat::EResult::Ok) {
Cerr << dataEngine->GetErrors() << Endl;
@@ -3670,22 +3670,22 @@ Value {
}
{
- driver.ShardDbState.BeginTransaction(Shard1);
- NIceDb::TNiceDb db(*driver.ShardDbState.Dbs[Shard1]);
+ driver.ShardDbState.BeginTransaction(Shard1);
+ NIceDb::TNiceDb db(*driver.ShardDbState.Dbs[Shard1]);
db.Table<Schema1::Table1>()
.Key(ui32(43))
.Update(NIceDb::TUpdate<Schema1::Table1::Value>("qwe"));
- driver.ShardDbState.CommitTransaction(Shard1);
- }
-
- {
- driver.ShardDbState.BeginTransaction(Shard2);
- NIceDb::TNiceDb db(*driver.ShardDbState.Dbs[Shard2]);
- db.Table<Schema1::Table1>()
- .Key(ui32(44))
- .Update(NIceDb::TUpdate<Schema1::Table1::Value>("zxc"));
-
+ driver.ShardDbState.CommitTransaction(Shard1);
+ }
+
+ {
+ driver.ShardDbState.BeginTransaction(Shard2);
+ NIceDb::TNiceDb db(*driver.ShardDbState.Dbs[Shard2]);
+ db.Table<Schema1::Table1>()
+ .Key(ui32(44))
+ .Update(NIceDb::TUpdate<Schema1::Table1::Value>("zxc"));
+
driver.ShardDbState.CommitTransaction(Shard2);
}
@@ -3700,7 +3700,7 @@ Value {
rowFrom[0] = pgmBuilder.NewEmptyOptionalDataLiteral(NUdf::TDataType<ui32>::Id);
options.FromColumns = rowFrom;
options.Flags = pgmBuilder.TProgramBuilder::NewDataLiteral<ui32>(TReadRangeOptions::TFlags::ExcludeTermValue);
- const ui64 RowOverheadBytes = 8;
+ const ui64 RowOverheadBytes = 8;
options.BytesLimit = pgmBuilder.TProgramBuilder::NewDataLiteral<ui64>(4 + RowOverheadBytes);
auto value = pgmBuilder.SelectRange(TTableId(OwnerId, Table1Id), keyTypes, columns, options);
auto pgm = pgmBuilder.Build(pgmBuilder.AsList(pgmBuilder.SetResult("myRes", value)));
@@ -3773,13 +3773,13 @@ Value {
}
}
}
- List {
- Struct {
- Optional {
- Text: "qwe"
- }
- }
- }
+ List {
+ Struct {
+ Optional {
+ Text: "qwe"
+ }
+ }
+ }
}
Struct {
Bool: true
diff --git a/ydb/core/engine/mkql_keys.cpp b/ydb/core/engine/mkql_keys.cpp
index f6bf5ca33a5..05afb89adc2 100644
--- a/ydb/core/engine/mkql_keys.cpp
+++ b/ydb/core/engine/mkql_keys.cpp
@@ -78,8 +78,8 @@ THolder<TKeyDesc> ExtractKeyTuple(const TTableId& tableId, TTupleLiteral* tuple,
fromValues[i] = toValues[i] = MakeCell(keyColumnTypes[i], data, env);
}
- TTableRange range(TConstArrayRef<TCell>(fromValues.data(), tuple->GetValuesCount()),
- inclusiveFrom, TConstArrayRef<TCell>(toValues.data(), staticComponents), inclusiveTo, point);
+ TTableRange range(TConstArrayRef<TCell>(fromValues.data(), tuple->GetValuesCount()),
+ inclusiveFrom, TConstArrayRef<TCell>(toValues.data(), staticComponents), inclusiveTo, point);
return MakeHolder<TKeyDesc>(tableId, range, rowOperation, keyColumnTypes, columns);
}
@@ -166,8 +166,8 @@ THolder<TKeyDesc> ExtractSelectRange(TCallable& callable, const TTypeEnvironment
reverse = AS_VALUE(TDataLiteral, callable.GetInput(10))->AsValue().Get<bool>();
}
- TTableRange range(TConstArrayRef<TCell>(fromValues.data(), fromValues.size()),
- inclusiveFrom, TConstArrayRef<TCell>(toValues.data(), toValues.size()), inclusiveTo, point);
+ TTableRange range(TConstArrayRef<TCell>(fromValues.data(), fromValues.size()),
+ inclusiveFrom, TConstArrayRef<TCell>(toValues.data(), toValues.size()), inclusiveTo, point);
THolder<TKeyDesc> desc(
new TKeyDesc(tableId, range, TKeyDesc::ERowOperation::Read, keyColumnTypes, columns, itemsLimit, bytesLimit, reverse));
desc->ReadTarget = ExtractFlatReadTarget(callable.GetInput(8));
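
Both hunks above only re-flow whitespace around the TTableRange construction; the constructor takes the "from" cells, an inclusive-from flag, the "to" cells, an inclusive-to flag, and a "point" marker. A hedged standalone sketch of a point range over a single ui32 key, mirroring ExtractKeyTuple (ExamplePointRange is an illustrative name, and the headers are assumed to be the same ones mkql_keys.cpp uses):

    #include <ydb/core/engine/mkql_keys.h>

    void ExamplePointRange() {
        // As in ExtractKeyTuple above, a point lookup uses the same cells for 'from' and 'to'.
        ui32 keyValue = 42;
        TCell cell((const char*)&keyValue, sizeof(keyValue));
        TVector<TCell> fromValues = {cell};
        TVector<TCell> toValues = {cell};

        TTableRange range(TConstArrayRef<TCell>(fromValues.data(), fromValues.size()),
                          /*inclusiveFrom=*/true,
                          TConstArrayRef<TCell>(toValues.data(), toValues.size()),
                          /*inclusiveTo=*/true,
                          /*point=*/true);
        // TCell and TConstArrayRef do not own memory: 'range' is only valid while
        // 'keyValue' and the vectors above are alive.
        Y_UNUSED(range);
    }
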
diff --git a/ydb/core/engine/mkql_keys.h b/ydb/core/engine/mkql_keys.h
index e5db43efc4b..635ad3ce147 100644
--- a/ydb/core/engine/mkql_keys.h
+++ b/ydb/core/engine/mkql_keys.h
@@ -22,7 +22,7 @@ struct TTableStrings {
, UpdateRow(env.InternName(TStringBuf("UpdateRow")))
, EraseRow(env.InternName(TStringBuf("EraseRow")))
{
- All.reserve(10);
+ All.reserve(10);
All.insert(SelectRow);
All.insert(SelectRange);
All.insert(UpdateRow);
@@ -32,10 +32,10 @@ struct TTableStrings {
DbWrites.insert(EraseRow);
}
- const TInternName SelectRow;
- const TInternName SelectRange;
- const TInternName UpdateRow;
- const TInternName EraseRow;
+ const TInternName SelectRow;
+ const TInternName SelectRange;
+ const TInternName UpdateRow;
+ const TInternName EraseRow;
THashSet<TInternName> All;
THashSet<TInternName> DbWrites;
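
TTableStrings above interns the callable names once and then classifies callables cheaply by set membership (All vs DbWrites). A minimal sketch of the same pattern in plain std C++; TCallableClassifier is an illustrative name, and the real code relies on TInternName and the MiniKQL type environment instead of std::string:

    #include <string>
    #include <unordered_set>

    class TCallableClassifier {
    public:
        TCallableClassifier() {
            static const char* kAll[]    = {"SelectRow", "SelectRange", "UpdateRow", "EraseRow"};
            static const char* kWrites[] = {"UpdateRow", "EraseRow"};
            All.reserve(4);
            for (const char* name : kAll)    All.insert(name);
            for (const char* name : kWrites) DbWrites.insert(name);
        }

        bool IsTableCallable(const std::string& name) const { return All.count(name) > 0; }
        bool IsDbWrite(const std::string& name) const { return DbWrites.count(name) > 0; }

    private:
        std::unordered_set<std::string> All;      // every table-related callable
        std::unordered_set<std::string> DbWrites; // the subset that mutates data
    };
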
diff --git a/ydb/core/engine/mkql_proto.cpp b/ydb/core/engine/mkql_proto.cpp
index 4c400b038ce..3dd053e3724 100644
--- a/ydb/core/engine/mkql_proto.cpp
+++ b/ydb/core/engine/mkql_proto.cpp
@@ -180,216 +180,216 @@ public:
{}
};
-// NOTE: TCells can reference memory from tupleValue
-bool CellsFromTuple(const NKikimrMiniKQL::TType* tupleType,
- const NKikimrMiniKQL::TValue& tupleValue,
- const TConstArrayRef<NScheme::TTypeId>& types,
- bool allowCastFromString,
+// NOTE: TCells can reference memory from tupleValue
+bool CellsFromTuple(const NKikimrMiniKQL::TType* tupleType,
+ const NKikimrMiniKQL::TValue& tupleValue,
+ const TConstArrayRef<NScheme::TTypeId>& types,
+ bool allowCastFromString,
TVector<TCell>& key,
- TString& errStr)
-{
-
-#define CHECK_OR_RETURN_ERROR(cond, descr) \
- if (!(cond)) { \
- errStr = descr; \
- return false; \
- }
-
- if (tupleType) {
- CHECK_OR_RETURN_ERROR(tupleType->GetKind() == NKikimrMiniKQL::Tuple ||
- (tupleType->GetKind() == NKikimrMiniKQL::Unknown && tupleType->GetTuple().ElementSize() == 0), "Must be a tuple");
- CHECK_OR_RETURN_ERROR(tupleType->GetTuple().ElementSize() <= types.size(),
- "Tuple size " + ToString(tupleType->GetTuple().ElementSize()) + " is greater that expected size " + ToString(types.size()));
-
- for (size_t i = 0; i < tupleType->GetTuple().ElementSize(); ++i) {
- const auto& ti = tupleType->GetTuple().GetElement(i);
- CHECK_OR_RETURN_ERROR(ti.GetKind() == NKikimrMiniKQL::Optional, "Element at index " + ToString(i) + " in not an Optional");
- const auto& item = ti.GetOptional().GetItem();
- CHECK_OR_RETURN_ERROR(item.GetKind() == NKikimrMiniKQL::Data, "Element at index " + ToString(i) + " Item kind is not Data");
- const auto& typeId = item.GetData().GetScheme();
- CHECK_OR_RETURN_ERROR(typeId == types[i] ||
- allowCastFromString && (typeId == NScheme::NTypeIds::Utf8),
- "Element at index " + ToString(i) + " has type " + ToString(typeId) + " but expected type is " + ToString(types[i]));
- }
-
- CHECK_OR_RETURN_ERROR(tupleType->GetTuple().ElementSize() == tupleValue.TupleSize(),
- Sprintf("Tuple value length %" PRISZT " doesn't match the length in type %" PRISZT, tupleValue.TupleSize(), tupleType->GetTuple().ElementSize()));
- } else {
- CHECK_OR_RETURN_ERROR(types.size() >= tupleValue.TupleSize(),
- Sprintf("Tuple length %" PRISZT " is greater than key column count %" PRISZT, tupleValue.TupleSize(), types.size()));
- }
-
- for (ui32 i = 0; i < tupleValue.TupleSize(); ++i) {
- auto& o = tupleValue.GetTuple(i);
-
- auto element_case = o.value_value_case();
-
- CHECK_OR_RETURN_ERROR(element_case == NKikimrMiniKQL::TValue::kOptional ||
- element_case == NKikimrMiniKQL::TValue::VALUE_VALUE_NOT_SET,
- Sprintf("Optional type is expected in tuple at position %" PRIu32, i));
-
- CHECK_OR_RETURN_ERROR(o.ListSize() == 0 &&
- o.StructSize() == 0 &&
- o.TupleSize() == 0 &&
- o.DictSize() == 0,
- Sprintf("Optional type is expected in tuple at position %" PRIu32, i));
-
- if (!o.HasOptional()) {
- key.push_back(TCell());
- continue;
- }
-
- auto& v = o.GetOptional();
-
- auto value_case = v.value_value_case();
-
- CHECK_OR_RETURN_ERROR(value_case != NKikimrMiniKQL::TValue::kOptional &&
- value_case != NKikimrMiniKQL::TValue::VALUE_VALUE_NOT_SET,
- Sprintf("Data must be present at position %" PRIu32, i));
-
- CHECK_OR_RETURN_ERROR(v.ListSize() == 0 &&
- v.StructSize() == 0 &&
- v.TupleSize() == 0 &&
- v.DictSize() == 0,
- Sprintf("Simple type is expected in tuple at position %" PRIu32, i));
-
- TCell c;
- switch (types[i]) {
-
-#define CASE_SIMPLE_TYPE(name, type, protoField) \
- case NScheme::NTypeIds::name: \
- { \
- bool valuePresent = v.Has##protoField(); \
- if (valuePresent) { \
- type val = v.Get##protoField(); \
- c = TCell((const char*)&val, sizeof(val)); \
- } else if (allowCastFromString && v.HasText()) { \
+ TString& errStr)
+{
+
+#define CHECK_OR_RETURN_ERROR(cond, descr) \
+ if (!(cond)) { \
+ errStr = descr; \
+ return false; \
+ }
+
+ if (tupleType) {
+ CHECK_OR_RETURN_ERROR(tupleType->GetKind() == NKikimrMiniKQL::Tuple ||
+ (tupleType->GetKind() == NKikimrMiniKQL::Unknown && tupleType->GetTuple().ElementSize() == 0), "Must be a tuple");
+ CHECK_OR_RETURN_ERROR(tupleType->GetTuple().ElementSize() <= types.size(),
+ "Tuple size " + ToString(tupleType->GetTuple().ElementSize()) + " is greater that expected size " + ToString(types.size()));
+
+ for (size_t i = 0; i < tupleType->GetTuple().ElementSize(); ++i) {
+ const auto& ti = tupleType->GetTuple().GetElement(i);
+ CHECK_OR_RETURN_ERROR(ti.GetKind() == NKikimrMiniKQL::Optional, "Element at index " + ToString(i) + " in not an Optional");
+ const auto& item = ti.GetOptional().GetItem();
+ CHECK_OR_RETURN_ERROR(item.GetKind() == NKikimrMiniKQL::Data, "Element at index " + ToString(i) + " Item kind is not Data");
+ const auto& typeId = item.GetData().GetScheme();
+ CHECK_OR_RETURN_ERROR(typeId == types[i] ||
+ allowCastFromString && (typeId == NScheme::NTypeIds::Utf8),
+ "Element at index " + ToString(i) + " has type " + ToString(typeId) + " but expected type is " + ToString(types[i]));
+ }
+
+ CHECK_OR_RETURN_ERROR(tupleType->GetTuple().ElementSize() == tupleValue.TupleSize(),
+ Sprintf("Tuple value length %" PRISZT " doesn't match the length in type %" PRISZT, tupleValue.TupleSize(), tupleType->GetTuple().ElementSize()));
+ } else {
+ CHECK_OR_RETURN_ERROR(types.size() >= tupleValue.TupleSize(),
+ Sprintf("Tuple length %" PRISZT " is greater than key column count %" PRISZT, tupleValue.TupleSize(), types.size()));
+ }
+
+ for (ui32 i = 0; i < tupleValue.TupleSize(); ++i) {
+ auto& o = tupleValue.GetTuple(i);
+
+ auto element_case = o.value_value_case();
+
+ CHECK_OR_RETURN_ERROR(element_case == NKikimrMiniKQL::TValue::kOptional ||
+ element_case == NKikimrMiniKQL::TValue::VALUE_VALUE_NOT_SET,
+ Sprintf("Optional type is expected in tuple at position %" PRIu32, i));
+
+ CHECK_OR_RETURN_ERROR(o.ListSize() == 0 &&
+ o.StructSize() == 0 &&
+ o.TupleSize() == 0 &&
+ o.DictSize() == 0,
+ Sprintf("Optional type is expected in tuple at position %" PRIu32, i));
+
+ if (!o.HasOptional()) {
+ key.push_back(TCell());
+ continue;
+ }
+
+ auto& v = o.GetOptional();
+
+ auto value_case = v.value_value_case();
+
+ CHECK_OR_RETURN_ERROR(value_case != NKikimrMiniKQL::TValue::kOptional &&
+ value_case != NKikimrMiniKQL::TValue::VALUE_VALUE_NOT_SET,
+ Sprintf("Data must be present at position %" PRIu32, i));
+
+ CHECK_OR_RETURN_ERROR(v.ListSize() == 0 &&
+ v.StructSize() == 0 &&
+ v.TupleSize() == 0 &&
+ v.DictSize() == 0,
+ Sprintf("Simple type is expected in tuple at position %" PRIu32, i));
+
+ TCell c;
+ switch (types[i]) {
+
+#define CASE_SIMPLE_TYPE(name, type, protoField) \
+ case NScheme::NTypeIds::name: \
+ { \
+ bool valuePresent = v.Has##protoField(); \
+ if (valuePresent) { \
+ type val = v.Get##protoField(); \
+ c = TCell((const char*)&val, sizeof(val)); \
+ } else if (allowCastFromString && v.HasText()) { \
const auto slot = NUdf::GetDataSlot(types[i]); \
const auto out = NMiniKQL::ValueFromString(slot, v.GetText()); \
CHECK_OR_RETURN_ERROR(out, Sprintf("Cannot parse value of type " #name " from text '%s' in tuple at position %" PRIu32, v.GetText().data(), i)); \
const auto val = out.Get<type>(); \
- c = TCell((const char*)&val, sizeof(val)); \
- } else { \
- CHECK_OR_RETURN_ERROR(false, Sprintf("Value of type " #name " expected in tuple at position %" PRIu32, i)); \
- } \
- Y_VERIFY(c.IsInline()); \
- break; \
- }
-
- CASE_SIMPLE_TYPE(Bool, bool, Bool);
- CASE_SIMPLE_TYPE(Int8, i8, Int32);
- CASE_SIMPLE_TYPE(Uint8, ui8, Uint32);
- CASE_SIMPLE_TYPE(Int16, i16, Int32);
- CASE_SIMPLE_TYPE(Uint16, ui16, Uint32);
- CASE_SIMPLE_TYPE(Int32, i32, Int32);
- CASE_SIMPLE_TYPE(Uint32, ui32, Uint32);
- CASE_SIMPLE_TYPE(Int64, i64, Int64);
- CASE_SIMPLE_TYPE(Uint64, ui64, Uint64);
- CASE_SIMPLE_TYPE(Float, float, Float);
- CASE_SIMPLE_TYPE(Double, double,Double);
+ c = TCell((const char*)&val, sizeof(val)); \
+ } else { \
+ CHECK_OR_RETURN_ERROR(false, Sprintf("Value of type " #name " expected in tuple at position %" PRIu32, i)); \
+ } \
+ Y_VERIFY(c.IsInline()); \
+ break; \
+ }
+
+ CASE_SIMPLE_TYPE(Bool, bool, Bool);
+ CASE_SIMPLE_TYPE(Int8, i8, Int32);
+ CASE_SIMPLE_TYPE(Uint8, ui8, Uint32);
+ CASE_SIMPLE_TYPE(Int16, i16, Int32);
+ CASE_SIMPLE_TYPE(Uint16, ui16, Uint32);
+ CASE_SIMPLE_TYPE(Int32, i32, Int32);
+ CASE_SIMPLE_TYPE(Uint32, ui32, Uint32);
+ CASE_SIMPLE_TYPE(Int64, i64, Int64);
+ CASE_SIMPLE_TYPE(Uint64, ui64, Uint64);
+ CASE_SIMPLE_TYPE(Float, float, Float);
+ CASE_SIMPLE_TYPE(Double, double,Double);
CASE_SIMPLE_TYPE(Date, ui16, Uint32);
CASE_SIMPLE_TYPE(Datetime, ui32, Uint32);
CASE_SIMPLE_TYPE(Timestamp, ui64, Uint64);
CASE_SIMPLE_TYPE(Interval, i64, Int64);
-
-
-#undef CASE_SIMPLE_TYPE
-
- case NScheme::NTypeIds::Yson:
- case NScheme::NTypeIds::Json:
- case NScheme::NTypeIds::Utf8:
- {
- c = TCell(v.GetText().data(), v.GetText().size());
- break;
- }
+
+
+#undef CASE_SIMPLE_TYPE
+
+ case NScheme::NTypeIds::Yson:
+ case NScheme::NTypeIds::Json:
+ case NScheme::NTypeIds::Utf8:
+ {
+ c = TCell(v.GetText().data(), v.GetText().size());
+ break;
+ }
case NScheme::NTypeIds::JsonDocument:
case NScheme::NTypeIds::DyNumber:
{
c = TCell(v.GetBytes().data(), v.GetBytes().size());
break;
}
- case NScheme::NTypeIds::String:
- {
- if (v.HasBytes()) {
- c = TCell(v.GetBytes().data(), v.GetBytes().size());
- } else if (allowCastFromString && v.HasText()) {
- c = TCell(v.GetText().data(), v.GetText().size());
- } else {
- CHECK_OR_RETURN_ERROR(false, Sprintf("Cannot parse value of type String in tuple at position %" PRIu32, i));
- }
- break;
- }
- default:
- CHECK_OR_RETURN_ERROR(false, Sprintf("Unsupported typeId %" PRIu16 " at index %" PRIu32, types[i], i));
- break;
- }
-
- CHECK_OR_RETURN_ERROR(!c.IsNull(), Sprintf("Invalid non-NULL value at index %" PRIu32, i));
- key.push_back(c);
- }
-
-#undef CHECK_OR_RETURN_ERROR
-
- return true;
-}
-
-bool CellToValue(NScheme::TTypeId typeId, const TCell& c, NKikimrMiniKQL::TValue& val, TString& errStr) {
- if (c.IsNull()) {
- return true;
- }
-
- switch (typeId) {
- case NScheme::NTypeIds::Int8:
- Y_VERIFY(c.Size() == sizeof(i8));
- val.MutableOptional()->SetInt32(*(i8*)c.Data());
- break;
- case NScheme::NTypeIds::Uint8:
- Y_VERIFY(c.Size() == sizeof(ui8));
- val.MutableOptional()->SetUint32(*(ui8*)c.Data());
- break;
-
- case NScheme::NTypeIds::Int16:
- Y_VERIFY(c.Size() == sizeof(i16));
- val.MutableOptional()->SetInt32(ReadUnaligned<i16>(c.Data()));
- break;
- case NScheme::NTypeIds::Uint16:
- Y_VERIFY(c.Size() == sizeof(ui16));
- val.MutableOptional()->SetUint32(ReadUnaligned<ui16>(c.Data()));
- break;
-
- case NScheme::NTypeIds::Int32:
- Y_VERIFY(c.Size() == sizeof(i32));
- val.MutableOptional()->SetInt32(ReadUnaligned<i32>(c.Data()));
- break;
- case NScheme::NTypeIds::Uint32:
- Y_VERIFY(c.Size() == sizeof(ui32));
- val.MutableOptional()->SetUint32(ReadUnaligned<ui32>(c.Data()));
- break;
-
- case NScheme::NTypeIds::Int64:
- Y_VERIFY(c.Size() == sizeof(i64));
- val.MutableOptional()->SetInt64(ReadUnaligned<i64>(c.Data()));
- break;
- case NScheme::NTypeIds::Uint64:
- Y_VERIFY(c.Size() == sizeof(ui64));
- val.MutableOptional()->SetUint64(ReadUnaligned<ui64>(c.Data()));
- break;
-
- case NScheme::NTypeIds::Bool:
- Y_VERIFY(c.Size() == sizeof(bool));
- val.MutableOptional()->SetBool(*(bool*)c.Data());
- break;
-
- case NScheme::NTypeIds::Float:
- Y_VERIFY(c.Size() == sizeof(float));
- val.MutableOptional()->SetFloat(ReadUnaligned<float>(c.Data()));
- break;
-
- case NScheme::NTypeIds::Double:
- Y_VERIFY(c.Size() == sizeof(double));
- val.MutableOptional()->SetDouble(ReadUnaligned<double>(c.Data()));
- break;
-
+ case NScheme::NTypeIds::String:
+ {
+ if (v.HasBytes()) {
+ c = TCell(v.GetBytes().data(), v.GetBytes().size());
+ } else if (allowCastFromString && v.HasText()) {
+ c = TCell(v.GetText().data(), v.GetText().size());
+ } else {
+ CHECK_OR_RETURN_ERROR(false, Sprintf("Cannot parse value of type String in tuple at position %" PRIu32, i));
+ }
+ break;
+ }
+ default:
+ CHECK_OR_RETURN_ERROR(false, Sprintf("Unsupported typeId %" PRIu16 " at index %" PRIu32, types[i], i));
+ break;
+ }
+
+ CHECK_OR_RETURN_ERROR(!c.IsNull(), Sprintf("Invalid non-NULL value at index %" PRIu32, i));
+ key.push_back(c);
+ }
+
+#undef CHECK_OR_RETURN_ERROR
+
+ return true;
+}
+
+bool CellToValue(NScheme::TTypeId typeId, const TCell& c, NKikimrMiniKQL::TValue& val, TString& errStr) {
+ if (c.IsNull()) {
+ return true;
+ }
+
+ switch (typeId) {
+ case NScheme::NTypeIds::Int8:
+ Y_VERIFY(c.Size() == sizeof(i8));
+ val.MutableOptional()->SetInt32(*(i8*)c.Data());
+ break;
+ case NScheme::NTypeIds::Uint8:
+ Y_VERIFY(c.Size() == sizeof(ui8));
+ val.MutableOptional()->SetUint32(*(ui8*)c.Data());
+ break;
+
+ case NScheme::NTypeIds::Int16:
+ Y_VERIFY(c.Size() == sizeof(i16));
+ val.MutableOptional()->SetInt32(ReadUnaligned<i16>(c.Data()));
+ break;
+ case NScheme::NTypeIds::Uint16:
+ Y_VERIFY(c.Size() == sizeof(ui16));
+ val.MutableOptional()->SetUint32(ReadUnaligned<ui16>(c.Data()));
+ break;
+
+ case NScheme::NTypeIds::Int32:
+ Y_VERIFY(c.Size() == sizeof(i32));
+ val.MutableOptional()->SetInt32(ReadUnaligned<i32>(c.Data()));
+ break;
+ case NScheme::NTypeIds::Uint32:
+ Y_VERIFY(c.Size() == sizeof(ui32));
+ val.MutableOptional()->SetUint32(ReadUnaligned<ui32>(c.Data()));
+ break;
+
+ case NScheme::NTypeIds::Int64:
+ Y_VERIFY(c.Size() == sizeof(i64));
+ val.MutableOptional()->SetInt64(ReadUnaligned<i64>(c.Data()));
+ break;
+ case NScheme::NTypeIds::Uint64:
+ Y_VERIFY(c.Size() == sizeof(ui64));
+ val.MutableOptional()->SetUint64(ReadUnaligned<ui64>(c.Data()));
+ break;
+
+ case NScheme::NTypeIds::Bool:
+ Y_VERIFY(c.Size() == sizeof(bool));
+ val.MutableOptional()->SetBool(*(bool*)c.Data());
+ break;
+
+ case NScheme::NTypeIds::Float:
+ Y_VERIFY(c.Size() == sizeof(float));
+ val.MutableOptional()->SetFloat(ReadUnaligned<float>(c.Data()));
+ break;
+
+ case NScheme::NTypeIds::Double:
+ Y_VERIFY(c.Size() == sizeof(double));
+ val.MutableOptional()->SetDouble(ReadUnaligned<double>(c.Data()));
+ break;
+
case NScheme::NTypeIds::Date:
Y_VERIFY(c.Size() == sizeof(ui16));
val.MutableOptional()->SetUint32(ReadUnaligned<i16>(c.Data()));
@@ -408,23 +408,23 @@ bool CellToValue(NScheme::TTypeId typeId, const TCell& c, NKikimrMiniKQL::TValue
break;
case NScheme::NTypeIds::JsonDocument:
- case NScheme::NTypeIds::String:
+ case NScheme::NTypeIds::String:
case NScheme::NTypeIds::DyNumber:
- val.MutableOptional()->SetBytes(c.Data(), c.Size());
- break;
-
- case NScheme::NTypeIds::Json:
- case NScheme::NTypeIds::Yson:
- case NScheme::NTypeIds::Utf8:
- val.MutableOptional()->SetText(c.Data(), c.Size());
- break;
- default:
- errStr = "Unknown type: " + ToString(typeId);
- return false;
- }
- return true;
-}
-
+ val.MutableOptional()->SetBytes(c.Data(), c.Size());
+ break;
+
+ case NScheme::NTypeIds::Json:
+ case NScheme::NTypeIds::Yson:
+ case NScheme::NTypeIds::Utf8:
+ val.MutableOptional()->SetText(c.Data(), c.Size());
+ break;
+ default:
+ errStr = "Unknown type: " + ToString(typeId);
+ return false;
+ }
+ return true;
+}
+
} // namespace NMiniKQL
} // namespace NKikimr
diff --git a/ydb/core/engine/mkql_proto.h b/ydb/core/engine/mkql_proto.h
index d9bea63933b..510ff15991f 100644
--- a/ydb/core/engine/mkql_proto.h
+++ b/ydb/core/engine/mkql_proto.h
@@ -13,16 +13,16 @@ namespace NMiniKQL {
class THolderFactory;
NUdf::TUnboxedValue ImportValueFromProto(TType* type, const Ydb::Value& value, const THolderFactory& factory);
-
-// NOTE: TCells can reference memory from tupleValue
-bool CellsFromTuple(const NKikimrMiniKQL::TType* tupleType,
- const NKikimrMiniKQL::TValue& tupleValue,
- const TConstArrayRef<NScheme::TTypeId>& expectedTypes,
- bool allowCastFromString,
+
+// NOTE: TCells can reference memory from tupleValue
+bool CellsFromTuple(const NKikimrMiniKQL::TType* tupleType,
+ const NKikimrMiniKQL::TValue& tupleValue,
+ const TConstArrayRef<NScheme::TTypeId>& expectedTypes,
+ bool allowCastFromString,
TVector<TCell>& key,
- TString& errStr);
-
-bool CellToValue(NScheme::TTypeId typeId, const TCell& c, NKikimrMiniKQL::TValue& val, TString& errStr);
-
+ TString& errStr);
+
+bool CellToValue(NScheme::TTypeId typeId, const TCell& c, NKikimrMiniKQL::TValue& val, TString& errStr);
+
} // namespace NMiniKQL
} // namespace NKikimr
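
A hedged usage sketch of the two declarations above, following the DoTestCellsFromTuple pattern from mkql_proto_ut.cpp. ExampleRoundTrip is an illustrative name, the includes are abbreviated, and "Scheme: 1" is the Int32 type id used in those tests:

    #include <ydb/core/engine/mkql_proto.h>
    #include <google/protobuf/text_format.h>
    // (protobuf and scheme includes are abbreviated; this is a sketch, not engine code)

    bool ExampleRoundTrip(TString& errStr) {
        NKikimrMiniKQL::TParams params;
        const TString paramsProto =
            " Type { Kind: Tuple Tuple {"
            "   Element { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 1 } } } } } }"
            " Value { Tuple { Optional { Int32: -42 } } }";
        if (!::google::protobuf::TextFormat::ParseFromString(paramsProto, &params)) {
            errStr = "bad text proto";
            return false;
        }

        const TVector<NScheme::TTypeId> types = {NScheme::NTypeIds::Int32};
        TVector<TCell> cells;
        if (!NKikimr::NMiniKQL::CellsFromTuple(&params.GetType(), params.GetValue(),
                                               types, /*allowCastFromString=*/true, cells, errStr)) {
            return false;
        }

        // Convert the first cell back into a protobuf value.
        NKikimrMiniKQL::TValue back;
        return NKikimr::NMiniKQL::CellToValue(NScheme::NTypeIds::Int32, cells[0], back, errStr);
    }
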
diff --git a/ydb/core/engine/mkql_proto_ut.cpp b/ydb/core/engine/mkql_proto_ut.cpp
index c438b21e446..c6e00e947c5 100644
--- a/ydb/core/engine/mkql_proto_ut.cpp
+++ b/ydb/core/engine/mkql_proto_ut.cpp
@@ -389,214 +389,214 @@ Y_UNIT_TEST(TestExportVariantStructTypeYdb) {
"variant_index: 1\n");
}
- TString DoTestCellsFromTuple(const TConstArrayRef<NScheme::TTypeId>& types, TString paramsProto) {
- NKikimrMiniKQL::TParams params;
- bool parseOk = ::google::protobuf::TextFormat::ParseFromString(paramsProto, &params);
- UNIT_ASSERT_C(parseOk, paramsProto);
-
+ TString DoTestCellsFromTuple(const TConstArrayRef<NScheme::TTypeId>& types, TString paramsProto) {
+ NKikimrMiniKQL::TParams params;
+ bool parseOk = ::google::protobuf::TextFormat::ParseFromString(paramsProto, &params);
+ UNIT_ASSERT_C(parseOk, paramsProto);
+
TVector<TCell> cells;
- TString errStr;
- bool res = CellsFromTuple(&params.GetType(), params.GetValue(), types, true, cells, errStr);
- UNIT_ASSERT_VALUES_EQUAL_C(res, errStr.empty(), paramsProto);
-
- return errStr;
- }
-
+ TString errStr;
+ bool res = CellsFromTuple(&params.GetType(), params.GetValue(), types, true, cells, errStr);
+ UNIT_ASSERT_VALUES_EQUAL_C(res, errStr.empty(), paramsProto);
+
+ return errStr;
+ }
+
Y_UNIT_TEST(TestCellsFromTuple) {
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Int32: -42 } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Value of type Int32 expected in tuple at position 0", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Int64: -42 } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Text : '-42' } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::String
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Text : 'AAAA' } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Cannot parse value of type Uint32 from text '-42' in tuple at position 0", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Uint32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Text : '-42' } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Tuple value length 0 doesn't match the length in type 1", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " }"
- " }"
- " Value {"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Data must be present at position 0", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional {} }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Tuple value length 0 doesn't match the length in type 1", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32,
- NScheme::NTypeIds::Utf8
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32,
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Int32 : -42 } }"
- " Tuple { Optional { Text : '-42' } }"
- " }"
- )
- );
-
- UNIT_ASSERT_VALUES_EQUAL("Tuple size 2 is greater that expected size 1", DoTestCellsFromTuple(
- {
- NScheme::NTypeIds::Int32
- },
- " Type {"
- " Kind : Tuple"
- " Tuple {"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
- " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
- " }"
- " }"
- " Value {"
- " Tuple { Optional { Int32 : -42 } }"
- " Tuple { Optional { Text : '-42' } }"
- " }"
- )
- );
- }
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Int32: -42 } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Value of type Int32 expected in tuple at position 0", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Int64: -42 } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Text : '-42' } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::String
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Text : 'AAAA' } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Cannot parse value of type Uint32 from text '-42' in tuple at position 0", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Uint32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Text : '-42' } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Tuple value length 0 doesn't match the length in type 1", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " }"
+ " }"
+ " Value {"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Data must be present at position 0", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional {} }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Tuple value length 0 doesn't match the length in type 1", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32,
+ NScheme::NTypeIds::Utf8
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32,
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Int32 : -42 } }"
+ " Tuple { Optional { Text : '-42' } }"
+ " }"
+ )
+ );
+
+ UNIT_ASSERT_VALUES_EQUAL("Tuple size 2 is greater that expected size 1", DoTestCellsFromTuple(
+ {
+ NScheme::NTypeIds::Int32
+ },
+ " Type {"
+ " Kind : Tuple"
+ " Tuple {"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 1 } } } }"
+ " Element { Kind : Optional Optional { Item { Kind : Data Data { Scheme : 4608 } } } }"
+ " }"
+ " }"
+ " Value {"
+ " Tuple { Optional { Int32 : -42 } }"
+ " Tuple { Optional { Text : '-42' } }"
+ " }"
+ )
+ );
+ }
}
}
diff --git a/ydb/core/formats/arrow_batch_builder.cpp b/ydb/core/formats/arrow_batch_builder.cpp
index 542ecf905ca..8ecf97b9c78 100644
--- a/ydb/core/formats/arrow_batch_builder.cpp
+++ b/ydb/core/formats/arrow_batch_builder.cpp
@@ -87,28 +87,28 @@ bool TArrowBatchBuilder::Start(const TVector<std::pair<TString, NScheme::TTypeId
return status.ok();
}
-void TArrowBatchBuilder::AppendCell(const TCell& cell, ui32 colNum) {
- NumBytes += cell.Size();
- const ui32 ydbType = YdbSchema[colNum].second;
- auto status = NKikimr::NArrow::AppendCell(*BatchBuilder, cell, colNum, ydbType);
- Y_VERIFY(status.ok());
-}
-
-void TArrowBatchBuilder::AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
+void TArrowBatchBuilder::AppendCell(const TCell& cell, ui32 colNum) {
+ NumBytes += cell.Size();
+ const ui32 ydbType = YdbSchema[colNum].second;
+ auto status = NKikimr::NArrow::AppendCell(*BatchBuilder, cell, colNum, ydbType);
+ Y_VERIFY(status.ok());
+}
+
+void TArrowBatchBuilder::AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
++NumRows;
-
- auto fnAppendTuple = [&] (const TDbTupleRef& tuple, size_t offsetInRow) {
- for (size_t i = 0; i < tuple.ColumnCount; ++i) {
- const ui32 ydbType = tuple.Types[i];
- const ui32 colNum = offsetInRow + i;
- Y_VERIFY(ydbType == YdbSchema[colNum].second);
- auto& cell = tuple.Columns[i];
- AppendCell(cell, colNum);
- }
- };
-
- fnAppendTuple(key, 0);
- fnAppendTuple(value, key.ColumnCount);
+
+ auto fnAppendTuple = [&] (const TDbTupleRef& tuple, size_t offsetInRow) {
+ for (size_t i = 0; i < tuple.ColumnCount; ++i) {
+ const ui32 ydbType = tuple.Types[i];
+ const ui32 colNum = offsetInRow + i;
+ Y_VERIFY(ydbType == YdbSchema[colNum].second);
+ auto& cell = tuple.Columns[i];
+ AppendCell(cell, colNum);
+ }
+ };
+
+ fnAppendTuple(key, 0);
+ fnAppendTuple(value, key.ColumnCount);
}
void TArrowBatchBuilder::AddRow(const TConstArrayRef<TCell>& key, const TConstArrayRef<TCell>& value) {
@@ -117,11 +117,11 @@ void TArrowBatchBuilder::AddRow(const TConstArrayRef<TCell>& key, const TConstAr
size_t offset = 0;
for (size_t i = 0; i < key.size(); ++i, ++offset) {
auto& cell = key[i];
- AppendCell(cell, offset);
+ AppendCell(cell, offset);
}
for (size_t i = 0; i < value.size(); ++i, ++offset) {
auto& cell = value[i];
- AppendCell(cell, offset);
+ AppendCell(cell, offset);
}
}
@@ -147,9 +147,9 @@ void TArrowBatchBuilder::ReserveData(ui32 columnNo, size_t size) {
});
}
-std::shared_ptr<arrow::RecordBatch> TArrowBatchBuilder::FlushBatch(bool reinitialize) {
+std::shared_ptr<arrow::RecordBatch> TArrowBatchBuilder::FlushBatch(bool reinitialize) {
if (NumRows) {
- auto status = BatchBuilder->Flush(reinitialize, &Batch);
+ auto status = BatchBuilder->Flush(reinitialize, &Batch);
Y_VERIFY(status.ok());
}
NumRows = NumBytes = 0;
@@ -158,7 +158,7 @@ std::shared_ptr<arrow::RecordBatch> TArrowBatchBuilder::FlushBatch(bool reinitia
TString TArrowBatchBuilder::Finish() {
if (!Batch) {
- FlushBatch(false);
+ FlushBatch(false);
}
TString str = NArrow::SerializeBatch(Batch, WriteOptions);
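
FlushBatch above hands the accumulated rows to Arrow's RecordBatchBuilder and yields an arrow::RecordBatch. For readers unfamiliar with Arrow, here is a standalone sketch of what such a batch looks like when built by hand with the plain Arrow C++ API (no YDB types; MakeTinyBatch is an illustrative name):

    #include <arrow/api.h>
    #include <memory>

    std::shared_ptr<arrow::RecordBatch> MakeTinyBatch() {
        arrow::Int64Builder idBuilder;
        arrow::StringBuilder nameBuilder;

        // Append one row; Arrow builder calls return arrow::Status.
        if (!idBuilder.Append(42).ok() || !nameBuilder.Append("qwe").ok()) {
            return nullptr;
        }

        std::shared_ptr<arrow::Array> idArray, nameArray;
        if (!idBuilder.Finish(&idArray).ok() || !nameBuilder.Finish(&nameArray).ok()) {
            return nullptr;
        }

        auto schema = arrow::schema({arrow::field("id", arrow::int64()),
                                     arrow::field("name", arrow::utf8())});
        return arrow::RecordBatch::Make(schema, /*num_rows=*/1, {idArray, nameArray});
    }
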
diff --git a/ydb/core/formats/arrow_batch_builder.h b/ydb/core/formats/arrow_batch_builder.h
index 1d6a60d6e9d..d52a94ed20c 100644
--- a/ydb/core/formats/arrow_batch_builder.h
+++ b/ydb/core/formats/arrow_batch_builder.h
@@ -22,10 +22,10 @@ public:
Y_UNUSED(err);
return Start(columns);
}
-
- void AddRow(const NKikimr::TDbTupleRef& key, const NKikimr::TDbTupleRef& value) override;
+
+ void AddRow(const NKikimr::TDbTupleRef& key, const NKikimr::TDbTupleRef& value) override;
void AddRow(const TConstArrayRef<TCell>& key, const TConstArrayRef<TCell>& value);
-
+
// You have to call it before Start()
void Reserve(size_t numRows) {
RowsToReserve = numRows;
@@ -39,28 +39,28 @@ public:
}
bool Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns);
- std::shared_ptr<arrow::RecordBatch> FlushBatch(bool reinitialize);
+ std::shared_ptr<arrow::RecordBatch> FlushBatch(bool reinitialize);
std::shared_ptr<arrow::RecordBatch> GetBatch() const { return Batch; }
-protected:
- void AppendCell(const TCell& cell, ui32 colNum);
-
- const TVector<std::pair<TString, NScheme::TTypeId>>& GetYdbSchema() const {
- return YdbSchema;
- }
-
+protected:
+ void AppendCell(const TCell& cell, ui32 colNum);
+
+ const TVector<std::pair<TString, NScheme::TTypeId>>& GetYdbSchema() const {
+ return YdbSchema;
+ }
+
private:
arrow::ipc::IpcWriteOptions WriteOptions;
TVector<std::pair<TString, NScheme::TTypeId>> YdbSchema;
std::unique_ptr<arrow::RecordBatchBuilder> BatchBuilder;
std::shared_ptr<arrow::RecordBatch> Batch;
size_t RowsToReserve{DEFAULT_ROWS_TO_RESERVE};
-
-protected:
+
+protected:
size_t NumRows{0};
size_t NumBytes{0};
-private:
+private:
std::unique_ptr<IBlockBuilder> Clone() const override {
return std::make_unique<TArrowBatchBuilder>();
}
diff --git a/ydb/core/formats/arrow_helpers.cpp b/ydb/core/formats/arrow_helpers.cpp
index 3e7af5c070b..3e1e1b0444a 100644
--- a/ydb/core/formats/arrow_helpers.cpp
+++ b/ydb/core/formats/arrow_helpers.cpp
@@ -236,9 +236,9 @@ TString SerializeSchema(const arrow::Schema& schema) {
return TString((const char*)(*buffer)->data(), (*buffer)->size());
}
-std::shared_ptr<arrow::Schema> DeserializeSchema(const TString& str) {
- std::shared_ptr<arrow::Buffer> buffer(std::make_shared<TBufferOverString>(str));
- arrow::io::BufferReader reader(buffer);
+std::shared_ptr<arrow::Schema> DeserializeSchema(const TString& str) {
+ std::shared_ptr<arrow::Buffer> buffer(std::make_shared<TBufferOverString>(str));
+ arrow::io::BufferReader reader(buffer);
arrow::ipc::DictionaryMemo dictMemo;
auto schema = ReadSchema(&reader, &dictMemo);
if (!schema.ok()) {
@@ -316,13 +316,13 @@ TString SerializeBatchNoCompression(const std::shared_ptr<arrow::RecordBatch>& b
return SerializeBatch(batch, writeOptions);
}
-std::shared_ptr<arrow::RecordBatch> DeserializeBatch(const TString& blob, const std::shared_ptr<arrow::Schema>& schema) {
+std::shared_ptr<arrow::RecordBatch> DeserializeBatch(const TString& blob, const std::shared_ptr<arrow::Schema>& schema) {
arrow::ipc::DictionaryMemo dictMemo;
auto options = arrow::ipc::IpcReadOptions::Defaults();
options.use_threads = false;
- std::shared_ptr<arrow::Buffer> buffer(std::make_shared<TBufferOverString>(blob));
- arrow::io::BufferReader reader(buffer);
+ std::shared_ptr<arrow::Buffer> buffer(std::make_shared<TBufferOverString>(blob));
+ arrow::io::BufferReader reader(buffer);
auto batch = ReadRecordBatch(schema, &dictMemo, options, &reader);
if (!batch.ok() || !(*batch)->Validate().ok()) {
return {};
@@ -451,25 +451,25 @@ std::vector<std::shared_ptr<arrow::RecordBatch>> MergeSortedBatches(const std::v
return out;
}
-// Check if the pertumation doesn't reoder anything
-bool IsNoOp(const arrow::UInt64Array& permutation) {
- for (i64 i = 0; i < permutation.length(); ++i) {
- if (permutation.Value(i) != (ui64)i) {
- return false;
- }
- }
- return true;
-}
-
-std::shared_ptr<arrow::RecordBatch> Reorder(const std::shared_ptr<arrow::RecordBatch>& batch,
- const std::shared_ptr<arrow::UInt64Array>& permutation) {
- Y_VERIFY(permutation->length() == batch->num_rows());
-
- auto res = IsNoOp(*permutation) ? batch : arrow::compute::Take(batch, permutation);
- Y_VERIFY(res.ok());
- return (*res).record_batch();
-}
-
+// Check if the pertumation doesn't reoder anything
+bool IsNoOp(const arrow::UInt64Array& permutation) {
+ for (i64 i = 0; i < permutation.length(); ++i) {
+ if (permutation.Value(i) != (ui64)i) {
+ return false;
+ }
+ }
+ return true;
+}
+
+std::shared_ptr<arrow::RecordBatch> Reorder(const std::shared_ptr<arrow::RecordBatch>& batch,
+ const std::shared_ptr<arrow::UInt64Array>& permutation) {
+ Y_VERIFY(permutation->length() == batch->num_rows());
+
+ auto res = IsNoOp(*permutation) ? batch : arrow::compute::Take(batch, permutation);
+ Y_VERIFY(res.ok());
+ return (*res).record_batch();
+}
+
std::vector<std::shared_ptr<arrow::RecordBatch>> ShardingSplit(const std::shared_ptr<arrow::RecordBatch>& batch,
const std::vector<ui32>& sharding, ui32 numShards) {
Y_VERIFY((size_t)batch->num_rows() == sharding.size());
@@ -494,7 +494,7 @@ std::vector<std::shared_ptr<arrow::RecordBatch>> ShardingSplit(const std::shared
Y_VERIFY_OK(builder.Finish(&permutation));
}
- auto reorderedBatch = Reorder(batch, permutation);
+ auto reorderedBatch = Reorder(batch, permutation);
std::vector<std::shared_ptr<arrow::RecordBatch>> out(numShards);
@@ -909,7 +909,7 @@ std::shared_ptr<arrow::UInt64Array> MakeSortPermutation(const std::shared_ptr<ar
std::shared_ptr<arrow::RecordBatch> SortBatch(const std::shared_ptr<arrow::RecordBatch>& batch,
const std::shared_ptr<arrow::Schema>& sortingKey) {
auto sortPermutation = MakeSortPermutation(batch, sortingKey);
- return Reorder(batch, sortPermutation);
+ return Reorder(batch, sortPermutation);
}
bool TArrowToYdbConverter::Process(const arrow::RecordBatch& batch, TString& errorMessage) {
diff --git a/ydb/core/formats/arrow_helpers.h b/ydb/core/formats/arrow_helpers.h
index 5574b2f9a62..cd3ec9f865f 100644
--- a/ydb/core/formats/arrow_helpers.h
+++ b/ydb/core/formats/arrow_helpers.h
@@ -9,22 +9,22 @@
namespace NKikimr::NArrow {
-// Arrow internally keeps references to Buffer objects with the data
-// This helper class implements arrow::Buffer over TString that owns
-// the actual memory
-class TBufferOverString : public arrow::Buffer {
- TString Str;
-public:
- explicit TBufferOverString(TString str)
- : arrow::Buffer((const unsigned char*)str.data(), str.size())
- , Str(str)
- {
- Y_VERIFY(data() == (const unsigned char*)Str.data());
- }
-};
-
+// Arrow internally keeps references to Buffer objects with the data
+// This helper class implements arrow::Buffer over TString that owns
+// the actual memory
+class TBufferOverString : public arrow::Buffer {
+ TString Str;
+public:
+ explicit TBufferOverString(TString str)
+ : arrow::Buffer((const unsigned char*)str.data(), str.size())
+ , Str(str)
+ {
+ Y_VERIFY(data() == (const unsigned char*)Str.data());
+ }
+};
+
std::shared_ptr<arrow::DataType> GetArrowType(NScheme::TTypeId typeId);
-
+
template <typename T>
inline bool ArrayEqualValue(const std::shared_ptr<arrow::Array>& x, const std::shared_ptr<arrow::Array>& y) {
auto& arrX = static_cast<const T&>(*x);
@@ -55,12 +55,12 @@ std::vector<std::shared_ptr<arrow::Field>> MakeArrowFields(const TVector<std::pa
std::shared_ptr<arrow::Schema> MakeArrowSchema(const TVector<std::pair<TString, NScheme::TTypeId>>& columns);
TString SerializeSchema(const arrow::Schema& schema);
-std::shared_ptr<arrow::Schema> DeserializeSchema(const TString& str);
+std::shared_ptr<arrow::Schema> DeserializeSchema(const TString& str);
TString SerializeBatch(const std::shared_ptr<arrow::RecordBatch>& batch, const arrow::ipc::IpcWriteOptions& options);
TString SerializeBatchNoCompression(const std::shared_ptr<arrow::RecordBatch>& batch);
-std::shared_ptr<arrow::RecordBatch> DeserializeBatch(const TString& blob,
+std::shared_ptr<arrow::RecordBatch> DeserializeBatch(const TString& blob,
const std::shared_ptr<arrow::Schema>& schema);
std::shared_ptr<arrow::RecordBatch> MakeEmptyBatch(const std::shared_ptr<arrow::Schema>& schema);
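
A hedged round-trip sketch for the serialization helpers declared above: a batch is serialized together with its schema and restored on the other side. PackBatch/UnpackBatch are illustrative names, and error handling is reduced to null checks:

    #include <ydb/core/formats/arrow_helpers.h>

    TString PackBatch(const std::shared_ptr<arrow::RecordBatch>& batch, TString& schemaBlob) {
        schemaBlob = NKikimr::NArrow::SerializeSchema(*batch->schema());
        return NKikimr::NArrow::SerializeBatchNoCompression(batch);
    }

    std::shared_ptr<arrow::RecordBatch> UnpackBatch(const TString& schemaBlob, const TString& dataBlob) {
        auto schema = NKikimr::NArrow::DeserializeSchema(schemaBlob);
        if (!schema) {
            return {};
        }
        // Returns an empty pointer if the blob fails validation, as in DeserializeBatch above.
        return NKikimr::NArrow::DeserializeBatch(dataBlob, schema);
    }
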
diff --git a/ydb/core/formats/clickhouse_block.cpp b/ydb/core/formats/clickhouse_block.cpp
index b5d297bc65a..0fd77f5fdad 100644
--- a/ydb/core/formats/clickhouse_block.cpp
+++ b/ydb/core/formats/clickhouse_block.cpp
@@ -1,699 +1,699 @@
-// The code in this file is based on original ClickHouse source code
-// which is licensed under Apache license v2.0
-// See: https://github.com/ClickHouse/ClickHouse/
-
-#include "factory.h"
-
+// The code in this file is based on original ClickHouse source code
+// which is licensed under Apache license v2.0
+// See: https://github.com/ClickHouse/ClickHouse/
+
+#include "factory.h"
+
#include <ydb/core/scheme/scheme_tablecell.h>
#include <ydb/library/yql/public/decimal/yql_decimal.h>
-#include <util/stream/str.h>
-#include <util/generic/ptr.h>
-#include <util/generic/vector.h>
-#include <util/generic/string.h>
-#include <util/generic/hash.h>
-
-namespace NKikHouse {
-namespace NSerialization {
-
-class IColumn;
-using TMutableColumnPtr = TIntrusivePtr<IColumn>;
-using TColumnPtr = TIntrusiveConstPtr<IColumn>;
-
-class IDataType;
-using TDataTypePtr = TIntrusiveConstPtr<IDataType>;
-
-// Generic data type interface
-class IDataType : public TThrRefBase {
-public:
- virtual ~IDataType() = default;
- virtual const TString& getName() const = 0;
- virtual TMutableColumnPtr createColumn() const = 0;
-
- // Converts 'const this' into TIntrusiveConstPtr
- TDataTypePtr getPtr() const {
- return TDataTypePtr(const_cast<IDataType*>(this));
- }
-};
-
-// Generic column interface
-class IColumn : public TThrRefBase {
-public:
- virtual ~IColumn() = default;
- virtual const TDataTypePtr& getType() const = 0;
- virtual void insertData(const char* buf, size_t sz) = 0;
- virtual void insertDefault() = 0;
- virtual size_t rows() const = 0;
- virtual size_t byteSize() const = 0;
- // Does binary serialization
- virtual void serialize(IOutputStream& out) const = 0;
-};
-
-struct TTypeAndName {
- TDataTypePtr Type;
- TString Name;
-};
-
-using TTypesAndNames = TVector<TTypeAndName>;
-
-// A block of data with several named columns that have the same number of rows
-class TBlock {
- TTypesAndNames TypesAndNames;
- TVector<TMutableColumnPtr> Data;
-public:
- TBlock() = default;
-
- TBlock(TTypesAndNames&& typesAndNames)
- : TypesAndNames(std::move(typesAndNames))
- {
- for (const auto& tn : TypesAndNames) {
- Data.push_back(tn.Type->createColumn());
- }
- }
-
- TBlock cloneEmpty() const {
- return TBlock(TTypesAndNames(TypesAndNames));
- }
-
- size_t columns() const {
- return TypesAndNames.size();
- }
-
- size_t rows() const {
- return Data.empty() ? 0 : Data.front()->rows();
- }
-
- const TDataTypePtr& getType(size_t i) const {
- return TypesAndNames.at(i).Type;
- }
-
- const TString& getName(size_t i) const {
- return TypesAndNames.at(i).Name;
- }
-
- TMutableColumnPtr getMutableColumn(size_t i) {
- return Data.at(i);
- }
-
- TColumnPtr getColumn(size_t i) const {
- return Data.at(i);
- }
-
- // Check that all columns have the same number of rows
- void checkNumberOfRows() const {
- if (Data.empty())
- return;
-
- size_t expectedRowCount = Data[0]->rows();
- for (size_t i = 1; i < Data.size(); ++i) {
- if (Data[i]->rows() != expectedRowCount) {
- throw yexception() << "Column '" << getName(i) << "' in the block has different number of rows: "
- << Data[i]->rows() << " expected: " << expectedRowCount;
- }
- }
- }
-
- // Writes some metadata before the block contents
- void serializeBlockHeader(IOutputStream& out) const;
-};
-
-
-inline void writeVarUInt(ui64 x, IOutputStream& out) {
- for (size_t i = 0; i < 9; ++i) {
- ui8 byte = x & 0x7F;
- if (x > 0x7F)
- byte |= 0x80;
-
- out.Write(byte);
-
- x >>= 7;
- if (!x)
- return;
- }
-}
-
-inline void writeStringBinary(const TStringBuf& s, IOutputStream& out) {
- writeVarUInt(s.size(), out);
- out.Write(s);
-}
-
-template <typename T>
-inline void writePODBinary(const T & x, IOutputStream& out) {
- out.Write(reinterpret_cast<const char *>(&x), sizeof(x));
-}
-
-
-void TBlock::serializeBlockHeader(IOutputStream& out) const {
-#define APPLY_FOR_BLOCK_INFO_FIELDS(M) \
- M(bool, is_overflows, false, 1) \
- M(i32, bucket_num, -1, 2)
-
-#define DECLARE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
- TYPE NAME = DEFAULT;
-
- APPLY_FOR_BLOCK_INFO_FIELDS(DECLARE_FIELD)
-#undef DECLARE_FIELD
-
- /// Set of pairs `FIELD_NUM`, value in binary form. Then 0.
-#define WRITE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
- writeVarUInt(FIELD_NUM, out); \
- writePODBinary(NAME, out);
-
- APPLY_FOR_BLOCK_INFO_FIELDS(WRITE_FIELD)
-#undef WRITE_FIELD
-
- writeVarUInt(0, out);
-}
-
-
-// Writes blocks into an output stream
-class TBlockWriter {
- ui32 ClientRevision = 0;
- IOutputStream& Out;
-public:
- TBlockWriter(IOutputStream& out, ui32 clientRevision)
- : ClientRevision(clientRevision)
- , Out(out)
- {}
-
- void Write(const TBlock& block) {
- // Additional information about the block.
- if (ClientRevision > 0)
- block.serializeBlockHeader(Out);
-
- block.checkNumberOfRows();
-
- // Dimensions
- size_t columns = block.columns();
- size_t rows = block.rows();
-
- writeVarUInt(columns, Out);
- writeVarUInt(rows, Out);
-
- for (size_t i = 0; i < columns; ++i) {
- TString columnName = block.getName(i);
- writeStringBinary(columnName, Out);
-
- TDataTypePtr type = block.getType(i);
- TString typeName = type->getName();
- writeStringBinary(typeName, Out);
-
- // Data
- if (rows) {
- // Zero items of data is always represented as zero number of bytes.
- TColumnPtr column = block.getColumn(i);
- writeData(column, Out);
- }
- }
- }
-
-private:
- void writeData(const TColumnPtr & column, IOutputStream& out) {
- column->serialize(out);
- }
-};
-
-
-class TDataTypeBase : public IDataType {
- TString Name;
-protected:
- explicit TDataTypeBase(const TString& name)
- : Name(name)
- {}
-public:
- const TString& getName() const override {
- return Name;
- }
-};
-
-
-class TColumnBase : public IColumn {
- TDataTypePtr Type;
-protected:
- explicit TColumnBase(TDataTypePtr dataType)
- : Type(dataType)
- {}
-public:
- const TDataTypePtr& getType() const override {
- return Type;
- }
-};
-
-
-class TNullableColumn : public TColumnBase {
- TVector<char> Nulls;
- TMutableColumnPtr Values;
-public:
- TNullableColumn(TDataTypePtr dataType);
-
- void insertData(const char* buf, size_t sz) override {
- Nulls.push_back(0);
- Values->insertData(buf, sz);
- }
-
- void insertDefault() override {
- Nulls.push_back(1);
- Values->insertDefault();
- }
-
- size_t rows() const override {
- return Nulls.size();
- }
-
- size_t byteSize() const override {
- return Nulls.size() + Values->byteSize();
- }
-
- void serialize(IOutputStream& out) const override {
- out.Write(Nulls.data(), Nulls.size());
- Values->serialize(out);
- }
-};
-
-
-class TFixedSizeColumn : public TColumnBase {
- TVector<char> Data;
- const size_t ElementSize;
-public:
- TFixedSizeColumn(TDataTypePtr dataType, size_t elementSize)
- : TColumnBase(dataType)
- , ElementSize(elementSize)
- {}
-
- void insertData(const char* buf, size_t sz) override {
- if (sz != ElementSize) {
- throw yexception() << "Data size " << sz << " doesn't match element size " << ElementSize;
- }
- Data.insert(Data.end(), buf, buf + sz);
- }
-
- void insertDefault() override {
- Data.resize(Data.size() + ElementSize);
- }
-
- size_t rows() const override {
- return Data.size() / ElementSize;
- }
-
- size_t byteSize() const override {
- return Data.size();
- }
-
- void serialize(IOutputStream& out) const override {
- out.Write(Data.data(), Data.size());
- }
-};
-
-
-class TStringColumn : public TColumnBase {
- using TOffset = size_t;
- TVector<char> Data; // The buffer to store all strings data
- TVector<TOffset> Offsets; // Offsets in the buffer (i-th offset points to the end of i-th string)
-public:
- TStringColumn(TDataTypePtr dataType)
- : TColumnBase(dataType)
- {}
-
- void insertData(const char* buf, size_t sz) override {
- Data.insert(Data.end(), buf, buf + sz);
- Data.push_back(0); // Always append '\0' at the end
- Offsets.push_back(Data.size());
- }
-
- void insertDefault() override {
- Data.push_back(0);
- Offsets.push_back(Data.size());
- }
-
- size_t rows() const override {
- return Offsets.size();
- }
-
- size_t byteSize() const override {
- return Data.size() + Offsets.size()*sizeof(TOffset);
- }
-
- void serialize(IOutputStream& out) const override {
- if (rows() == 0)
- return;
-
- size_t size = Offsets[0] - 1;
- writeVarUInt(size, out);
- out.Write(Data.data(), size);
-
- for (size_t i = 1; i < Offsets.size(); ++i) {
- size_t size = Offsets[i] - 1 - Offsets[i-1];
- writeVarUInt(size, out);
- out.Write(Data.data() + Offsets[i-1], size);
- }
- }
-};
-
-
-class TNullableDataType : public TDataTypeBase {
- const TDataTypePtr Nested;
-public:
- explicit TNullableDataType(const TDataTypePtr& nested)
- : TDataTypeBase("Nullable(" + nested->getName() + ")")
- , Nested(nested)
- {}
-
- TMutableColumnPtr createColumn() const override {
- return new TNullableColumn(this->getPtr());
- }
-
- TDataTypePtr getNested() const {
- return Nested;
- }
-};
-
-
-template<class TElement>
-class TFixedSizeDataType : public TDataTypeBase {
-public:
- explicit TFixedSizeDataType(const TString& name)
- : TDataTypeBase(name)
- {}
-
- TMutableColumnPtr createColumn() const override {
- return new TFixedSizeColumn(this->getPtr(), sizeof(TElement));
- }
-};
-
-
-class TStringDataType : public TDataTypeBase {
-public:
- TStringDataType()
- : TDataTypeBase("String")
- {}
-
- TMutableColumnPtr createColumn() const override {
- return new TStringColumn(this->getPtr());
- }
-};
-
-
-TNullableColumn::TNullableColumn(TDataTypePtr dataType)
- : TColumnBase(dataType)
- , Nulls()
- , Values()
-{
- const TNullableDataType* nullableType = dynamic_cast<const TNullableDataType*>(dataType.Get());
- Values = nullableType->getNested()->createColumn();
-}
-
-
-using namespace NKikimr;
-
-// Returns specific data types by their names
-class TDataTypeRegistry : public TThrRefBase {
- THashMap<TString, TDataTypePtr> Types;
-
-private:
- void Register(TDataTypePtr dataType) {
- Types[dataType->getName()] = dataType;
- }
-
-public:
- TDataTypeRegistry() {
- Register(new TStringDataType());
-
- Register(new TFixedSizeDataType<i8>("Int8"));
- Register(new TFixedSizeDataType<i16>("Int16"));
- Register(new TFixedSizeDataType<i32>("Int32"));
- Register(new TFixedSizeDataType<i64>("Int64"));
-
- Register(new TFixedSizeDataType<ui8>("UInt8"));
- Register(new TFixedSizeDataType<ui16>("UInt16"));
- Register(new TFixedSizeDataType<ui32>("UInt32"));
- Register(new TFixedSizeDataType<ui64>("UInt64"));
-
- Register(new TFixedSizeDataType<float>("Float32"));
- Register(new TFixedSizeDataType<double>("Float64"));
-
- Register(new TFixedSizeDataType<ui16>("Date"));
- Register(new TFixedSizeDataType<ui32>("DateTime"));
-
- Register(new TFixedSizeDataType<NYql::NDecimal::TInt128>("Decimal(22,9)"));
- }
-
- TDataTypePtr Get(TStringBuf name) const {
- return Types.at(name);
- }
-
- TDataTypePtr GetByYdbType(NScheme::TTypeId t) const {
-
- #define CONVERT(ydbType, chType) \
- case NScheme::NTypeIds::ydbType: \
- return Get(#chType);
-
- switch (t) {
- CONVERT(Bool, UInt8);
-
- CONVERT(Int8, Int8);
- CONVERT(Int16, Int16);
- CONVERT(Int32, Int32);
- CONVERT(Int64, Int64);
-
- CONVERT(Uint8, UInt8);
- CONVERT(Uint16, UInt16);
- CONVERT(Uint32, UInt32);
- CONVERT(Uint64, UInt64);
-
- CONVERT(Float, Float32);
- CONVERT(Double, Float64);
-
- CONVERT(String, String);
- CONVERT(Utf8, String);
- CONVERT(Json, String);
- CONVERT(Yson, String);
-
- CONVERT(Date, Date);
- CONVERT(Datetime, DateTime);
- CONVERT(Timestamp, UInt64);
- CONVERT(Interval, Int64);
-
- CONVERT(Decimal, Decimal(22,9));
-
- // Some internal types
- CONVERT(PairUi64Ui64, String);
- CONVERT(ActorId, String);
- CONVERT(StepOrderId, String);
-
- default:
- throw yexception() << "Unsupported type: " << t;
- }
- #undef CONVERT
- }
-};
-
-using TDataTypeRegistryPtr = TIntrusiveConstPtr<TDataTypeRegistry>;
-
-void AddNull(const TMutableColumnPtr& column) {
- // Default value is NULL for Nullable column
- column->insertDefault();
-}
-
-constexpr NYql::NDecimal::TInt128 Decimal128Min = NYql::NDecimal::GetBounds<38, true, true>().first;
-constexpr NYql::NDecimal::TInt128 Decimal128Max = NYql::NDecimal::GetBounds<38, true, true>().second;
-
-void AddDecimal(const TMutableColumnPtr& column, const TCell& cell) {
- struct THalves {
- ui64 lo;
- ui64 hi;
- };
-
- if (cell.Size() != sizeof(THalves)) {
- AddNull(column);
- return;
- }
-
- const THalves halves = cell.AsValue<THalves>();
- const NYql::NDecimal::TInt128 val = NYql::NDecimal::FromHalfs(halves.lo, halves.hi);
-
- // Return MAX Decimal128 instead of +inf and MIN Decimal128 instead of -inf
- if (val == NYql::NDecimal::Inf()) {
- auto infVal = NYql::NDecimal::MakePair(Decimal128Max);
- column->insertData((const char*)&infVal, sizeof(infVal));
- return;
- }
-
- if (val == -NYql::NDecimal::Inf()) {
- auto minusInfVal = NYql::NDecimal::MakePair(Decimal128Min);
- column->insertData((const char*)&minusInfVal, sizeof(minusInfVal));
- return;
- }
-
- if (NYql::NDecimal::IsNormal(val)) {
- column->insertData(cell.Data(), cell.Size());
- return;
- } else {
- // Convert all non-numbers to NULLs
- AddNull(column);
- return;
- }
-}
-
-size_t AddValue(const TMutableColumnPtr& column, const TCell& cell, ui32 ydbType) {
- Y_UNUSED(ydbType);
- size_t prevBytes = column->byteSize();
- if (cell.IsNull()) {
- AddNull(column);
- } else {
- if (ydbType == NScheme::NTypeIds::Decimal) {
- AddDecimal(column, cell);
- } else {
- column->insertData(cell.Data(), cell.Size());
- }
- }
- return column->byteSize() - prevBytes;
-}
-
-TTypesAndNames MakeColumns(const TDataTypeRegistryPtr& dataTypeRegistry, const TVector<std::pair<TString, NScheme::TTypeId>>& columns) {
- TTypesAndNames res;
- for (auto& c : columns) {
- TDataTypePtr dataType = dataTypeRegistry->GetByYdbType(c.second);
- dataType = new TNullableDataType(dataType);
- res.push_back({dataType, c.first});
- }
- return res;
-}
-
-} // namespace NSerialization
-
-using namespace NSerialization;
-
-// Saves rows in ClickHouse native format so that they can be sent to CH
-// and processed there without further conversions
-class TBlockBuilder : public NKikimr::IBlockBuilder {
-public:
- explicit TBlockBuilder(TDataTypeRegistryPtr dataTypeRegistry);
- ~TBlockBuilder();
- bool Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) override;
- void AddRow(const NKikimr::TDbTupleRef& key, const NKikimr::TDbTupleRef& value) override;
- TString Finish() override;
- size_t Bytes() const override;
-
-private:
+#include <util/stream/str.h>
+#include <util/generic/ptr.h>
+#include <util/generic/vector.h>
+#include <util/generic/string.h>
+#include <util/generic/hash.h>
+
+namespace NKikHouse {
+namespace NSerialization {
+
+class IColumn;
+using TMutableColumnPtr = TIntrusivePtr<IColumn>;
+using TColumnPtr = TIntrusiveConstPtr<IColumn>;
+
+class IDataType;
+using TDataTypePtr = TIntrusiveConstPtr<IDataType>;
+
+// Generic data type interface
+class IDataType : public TThrRefBase {
+public:
+ virtual ~IDataType() = default;
+ virtual const TString& getName() const = 0;
+ virtual TMutableColumnPtr createColumn() const = 0;
+
+ // Converts 'const this' into TIntrusiveConstPtr
+ TDataTypePtr getPtr() const {
+ return TDataTypePtr(const_cast<IDataType*>(this));
+ }
+};
+
+// Generic column interface
+class IColumn : public TThrRefBase {
+public:
+ virtual ~IColumn() = default;
+ virtual const TDataTypePtr& getType() const = 0;
+ virtual void insertData(const char* buf, size_t sz) = 0;
+ virtual void insertDefault() = 0;
+ virtual size_t rows() const = 0;
+ virtual size_t byteSize() const = 0;
+ // Does binary serialization
+ virtual void serialize(IOutputStream& out) const = 0;
+};
+
+struct TTypeAndName {
+ TDataTypePtr Type;
+ TString Name;
+};
+
+using TTypesAndNames = TVector<TTypeAndName>;
+
+// A block of data with several named columns that have the same number of rows
+class TBlock {
+ TTypesAndNames TypesAndNames;
+ TVector<TMutableColumnPtr> Data;
+public:
+ TBlock() = default;
+
+ TBlock(TTypesAndNames&& typesAndNames)
+ : TypesAndNames(std::move(typesAndNames))
+ {
+ for (const auto& tn : TypesAndNames) {
+ Data.push_back(tn.Type->createColumn());
+ }
+ }
+
+ TBlock cloneEmpty() const {
+ return TBlock(TTypesAndNames(TypesAndNames));
+ }
+
+ size_t columns() const {
+ return TypesAndNames.size();
+ }
+
+ size_t rows() const {
+ return Data.empty() ? 0 : Data.front()->rows();
+ }
+
+ const TDataTypePtr& getType(size_t i) const {
+ return TypesAndNames.at(i).Type;
+ }
+
+ const TString& getName(size_t i) const {
+ return TypesAndNames.at(i).Name;
+ }
+
+ TMutableColumnPtr getMutableColumn(size_t i) {
+ return Data.at(i);
+ }
+
+ TColumnPtr getColumn(size_t i) const {
+ return Data.at(i);
+ }
+
+ // Check that all columns have the same number of rows
+ void checkNumberOfRows() const {
+ if (Data.empty())
+ return;
+
+ size_t expectedRowCount = Data[0]->rows();
+ for (size_t i = 1; i < Data.size(); ++i) {
+ if (Data[i]->rows() != expectedRowCount) {
+ throw yexception() << "Column '" << getName(i) << "' in the block has different number of rows: "
+ << Data[i]->rows() << " expected: " << expectedRowCount;
+ }
+ }
+ }
+
+ // Writes some metadata before the block contents
+ void serializeBlockHeader(IOutputStream& out) const;
+};
+
+
+inline void writeVarUInt(ui64 x, IOutputStream& out) {
+ for (size_t i = 0; i < 9; ++i) {
+ ui8 byte = x & 0x7F;
+ if (x > 0x7F)
+ byte |= 0x80;
+
+ out.Write(byte);
+
+ x >>= 7;
+ if (!x)
+ return;
+ }
+}
+
+inline void writeStringBinary(const TStringBuf& s, IOutputStream& out) {
+ writeVarUInt(s.size(), out);
+ out.Write(s);
+}
+
+template <typename T>
+inline void writePODBinary(const T & x, IOutputStream& out) {
+ out.Write(reinterpret_cast<const char *>(&x), sizeof(x));
+}
+
+
+void TBlock::serializeBlockHeader(IOutputStream& out) const {
+#define APPLY_FOR_BLOCK_INFO_FIELDS(M) \
+ M(bool, is_overflows, false, 1) \
+ M(i32, bucket_num, -1, 2)
+
+#define DECLARE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
+ TYPE NAME = DEFAULT;
+
+ APPLY_FOR_BLOCK_INFO_FIELDS(DECLARE_FIELD)
+#undef DECLARE_FIELD
+
+ /// Set of pairs `FIELD_NUM`, value in binary form. Then 0.
+#define WRITE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
+ writeVarUInt(FIELD_NUM, out); \
+ writePODBinary(NAME, out);
+
+ APPLY_FOR_BLOCK_INFO_FIELDS(WRITE_FIELD)
+#undef WRITE_FIELD
+
+ writeVarUInt(0, out);
+}
+
+
+// Writes blocks into an output stream
+class TBlockWriter {
+ ui32 ClientRevision = 0;
+ IOutputStream& Out;
+public:
+ TBlockWriter(IOutputStream& out, ui32 clientRevision)
+ : ClientRevision(clientRevision)
+ , Out(out)
+ {}
+
+ void Write(const TBlock& block) {
+ // Additional information about the block.
+ if (ClientRevision > 0)
+ block.serializeBlockHeader(Out);
+
+ block.checkNumberOfRows();
+
+ // Dimensions
+ size_t columns = block.columns();
+ size_t rows = block.rows();
+
+ writeVarUInt(columns, Out);
+ writeVarUInt(rows, Out);
+
+ for (size_t i = 0; i < columns; ++i) {
+ TString columnName = block.getName(i);
+ writeStringBinary(columnName, Out);
+
+ TDataTypePtr type = block.getType(i);
+ TString typeName = type->getName();
+ writeStringBinary(typeName, Out);
+
+ // Data
+ if (rows) {
+ // Zero items of data is always represented as zero number of bytes.
+ TColumnPtr column = block.getColumn(i);
+ writeData(column, Out);
+ }
+ }
+ }
+
+private:
+ void writeData(const TColumnPtr & column, IOutputStream& out) {
+ column->serialize(out);
+ }
+};
+
+
+class TDataTypeBase : public IDataType {
+ TString Name;
+protected:
+ explicit TDataTypeBase(const TString& name)
+ : Name(name)
+ {}
+public:
+ const TString& getName() const override {
+ return Name;
+ }
+};
+
+
+class TColumnBase : public IColumn {
+ TDataTypePtr Type;
+protected:
+ explicit TColumnBase(TDataTypePtr dataType)
+ : Type(dataType)
+ {}
+public:
+ const TDataTypePtr& getType() const override {
+ return Type;
+ }
+};
+
+
+class TNullableColumn : public TColumnBase {
+ TVector<char> Nulls;
+ TMutableColumnPtr Values;
+public:
+ TNullableColumn(TDataTypePtr dataType);
+
+ void insertData(const char* buf, size_t sz) override {
+ Nulls.push_back(0);
+ Values->insertData(buf, sz);
+ }
+
+ void insertDefault() override {
+ Nulls.push_back(1);
+ Values->insertDefault();
+ }
+
+ size_t rows() const override {
+ return Nulls.size();
+ }
+
+ size_t byteSize() const override {
+ return Nulls.size() + Values->byteSize();
+ }
+
+ void serialize(IOutputStream& out) const override {
+ out.Write(Nulls.data(), Nulls.size());
+ Values->serialize(out);
+ }
+};
+
+
+class TFixedSizeColumn : public TColumnBase {
+ TVector<char> Data;
+ const size_t ElementSize;
+public:
+ TFixedSizeColumn(TDataTypePtr dataType, size_t elementSize)
+ : TColumnBase(dataType)
+ , ElementSize(elementSize)
+ {}
+
+ void insertData(const char* buf, size_t sz) override {
+ if (sz != ElementSize) {
+ throw yexception() << "Data size " << sz << " doesn't match element size " << ElementSize;
+ }
+ Data.insert(Data.end(), buf, buf + sz);
+ }
+
+ void insertDefault() override {
+ Data.resize(Data.size() + ElementSize);
+ }
+
+ size_t rows() const override {
+ return Data.size() / ElementSize;
+ }
+
+ size_t byteSize() const override {
+ return Data.size();
+ }
+
+ void serialize(IOutputStream& out) const override {
+ out.Write(Data.data(), Data.size());
+ }
+};
+
+
+class TStringColumn : public TColumnBase {
+ using TOffset = size_t;
+ TVector<char> Data; // The buffer to store all strings data
+ TVector<TOffset> Offsets; // Offsets in the buffer (i-th offset points to the end of i-th string)
+public:
+ TStringColumn(TDataTypePtr dataType)
+ : TColumnBase(dataType)
+ {}
+
+ void insertData(const char* buf, size_t sz) override {
+ Data.insert(Data.end(), buf, buf + sz);
+ Data.push_back(0); // Always append '\0' at the end
+ Offsets.push_back(Data.size());
+ }
+
+ void insertDefault() override {
+ Data.push_back(0);
+ Offsets.push_back(Data.size());
+ }
+
+ size_t rows() const override {
+ return Offsets.size();
+ }
+
+ size_t byteSize() const override {
+ return Data.size() + Offsets.size()*sizeof(TOffset);
+ }
+
+ void serialize(IOutputStream& out) const override {
+ if (rows() == 0)
+ return;
+
+ size_t size = Offsets[0] - 1;
+ writeVarUInt(size, out);
+ out.Write(Data.data(), size);
+
+ for (size_t i = 1; i < Offsets.size(); ++i) {
+ size_t size = Offsets[i] - 1 - Offsets[i-1];
+ writeVarUInt(size, out);
+ out.Write(Data.data() + Offsets[i-1], size);
+ }
+ }
+};
+
+
+class TNullableDataType : public TDataTypeBase {
+ const TDataTypePtr Nested;
+public:
+ explicit TNullableDataType(const TDataTypePtr& nested)
+ : TDataTypeBase("Nullable(" + nested->getName() + ")")
+ , Nested(nested)
+ {}
+
+ TMutableColumnPtr createColumn() const override {
+ return new TNullableColumn(this->getPtr());
+ }
+
+ TDataTypePtr getNested() const {
+ return Nested;
+ }
+};
+
+
+template<class TElement>
+class TFixedSizeDataType : public TDataTypeBase {
+public:
+ explicit TFixedSizeDataType(const TString& name)
+ : TDataTypeBase(name)
+ {}
+
+ TMutableColumnPtr createColumn() const override {
+ return new TFixedSizeColumn(this->getPtr(), sizeof(TElement));
+ }
+};
+
+
+class TStringDataType : public TDataTypeBase {
+public:
+ TStringDataType()
+ : TDataTypeBase("String")
+ {}
+
+ TMutableColumnPtr createColumn() const override {
+ return new TStringColumn(this->getPtr());
+ }
+};
+
+
+TNullableColumn::TNullableColumn(TDataTypePtr dataType)
+ : TColumnBase(dataType)
+ , Nulls()
+ , Values()
+{
+ const TNullableDataType* nullableType = dynamic_cast<const TNullableDataType*>(dataType.Get());
+ Values = nullableType->getNested()->createColumn();
+}
+
+
+using namespace NKikimr;
+
+// Returns specific data types by their names
+class TDataTypeRegistry : public TThrRefBase {
+ THashMap<TString, TDataTypePtr> Types;
+
+private:
+ void Register(TDataTypePtr dataType) {
+ Types[dataType->getName()] = dataType;
+ }
+
+public:
+ TDataTypeRegistry() {
+ Register(new TStringDataType());
+
+ Register(new TFixedSizeDataType<i8>("Int8"));
+ Register(new TFixedSizeDataType<i16>("Int16"));
+ Register(new TFixedSizeDataType<i32>("Int32"));
+ Register(new TFixedSizeDataType<i64>("Int64"));
+
+ Register(new TFixedSizeDataType<ui8>("UInt8"));
+ Register(new TFixedSizeDataType<ui16>("UInt16"));
+ Register(new TFixedSizeDataType<ui32>("UInt32"));
+ Register(new TFixedSizeDataType<ui64>("UInt64"));
+
+ Register(new TFixedSizeDataType<float>("Float32"));
+ Register(new TFixedSizeDataType<double>("Float64"));
+
+ Register(new TFixedSizeDataType<ui16>("Date"));
+ Register(new TFixedSizeDataType<ui32>("DateTime"));
+
+ Register(new TFixedSizeDataType<NYql::NDecimal::TInt128>("Decimal(22,9)"));
+ }
+
+ TDataTypePtr Get(TStringBuf name) const {
+ return Types.at(name);
+ }
+
+ TDataTypePtr GetByYdbType(NScheme::TTypeId t) const {
+
+ #define CONVERT(ydbType, chType) \
+ case NScheme::NTypeIds::ydbType: \
+ return Get(#chType);
+
+ switch (t) {
+ CONVERT(Bool, UInt8);
+
+ CONVERT(Int8, Int8);
+ CONVERT(Int16, Int16);
+ CONVERT(Int32, Int32);
+ CONVERT(Int64, Int64);
+
+ CONVERT(Uint8, UInt8);
+ CONVERT(Uint16, UInt16);
+ CONVERT(Uint32, UInt32);
+ CONVERT(Uint64, UInt64);
+
+ CONVERT(Float, Float32);
+ CONVERT(Double, Float64);
+
+ CONVERT(String, String);
+ CONVERT(Utf8, String);
+ CONVERT(Json, String);
+ CONVERT(Yson, String);
+
+ CONVERT(Date, Date);
+ CONVERT(Datetime, DateTime);
+ CONVERT(Timestamp, UInt64);
+ CONVERT(Interval, Int64);
+
+ CONVERT(Decimal, Decimal(22,9));
+
+ // Some internal types
+ CONVERT(PairUi64Ui64, String);
+ CONVERT(ActorId, String);
+ CONVERT(StepOrderId, String);
+
+ default:
+ throw yexception() << "Unsupported type: " << t;
+ }
+ #undef CONVERT
+ }
+};
+
+using TDataTypeRegistryPtr = TIntrusiveConstPtr<TDataTypeRegistry>;
+
+void AddNull(const TMutableColumnPtr& column) {
+ // Default value is NULL for Nullable column
+ column->insertDefault();
+}
+
+constexpr NYql::NDecimal::TInt128 Decimal128Min = NYql::NDecimal::GetBounds<38, true, true>().first;
+constexpr NYql::NDecimal::TInt128 Decimal128Max = NYql::NDecimal::GetBounds<38, true, true>().second;
+
+void AddDecimal(const TMutableColumnPtr& column, const TCell& cell) {
+ struct THalves {
+ ui64 lo;
+ ui64 hi;
+ };
+
+ if (cell.Size() != sizeof(THalves)) {
+ AddNull(column);
+ return;
+ }
+
+ const THalves halves = cell.AsValue<THalves>();
+ const NYql::NDecimal::TInt128 val = NYql::NDecimal::FromHalfs(halves.lo, halves.hi);
+
+ // Return MAX Decimal128 instead of +inf and MIN Decimal128 instead of -inf
+ if (val == NYql::NDecimal::Inf()) {
+ auto infVal = NYql::NDecimal::MakePair(Decimal128Max);
+ column->insertData((const char*)&infVal, sizeof(infVal));
+ return;
+ }
+
+ if (val == -NYql::NDecimal::Inf()) {
+ auto minusInfVal = NYql::NDecimal::MakePair(Decimal128Min);
+ column->insertData((const char*)&minusInfVal, sizeof(minusInfVal));
+ return;
+ }
+
+ if (NYql::NDecimal::IsNormal(val)) {
+ column->insertData(cell.Data(), cell.Size());
+ return;
+ } else {
+ // Convert all non-numbers to NULLs
+ AddNull(column);
+ return;
+ }
+}
+
+size_t AddValue(const TMutableColumnPtr& column, const TCell& cell, ui32 ydbType) {
+ Y_UNUSED(ydbType);
+ size_t prevBytes = column->byteSize();
+ if (cell.IsNull()) {
+ AddNull(column);
+ } else {
+ if (ydbType == NScheme::NTypeIds::Decimal) {
+ AddDecimal(column, cell);
+ } else {
+ column->insertData(cell.Data(), cell.Size());
+ }
+ }
+ return column->byteSize() - prevBytes;
+}
+
+TTypesAndNames MakeColumns(const TDataTypeRegistryPtr& dataTypeRegistry, const TVector<std::pair<TString, NScheme::TTypeId>>& columns) {
+ TTypesAndNames res;
+ for (auto& c : columns) {
+ TDataTypePtr dataType = dataTypeRegistry->GetByYdbType(c.second);
+ dataType = new TNullableDataType(dataType);
+ res.push_back({dataType, c.first});
+ }
+ return res;
+}
+
+} // namespace NSerialization
+
+using namespace NSerialization;
+
+// Saves rows in ClickHouse native format so that they can be sent to CH
+// and processed there without further conversions
+class TBlockBuilder : public NKikimr::IBlockBuilder {
+public:
+ explicit TBlockBuilder(TDataTypeRegistryPtr dataTypeRegistry);
+ ~TBlockBuilder();
+ bool Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) override;
+ void AddRow(const NKikimr::TDbTupleRef& key, const NKikimr::TDbTupleRef& value) override;
+ TString Finish() override;
+ size_t Bytes() const override;
+
+private:
std::unique_ptr<IBlockBuilder> Clone() const override;
-
-private:
- const TDataTypeRegistryPtr DataTypeRegistry;
-
- class TImpl;
- TAutoPtr<TImpl> Impl;
-};
-
-
-class TBlockBuilder::TImpl {
- constexpr static ui32 DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD = 54408;
-public:
- TImpl(const TDataTypeRegistryPtr& dataTypeRegistry, const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock)
- : MaxRowsInBlock(maxRowsInBlock)
- , MaxBytesInBlock(maxBytesInBlock)
- , BlockTemplate(MakeColumns(dataTypeRegistry, columns))
- , Out(Buffer)
- , BlockWriter(Out, DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD)
- {
- StartNewBlock();
- }
-
- void AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
- Y_UNUSED(key);
-
- if (CurrentBlockRows >= MaxRowsInBlock || CurrentBlockBytes >= MaxBytesInBlock) {
- FinishCurrentBlock();
- }
-
- ++CurrentBlockRows;
- for (size_t ci = 0; ci < value.ColumnCount; ++ci) {
- CurrentBlockBytes += AddValue(CurrentBlock.getMutableColumn(ci), value.Columns[ci], value.Types[ci]);
- }
- }
-
- TString Finish() {
- FinishCurrentBlock();
- Out.Finish();
- return Buffer;
- }
-
- size_t Bytes() const {
- return CurrentBlockBytes + Buffer.size();
- }
-
-private:
- void FinishCurrentBlock() {
- if (CurrentBlockRows > 0) {
- BlockWriter.Write(CurrentBlock);
-
- StartNewBlock();
- }
- }
-
- void StartNewBlock() {
- CurrentBlockRows = 0;
- CurrentBlockBytes = 0;
- CurrentBlock = BlockTemplate.cloneEmpty();
- }
-
-private:
- TDataTypeRegistryPtr DateTypeRegistry;
- const ui64 MaxRowsInBlock;
- const ui64 MaxBytesInBlock;
- TBlock BlockTemplate;
- size_t CurrentBlockRows;
- size_t CurrentBlockBytes;
- TBlock CurrentBlock;
- TString Buffer;
- TStringOutput Out;
- TBlockWriter BlockWriter;
-};
-
-
-TBlockBuilder::TBlockBuilder(TDataTypeRegistryPtr dataTypeRegistry)
- : DataTypeRegistry(dataTypeRegistry)
-{}
-
-TBlockBuilder::~TBlockBuilder() {
-}
-
-bool TBlockBuilder::Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) {
- try {
- Impl.Reset(new TImpl(DataTypeRegistry, columns, maxRowsInBlock, maxBytesInBlock));
- } catch (std::exception& e) {
- err = e.what();
- return false;
- }
- return true;
-}
-
-void TBlockBuilder::AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
- if (Impl)
- Impl->AddRow(key, value);
-}
-
-TString TBlockBuilder::Finish() {
- if (!Impl)
- return TString();
-
- return Impl->Finish();
-}
-
-size_t TBlockBuilder::Bytes() const {
- if (!Impl)
- return 0;
-
- return Impl->Bytes();
-}
-
-std::unique_ptr<IBlockBuilder> TBlockBuilder::Clone() const {
- return std::make_unique<TBlockBuilder>(DataTypeRegistry);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-void RegisterFormat(NKikimr::TFormatFactory& factory) {
- TDataTypeRegistryPtr dataTypeRegistry(new TDataTypeRegistry);
- factory.RegisterBlockBuilder(std::make_unique<TBlockBuilder>(dataTypeRegistry), "clickhouse_native");
-}
-
-} // namespace NKikHouse
+
+private:
+ const TDataTypeRegistryPtr DataTypeRegistry;
+
+ class TImpl;
+ TAutoPtr<TImpl> Impl;
+};
+
+
+class TBlockBuilder::TImpl {
+ constexpr static ui32 DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD = 54408;
+public:
+ TImpl(const TDataTypeRegistryPtr& dataTypeRegistry, const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock)
+ : MaxRowsInBlock(maxRowsInBlock)
+ , MaxBytesInBlock(maxBytesInBlock)
+ , BlockTemplate(MakeColumns(dataTypeRegistry, columns))
+ , Out(Buffer)
+ , BlockWriter(Out, DBMS_MIN_REVISION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD)
+ {
+ StartNewBlock();
+ }
+
+ void AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
+ Y_UNUSED(key);
+
+ if (CurrentBlockRows >= MaxRowsInBlock || CurrentBlockBytes >= MaxBytesInBlock) {
+ FinishCurrentBlock();
+ }
+
+ ++CurrentBlockRows;
+ for (size_t ci = 0; ci < value.ColumnCount; ++ci) {
+ CurrentBlockBytes += AddValue(CurrentBlock.getMutableColumn(ci), value.Columns[ci], value.Types[ci]);
+ }
+ }
+
+ TString Finish() {
+ FinishCurrentBlock();
+ Out.Finish();
+ return Buffer;
+ }
+
+ size_t Bytes() const {
+ return CurrentBlockBytes + Buffer.size();
+ }
+
+private:
+ void FinishCurrentBlock() {
+ if (CurrentBlockRows > 0) {
+ BlockWriter.Write(CurrentBlock);
+
+ StartNewBlock();
+ }
+ }
+
+ void StartNewBlock() {
+ CurrentBlockRows = 0;
+ CurrentBlockBytes = 0;
+ CurrentBlock = BlockTemplate.cloneEmpty();
+ }
+
+private:
+ TDataTypeRegistryPtr DateTypeRegistry;
+ const ui64 MaxRowsInBlock;
+ const ui64 MaxBytesInBlock;
+ TBlock BlockTemplate;
+ size_t CurrentBlockRows;
+ size_t CurrentBlockBytes;
+ TBlock CurrentBlock;
+ TString Buffer;
+ TStringOutput Out;
+ TBlockWriter BlockWriter;
+};
+
+
+TBlockBuilder::TBlockBuilder(TDataTypeRegistryPtr dataTypeRegistry)
+ : DataTypeRegistry(dataTypeRegistry)
+{}
+
+TBlockBuilder::~TBlockBuilder() {
+}
+
+bool TBlockBuilder::Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) {
+ try {
+ Impl.Reset(new TImpl(DataTypeRegistry, columns, maxRowsInBlock, maxBytesInBlock));
+ } catch (std::exception& e) {
+ err = e.what();
+ return false;
+ }
+ return true;
+}
+
+void TBlockBuilder::AddRow(const TDbTupleRef& key, const TDbTupleRef& value) {
+ if (Impl)
+ Impl->AddRow(key, value);
+}
+
+TString TBlockBuilder::Finish() {
+ if (!Impl)
+ return TString();
+
+ return Impl->Finish();
+}
+
+size_t TBlockBuilder::Bytes() const {
+ if (!Impl)
+ return 0;
+
+ return Impl->Bytes();
+}
+
+std::unique_ptr<IBlockBuilder> TBlockBuilder::Clone() const {
+ return std::make_unique<TBlockBuilder>(DataTypeRegistry);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void RegisterFormat(NKikimr::TFormatFactory& factory) {
+ TDataTypeRegistryPtr dataTypeRegistry(new TDataTypeRegistry);
+ factory.RegisterBlockBuilder(std::make_unique<TBlockBuilder>(dataTypeRegistry), "clickhouse_native");
+}
+
+} // namespace NKikHouse
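
Editor's note on the file above: TBlockWriter frames every block as an optional header, a varint column count, a varint row count, and then, per column, its name, its type name and the raw column bytes. Below is a minimal standalone sketch of the varint encoding used by writeVarUInt (7 data bits per byte, high bit as the continuation flag, at most 9 bytes for a ui64). The decoder and all names here are illustrative assumptions for this note only, not part of the patch.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

// Hypothetical standalone helpers mirroring writeVarUInt(): 7 data bits per
// byte, the high bit set while more bytes follow, at most 9 bytes for a ui64.
static void EncodeVarUInt(uint64_t x, std::string& out) {
    for (size_t i = 0; i < 9; ++i) {
        uint8_t byte = x & 0x7F;
        if (x > 0x7F)
            byte |= 0x80;                       // continuation bit
        out.push_back(static_cast<char>(byte));
        x >>= 7;
        if (!x)
            return;
    }
}

// Matching decoder (an assumption added for illustration; not in the patch).
static uint64_t DecodeVarUInt(const std::string& in, size_t& pos) {
    uint64_t x = 0;
    for (size_t i = 0; i < 9; ++i) {
        uint8_t byte = static_cast<uint8_t>(in.at(pos++));
        x |= static_cast<uint64_t>(byte & 0x7F) << (7 * i);
        if (!(byte & 0x80))
            break;
    }
    return x;
}

int main() {
    std::string buf;
    EncodeVarUInt(300, buf);                    // 300 encodes as 0xAC 0x02
    assert(buf.size() == 2);
    size_t pos = 0;
    assert(DecodeVarUInt(buf, pos) == 300);
    return 0;
}
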
diff --git a/ydb/core/formats/clickhouse_block.h b/ydb/core/formats/clickhouse_block.h
index 9b5c890debd..40471674be9 100644
--- a/ydb/core/formats/clickhouse_block.h
+++ b/ydb/core/formats/clickhouse_block.h
@@ -1,9 +1,9 @@
-#pragma once
-
-#include "factory.h"
-
-namespace NKikHouse {
-
-void RegisterFormat(NKikimr::TFormatFactory& factory);
-
-}
+#pragma once
+
+#include "factory.h"
+
+namespace NKikHouse {
+
+void RegisterFormat(NKikimr::TFormatFactory& factory);
+
+}
diff --git a/ydb/core/formats/factory.h b/ydb/core/formats/factory.h
index 45b9f831578..59f9079fac7 100644
--- a/ydb/core/formats/factory.h
+++ b/ydb/core/formats/factory.h
@@ -1,42 +1,42 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/scheme/scheme_type_id.h>
#include <ydb/core/scheme/scheme_tablecell.h>
-#include <util/generic/ptr.h>
-#include <util/generic/hash.h>
-
-namespace NKikimr {
-
-class IBlockBuilder {
-public:
- virtual ~IBlockBuilder() = default;
-
- virtual bool Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) = 0;
- virtual void AddRow(const TDbTupleRef& key, const TDbTupleRef& value) = 0;
- virtual TString Finish() = 0;
- virtual size_t Bytes() const = 0;
-
-private:
- friend class TFormatFactory;
-
- virtual std::unique_ptr<IBlockBuilder> Clone() const = 0;
-};
-
-
-class TFormatFactory : public TThrRefBase {
-public:
- void RegisterBlockBuilder(std::unique_ptr<IBlockBuilder>&& builder, const TString& format) {
- Formats[format] = std::move(builder);
- }
-
- std::unique_ptr<IBlockBuilder> CreateBlockBuilder(const TString& format) const {
- auto it = Formats.FindPtr(format);
- if (!it)
- return nullptr;
- return (*it)->Clone();
- }
-private:
- THashMap<TString, std::unique_ptr<IBlockBuilder>> Formats;
-};
-
-}
+#include <util/generic/ptr.h>
+#include <util/generic/hash.h>
+
+namespace NKikimr {
+
+class IBlockBuilder {
+public:
+ virtual ~IBlockBuilder() = default;
+
+ virtual bool Start(const TVector<std::pair<TString, NScheme::TTypeId>>& columns, ui64 maxRowsInBlock, ui64 maxBytesInBlock, TString& err) = 0;
+ virtual void AddRow(const TDbTupleRef& key, const TDbTupleRef& value) = 0;
+ virtual TString Finish() = 0;
+ virtual size_t Bytes() const = 0;
+
+private:
+ friend class TFormatFactory;
+
+ virtual std::unique_ptr<IBlockBuilder> Clone() const = 0;
+};
+
+
+class TFormatFactory : public TThrRefBase {
+public:
+ void RegisterBlockBuilder(std::unique_ptr<IBlockBuilder>&& builder, const TString& format) {
+ Formats[format] = std::move(builder);
+ }
+
+ std::unique_ptr<IBlockBuilder> CreateBlockBuilder(const TString& format) const {
+ auto it = Formats.FindPtr(format);
+ if (!it)
+ return nullptr;
+ return (*it)->Clone();
+ }
+private:
+ THashMap<TString, std::unique_ptr<IBlockBuilder>> Formats;
+};
+
+}
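
Editor's note on factory.h above: TFormatFactory keeps one prototype IBlockBuilder per format name and hands every caller a fresh Clone() of it. A reduced sketch of that prototype-registry pattern with plain standard-library types; TNativeBuilder, TFactory and the other names below are illustrative stand-ins, not the real YDB classes.

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Prototype registry: one stored builder per format name, a clone per caller.
struct IBuilder {
    virtual ~IBuilder() = default;
    virtual std::unique_ptr<IBuilder> Clone() const = 0;
    virtual std::string Describe() const = 0;
};

struct TNativeBuilder : IBuilder {
    std::unique_ptr<IBuilder> Clone() const override { return std::make_unique<TNativeBuilder>(*this); }
    std::string Describe() const override { return "clickhouse_native"; }
};

class TFactory {
    std::unordered_map<std::string, std::unique_ptr<IBuilder>> Formats;
public:
    void Register(std::unique_ptr<IBuilder> builder, const std::string& format) {
        Formats[format] = std::move(builder);          // store the prototype
    }
    std::unique_ptr<IBuilder> Create(const std::string& format) const {
        auto it = Formats.find(format);
        return it == Formats.end() ? nullptr : it->second->Clone();
    }
};

int main() {
    TFactory factory;
    factory.Register(std::make_unique<TNativeBuilder>(), "clickhouse_native");
    if (auto builder = factory.Create("clickhouse_native"))
        std::cout << builder->Describe() << "\n";      // each caller gets its own clone
    return 0;
}
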
diff --git a/ydb/core/formats/sharding.h b/ydb/core/formats/sharding.h
index 676c47529a1..317ec60295e 100644
--- a/ydb/core/formats/sharding.h
+++ b/ydb/core/formats/sharding.h
@@ -59,15 +59,15 @@ private:
// KIKIMR-11529
class TLogsSharding {
public:
- static constexpr ui32 DEFAULT_ACITVE_SHARDS = 10;
- static constexpr TDuration DEFAULT_CHANGE_PERIOD = TDuration::Minutes(5);
-
+ static constexpr ui32 DEFAULT_ACITVE_SHARDS = 10;
+ static constexpr TDuration DEFAULT_CHANGE_PERIOD = TDuration::Minutes(5);
+
// TODO
- TLogsSharding(ui32 shardsCountTotal, ui32 shardsCountActive = DEFAULT_ACITVE_SHARDS, TDuration changePeriod = DEFAULT_CHANGE_PERIOD)
- : ShardsCount(shardsCountTotal)
- , NumActive(Min<ui32>(shardsCountActive, ShardsCount))
- , TsMin(0)
- , ChangePeriod(changePeriod.MicroSeconds())
+ TLogsSharding(ui32 shardsCountTotal, ui32 shardsCountActive = DEFAULT_ACITVE_SHARDS, TDuration changePeriod = DEFAULT_CHANGE_PERIOD)
+ : ShardsCount(shardsCountTotal)
+ , NumActive(Min<ui32>(shardsCountActive, ShardsCount))
+ , TsMin(0)
+ , ChangePeriod(changePeriod.MicroSeconds())
{}
// tsMin = GetTsMin(tabletIdsMap, timestamp);
@@ -80,7 +80,7 @@ public:
ui64 uidHash = XXH64(uid.data(), uid.size(), 0);
ui32 tsInterval = (timestamp - TsMin) / ChangePeriod;
ui32 numIntervals = ShardsCount / NumActive;
- return ((uidHash % NumActive) + (tsInterval % numIntervals) * NumActive) % ShardsCount;
+ return ((uidHash % NumActive) + (tsInterval % numIntervals) * NumActive) % ShardsCount;
}
std::vector<ui32> MakeSharding(const std::shared_ptr<arrow::RecordBatch>& batch,
@@ -118,8 +118,8 @@ public:
private:
ui32 ShardsCount;
ui32 NumActive;
- ui64 TsMin;
- ui64 ChangePeriod;
+ ui64 TsMin;
+ ui64 ChangePeriod;
};
}
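
Editor's note on sharding.h above: the shard-selection formula in TLogsSharding combines a hash of the uid, spread over the NumActive shards, with a time interval that rotates which group of shards is currently active. A standalone sketch of the same arithmetic; std::hash stands in for XXH64 and ChooseShard is an illustrative name, purely so the example has no external dependency.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Mirrors the shard-selection arithmetic above; std::hash replaces XXH64 only
// so this sketch is self-contained.
uint32_t ChooseShard(uint64_t timestampUs, const std::string& uid,
                     uint32_t shardsCount, uint32_t numActive,
                     uint64_t tsMinUs, uint64_t changePeriodUs) {
    const uint64_t uidHash = std::hash<std::string>{}(uid);
    const uint32_t tsInterval = (timestampUs - tsMinUs) / changePeriodUs;
    const uint32_t numIntervals = shardsCount / numActive;
    return ((uidHash % numActive) + (tsInterval % numIntervals) * numActive) % shardsCount;
}

int main() {
    // 100 shards, 10 active at a time, the active group rotates every 5 minutes.
    const uint64_t period = 5ull * 60 * 1000000;
    std::cout << ChooseShard(0, "uid-1", 100, 10, 0, period) << "\n";          // hash % 10
    std::cout << ChooseShard(period + 1, "uid-1", 100, 10, 0, period) << "\n"; // same uid, next interval: shifted by 10
    return 0;
}
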
diff --git a/ydb/core/formats/ut_arrow.cpp b/ydb/core/formats/ut_arrow.cpp
index 948529fb2b3..344bbe7cb8e 100644
--- a/ydb/core/formats/ut_arrow.cpp
+++ b/ydb/core/formats/ut_arrow.cpp
@@ -355,7 +355,7 @@ std::shared_ptr<arrow::RecordBatch> VectorToBatch(const std::vector<struct TData
batchBuilder.AddRow(key, value);
}
- return batchBuilder.FlushBatch(false);
+ return batchBuilder.FlushBatch(false);
}
std::vector<TDataRow> TestRows() {
diff --git a/ydb/core/formats/ya.make b/ydb/core/formats/ya.make
index 35897473bc1..26078f2d2a3 100644
--- a/ydb/core/formats/ya.make
+++ b/ydb/core/formats/ya.make
@@ -2,34 +2,34 @@ RECURSE_FOR_TESTS(
ut
)
-LIBRARY()
-
+LIBRARY()
+
OWNER(
davenger
g:kikimr
)
-
-PEERDIR(
+
+PEERDIR(
contrib/libs/apache/arrow
ydb/core/scheme
-)
-
-SRCS(
+)
+
+SRCS(
arrow_batch_builder.cpp
arrow_batch_builder.h
arrow_helpers.cpp
arrow_helpers.h
- clickhouse_block.h
- clickhouse_block.cpp
+ clickhouse_block.h
+ clickhouse_block.cpp
input_stream.h
merging_sorted_input_stream.cpp
merging_sorted_input_stream.h
one_batch_input_stream.h
sharding.h
sort_cursor.h
- factory.h
+ factory.h
program.cpp
program.h
-)
-
-END()
+)
+
+END()
diff --git a/ydb/core/grpc_services/base/base.h b/ydb/core/grpc_services/base/base.h
index 8d869c056ee..44b25c4a5f9 100644
--- a/ydb/core/grpc_services/base/base.h
+++ b/ydb/core/grpc_services/base/base.h
@@ -90,13 +90,13 @@ struct TRpcServices {
EvCancelOperation,
EvForgetOperation,
EvExecDataQueryAst,
- EvExecuteYqlScript,
- EvUploadRows,
+ EvExecuteYqlScript,
+ EvUploadRows,
EvS3Listing,
EvExplainDataQueryAst,
- EvReadColumns,
+ EvReadColumns,
EvBiStreamPing,
- EvRefreshTokenRequest, // internal call
+ EvRefreshTokenRequest, // internal call
EvGetShardLocations,
EvExperimentalStreamQuery,
EvStreamPQWrite,
@@ -105,9 +105,9 @@ struct TRpcServices {
EvListOperations,
EvExportToYt,
EvDiscoverPQClusters,
- EvBulkUpsert,
+ EvBulkUpsert,
EvWhoAmI,
- EvKikhouseDescribeTable,
+ EvKikhouseDescribeTable,
EvCreateRateLimiterResource,
EvAlterRateLimiterResource,
EvDropRateLimiterResource,
@@ -189,14 +189,14 @@ struct TRpcServices {
EvYandexQueryListBindings,
EvYandexQueryDescribeBinding,
EvYandexQueryModifyBinding,
- EvYandexQueryDeleteBinding,
- EvCreateLogStore,
- EvDescribeLogStore,
- EvDropLogStore,
- EvCreateLogTable,
- EvDescribeLogTable,
+ EvYandexQueryDeleteBinding,
+ EvCreateLogStore,
+ EvDescribeLogStore,
+ EvDropLogStore,
+ EvCreateLogTable,
+ EvDescribeLogTable,
EvDropLogTable,
- EvAlterLogTable,
+ EvAlterLogTable,
EvLogin,
EvAnalyticsInternalPingTask,
EvAnalyticsInternalGetTask,
diff --git a/ydb/core/grpc_services/grpc_helper.cpp b/ydb/core/grpc_services/grpc_helper.cpp
index e078be56051..b1ef484cc7d 100644
--- a/ydb/core/grpc_services/grpc_helper.cpp
+++ b/ydb/core/grpc_services/grpc_helper.cpp
@@ -6,40 +6,40 @@ namespace NGRpcService {
//using namespace NActors;
NGrpc::IGRpcRequestLimiterPtr TCreateLimiterCB::operator()(const char* serviceName, const char* requestName, i64 limit) const {
- TString fullName = TString(serviceName) + "_" + requestName;
- return LimiterRegistry->RegisterRequestType(fullName, limit);
-}
-
-
+ TString fullName = TString(serviceName) + "_" + requestName;
+ return LimiterRegistry->RegisterRequestType(fullName, limit);
+}
+
+
class TRequestInFlightLimiter : public NGrpc::IGRpcRequestLimiter {
-private:
+private:
NGrpc::TInFlightLimiterImpl<TControlWrapper> RequestLimiter;
-
-public:
- explicit TRequestInFlightLimiter(TControlWrapper limiter)
- : RequestLimiter(std::move(limiter))
- {}
-
- bool IncRequest() override {
- return RequestLimiter.Inc();
- }
-
- void DecRequest() override {
- RequestLimiter.Dec();
- }
-};
-
-
+
+public:
+ explicit TRequestInFlightLimiter(TControlWrapper limiter)
+ : RequestLimiter(std::move(limiter))
+ {}
+
+ bool IncRequest() override {
+ return RequestLimiter.Inc();
+ }
+
+ void DecRequest() override {
+ RequestLimiter.Dec();
+ }
+};
+
+
NGrpc::IGRpcRequestLimiterPtr TInFlightLimiterRegistry::RegisterRequestType(TString name, i64 limit) {
- TGuard<TMutex> g(Lock);
- if (!PerTypeLimiters.count(name)) {
- TControlWrapper control(limit, 0, 1000000);
- Icb->RegisterSharedControl(control, name + "_MaxInFlight");
- PerTypeLimiters[name] = new TRequestInFlightLimiter(control);
- }
-
- return PerTypeLimiters[name];
-}
-
+ TGuard<TMutex> g(Lock);
+ if (!PerTypeLimiters.count(name)) {
+ TControlWrapper control(limit, 0, 1000000);
+ Icb->RegisterSharedControl(control, name + "_MaxInFlight");
+ PerTypeLimiters[name] = new TRequestInFlightLimiter(control);
+ }
+
+ return PerTypeLimiters[name];
+}
+
} // namespace NGRpcService
} // namespace NKikimr
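
Editor's note on grpc_helper.cpp above: RegisterRequestType creates one limiter per request type lazily under a mutex and wires its limit to a shared control named "<type>_MaxInFlight". A reduced sketch of the same register-once pattern, with a plain atomic counter standing in for TControlWrapper and the gRPC limiter interface; all names below are illustrative.

#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// One in-flight counter per request type, created lazily under a lock; the
// plain limit below stands in for the shared control of the real code.
class TLimiter {
    std::atomic<int64_t> InFlight{0};
    int64_t Limit;
public:
    explicit TLimiter(int64_t limit) : Limit(limit) {}

    bool Inc() {
        if (InFlight.fetch_add(1) + 1 > Limit) {
            InFlight.fetch_sub(1);       // over the limit: roll back and reject
            return false;
        }
        return true;
    }

    void Dec() {
        InFlight.fetch_sub(1);
    }
};

class TLimiterRegistry {
    std::mutex Lock;
    std::unordered_map<std::string, std::shared_ptr<TLimiter>> PerType;
public:
    std::shared_ptr<TLimiter> Register(const std::string& name, int64_t limit) {
        std::lock_guard<std::mutex> g(Lock);
        auto& slot = PerType[name];
        if (!slot)
            slot = std::make_shared<TLimiter>(limit);  // created once per request type
        return slot;
    }
};
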
diff --git a/ydb/core/grpc_services/grpc_helper.h b/ydb/core/grpc_services/grpc_helper.h
index 38987405f46..9a2a673c7b7 100644
--- a/ydb/core/grpc_services/grpc_helper.h
+++ b/ydb/core/grpc_services/grpc_helper.h
@@ -1,45 +1,45 @@
#pragma once
#include "defs.h"
-#include "grpc_mon.h"
+#include "grpc_mon.h"
#include <ydb/core/control/immediate_control_board_impl.h>
#include <ydb/core/grpc_services/counters/counters.h>
-
+
#include <library/cpp/grpc/server/grpc_request.h>
namespace NKikimr {
namespace NGRpcService {
-class TInFlightLimiterRegistry : public TThrRefBase {
-private:
- TIntrusivePtr<NKikimr::TControlBoard> Icb;
- TMutex Lock;
+class TInFlightLimiterRegistry : public TThrRefBase {
+private:
+ TIntrusivePtr<NKikimr::TControlBoard> Icb;
+ TMutex Lock;
THashMap<TString, NGrpc::IGRpcRequestLimiterPtr> PerTypeLimiters;
-
-public:
- explicit TInFlightLimiterRegistry(TIntrusivePtr<NKikimr::TControlBoard> icb)
- : Icb(icb)
- {}
-
+
+public:
+ explicit TInFlightLimiterRegistry(TIntrusivePtr<NKikimr::TControlBoard> icb)
+ : Icb(icb)
+ {}
+
NGrpc::IGRpcRequestLimiterPtr RegisterRequestType(TString name, i64 limit);
-};
-
-class TCreateLimiterCB {
-public:
- explicit TCreateLimiterCB(TIntrusivePtr<TInFlightLimiterRegistry> limiterRegistry)
- : LimiterRegistry(limiterRegistry)
- {}
-
+};
+
+class TCreateLimiterCB {
+public:
+ explicit TCreateLimiterCB(TIntrusivePtr<TInFlightLimiterRegistry> limiterRegistry)
+ : LimiterRegistry(limiterRegistry)
+ {}
+
NGrpc::IGRpcRequestLimiterPtr operator()(const char* serviceName, const char* requestName, i64 limit) const;
-
-private:
- TIntrusivePtr<TInFlightLimiterRegistry> LimiterRegistry;
-};
-
-inline TCreateLimiterCB CreateLimiterCb(TIntrusivePtr<TInFlightLimiterRegistry> limiterRegistry) {
- return TCreateLimiterCB(limiterRegistry);
-}
-
+
+private:
+ TIntrusivePtr<TInFlightLimiterRegistry> LimiterRegistry;
+};
+
+inline TCreateLimiterCB CreateLimiterCb(TIntrusivePtr<TInFlightLimiterRegistry> limiterRegistry) {
+ return TCreateLimiterCB(limiterRegistry);
+}
+
template <typename TIn, typename TOut, typename TService, typename TInProtoPrinter=google::protobuf::TextFormat::Printer, typename TOutProtoPrinter=google::protobuf::TextFormat::Printer>
using TGRpcRequest = NGrpc::TGRpcRequest<TIn, TOut, TService, TInProtoPrinter, TOutProtoPrinter>;
diff --git a/ydb/core/grpc_services/grpc_mon.cpp b/ydb/core/grpc_services/grpc_mon.cpp
index bd2b25a6471..cd15157b052 100644
--- a/ydb/core/grpc_services/grpc_mon.cpp
+++ b/ydb/core/grpc_services/grpc_mon.cpp
@@ -1,29 +1,29 @@
-#include "grpc_mon.h"
-
+#include "grpc_mon.h"
+
#include <ydb/core/base/events.h>
-
+
#include <library/cpp/cache/cache.h>
#include <library/cpp/monlib/service/pages/templates.h>
#include <library/cpp/actors/core/mon.h>
#include <library/cpp/actors/core/hfunc.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-struct TEvGrpcMon {
- enum EEv {
- EvReportPeer = EventSpaceBegin(TKikimrEvents::ES_GRPC_MON),
-
- EvEnd
- };
-
- static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_GRPC_MON), "expect EvEnd < EventSpaceEnd(TKikimrEvents::ES_GRPC_MON)");
-
- struct TEvReportPeer: public TEventLocal<TEvReportPeer, EvReportPeer> {
+
+namespace NKikimr {
+namespace NGRpcService {
+
+struct TEvGrpcMon {
+ enum EEv {
+ EvReportPeer = EventSpaceBegin(TKikimrEvents::ES_GRPC_MON),
+
+ EvEnd
+ };
+
+ static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_GRPC_MON), "expect EvEnd < EventSpaceEnd(TKikimrEvents::ES_GRPC_MON)");
+
+ struct TEvReportPeer: public TEventLocal<TEvReportPeer, EvReportPeer> {
explicit TEvReportPeer(const TString& name)
- : Name(name)
- {}
-
+ : Name(name)
+ {}
+
TEvReportPeer(const TString& name, const TString& buildInfo)
: Name(name)
, SdkBuildInfo(buildInfo)
@@ -31,94 +31,94 @@ struct TEvGrpcMon {
const TString Name;
const TString SdkBuildInfo;
- };
-};
-
-
-class TGrpcMon : public TActor<TGrpcMon> {
-public:
- TGrpcMon()
- : TActor(&TGrpcMon::StateNormal)
- , Peers(2000)
- {}
-
+ };
+};
+
+
+class TGrpcMon : public TActor<TGrpcMon> {
+public:
+ TGrpcMon()
+ : TActor(&TGrpcMon::StateNormal)
+ , Peers(2000)
+ {}
+
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_MON;
- }
-
-private:
- STFUNC(StateNormal) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvGrpcMon::TEvReportPeer, HandlePeer);
- HFunc(NMon::TEvHttpInfo, HandleHttp);
- }
- }
-
- void HandlePeer(TEvGrpcMon::TEvReportPeer::TPtr &ev, const TActorContext &ctx) {
- Y_UNUSED(ctx);
- const auto& info = *ev->Get();
- auto now = TInstant::Now();
- auto it = Peers.Find(info.Name);
- if (it == Peers.End()) {
- TPeerInfo val;
- val.LastRequest = now;
+ }
+
+private:
+ STFUNC(StateNormal) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvGrpcMon::TEvReportPeer, HandlePeer);
+ HFunc(NMon::TEvHttpInfo, HandleHttp);
+ }
+ }
+
+ void HandlePeer(TEvGrpcMon::TEvReportPeer::TPtr &ev, const TActorContext &ctx) {
+ Y_UNUSED(ctx);
+ const auto& info = *ev->Get();
+ auto now = TInstant::Now();
+ auto it = Peers.Find(info.Name);
+ if (it == Peers.End()) {
+ TPeerInfo val;
+ val.LastRequest = now;
val.SdkBuildInfo = info.SdkBuildInfo;
- Peers.Insert(info.Name, val);
- } else {
- it->LastRequest = now;
- }
- }
-
- void HandleHttp(NMon::TEvHttpInfo::TPtr &ev, const TActorContext &ctx) {
- TStringStream str;
- HTML(str) {
- TABLE_SORTABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() { str << "Address";}
- TABLEH() { str << "Last Request";}
+ Peers.Insert(info.Name, val);
+ } else {
+ it->LastRequest = now;
+ }
+ }
+
+ void HandleHttp(NMon::TEvHttpInfo::TPtr &ev, const TActorContext &ctx) {
+ TStringStream str;
+ HTML(str) {
+ TABLE_SORTABLE_CLASS("table") {
+ TABLEHEAD() {
+ TABLER() {
+ TABLEH() { str << "Address";}
+ TABLEH() { str << "Last Request";}
TABLEH() { str << "Sdk BuildInfo";}
- }
- }
- TABLEBODY() {
- for (auto p = Peers.Begin(); p != Peers.End(); ++p) {
- TABLER() {
- TABLED() { str << p.Key(); }
- TABLED() { str << p.Value().LastRequest.ToRfc822StringLocal(); }
+ }
+ }
+ TABLEBODY() {
+ for (auto p = Peers.Begin(); p != Peers.End(); ++p) {
+ TABLER() {
+ TABLED() { str << p.Key(); }
+ TABLED() { str << p.Value().LastRequest.ToRfc822StringLocal(); }
TABLED() { str << p.Value().SdkBuildInfo; }
- }
- }
- }
- }
- }
-
- ctx.Send(ev->Sender, new NMon::TEvHttpInfoRes(str.Str()));
- }
-
-private:
- struct TPeerInfo {
- TInstant LastRequest;
+ }
+ }
+ }
+ }
+ }
+
+ ctx.Send(ev->Sender, new NMon::TEvHttpInfoRes(str.Str()));
+ }
+
+private:
+ struct TPeerInfo {
+ TInstant LastRequest;
TString SdkBuildInfo;
- };
-
- TLRUCache<TString, TPeerInfo> Peers;
-};
-
+ };
+
+ TLRUCache<TString, TPeerInfo> Peers;
+};
+
TActorId GrpcMonServiceId() {
- const char x[12] = "GrpcMonSvc!";
+ const char x[12] = "GrpcMonSvc!";
return TActorId(0, TStringBuf(x, 12));
-}
-
-IActor* CreateGrpcMonService() {
- return new TGrpcMon;
-}
-
+}
+
+IActor* CreateGrpcMonService() {
+ return new TGrpcMon;
+}
+
void ReportGrpcReqToMon(NActors::TActorSystem& actorSystem, const TString& fromAddress) {
- actorSystem.Send(GrpcMonServiceId(), new TEvGrpcMon::TEvReportPeer(fromAddress));
-}
-
+ actorSystem.Send(GrpcMonServiceId(), new TEvGrpcMon::TEvReportPeer(fromAddress));
+}
+
void ReportGrpcReqToMon(NActors::TActorSystem& actorSystem, const TString& fromAddress, const TString& buildInfo) {
actorSystem.Send(GrpcMonServiceId(), new TEvGrpcMon::TEvReportPeer(fromAddress, buildInfo));
}
-}}
+}}
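
Editor's note on grpc_mon.cpp above: TGrpcMon keeps a bounded TLRUCache of the last 2000 reporting peers, inserting a new entry or refreshing LastRequest on every TEvReportPeer. A self-contained sketch of that insert-or-refresh LRU behaviour with standard containers; TPeerCache and the other names are illustrative only.

#include <chrono>
#include <list>
#include <string>
#include <unordered_map>
#include <utility>

// Bounded "recently seen peers" cache: reporting a peer inserts it or refreshes
// its timestamp and moves it to the front; the least recently reported peer is
// dropped once capacity is exceeded.
class TPeerCache {
public:
    using TClock = std::chrono::system_clock;
    struct TPeerInfo {
        TClock::time_point LastRequest;
    };

    explicit TPeerCache(size_t capacity)
        : Capacity(capacity)
    {}

    void Report(const std::string& name) {
        const auto now = TClock::now();
        auto it = Index.find(name);
        if (it != Index.end()) {
            it->second->second.LastRequest = now;              // refresh existing entry
            Order.splice(Order.begin(), Order, it->second);    // move it to the front
            return;
        }
        Order.emplace_front(name, TPeerInfo{now});
        Index[name] = Order.begin();
        if (Order.size() > Capacity) {                         // evict the oldest peer
            Index.erase(Order.back().first);
            Order.pop_back();
        }
    }

private:
    size_t Capacity;
    std::list<std::pair<std::string, TPeerInfo>> Order;        // most recent first
    std::unordered_map<std::string, std::list<std::pair<std::string, TPeerInfo>>::iterator> Index;
};
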
diff --git a/ydb/core/grpc_services/grpc_mon.h b/ydb/core/grpc_services/grpc_mon.h
index 72316226c77..f5996769777 100644
--- a/ydb/core/grpc_services/grpc_mon.h
+++ b/ydb/core/grpc_services/grpc_mon.h
@@ -1,16 +1,16 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/base/defs.h>
-
-#include <util/generic/string.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
+
+#include <util/generic/string.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
TActorId GrpcMonServiceId();
-IActor* CreateGrpcMonService();
-
+IActor* CreateGrpcMonService();
+
void ReportGrpcReqToMon(NActors::TActorSystem&, const TString& fromAddress);
void ReportGrpcReqToMon(NActors::TActorSystem&, const TString& fromAddress, const TString& buildInfo);
-
-}}
+
+}}
diff --git a/ydb/core/grpc_services/grpc_request_proxy.cpp b/ydb/core/grpc_services/grpc_request_proxy.cpp
index c7b073f8f38..a1e7accb877 100644
--- a/ydb/core/grpc_services/grpc_request_proxy.cpp
+++ b/ydb/core/grpc_services/grpc_request_proxy.cpp
@@ -87,8 +87,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_PROXY;
- }
-
+ }
+
private:
void HandlePoolStatus(TEvTenantPool::TEvTenantPoolStatus::TPtr& ev, const TActorContext& ctx);
void HandleRefreshToken(TRefreshTokenImpl::TPtr& ev, const TActorContext& ctx);
diff --git a/ydb/core/grpc_services/grpc_request_proxy.h b/ydb/core/grpc_services/grpc_request_proxy.h
index 99741b55609..97315f6e9fb 100644
--- a/ydb/core/grpc_services/grpc_request_proxy.h
+++ b/ydb/core/grpc_services/grpc_request_proxy.h
@@ -81,10 +81,10 @@ protected:
void Handle(TEvAlterCoordinationNode::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDropCoordinationNode::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDescribeCoordinationNode::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvGetShardLocationsRequest::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvKikhouseDescribeTableRequest::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvGetShardLocationsRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvKikhouseDescribeTableRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvBiStreamPingRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvExperimentalStreamQueryRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvStreamPQWriteRequest::TPtr& ev, const TActorContext& ctx);
@@ -101,7 +101,7 @@ protected:
void Handle(TEvImportFromS3Request::TPtr& ev, const TActorContext& ctx);
void Handle(TEvImportDataRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDiscoverPQClustersRequest::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvWhoAmIRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvCreateRateLimiterResource::TPtr& ev, const TActorContext& ctx);
void Handle(TEvAlterRateLimiterResource::TPtr& ev, const TActorContext& ctx);
diff --git a/ydb/core/grpc_services/resolve_local_db_table.cpp b/ydb/core/grpc_services/resolve_local_db_table.cpp
index e9073856125..3f1aeb74a58 100644
--- a/ydb/core/grpc_services/resolve_local_db_table.cpp
+++ b/ydb/core/grpc_services/resolve_local_db_table.cpp
@@ -1,62 +1,62 @@
-#include "resolve_local_db_table.h"
-
+#include "resolve_local_db_table.h"
+
#include <ydb/core/tablet_flat/flat_dbase_apply.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
- TMaybe<ui64> TryParseLocalDbPath(const TVector<TString>& path) {
- if (path.size() != 4 || path[1] != ".sys_tablets") {
- return {};
- }
-
- ui64 tabletId = -1;
- TString tabletIdStr = path[2];
- if (TryFromString<ui64>(tabletIdStr, tabletId)) {
- return tabletId;
- } else {
- return {};
- }
- }
-
- void FillLocalDbTableSchema(
- NSchemeCache::TSchemeCacheNavigate& result,
- const NTabletFlatScheme::TSchemeChanges& fullScheme,
- const TString& tableName)
- {
- NTable::TScheme scheme;
- NTable::TSchemeModifier applier(scheme);
- applier.Apply(fullScheme);
-
- result.ResultSet.resize(1);
- NSchemeCache::TSchemeCacheNavigate::TEntry& entry = result.ResultSet.back();
- entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
-
- entry.Status = NSchemeCache::TSchemeCacheNavigate::EStatus::Ok;
-
- auto* ti = scheme.TableNames.FindPtr(tableName);
- if (!ti) {
- entry.Status = NSchemeCache::TSchemeCacheNavigate::EStatus::PathErrorUnknown;
- return;
- }
-
- entry.Kind = NSchemeCache::TSchemeCacheNavigate::KindTable;
-
- const NTable::TScheme::TTableInfo* tableInfo = scheme.Tables.FindPtr(*ti);
-
- for (const auto& col : tableInfo->Columns) {
- entry.Columns[col.first] = TSysTables::TTableColumnInfo(col.second.Name, col.first, col.second.PType, col.second.KeyOrder);
- }
- }
-
- bool IsSuperUser(const NACLib::TUserToken& userToken, const TAppData& appData) {
- const auto& adminSids = appData.AdministrationAllowedSIDs;
- for (const auto& sid : adminSids) {
- if (userToken.IsExist(sid))
- return true;
- }
- return false;
- }
-
-} // namespace NKikimr
-} // namespace NGRpcService
+
+namespace NKikimr {
+namespace NGRpcService {
+
+ TMaybe<ui64> TryParseLocalDbPath(const TVector<TString>& path) {
+ if (path.size() != 4 || path[1] != ".sys_tablets") {
+ return {};
+ }
+
+ ui64 tabletId = -1;
+ TString tabletIdStr = path[2];
+ if (TryFromString<ui64>(tabletIdStr, tabletId)) {
+ return tabletId;
+ } else {
+ return {};
+ }
+ }
+
+ void FillLocalDbTableSchema(
+ NSchemeCache::TSchemeCacheNavigate& result,
+ const NTabletFlatScheme::TSchemeChanges& fullScheme,
+ const TString& tableName)
+ {
+ NTable::TScheme scheme;
+ NTable::TSchemeModifier applier(scheme);
+ applier.Apply(fullScheme);
+
+ result.ResultSet.resize(1);
+ NSchemeCache::TSchemeCacheNavigate::TEntry& entry = result.ResultSet.back();
+ entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
+
+ entry.Status = NSchemeCache::TSchemeCacheNavigate::EStatus::Ok;
+
+ auto* ti = scheme.TableNames.FindPtr(tableName);
+ if (!ti) {
+ entry.Status = NSchemeCache::TSchemeCacheNavigate::EStatus::PathErrorUnknown;
+ return;
+ }
+
+ entry.Kind = NSchemeCache::TSchemeCacheNavigate::KindTable;
+
+ const NTable::TScheme::TTableInfo* tableInfo = scheme.Tables.FindPtr(*ti);
+
+ for (const auto& col : tableInfo->Columns) {
+ entry.Columns[col.first] = TSysTables::TTableColumnInfo(col.second.Name, col.first, col.second.PType, col.second.KeyOrder);
+ }
+ }
+
+ bool IsSuperUser(const NACLib::TUserToken& userToken, const TAppData& appData) {
+ const auto& adminSids = appData.AdministrationAllowedSIDs;
+ for (const auto& sid : adminSids) {
+ if (userToken.IsExist(sid))
+ return true;
+ }
+ return false;
+ }
+
+} // namespace NKikimr
+} // namespace NGRpcService
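
Editor's note on resolve_local_db_table.cpp above: TryParseLocalDbPath accepts only four-component paths whose second component is ".sys_tablets" and whose third component is a numeric tablet id; the last component is presumably the table name consumed by FillLocalDbTableSchema. A standalone sketch of the same check, with std::stoull in place of TryFromString and an illustrative function name.

#include <cstdint>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative re-statement of the path check: <root>/.sys_tablets/<tabletId>/<table>.
std::optional<uint64_t> ParseLocalDbPath(const std::vector<std::string>& path) {
    if (path.size() != 4 || path[1] != ".sys_tablets")
        return std::nullopt;
    try {
        size_t consumed = 0;
        const uint64_t tabletId = std::stoull(path[2], &consumed);
        if (consumed != path[2].size())
            return std::nullopt;          // trailing non-digit characters
        return tabletId;
    } catch (const std::exception&) {
        return std::nullopt;              // not a number or out of range
    }
}

int main() {
    const std::vector<std::string> path{"Root", ".sys_tablets", "72075186224037888", "Table1"};
    if (const auto id = ParseLocalDbPath(path))
        std::cout << "tablet id: " << *id << "\n";
    return 0;
}
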
diff --git a/ydb/core/grpc_services/resolve_local_db_table.h b/ydb/core/grpc_services/resolve_local_db_table.h
index c89245f2250..6f94a819d66 100644
--- a/ydb/core/grpc_services/resolve_local_db_table.h
+++ b/ydb/core/grpc_services/resolve_local_db_table.h
@@ -1,23 +1,23 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
#include <ydb/core/tablet_flat/flat_dbase_scheme.h>
#include <ydb/core/base/appdata.h>
-
-#include <util/generic/string.h>
-#include <util/generic/vector.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
- TMaybe<ui64> TryParseLocalDbPath(const TVector<TString>& path);
-
- void FillLocalDbTableSchema(
- NSchemeCache::TSchemeCacheNavigate& result,
- const NTabletFlatScheme::TSchemeChanges& fullScheme,
- const TString& tableName);
-
- bool IsSuperUser(const NACLib::TUserToken& userToken, const TAppData& appData);
-
-} // namespace NKikimr
-} // namespace NGRpcService
+
+#include <util/generic/string.h>
+#include <util/generic/vector.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
+ TMaybe<ui64> TryParseLocalDbPath(const TVector<TString>& path);
+
+ void FillLocalDbTableSchema(
+ NSchemeCache::TSchemeCacheNavigate& result,
+ const NTabletFlatScheme::TSchemeChanges& fullScheme,
+ const TString& tableName);
+
+ bool IsSuperUser(const NACLib::TUserToken& userToken, const TAppData& appData);
+
+} // namespace NKikimr
+} // namespace NGRpcService
diff --git a/ydb/core/grpc_services/rpc_deferrable.h b/ydb/core/grpc_services/rpc_deferrable.h
index 6f0a9fc4249..644e7b84c40 100644
--- a/ydb/core/grpc_services/rpc_deferrable.h
+++ b/ydb/core/grpc_services/rpc_deferrable.h
@@ -2,7 +2,7 @@
#include "defs.h"
#include "grpc_request_proxy.h"
-#include "rpc_common.h"
+#include "rpc_common.h"
#include <ydb/core/tx/tx_proxy/proxy.h>
#include <ydb/core/base/kikimr_issue.h>
@@ -13,7 +13,7 @@
#include <ydb/public/lib/operation_id/operation_id.h>
#include <ydb/core/actorlib_impl/long_timer.h>
-
+
#include <library/cpp/actors/core/actor_bootstrapped.h>
namespace NKikimr {
@@ -21,11 +21,11 @@ namespace NGRpcService {
template <typename TDerived, typename TRequest, bool IsOperation>
class TRpcRequestWithOperationParamsActor : public TActorBootstrapped<TDerived> {
-private:
- typedef TActorBootstrapped<TDerived> TBase;
+private:
+ typedef TActorBootstrapped<TDerived> TBase;
typedef typename std::conditional<IsOperation, IRequestOpCtx, IRequestNoOpCtx>::type TRequestBase;
-
-public:
+
+public:
enum EWakeupTag {
WakeupTagTimeout = 10,
WakeupTagCancel = 11,
@@ -45,8 +45,8 @@ public:
const typename TRequest::TRequest* GetProtoRequest() const {
return TRequest::GetProtoRequest(Request_);
- }
-
+ }
+
Ydb::Operations::OperationParams::OperationMode GetOperationMode() const {
return GetProtoRequest()->operation_params().operation_mode();
}
@@ -55,15 +55,15 @@ public:
HasCancel_ = static_cast<TDerived*>(this)->HasCancelOperation();
if (OperationTimeout_) {
- OperationTimeoutTimer = CreateLongTimer(ctx, OperationTimeout_,
- new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup(WakeupTagTimeout)),
- AppData(ctx)->UserPoolId);
+ OperationTimeoutTimer = CreateLongTimer(ctx, OperationTimeout_,
+ new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup(WakeupTagTimeout)),
+ AppData(ctx)->UserPoolId);
}
if (HasCancel_ && CancelAfter_) {
- CancelAfterTimer = CreateLongTimer(ctx, CancelAfter_,
- new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup(WakeupTagCancel)),
- AppData(ctx)->UserPoolId);
+ CancelAfterTimer = CreateLongTimer(ctx, CancelAfter_,
+ new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup(WakeupTagCancel)),
+ AppData(ctx)->UserPoolId);
}
auto selfId = ctx.SelfID;
diff --git a/ydb/core/grpc_services/rpc_discovery.cpp b/ydb/core/grpc_services/rpc_discovery.cpp
index 63b093ffc92..75550f92acf 100644
--- a/ydb/core/grpc_services/rpc_discovery.cpp
+++ b/ydb/core/grpc_services/rpc_discovery.cpp
@@ -142,8 +142,8 @@ class TListEndpointsRPC : public TActorBootstrapped<TListEndpointsRPC> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_REQ;
- }
-
+ }
+
TListEndpointsRPC(TEvListEndpointsRequest::TPtr &msg, TActorId cacheId)
: Request(msg->Release().Release())
, CacheId(cacheId)
diff --git a/ydb/core/grpc_services/rpc_execute_data_query.cpp b/ydb/core/grpc_services/rpc_execute_data_query.cpp
index f7101d6372f..317a3488ce0 100644
--- a/ydb/core/grpc_services/rpc_execute_data_query.cpp
+++ b/ydb/core/grpc_services/rpc_execute_data_query.cpp
@@ -159,17 +159,17 @@ public:
ctx.Send(NKqp::MakeKqpProxyID(ctx.SelfID.NodeId()), ev.Release());
}
- static void ConvertReadStats(const NKikimrQueryStats::TReadOpStats& from, Ydb::TableStats::OperationStats* to) {
- to->set_rows(to->rows() + from.GetRows());
- to->set_bytes(to->bytes() + from.GetBytes());
- }
-
- static void ConvertWriteStats(const NKikimrQueryStats::TWriteOpStats& from, Ydb::TableStats::OperationStats* to) {
- to->set_rows(from.GetCount());
- to->set_bytes(from.GetBytes());
- }
-
- static void ConvertQueryStats(const NKikimrKqp::TQueryResponse& from, Ydb::Table::ExecuteQueryResult* to) {
+ static void ConvertReadStats(const NKikimrQueryStats::TReadOpStats& from, Ydb::TableStats::OperationStats* to) {
+ to->set_rows(to->rows() + from.GetRows());
+ to->set_bytes(to->bytes() + from.GetBytes());
+ }
+
+ static void ConvertWriteStats(const NKikimrQueryStats::TWriteOpStats& from, Ydb::TableStats::OperationStats* to) {
+ to->set_rows(from.GetCount());
+ to->set_bytes(from.GetBytes());
+ }
+
+ static void ConvertQueryStats(const NKikimrKqp::TQueryResponse& from, Ydb::Table::ExecuteQueryResult* to) {
if (from.HasQueryStats()) {
FillQueryStats(*to->mutable_query_stats(), from);
to->mutable_query_stats()->set_query_ast(from.GetQueryAst());
@@ -177,35 +177,35 @@ public:
}
// TODO: For compatibility with old kqp workers, deprecate.
- if (from.GetProfile().KqlProfilesSize() == 1) {
- const auto& kqlProlfile = from.GetProfile().GetKqlProfiles(0);
- const auto& phases = kqlProlfile.GetMkqlProfiles();
- for (const auto& s : phases) {
- if (s.HasTxStats()) {
- const auto& tableStats = s.GetTxStats().GetTableAccessStats();
- auto* phase = to->mutable_query_stats()->add_query_phases();
- phase->set_duration_us(s.GetTxStats().GetDurationUs());
- for (const auto& ts : tableStats) {
- auto* tableAccess = phase->add_table_access();
- tableAccess->set_name(ts.GetTableInfo().GetName());
- if (ts.HasSelectRow()) {
- ConvertReadStats(ts.GetSelectRow(), tableAccess->mutable_reads());
- }
- if (ts.HasSelectRange()) {
- ConvertReadStats(ts.GetSelectRange(), tableAccess->mutable_reads());
- }
- if (ts.HasUpdateRow()) {
- ConvertWriteStats(ts.GetUpdateRow(), tableAccess->mutable_updates());
- }
- if (ts.HasEraseRow()) {
- ConvertWriteStats(ts.GetEraseRow(), tableAccess->mutable_deletes());
- }
- }
- }
- }
- }
- }
-
+ if (from.GetProfile().KqlProfilesSize() == 1) {
+ const auto& kqlProlfile = from.GetProfile().GetKqlProfiles(0);
+ const auto& phases = kqlProlfile.GetMkqlProfiles();
+ for (const auto& s : phases) {
+ if (s.HasTxStats()) {
+ const auto& tableStats = s.GetTxStats().GetTableAccessStats();
+ auto* phase = to->mutable_query_stats()->add_query_phases();
+ phase->set_duration_us(s.GetTxStats().GetDurationUs());
+ for (const auto& ts : tableStats) {
+ auto* tableAccess = phase->add_table_access();
+ tableAccess->set_name(ts.GetTableInfo().GetName());
+ if (ts.HasSelectRow()) {
+ ConvertReadStats(ts.GetSelectRow(), tableAccess->mutable_reads());
+ }
+ if (ts.HasSelectRange()) {
+ ConvertReadStats(ts.GetSelectRange(), tableAccess->mutable_reads());
+ }
+ if (ts.HasUpdateRow()) {
+ ConvertWriteStats(ts.GetUpdateRow(), tableAccess->mutable_updates());
+ }
+ if (ts.HasEraseRow()) {
+ ConvertWriteStats(ts.GetEraseRow(), tableAccess->mutable_deletes());
+ }
+ }
+ }
+ }
+ }
+ }
+
void Handle(NKqp::TEvKqp::TEvQueryResponse::TPtr& ev, const TActorContext& ctx) {
const auto& record = ev->Get()->Record.GetRef();
SetCost(record.GetConsumedRu());
@@ -217,7 +217,7 @@ public:
auto queryResult = TEvExecuteDataQueryRequest::AllocateResult<Ydb::Table::ExecuteQueryResult>(Request_);
ConvertKqpQueryResultsToDbResult(kqpResponse, queryResult);
- ConvertQueryStats(kqpResponse, queryResult);
+ ConvertQueryStats(kqpResponse, queryResult);
if (kqpResponse.HasTxMeta()) {
queryResult->mutable_tx_meta()->CopyFrom(kqpResponse.GetTxMeta());
}
diff --git a/ydb/core/grpc_services/rpc_get_operation.cpp b/ydb/core/grpc_services/rpc_get_operation.cpp
index 681b9d27f21..6f7a1406863 100644
--- a/ydb/core/grpc_services/rpc_get_operation.cpp
+++ b/ydb/core/grpc_services/rpc_get_operation.cpp
@@ -67,7 +67,7 @@ class TGetOperationRPC : public TRpcOperationRequestActor<TGetOperationRPC, TEvG
public:
using TRpcOperationRequestActor::TRpcOperationRequestActor;
-
+
void Bootstrap(const TActorContext &ctx) {
const auto req = Request->GetProtoRequest();
diff --git a/ydb/core/grpc_services/rpc_get_shard_locations.cpp b/ydb/core/grpc_services/rpc_get_shard_locations.cpp
index 018d3dbca7f..f7088c8eb60 100644
--- a/ydb/core/grpc_services/rpc_get_shard_locations.cpp
+++ b/ydb/core/grpc_services/rpc_get_shard_locations.cpp
@@ -1,193 +1,193 @@
-#include "grpc_request_proxy.h"
-#include "rpc_calls.h"
-#include "rpc_common.h"
-
+#include "grpc_request_proxy.h"
+#include "rpc_calls.h"
+#include "rpc_common.h"
+
#include <ydb/library/aclib/aclib.h>
#include <ydb/core/base/tablet_pipe.h>
-
+
#include <library/cpp/actors/core/actor_bootstrapped.h>
#include <library/cpp/actors/core/hfunc.h>
#include <library/cpp/actors/core/interconnect.h>
#include <library/cpp/actors/interconnect/interconnect.h>
-
-#include <util/string/vector.h>
-#include <util/generic/hash.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
-class TGetShardLocationsRPC : public TActorBootstrapped<TGetShardLocationsRPC> {
- using TBase = TActorBootstrapped<TGetShardLocationsRPC>;
-
-private:
- constexpr static ui64 INVALID_TABLET_ID = Max<ui64>();
- constexpr static ui64 DEFAULT_TIMEOUT_MSEC = 100;
-
- struct TNodeInfo {
- TString Host;
- ui16 Port;
- };
-
- TAutoPtr<TEvGetShardLocationsRequest> Request;
- Ydb::ClickhouseInternal::GetShardLocationsResult Result;
-
+
+#include <util/string/vector.h>
+#include <util/generic/hash.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
+class TGetShardLocationsRPC : public TActorBootstrapped<TGetShardLocationsRPC> {
+ using TBase = TActorBootstrapped<TGetShardLocationsRPC>;
+
+private:
+ constexpr static ui64 INVALID_TABLET_ID = Max<ui64>();
+ constexpr static ui64 DEFAULT_TIMEOUT_MSEC = 100;
+
+ struct TNodeInfo {
+ TString Host;
+ ui16 Port;
+ };
+
+ TAutoPtr<TEvGetShardLocationsRequest> Request;
+ Ydb::ClickhouseInternal::GetShardLocationsResult Result;
+
THashMap<ui64, TActorId> ShardPipes;
- THashMap<ui64, ui32> ShardNodes;
- THashMap<ui32, TNodeInfo> NodeInfos;
-
-public:
+ THashMap<ui64, ui32> ShardNodes;
+ THashMap<ui32, TNodeInfo> NodeInfos;
+
+public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_REQ;
- }
-
- explicit TGetShardLocationsRPC(TAutoPtr<TEvGetShardLocationsRequest> request)
- : TBase()
- , Request(request)
- {}
-
- void Bootstrap(const NActors::TActorContext& ctx) {
- TString errorMessage;
- if (!CheckAccess(errorMessage)) {
- return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
- }
-
- TDuration timeout = TDuration::MilliSeconds(DEFAULT_TIMEOUT_MSEC);
- if (Request->GetProtoRequest()->operation_params().has_operation_timeout())
- timeout = GetDuration(Request->GetProtoRequest()->operation_params().operation_timeout());
- ctx.Schedule(timeout, new TEvents::TEvWakeup());
- ResolveShards(ctx);
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- // Destroy all pipe clients
- for (const auto& p : ShardPipes) {
- ctx.Send(p.second, new TEvents::TEvPoisonPill());
- }
- TBase::Die(ctx);
- }
-
-private:
- STFUNC(StateWaitResolve) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTabletPipe::TEvClientDestroyed, Handle);
- HFunc(TEvTabletPipe::TEvClientConnected, Handle);
- HFunc(TEvInterconnect::TEvNodesInfo, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void ResolveShards(const NActors::TActorContext& ctx) {
- // Create pipes to all shards
- for (ui64 ti : Request->GetProtoRequest()->tablet_ids()) {
- if (ti == 0)
- ti = INVALID_TABLET_ID;
-
- if (ShardPipes.contains(ti))
- continue;
-
- NTabletPipe::TClientConfig clientConfig;
+ }
+
+ explicit TGetShardLocationsRPC(TAutoPtr<TEvGetShardLocationsRequest> request)
+ : TBase()
+ , Request(request)
+ {}
+
+ void Bootstrap(const NActors::TActorContext& ctx) {
+ TString errorMessage;
+ if (!CheckAccess(errorMessage)) {
+ return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
+ }
+
+ TDuration timeout = TDuration::MilliSeconds(DEFAULT_TIMEOUT_MSEC);
+ if (Request->GetProtoRequest()->operation_params().has_operation_timeout())
+ timeout = GetDuration(Request->GetProtoRequest()->operation_params().operation_timeout());
+ ctx.Schedule(timeout, new TEvents::TEvWakeup());
+ ResolveShards(ctx);
+ }
+
+ void Die(const NActors::TActorContext& ctx) override {
+ // Destroy all pipe clients
+ for (const auto& p : ShardPipes) {
+ ctx.Send(p.second, new TEvents::TEvPoisonPill());
+ }
+ TBase::Die(ctx);
+ }
+
+private:
+ STFUNC(StateWaitResolve) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTabletPipe::TEvClientDestroyed, Handle);
+ HFunc(TEvTabletPipe::TEvClientConnected, Handle);
+ HFunc(TEvInterconnect::TEvNodesInfo, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void ResolveShards(const NActors::TActorContext& ctx) {
+ // Create pipes to all shards
+ for (ui64 ti : Request->GetProtoRequest()->tablet_ids()) {
+ if (ti == 0)
+ ti = INVALID_TABLET_ID;
+
+ if (ShardPipes.contains(ti))
+ continue;
+
+ NTabletPipe::TClientConfig clientConfig;
clientConfig.AllowFollower = false;
- clientConfig.CheckAliveness = false;
+ clientConfig.CheckAliveness = false;
clientConfig.RetryPolicy = {
.RetryLimitCount = 2,
.MinRetryTime = TDuration::MilliSeconds(5),
};
- ShardPipes[ti] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, ti, clientConfig));
- }
-
- // Get list of cluster nodes
+ ShardPipes[ti] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, ti, clientConfig));
+ }
+
+ // Get list of cluster nodes
const TActorId nameserviceId = GetNameserviceActorId();
- ctx.Send(nameserviceId, new TEvInterconnect::TEvListNodes());
-
- TBase::Become(&TThis::StateWaitResolve);
- }
-
- void HandleTimeout(const TActorContext& ctx) {
- return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
- }
-
- bool CheckAccess(TString& errorMessage) {
- if (Request->GetInternalToken().empty())
- return true;
-
- NACLib::TUserToken userToken(Request->GetInternalToken());
- // TODO: check describe rights for root?
-
- Y_UNUSED(errorMessage);
- return true;
- }
-
-
- void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) {
- TEvTabletPipe::TEvClientConnected* msg = ev->Get();
- const ui64 tabletId = msg->TabletId;
- Y_VERIFY(tabletId != 0);
- if (msg->Status != NKikimrProto::OK) {
- ShardNodes[tabletId] = -1;
- } else {
- ShardNodes[tabletId] = msg->ServerId.NodeId();
- }
-
- return CheckFinished(ctx);
- }
-
- void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
- const ui64 tabletId = ev->Get()->TabletId;
- Y_VERIFY(tabletId != 0);
- ShardNodes[tabletId] = -1;
-
- return CheckFinished(ctx);
- }
-
- void CheckFinished(const TActorContext& ctx) {
- if (ShardNodes.size() == ShardPipes.size() && !NodeInfos.empty())
- ReplySuccess(ctx);
- }
-
- void Handle(TEvInterconnect::TEvNodesInfo::TPtr &ev, const TActorContext &ctx) {
- const TEvInterconnect::TEvNodesInfo* nodesInfo = ev->Get();
- Y_VERIFY(!nodesInfo->Nodes.empty());
- for (const auto& ni : nodesInfo->Nodes) {
- NodeInfos[ni.NodeId].Host = ni.Host;
- NodeInfos[ni.NodeId].Port = ni.Port;
- }
-
- CheckFinished(ctx);
- }
-
- void ReplySuccess(const NActors::TActorContext& ctx) {
- for (const auto& sn : ShardNodes) {
- auto* info = Result.add_tablets();
- info->set_tablet_id(sn.first);
- info->set_host(NodeInfos[sn.second].Host);
- info->set_port(NodeInfos[sn.second].Port);
- }
- ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
- }
-
- void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
- Request->RaiseIssue(NYql::TIssue(message));
+ ctx.Send(nameserviceId, new TEvInterconnect::TEvListNodes());
+
+ TBase::Become(&TThis::StateWaitResolve);
+ }
+
+ void HandleTimeout(const TActorContext& ctx) {
+ return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
+ }
+
+ bool CheckAccess(TString& errorMessage) {
+ if (Request->GetInternalToken().empty())
+ return true;
+
+ NACLib::TUserToken userToken(Request->GetInternalToken());
+ // TODO: check describe rights for root?
+
+ Y_UNUSED(errorMessage);
+ return true;
+ }
+
+
+ void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) {
+ TEvTabletPipe::TEvClientConnected* msg = ev->Get();
+ const ui64 tabletId = msg->TabletId;
+ Y_VERIFY(tabletId != 0);
+ if (msg->Status != NKikimrProto::OK) {
+ ShardNodes[tabletId] = -1;
+ } else {
+ ShardNodes[tabletId] = msg->ServerId.NodeId();
+ }
+
+ return CheckFinished(ctx);
+ }
+
+ void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
+ const ui64 tabletId = ev->Get()->TabletId;
+ Y_VERIFY(tabletId != 0);
+ ShardNodes[tabletId] = -1;
+
+ return CheckFinished(ctx);
+ }
+
+ void CheckFinished(const TActorContext& ctx) {
+ if (ShardNodes.size() == ShardPipes.size() && !NodeInfos.empty())
+ ReplySuccess(ctx);
+ }
+
+ void Handle(TEvInterconnect::TEvNodesInfo::TPtr &ev, const TActorContext &ctx) {
+ const TEvInterconnect::TEvNodesInfo* nodesInfo = ev->Get();
+ Y_VERIFY(!nodesInfo->Nodes.empty());
+ for (const auto& ni : nodesInfo->Nodes) {
+ NodeInfos[ni.NodeId].Host = ni.Host;
+ NodeInfos[ni.NodeId].Port = ni.Port;
+ }
+
+ CheckFinished(ctx);
+ }
+
+ void ReplySuccess(const NActors::TActorContext& ctx) {
+ for (const auto& sn : ShardNodes) {
+ auto* info = Result.add_tablets();
+ info->set_tablet_id(sn.first);
+ info->set_host(NodeInfos[sn.second].Host);
+ info->set_port(NodeInfos[sn.second].Port);
+ }
+ ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
+ }
+
+ void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
+ Request->RaiseIssue(NYql::TIssue(message));
Request->ReplyWithYdbStatus(status);
- Die(ctx);
- }
-
- void ReplyWithResult(StatusIds::StatusCode status,
- const Ydb::ClickhouseInternal::GetShardLocationsResult& result,
- const TActorContext& ctx) {
- Request->SendResult(result, status);
- Die(ctx);
- }
-};
-
-void TGRpcRequestProxy::Handle(TEvGetShardLocationsRequest::TPtr& ev, const TActorContext& ctx) {
- ctx.Register(new TGetShardLocationsRPC(ev->Release().Release()));
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+ Die(ctx);
+ }
+
+ void ReplyWithResult(StatusIds::StatusCode status,
+ const Ydb::ClickhouseInternal::GetShardLocationsResult& result,
+ const TActorContext& ctx) {
+ Request->SendResult(result, status);
+ Die(ctx);
+ }
+};
+
+void TGRpcRequestProxy::Handle(TEvGetShardLocationsRequest::TPtr& ev, const TActorContext& ctx) {
+ ctx.Register(new TGetShardLocationsRPC(ev->Release().Release()));
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
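
For context on the restored file above: TGetShardLocationsRPC opens a pipe per requested tablet, records the serving node for each pipe result, and only replies once every pipe has reported and the node list has arrived (the CheckFinished condition). Below is a minimal plain-C++ sketch of that completion check, using standard containers as stand-ins for the actor-framework types; names such as TShardLocationAggregator are illustrative and not part of the patch.

// Simplified stand-in for the completion logic in TGetShardLocationsRPC:
// the reply is produced only after ShardNodes has an entry for every pipe
// (ShardNodes.size() == ShardPipes.size()) and the node list is non-empty.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

struct TNodeInfo {
    std::string Host;
    uint16_t Port = 0;
};

class TShardLocationAggregator {
public:
    void OnPipeOpened(uint64_t tabletId) { Pipes_.emplace(tabletId, true); }

    // A failed connection would be recorded with an invalid node id,
    // mirroring ShardNodes[tabletId] = -1 in the actor code.
    void OnPipeResult(uint64_t tabletId, uint32_t nodeId) { ShardNodes_[tabletId] = nodeId; }

    void OnNodesInfo(std::unordered_map<uint32_t, TNodeInfo> nodes) { Nodes_ = std::move(nodes); }

    bool Finished() const {
        return ShardNodes_.size() == Pipes_.size() && !Nodes_.empty();
    }

    void ReplyIfFinished() const {
        if (!Finished())
            return;
        for (const auto& [tablet, node] : ShardNodes_) {
            const auto it = Nodes_.find(node);
            const std::string host = it != Nodes_.end() ? it->second.Host : "<unknown>";
            std::cout << "tablet " << tablet << " -> " << host << "\n";
        }
    }

private:
    std::unordered_map<uint64_t, bool> Pipes_;          // tablet id -> pipe created
    std::unordered_map<uint64_t, uint32_t> ShardNodes_; // tablet id -> node id
    std::unordered_map<uint32_t, TNodeInfo> Nodes_;     // node id -> host/port
};

int main() {
    TShardLocationAggregator agg;
    agg.OnPipeOpened(72075186224037888ULL);      // illustrative tablet id
    agg.OnPipeResult(72075186224037888ULL, 1);   // pipe reported node 1
    agg.OnNodesInfo({{1, {"host-1.example", 19001}}});
    agg.ReplyIfFinished();                       // prints the tablet -> host mapping
}
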
diff --git a/ydb/core/grpc_services/rpc_kh_describe.cpp b/ydb/core/grpc_services/rpc_kh_describe.cpp
index 546d5376b0e..e2c48213ced 100644
--- a/ydb/core/grpc_services/rpc_kh_describe.cpp
+++ b/ydb/core/grpc_services/rpc_kh_describe.cpp
@@ -1,344 +1,344 @@
-#include "grpc_request_proxy.h"
-#include "rpc_calls.h"
-#include "rpc_common.h"
-#include "resolve_local_db_table.h"
-
+#include "grpc_request_proxy.h"
+#include "rpc_calls.h"
+#include "rpc_common.h"
+#include "resolve_local_db_table.h"
+
#include <ydb/library/aclib/aclib.h>
#include <ydb/core/actorlib_impl/long_timer.h>
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
#include <ydb/core/base/tablet_pipecache.h>
-
+
#include <library/cpp/actors/core/actor_bootstrapped.h>
#include <library/cpp/actors/core/hfunc.h>
#include <library/cpp/actors/core/interconnect.h>
#include <library/cpp/actors/interconnect/interconnect.h>
-
-#include <util/string/vector.h>
-#include <util/generic/hash.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
-class TKikhouseDescribeTableRPC : public TActorBootstrapped<TKikhouseDescribeTableRPC> {
- using TBase = TActorBootstrapped<TKikhouseDescribeTableRPC>;
-
-private:
- static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5;
-
- TAutoPtr<TEvKikhouseDescribeTableRequest> Request;
- Ydb::ClickhouseInternal::DescribeTableResult Result;
-
- TDuration Timeout;
+
+#include <util/string/vector.h>
+#include <util/generic/hash.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
+class TKikhouseDescribeTableRPC : public TActorBootstrapped<TKikhouseDescribeTableRPC> {
+ using TBase = TActorBootstrapped<TKikhouseDescribeTableRPC>;
+
+private:
+ static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5;
+
+ TAutoPtr<TEvKikhouseDescribeTableRequest> Request;
+ Ydb::ClickhouseInternal::DescribeTableResult Result;
+
+ TDuration Timeout;
TActorId TimeoutTimerActorId;
-
- bool WaitingResolveReply;
- bool Finished;
-
- TVector<NScheme::TTypeId> KeyColumnTypes;
+
+ bool WaitingResolveReply;
+ bool Finished;
+
+ TVector<NScheme::TTypeId> KeyColumnTypes;
THolder<NKikimr::TKeyDesc> KeyRange;
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
-
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
- explicit TKikhouseDescribeTableRPC(TAutoPtr<TEvKikhouseDescribeTableRequest> request)
- : TBase()
- , Request(request)
- , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
- , WaitingResolveReply(false)
- , Finished(false)
- {}
-
- void Bootstrap(const NActors::TActorContext& ctx) {
- ResolveTable(ctx);
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- Y_VERIFY(Finished);
- Y_VERIFY(!WaitingResolveReply);
-
- if (TimeoutTimerActorId) {
- ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
- }
-
- TBase::Die(ctx);
- }
-
-private:
- STFUNC(StateWaitResolveTable) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTablet::TEvLocalSchemeTxResponse, Handle);
- HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
- HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void ResolveTable(const NActors::TActorContext& ctx) {
- const TString table = Request->GetProtoRequest()->path();
- auto path = ::NKikimr::SplitPath(table);
- TMaybe<ui64> tabletId = TryParseLocalDbPath(path);
- if (tabletId) {
- if (Request->GetInternalToken().empty() || !IsSuperUser(NACLib::TUserToken(Request->GetInternalToken()), *AppData(ctx))) {
- return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
- }
-
- std::unique_ptr<TEvTablet::TEvLocalSchemeTx> ev(new TEvTablet::TEvLocalSchemeTx());
- ctx.Send(MakePipePeNodeCacheID(true), new TEvPipeCache::TEvForward(ev.release(), *tabletId, true), IEventHandle::FlagTrackDelivery);
-
- TBase::Become(&TThis::StateWaitResolveTable);
- WaitingResolveReply = true;
- } else {
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
- NSchemeCache::TSchemeCacheNavigate::TEntry entry;
- entry.Path = std::move(path);
- if (entry.Path.empty()) {
- return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
- }
- entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
- request->ResultSet.emplace_back(entry);
- ctx.Send(MakeSchemeCacheID(), new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
-
- TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
- new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
-
- TBase::Become(&TThis::StateWaitResolveTable);
- WaitingResolveReply = true;
- }
- }
-
- void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr& ev, const TActorContext& ctx) {
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got TEvDeliveryProblem, TabletId: " << ev->Get()->TabletId
- << ", NotDelivered: " << ev->Get()->NotDelivered);
- return ReplyWithError(Ydb::StatusIds::UNAVAILABLE, "Invalid table path specified", ctx);
- }
-
- void HandleTimeout(const TActorContext& ctx) {
- return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
- }
-
- void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- ResolveNamesResult = ev->Get()->Request;
-
- return ProceedWithSchema(ctx);
- }
-
- void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- ResolveNamesResult = new NSchemeCache::TSchemeCacheNavigate();
- auto &record = ev->Get()->Record;
-
- const TString table = Request->GetProtoRequest()->path();
- auto path = ::NKikimr::SplitPath(table);
- FillLocalDbTableSchema(*ResolveNamesResult, record.GetFullScheme(), path.back());
- ResolveNamesResult->ResultSet.back().Path = path;
-
- return ProceedWithSchema(ctx);
- }
-
- void ProceedWithSchema(const TActorContext& ctx) {
- Y_VERIFY(ResolveNamesResult->ResultSet.size() == 1);
- const auto& entry = ResolveNamesResult->ResultSet.front();
-
- if (entry.Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
+
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
+ explicit TKikhouseDescribeTableRPC(TAutoPtr<TEvKikhouseDescribeTableRequest> request)
+ : TBase()
+ , Request(request)
+ , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
+ , WaitingResolveReply(false)
+ , Finished(false)
+ {}
+
+ void Bootstrap(const NActors::TActorContext& ctx) {
+ ResolveTable(ctx);
+ }
+
+ void Die(const NActors::TActorContext& ctx) override {
+ Y_VERIFY(Finished);
+ Y_VERIFY(!WaitingResolveReply);
+
+ if (TimeoutTimerActorId) {
+ ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
+ }
+
+ TBase::Die(ctx);
+ }
+
+private:
+ STFUNC(StateWaitResolveTable) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTablet::TEvLocalSchemeTxResponse, Handle);
+ HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
+ HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void ResolveTable(const NActors::TActorContext& ctx) {
+ const TString table = Request->GetProtoRequest()->path();
+ auto path = ::NKikimr::SplitPath(table);
+ TMaybe<ui64> tabletId = TryParseLocalDbPath(path);
+ if (tabletId) {
+ if (Request->GetInternalToken().empty() || !IsSuperUser(NACLib::TUserToken(Request->GetInternalToken()), *AppData(ctx))) {
+ return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
+ }
+
+ std::unique_ptr<TEvTablet::TEvLocalSchemeTx> ev(new TEvTablet::TEvLocalSchemeTx());
+ ctx.Send(MakePipePeNodeCacheID(true), new TEvPipeCache::TEvForward(ev.release(), *tabletId, true), IEventHandle::FlagTrackDelivery);
+
+ TBase::Become(&TThis::StateWaitResolveTable);
+ WaitingResolveReply = true;
+ } else {
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
+ NSchemeCache::TSchemeCacheNavigate::TEntry entry;
+ entry.Path = std::move(path);
+ if (entry.Path.empty()) {
+ return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
+ }
+ entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
+ request->ResultSet.emplace_back(entry);
+ ctx.Send(MakeSchemeCacheID(), new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
+
+ TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
+ new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
+
+ TBase::Become(&TThis::StateWaitResolveTable);
+ WaitingResolveReply = true;
+ }
+ }
+
+ void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr& ev, const TActorContext& ctx) {
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got TEvDeliveryProblem, TabletId: " << ev->Get()->TabletId
+ << ", NotDelivered: " << ev->Get()->NotDelivered);
+ return ReplyWithError(Ydb::StatusIds::UNAVAILABLE, "Invalid table path specified", ctx);
+ }
+
+ void HandleTimeout(const TActorContext& ctx) {
+ return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ ResolveNamesResult = ev->Get()->Request;
+
+ return ProceedWithSchema(ctx);
+ }
+
+ void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ ResolveNamesResult = new NSchemeCache::TSchemeCacheNavigate();
+ auto &record = ev->Get()->Record;
+
+ const TString table = Request->GetProtoRequest()->path();
+ auto path = ::NKikimr::SplitPath(table);
+ FillLocalDbTableSchema(*ResolveNamesResult, record.GetFullScheme(), path.back());
+ ResolveNamesResult->ResultSet.back().Path = path;
+
+ return ProceedWithSchema(ctx);
+ }
+
+ void ProceedWithSchema(const TActorContext& ctx) {
+ Y_VERIFY(ResolveNamesResult->ResultSet.size() == 1);
+ const auto& entry = ResolveNamesResult->ResultSet.front();
+
+ if (entry.Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, ToString(entry.Status), ctx);
- }
-
- TString errorMessage;
- if (!CheckAccess(errorMessage)) {
- return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
- }
-
- TVector<TString> keyColumns;
- for (const auto& col : entry.Columns) {
- auto* colMeta = Result.add_columns();
- colMeta->set_name(col.second.Name);
- colMeta->mutable_type()->mutable_optional_type()->mutable_item()->set_type_id((Ydb::Type::PrimitiveTypeId)col.second.PType);
- if (col.second.KeyOrder == -1)
- continue;
-
- keyColumns.resize(Max<size_t>(keyColumns.size(), col.second.KeyOrder + 1));
- keyColumns[col.second.KeyOrder] = col.second.Name;
-
- KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), col.second.KeyOrder + 1));
- KeyColumnTypes[col.second.KeyOrder] = col.second.PType;
- }
-
- for (TString k : keyColumns) {
- Result.add_primary_key(k);
- }
-
- if (!Request->GetProtoRequest()->include_partitions_info()) {
- return ReplySuccess(ctx);
- }
-
- ResolveShards(ctx);
- }
-
- bool CheckAccess(TString& errorMessage) {
- if (Request->GetInternalToken().empty())
- return true;
-
- NACLib::TUserToken userToken(Request->GetInternalToken());
-
- const ui32 access = NACLib::EAccessRights::DescribeSchema;
- for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : ResolveNamesResult->ResultSet) {
- if (access != 0 && entry.SecurityObject != nullptr &&
- !entry.SecurityObject->CheckAccess(access, userToken))
- {
- TStringStream explanation;
- explanation << "Access denied for " << userToken.GetUserSID()
- << " with access " << NACLib::AccessRightsToString(access)
- << " to table [" << Request->GetProtoRequest()->path() << "]";
-
- errorMessage = explanation.Str();
- return false;
- }
- }
- return true;
- }
-
- void ResolveShards(const NActors::TActorContext& ctx) {
- auto& entry = ResolveNamesResult->ResultSet.front();
-
- if (entry.TableId.IsSystemView()) {
- // Add fake shard for sys view
- auto* p = Result.add_partitions();
- p->set_tablet_id(1);
- p->set_end_key("");
- p->set_end_key_inclusive(false);
-
- return ReplySuccess(ctx);
- } else if(TMaybe<ui64> tabletId = TryParseLocalDbPath(entry.Path)) {
- // Add fake shard for sys view
- auto* p = Result.add_partitions();
- p->set_tablet_id(*tabletId);
- p->set_end_key("");
- p->set_end_key_inclusive(false);
-
- return ReplySuccess(ctx);
- }
-
- // We are going to access all columns
- TVector<TKeyDesc::TColumnOp> columns;
- for (const auto& ci : entry.Columns) {
- TKeyDesc::TColumnOp op = { ci.second.Id, TKeyDesc::EColumnOperation::Read, ci.second.PType, 0, 0 };
- columns.push_back(op);
- }
-
- TVector<TCell> minusInf(KeyColumnTypes.size());
- TVector<TCell> plusInf;
- TTableRange range(minusInf, true, plusInf, true, false);
- KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
-
- TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
-
+ }
+
+ TString errorMessage;
+ if (!CheckAccess(errorMessage)) {
+ return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
+ }
+
+ TVector<TString> keyColumns;
+ for (const auto& col : entry.Columns) {
+ auto* colMeta = Result.add_columns();
+ colMeta->set_name(col.second.Name);
+ colMeta->mutable_type()->mutable_optional_type()->mutable_item()->set_type_id((Ydb::Type::PrimitiveTypeId)col.second.PType);
+ if (col.second.KeyOrder == -1)
+ continue;
+
+ keyColumns.resize(Max<size_t>(keyColumns.size(), col.second.KeyOrder + 1));
+ keyColumns[col.second.KeyOrder] = col.second.Name;
+
+ KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), col.second.KeyOrder + 1));
+ KeyColumnTypes[col.second.KeyOrder] = col.second.PType;
+ }
+
+ for (TString k : keyColumns) {
+ Result.add_primary_key(k);
+ }
+
+ if (!Request->GetProtoRequest()->include_partitions_info()) {
+ return ReplySuccess(ctx);
+ }
+
+ ResolveShards(ctx);
+ }
+
+ bool CheckAccess(TString& errorMessage) {
+ if (Request->GetInternalToken().empty())
+ return true;
+
+ NACLib::TUserToken userToken(Request->GetInternalToken());
+
+ const ui32 access = NACLib::EAccessRights::DescribeSchema;
+ for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : ResolveNamesResult->ResultSet) {
+ if (access != 0 && entry.SecurityObject != nullptr &&
+ !entry.SecurityObject->CheckAccess(access, userToken))
+ {
+ TStringStream explanation;
+ explanation << "Access denied for " << userToken.GetUserSID()
+ << " with access " << NACLib::AccessRightsToString(access)
+ << " to table [" << Request->GetProtoRequest()->path() << "]";
+
+ errorMessage = explanation.Str();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void ResolveShards(const NActors::TActorContext& ctx) {
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
+ if (entry.TableId.IsSystemView()) {
+ // Add fake shard for sys view
+ auto* p = Result.add_partitions();
+ p->set_tablet_id(1);
+ p->set_end_key("");
+ p->set_end_key_inclusive(false);
+
+ return ReplySuccess(ctx);
+ } else if(TMaybe<ui64> tabletId = TryParseLocalDbPath(entry.Path)) {
+ // Add fake shard for sys view
+ auto* p = Result.add_partitions();
+ p->set_tablet_id(*tabletId);
+ p->set_end_key("");
+ p->set_end_key_inclusive(false);
+
+ return ReplySuccess(ctx);
+ }
+
+ // We are going to access all columns
+ TVector<TKeyDesc::TColumnOp> columns;
+ for (const auto& ci : entry.Columns) {
+ TKeyDesc::TColumnOp op = { ci.second.Id, TKeyDesc::EColumnOperation::Read, ci.second.PType, 0, 0 };
+ columns.push_back(op);
+ }
+
+ TVector<TCell> minusInf(KeyColumnTypes.size());
+ TVector<TCell> plusInf;
+ TTableRange range(minusInf, true, plusInf, true, false);
+ KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
+
+ TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
+
request->ResultSet.emplace_back(std::move(KeyRange));
-
- TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
- ctx.Send(MakeSchemeCacheID(), resolveReq.Release());
-
- TBase::Become(&TThis::StateWaitResolveShards);
- WaitingResolveReply = true;
- }
-
- STFUNC(StateWaitResolveShards) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
+
+ TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
+ ctx.Send(MakeSchemeCacheID(), resolveReq.Release());
+
+ TBase::Become(&TThis::StateWaitResolveShards);
+ WaitingResolveReply = true;
+ }
+
+ STFUNC(StateWaitResolveShards) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
Y_VERIFY(msg->Request->ResultSet.size() == 1);
KeyRange = std::move(msg->Request->ResultSet[0].KeyDescription);
-
+
if (msg->Request->ErrorCount > 0) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Failed to get partitions for table [%s]",
- Request->GetProtoRequest()->path().c_str()), ctx);
- }
-
- auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
- TVector<ui64> shards;
- shards.reserve(partitions.size());
- for (auto& partition : partitions) {
- shards.push_back(partition.ShardId);
- }
-
- return JoinVectorIntoString(shards, ", ");
- };
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Table [" << Request->GetProtoRequest()->path()
- << "] shards: " << getShardsString(KeyRange->Partitions));
-
- for (const TKeyDesc::TPartitionInfo& partition : KeyRange->Partitions) {
- auto* p = Result.add_partitions();
- p->set_tablet_id(partition.ShardId);
- p->set_end_key(partition.Range->EndKeyPrefix.GetBuffer());
- p->set_end_key_inclusive(partition.Range->IsInclusive);
- }
-
- return ReplySuccess(ctx);
- }
-
- void ReplySuccess(const NActors::TActorContext& ctx) {
- Finished = true;
- ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
- }
-
- void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
- Finished = true;
- Request->RaiseIssue(NYql::TIssue(message));
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Failed to get partitions for table [%s]",
+ Request->GetProtoRequest()->path().c_str()), ctx);
+ }
+
+ auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
+ TVector<ui64> shards;
+ shards.reserve(partitions.size());
+ for (auto& partition : partitions) {
+ shards.push_back(partition.ShardId);
+ }
+
+ return JoinVectorIntoString(shards, ", ");
+ };
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Table [" << Request->GetProtoRequest()->path()
+ << "] shards: " << getShardsString(KeyRange->Partitions));
+
+ for (const TKeyDesc::TPartitionInfo& partition : KeyRange->Partitions) {
+ auto* p = Result.add_partitions();
+ p->set_tablet_id(partition.ShardId);
+ p->set_end_key(partition.Range->EndKeyPrefix.GetBuffer());
+ p->set_end_key_inclusive(partition.Range->IsInclusive);
+ }
+
+ return ReplySuccess(ctx);
+ }
+
+ void ReplySuccess(const NActors::TActorContext& ctx) {
+ Finished = true;
+ ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
+ }
+
+ void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
+ Finished = true;
+ Request->RaiseIssue(NYql::TIssue(message));
Request->ReplyWithYdbStatus(status);
-
- // We cannot Die() while scheme cache request is in flight because that request has pointer to
- // KeyRange member so we must not destroy it before we get the response
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-
- void ReplyWithResult(StatusIds::StatusCode status,
- const Ydb::ClickhouseInternal::DescribeTableResult& result,
- const TActorContext& ctx) {
- Request->SendResult(result, status);
-
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-};
-
-void TGRpcRequestProxy::Handle(TEvKikhouseDescribeTableRequest::TPtr& ev, const TActorContext& ctx) {
- ctx.Register(new TKikhouseDescribeTableRPC(ev->Release().Release()));
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+
+ // We cannot Die() while scheme cache request is in flight because that request has pointer to
+ // KeyRange member so we must not destroy it before we get the response
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+
+ void ReplyWithResult(StatusIds::StatusCode status,
+ const Ydb::ClickhouseInternal::DescribeTableResult& result,
+ const TActorContext& ctx) {
+ Request->SendResult(result, status);
+
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+};
+
+void TGRpcRequestProxy::Handle(TEvKikhouseDescribeTableRequest::TPtr& ev, const TActorContext& ctx) {
+ ctx.Register(new TKikhouseDescribeTableRPC(ev->Release().Release()));
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
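
A brief aside on the key-column handling restored in ProceedWithSchema above: each column carries a KeyOrder (-1 for non-key columns), and the key vectors are grown with resize(Max(size, KeyOrder + 1)) so the primary key comes out ordered by KeyOrder regardless of the map's iteration order. A compilable plain-C++ sketch of just that step, with simplified stand-in types:

// Collects primary-key column names in key order, mirroring the
// resize(Max(size, KeyOrder + 1)) pattern from ProceedWithSchema.
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct TColumnMeta {
    std::string Name;
    int32_t KeyOrder = -1; // -1 means the column is not part of the primary key
    uint32_t TypeId = 0;
};

std::vector<std::string> CollectPrimaryKey(const std::vector<TColumnMeta>& columns) {
    std::vector<std::string> keyColumns;
    for (const auto& col : columns) {
        if (col.KeyOrder == -1)
            continue; // non-key columns are only reported as metadata
        keyColumns.resize(std::max(keyColumns.size(), static_cast<size_t>(col.KeyOrder) + 1));
        keyColumns[col.KeyOrder] = col.Name;
    }
    return keyColumns;
}

In the actual code the same resize pattern is applied to KeyColumnTypes, so column names and type ids stay aligned by key position.
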
diff --git a/ydb/core/grpc_services/rpc_kh_snapshots.cpp b/ydb/core/grpc_services/rpc_kh_snapshots.cpp
index a5e40261a46..c390c81faeb 100644
--- a/ydb/core/grpc_services/rpc_kh_snapshots.cpp
+++ b/ydb/core/grpc_services/rpc_kh_snapshots.cpp
@@ -1,5 +1,5 @@
#include "rpc_kh_snapshots.h"
-#include "resolve_local_db_table.h"
+#include "resolve_local_db_table.h"
#include "rpc_calls.h"
#include "rpc_common.h"
@@ -105,9 +105,9 @@ public:
auto* tx = req->Record.MutableTransaction()->MutableCreateVolatileSnapshot();
for (const TString& path : proto->path()) {
- if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
- continue;
- }
+ if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
+ continue;
+ }
tx->AddTables()->SetTablePath(path);
}
tx->SetTimeoutMs(SnapshotTimeout.MilliSeconds());
@@ -241,9 +241,9 @@ public:
auto* tx = req->Record.MutableTransaction()->MutableRefreshVolatileSnapshot();
for (const TString& path : proto->path()) {
- if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
- continue;
- }
+ if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
+ continue;
+ }
tx->AddTables()->SetTablePath(path);
}
tx->SetSnapshotStep(SnapshotId.Step);
@@ -382,9 +382,9 @@ public:
auto* tx = req->Record.MutableTransaction()->MutableDiscardVolatileSnapshot();
for (const TString& path : proto->path()) {
- if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
- continue;
- }
+ if (proto->ignore_system_views() && TryParseLocalDbPath(::NKikimr::SplitPath(path))) {
+ continue;
+ }
tx->AddTables()->SetTablePath(path);
}
tx->SetSnapshotStep(SnapshotId.Step);
diff --git a/ydb/core/grpc_services/rpc_load_rows.cpp b/ydb/core/grpc_services/rpc_load_rows.cpp
index 15383a5e78a..127347e067e 100644
--- a/ydb/core/grpc_services/rpc_load_rows.cpp
+++ b/ydb/core/grpc_services/rpc_load_rows.cpp
@@ -1,8 +1,8 @@
-#include "grpc_request_proxy.h"
-
-#include "rpc_calls.h"
-#include "rpc_common.h"
-
+#include "grpc_request_proxy.h"
+
+#include "rpc_calls.h"
+#include "rpc_common.h"
+
#include <ydb/core/tx/tx_proxy/upload_rows_common_impl.h>
#include <ydb/library/yql/public/udf/udf_types.h>
#include <ydb/library/yql/minikql/dom/yson.h>
@@ -13,86 +13,86 @@
#include <ydb/library/binary_json/write.h>
#include <ydb/library/dynumber/dynumber.h>
-#include <util/string/vector.h>
+#include <util/string/vector.h>
#include <util/generic/size_literals.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
+
+namespace NKikimr {
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
namespace {
-bool CheckValueData(NScheme::TTypeId type, const TCell& cell, TString& err) {
- bool ok = true;
- switch (type) {
- case NScheme::NTypeIds::Bool:
- case NScheme::NTypeIds::Int8:
- case NScheme::NTypeIds::Uint8:
- case NScheme::NTypeIds::Int16:
- case NScheme::NTypeIds::Uint16:
- case NScheme::NTypeIds::Int32:
- case NScheme::NTypeIds::Uint32:
- case NScheme::NTypeIds::Int64:
- case NScheme::NTypeIds::Uint64:
- case NScheme::NTypeIds::Float:
- case NScheme::NTypeIds::Double:
- case NScheme::NTypeIds::String:
- break;
-
- case NScheme::NTypeIds::Decimal:
- ok = !NYql::NDecimal::IsError(cell.AsValue<NYql::NDecimal::TInt128>());
- break;
-
- case NScheme::NTypeIds::Date:
- ok = cell.AsValue<ui16>() < NUdf::MAX_DATE;
- break;
-
- case NScheme::NTypeIds::Datetime:
- ok = cell.AsValue<ui32>() < NUdf::MAX_DATETIME;
- break;
-
- case NScheme::NTypeIds::Timestamp:
- ok = cell.AsValue<ui64>() < NUdf::MAX_TIMESTAMP;
- break;
-
- case NScheme::NTypeIds::Interval:
- ok = (ui64)std::abs(cell.AsValue<i64>()) < NUdf::MAX_TIMESTAMP;
- break;
-
- case NScheme::NTypeIds::Utf8:
- ok = NYql::IsUtf8(cell.AsBuf());
- break;
-
- case NScheme::NTypeIds::Yson:
- ok = NYql::NDom::IsValidYson(cell.AsBuf());
- break;
-
- case NScheme::NTypeIds::Json:
- ok = NYql::NDom::IsValidJson(cell.AsBuf());
- break;
-
- case NScheme::NTypeIds::JsonDocument:
- // JsonDocument value was verified at parsing time
- break;
-
- case NScheme::NTypeIds::DyNumber:
- // DyNumber value was verified at parsing time
- break;
-
- default:
- err = Sprintf("Unexpected type %d", type);
- return false;
- }
-
- if (!ok) {
- err = Sprintf("Invalid %s value", NScheme::TypeName(type));
- }
-
- return ok;
-}
-
+bool CheckValueData(NScheme::TTypeId type, const TCell& cell, TString& err) {
+ bool ok = true;
+ switch (type) {
+ case NScheme::NTypeIds::Bool:
+ case NScheme::NTypeIds::Int8:
+ case NScheme::NTypeIds::Uint8:
+ case NScheme::NTypeIds::Int16:
+ case NScheme::NTypeIds::Uint16:
+ case NScheme::NTypeIds::Int32:
+ case NScheme::NTypeIds::Uint32:
+ case NScheme::NTypeIds::Int64:
+ case NScheme::NTypeIds::Uint64:
+ case NScheme::NTypeIds::Float:
+ case NScheme::NTypeIds::Double:
+ case NScheme::NTypeIds::String:
+ break;
+
+ case NScheme::NTypeIds::Decimal:
+ ok = !NYql::NDecimal::IsError(cell.AsValue<NYql::NDecimal::TInt128>());
+ break;
+
+ case NScheme::NTypeIds::Date:
+ ok = cell.AsValue<ui16>() < NUdf::MAX_DATE;
+ break;
+
+ case NScheme::NTypeIds::Datetime:
+ ok = cell.AsValue<ui32>() < NUdf::MAX_DATETIME;
+ break;
+
+ case NScheme::NTypeIds::Timestamp:
+ ok = cell.AsValue<ui64>() < NUdf::MAX_TIMESTAMP;
+ break;
+
+ case NScheme::NTypeIds::Interval:
+ ok = (ui64)std::abs(cell.AsValue<i64>()) < NUdf::MAX_TIMESTAMP;
+ break;
+
+ case NScheme::NTypeIds::Utf8:
+ ok = NYql::IsUtf8(cell.AsBuf());
+ break;
+
+ case NScheme::NTypeIds::Yson:
+ ok = NYql::NDom::IsValidYson(cell.AsBuf());
+ break;
+
+ case NScheme::NTypeIds::Json:
+ ok = NYql::NDom::IsValidJson(cell.AsBuf());
+ break;
+
+ case NScheme::NTypeIds::JsonDocument:
+ // JsonDocument value was verified at parsing time
+ break;
+
+ case NScheme::NTypeIds::DyNumber:
+ // DyNumber value was verified at parsing time
+ break;
+
+ default:
+ err = Sprintf("Unexpected type %d", type);
+ return false;
+ }
+
+ if (!ok) {
+ err = Sprintf("Invalid %s value", NScheme::TypeName(type));
+ }
+
+ return ok;
+}
+
// TODO: no mapping for DATE, DATETIME, TZ_*, YSON, JSON, UUID, JSON_DOCUMENT, DYNUMBER
bool ConvertArrowToYdbPrimitive(const arrow::DataType& type, Ydb::Type& toType) {
switch (type.id()) {
@@ -173,58 +173,58 @@ bool ConvertArrowToYdbPrimitive(const arrow::DataType& type, Ydb::Type& toType)
}
-class TUploadRowsRPCPublic : public NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ> {
- using TBase = NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ>;
-public:
+class TUploadRowsRPCPublic : public NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ> {
+ using TBase = NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ>;
+public:
explicit TUploadRowsRPCPublic(TAutoPtr<TEvBulkUpsertRequest> request)
- : TBase(GetDuration(request->GetProtoRequest()->operation_params().operation_timeout()))
- , Request(request)
- {}
-
-private:
- static bool CellFromProtoVal(NScheme::TTypeId type, const Ydb::Value* vp,
- TCell& c, TString& err, TMemoryPool& valueDataPool)
- {
- if (vp->Hasnull_flag_value()) {
- c = TCell();
- return true;
- }
-
- if (vp->Hasnested_value()) {
- vp = &vp->Getnested_value();
- }
-
- const Ydb::Value& val = *vp;
-
-#define EXTRACT_VAL(cellType, protoType, cppType) \
- case NScheme::NTypeIds::cellType : { \
- cppType v = val.Get##protoType##_value(); \
- c = TCell((const char*)&v, sizeof(v)); \
- break; \
- }
-
- switch (type) {
- EXTRACT_VAL(Bool, bool, ui8);
- EXTRACT_VAL(Int8, int32, i8);
- EXTRACT_VAL(Uint8, uint32, ui8);
- EXTRACT_VAL(Int16, int32, i16);
- EXTRACT_VAL(Uint16, uint32, ui16);
- EXTRACT_VAL(Int32, int32, i32);
- EXTRACT_VAL(Uint32, uint32, ui32);
- EXTRACT_VAL(Int64, int64, i64);
- EXTRACT_VAL(Uint64, uint64, ui64);
- EXTRACT_VAL(Float, float, float);
- EXTRACT_VAL(Double, double, double);
- EXTRACT_VAL(Date, uint32, ui16);
- EXTRACT_VAL(Datetime, uint32, ui32);
- EXTRACT_VAL(Timestamp, uint64, ui64);
- EXTRACT_VAL(Interval, int64, i64);
- case NScheme::NTypeIds::Json :
- case NScheme::NTypeIds::Utf8 : {
- TString v = val.Gettext_value();
- c = TCell(v.data(), v.size());
- break;
- }
+ : TBase(GetDuration(request->GetProtoRequest()->operation_params().operation_timeout()))
+ , Request(request)
+ {}
+
+private:
+ static bool CellFromProtoVal(NScheme::TTypeId type, const Ydb::Value* vp,
+ TCell& c, TString& err, TMemoryPool& valueDataPool)
+ {
+ if (vp->Hasnull_flag_value()) {
+ c = TCell();
+ return true;
+ }
+
+ if (vp->Hasnested_value()) {
+ vp = &vp->Getnested_value();
+ }
+
+ const Ydb::Value& val = *vp;
+
+#define EXTRACT_VAL(cellType, protoType, cppType) \
+ case NScheme::NTypeIds::cellType : { \
+ cppType v = val.Get##protoType##_value(); \
+ c = TCell((const char*)&v, sizeof(v)); \
+ break; \
+ }
+
+ switch (type) {
+ EXTRACT_VAL(Bool, bool, ui8);
+ EXTRACT_VAL(Int8, int32, i8);
+ EXTRACT_VAL(Uint8, uint32, ui8);
+ EXTRACT_VAL(Int16, int32, i16);
+ EXTRACT_VAL(Uint16, uint32, ui16);
+ EXTRACT_VAL(Int32, int32, i32);
+ EXTRACT_VAL(Uint32, uint32, ui32);
+ EXTRACT_VAL(Int64, int64, i64);
+ EXTRACT_VAL(Uint64, uint64, ui64);
+ EXTRACT_VAL(Float, float, float);
+ EXTRACT_VAL(Double, double, double);
+ EXTRACT_VAL(Date, uint32, ui16);
+ EXTRACT_VAL(Datetime, uint32, ui32);
+ EXTRACT_VAL(Timestamp, uint64, ui64);
+ EXTRACT_VAL(Interval, int64, i64);
+ case NScheme::NTypeIds::Json :
+ case NScheme::NTypeIds::Utf8 : {
+ TString v = val.Gettext_value();
+ c = TCell(v.data(), v.size());
+ break;
+ }
case NScheme::NTypeIds::JsonDocument : {
const auto binaryJson = NBinaryJson::SerializeToBinaryJson(val.Gettext_value());
if (!binaryJson.Defined()) {
@@ -245,62 +245,62 @@ private:
c = TCell(dyNumberInPool.data(), dyNumberInPool.size());
break;
}
- case NScheme::NTypeIds::Yson :
- case NScheme::NTypeIds::String : {
- TString v = val.Getbytes_value();
- c = TCell(v.data(), v.size());
- break;
- }
- case NScheme::NTypeIds::Decimal : {
- std::pair<ui64,ui64>& decimalVal = *valueDataPool.Allocate<std::pair<ui64,ui64> >();
- decimalVal.first = val.low_128();
- decimalVal.second = val.high_128();
- c = TCell((const char*)&decimalVal, sizeof(decimalVal));
- break;
- }
- default:
- err = Sprintf("Unexpected type %d", type);
- return false;
- };
-
- return CheckValueData(type, c, err);
- }
-
- template <class TProto>
- static bool FillCellsFromProto(TVector<TCell>& cells, const TVector<TFieldDescription>& descr, const TProto& proto,
- TString& err, TMemoryPool& valueDataPool)
- {
- cells.clear();
- cells.reserve(descr.size());
-
- for (auto& fd : descr) {
- if (proto.items_size() <= (int)fd.PositionInStruct) {
- err = "Invalid request";
- return false;
- }
- cells.push_back({});
- if (!CellFromProtoVal(fd.Type, &proto.Getitems(fd.PositionInStruct), cells.back(), err, valueDataPool)) {
- return false;
- }
+ case NScheme::NTypeIds::Yson :
+ case NScheme::NTypeIds::String : {
+ TString v = val.Getbytes_value();
+ c = TCell(v.data(), v.size());
+ break;
+ }
+ case NScheme::NTypeIds::Decimal : {
+ std::pair<ui64,ui64>& decimalVal = *valueDataPool.Allocate<std::pair<ui64,ui64> >();
+ decimalVal.first = val.low_128();
+ decimalVal.second = val.high_128();
+ c = TCell((const char*)&decimalVal, sizeof(decimalVal));
+ break;
+ }
+ default:
+ err = Sprintf("Unexpected type %d", type);
+ return false;
+ };
+
+ return CheckValueData(type, c, err);
+ }
+
+ template <class TProto>
+ static bool FillCellsFromProto(TVector<TCell>& cells, const TVector<TFieldDescription>& descr, const TProto& proto,
+ TString& err, TMemoryPool& valueDataPool)
+ {
+ cells.clear();
+ cells.reserve(descr.size());
+
+ for (auto& fd : descr) {
+ if (proto.items_size() <= (int)fd.PositionInStruct) {
+ err = "Invalid request";
+ return false;
+ }
+ cells.push_back({});
+ if (!CellFromProtoVal(fd.Type, &proto.Getitems(fd.PositionInStruct), cells.back(), err, valueDataPool)) {
+ return false;
+ }
if (fd.NotNull && cells.back().IsNull()) {
err = TStringBuilder() << "Received NULL value for not null column: " << fd.ColName;
return false;
}
- }
-
- return true;
- }
-
+ }
+
+ return true;
+ }
+
private:
bool ReportCostInfoEnabled() const {
return Request->GetProtoRequest()->operation_params().report_cost_info() == Ydb::FeatureFlag::ENABLED;
}
- TString GetDatabase()override {
- return Request->GetDatabaseName().GetOrElse(DatabaseFromDomain(AppData()));
- }
-
+ TString GetDatabase()override {
+ return Request->GetDatabaseName().GetOrElse(DatabaseFromDomain(AppData()));
+ }
+
const TString& GetTable() override {
return Request->GetProtoRequest()->table();
}
@@ -331,25 +331,25 @@ private:
NACLib::TUserToken userToken(Request->GetInternalToken());
const ui32 access = NACLib::EAccessRights::UpdateRow;
- auto resolveResult = GetResolveNameResult();
- if (!resolveResult) {
+ auto resolveResult = GetResolveNameResult();
+ if (!resolveResult) {
TStringStream explanation;
explanation << "Access denied for " << userToken.GetUserSID()
- << " table '" << Request->GetProtoRequest()->table()
- << "' has not been resolved yet";
+ << " table '" << Request->GetProtoRequest()->table()
+ << "' has not been resolved yet";
errorMessage = explanation.Str();
return false;
}
- for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : resolveResult->ResultSet) {
- if (entry.Status == NSchemeCache::TSchemeCacheNavigate::EStatus::Ok
- && entry.SecurityObject != nullptr
- && !entry.SecurityObject->CheckAccess(access, userToken))
+ for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : resolveResult->ResultSet) {
+ if (entry.Status == NSchemeCache::TSchemeCacheNavigate::EStatus::Ok
+ && entry.SecurityObject != nullptr
+ && !entry.SecurityObject->CheckAccess(access, userToken))
{
TStringStream explanation;
explanation << "Access denied for " << userToken.GetUserSID()
<< " with access " << NACLib::AccessRightsToString(access)
- << " to table '" << Request->GetProtoRequest()->table() << "'";
+ << " to table '" << Request->GetProtoRequest()->table() << "'";
errorMessage = explanation.Str();
return false;
@@ -378,34 +378,34 @@ private:
}
bool ExtractRows(TString& errorMessage) override {
- // Parse type field
- // Check that it is a list of stuct
- // List all memebers and check their names and types
- // Save indexes of key column members and no-key members
-
- TVector<TCell> keyCells;
- TVector<TCell> valueCells;
+ // Parse type field
+ // Check that it is a list of stuct
+ // List all memebers and check their names and types
+ // Save indexes of key column members and no-key members
+
+ TVector<TCell> keyCells;
+ TVector<TCell> valueCells;
float cost = 0.0f;
-
- // TODO: check that value is a list of structs
-
- // For each row in values
- TMemoryPool valueDataPool(256);
- const auto& rows = Request->GetProtoRequest()->Getrows().Getvalue().Getitems();
- for (const auto& r : rows) {
- valueDataPool.Clear();
-
+
+ // TODO: check that value is a list of structs
+
+ // For each row in values
+ TMemoryPool valueDataPool(256);
+ const auto& rows = Request->GetProtoRequest()->Getrows().Getvalue().Getitems();
+ for (const auto& r : rows) {
+ valueDataPool.Clear();
+
ui64 sz = 0;
- // Take members corresponding to key columns
- if (!FillCellsFromProto(keyCells, KeyColumnPositions, r, errorMessage, valueDataPool)) {
- return false;
- }
-
- // Fill rest of cells with non-key column members
- if (!FillCellsFromProto(valueCells, ValueColumnPositions, r, errorMessage, valueDataPool)) {
- return false;
- }
-
+ // Take members corresponding to key columns
+ if (!FillCellsFromProto(keyCells, KeyColumnPositions, r, errorMessage, valueDataPool)) {
+ return false;
+ }
+
+ // Fill rest of cells with non-key column members
+ if (!FillCellsFromProto(valueCells, ValueColumnPositions, r, errorMessage, valueDataPool)) {
+ return false;
+ }
+
for (const auto& cell : keyCells) {
sz += cell.Size();
}
@@ -416,15 +416,15 @@ private:
cost += TUpsertCost::OneRowCost(sz);
- // Save serialized key and value
- TSerializedCellVec serializedKey(TSerializedCellVec::Serialize(keyCells));
- TString serializedValue = TSerializedCellVec::Serialize(valueCells);
- AllRows.emplace_back(serializedKey, serializedValue);
- }
-
+ // Save serialized key and value
+ TSerializedCellVec serializedKey(TSerializedCellVec::Serialize(keyCells));
+ TString serializedValue = TSerializedCellVec::Serialize(valueCells);
+ AllRows.emplace_back(serializedKey, serializedValue);
+ }
+
RuCost = TUpsertCost::CostToRu(cost);
- return true;
- }
+ return true;
+ }
bool ExtractBatch(TString& errorMessage) override {
Batch = RowsToBatch(AllRows, errorMessage);
@@ -434,8 +434,8 @@ private:
private:
TAutoPtr<TEvBulkUpsertRequest> Request;
TVector<std::pair<TSerializedCellVec, TString>> AllRows;
-};
-
+};
+
class TUploadColumnsRPCPublic : public NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ> {
using TBase = NTxProxy::TUploadRowsBase<NKikimrServices::TActivity::GRPC_REQ>;
public:
@@ -654,7 +654,7 @@ private:
}
};
-void TGRpcRequestProxy::Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorContext& ctx) {
+void TGRpcRequestProxy::Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorContext& ctx) {
auto* req = ev->Get()->GetProtoRequest();
if (req->has_arrow_batch_settings()) {
ctx.Register(new TUploadColumnsRPCPublic(ev->Release().Release()));
@@ -663,7 +663,7 @@ void TGRpcRequestProxy::Handle(TEvBulkUpsertRequest::TPtr& ev, const TActorConte
} else {
ctx.Register(new TUploadRowsRPCPublic(ev->Release().Release()));
}
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
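
The rpc_load_rows.cpp changes above restore CheckValueData and CellFromProtoVal, which validate each cell after extraction: fixed-width numeric and string types pass through, date-like types are range-checked, and Utf8/Yson/Json payloads are checked for well-formedness. A reduced plain-C++ sketch of the range-check portion; the enum and the Max* limits are illustrative stand-ins rather than the NUdf constants used in the real code.

// Per-type range check in the spirit of CheckValueData: plain types need no
// check, date-like types must be below their respective maxima.
#include <cstdint>
#include <string>

enum class EType { Bool, Uint64, Date, Datetime, Timestamp, Interval, Utf8 };

bool CheckValue(EType type, uint64_t value, std::string& err) {
    constexpr uint64_t MaxDate = 49673;                          // days, illustrative
    constexpr uint64_t MaxDatetime = 4291747200ULL;              // seconds, illustrative
    constexpr uint64_t MaxTimestamp = MaxDatetime * 1000000ULL;  // microseconds, illustrative

    bool ok = true;
    switch (type) {
    case EType::Bool:
    case EType::Uint64:
    case EType::Utf8:
        break; // nothing to range-check here (Utf8 well-formedness is a separate check)
    case EType::Date:
        ok = value < MaxDate;
        break;
    case EType::Datetime:
        ok = value < MaxDatetime;
        break;
    case EType::Timestamp:
    case EType::Interval: // interval magnitude is bounded by the timestamp range
        ok = value < MaxTimestamp;
        break;
    }
    if (!ok)
        err = "Invalid value for type";
    return ok;
}
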
diff --git a/ydb/core/grpc_services/rpc_log_store.cpp b/ydb/core/grpc_services/rpc_log_store.cpp
index 1a4aaca0e3f..d77f7c7cc01 100644
--- a/ydb/core/grpc_services/rpc_log_store.cpp
+++ b/ydb/core/grpc_services/rpc_log_store.cpp
@@ -1,22 +1,22 @@
#include "service_logstore.h"
-#include "rpc_common.h"
-#include "rpc_scheme_base.h"
-
+#include "rpc_common.h"
+#include "rpc_scheme_base.h"
+
#include <ydb/core/ydb_convert/table_description.h>
#include <ydb/core/ydb_convert/ydb_convert.h>
#include <ydb/core/ydb_convert/table_settings.h>
#include <ydb/core/scheme/scheme_type_id.h>
#include <ydb/library/mkql_proto/mkql_proto.h>
-
+
#include <ydb/core/grpc_services/base/base.h>
#include <ydb/public/api/grpc/draft/ydb_logstore_v1.pb.h>
-namespace NKikimr {
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
+namespace NKikimr {
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
using TEvCreateLogStoreRequest =
TGrpcRequestOperationCall<Ydb::LogStore::CreateLogStoreRequest, Ydb::LogStore::CreateLogStoreResponse>;
using TEvDescribeLogStoreRequest =
@@ -33,512 +33,512 @@ using TEvAlterLogTableRequest =
TGrpcRequestOperationCall<Ydb::LogStore::AlterLogTableRequest, Ydb::LogStore::AlterLogTableResponse>;
bool ConvertSchemaFromPublicToInternal(const Ydb::LogStore::Schema& from, NKikimrSchemeOp::TColumnTableSchema& to,
- Ydb::StatusIds::StatusCode& status, TString& error)
-{
- to.MutableKeyColumnNames()->CopyFrom(from.primary_key());
- for (const auto& column : from.columns()) {
- auto* col = to.AddColumns();
- col->SetName(column.name());
- ui32 typeId;
- if (!ExtractColumnTypeId(typeId, column.type(), status, error)) {
- return false;
- }
- auto typeName = NScheme::TypeName(typeId);
- col->SetType(typeName);
- }
+ Ydb::StatusIds::StatusCode& status, TString& error)
+{
+ to.MutableKeyColumnNames()->CopyFrom(from.primary_key());
+ for (const auto& column : from.columns()) {
+ auto* col = to.AddColumns();
+ col->SetName(column.name());
+ ui32 typeId;
+ if (!ExtractColumnTypeId(typeId, column.type(), status, error)) {
+ return false;
+ }
+ auto typeName = NScheme::TypeName(typeId);
+ col->SetType(typeName);
+ }
to.SetEngine(NKikimrSchemeOp::COLUMN_ENGINE_REPLACING_TIMESERIES);
- return true;
-}
-
+ return true;
+}
+
bool ConvertSchemaFromInternalToPublic(const NKikimrSchemeOp::TColumnTableSchema& from, Ydb::LogStore::Schema& to,
- Ydb::StatusIds::StatusCode& status, TString& error)
-{
+ Ydb::StatusIds::StatusCode& status, TString& error)
+{
if (from.GetEngine() != NKikimrSchemeOp::COLUMN_ENGINE_REPLACING_TIMESERIES) {
- status = Ydb::StatusIds::INTERNAL_ERROR;
+ status = Ydb::StatusIds::INTERNAL_ERROR;
error = TStringBuilder() << "Unexpected table engine: " << NKikimrSchemeOp::EColumnTableEngine_Name(from.GetEngine());
- return false;
- }
- to.mutable_primary_key()->CopyFrom(from.GetKeyColumnNames());
- for (const auto& column : from.GetColumns()) {
- auto* col = to.add_columns();
- col->set_name(column.GetName());
- ui32 typeId = column.GetTypeId();
- auto& item = *col->mutable_type()->mutable_optional_type()->mutable_item();
- if (typeId == NYql::NProto::TypeIds::Decimal) {
- auto typeParams = item.mutable_decimal_type();
- typeParams->set_precision(22);
- typeParams->set_scale(9);
- } else {
- try {
- NMiniKQL::ExportPrimitiveTypeToProto(typeId, item);
- } catch (...) {
- status = Ydb::StatusIds::INTERNAL_ERROR;
- error = TStringBuilder() << "Unexpected type for column '" << column.GetName() << "': " << column.GetType();
- return false;
- }
- }
- }
- return true;
-}
-
-
-class TCreateLogStoreRPC : public TRpcSchemeRequestActor<TCreateLogStoreRPC, TEvCreateLogStoreRequest> {
- using TBase = TRpcSchemeRequestActor<TCreateLogStoreRPC, TEvCreateLogStoreRequest>;
-
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
+ return false;
+ }
+ to.mutable_primary_key()->CopyFrom(from.GetKeyColumnNames());
+ for (const auto& column : from.GetColumns()) {
+ auto* col = to.add_columns();
+ col->set_name(column.GetName());
+ ui32 typeId = column.GetTypeId();
+ auto& item = *col->mutable_type()->mutable_optional_type()->mutable_item();
+ if (typeId == NYql::NProto::TypeIds::Decimal) {
+ auto typeParams = item.mutable_decimal_type();
+ typeParams->set_precision(22);
+ typeParams->set_scale(9);
+ } else {
+ try {
+ NMiniKQL::ExportPrimitiveTypeToProto(typeId, item);
+ } catch (...) {
+ status = Ydb::StatusIds::INTERNAL_ERROR;
+ error = TStringBuilder() << "Unexpected type for column '" << column.GetName() << "': " << column.GetType();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+class TCreateLogStoreRPC : public TRpcSchemeRequestActor<TCreateLogStoreRPC, TEvCreateLogStoreRequest> {
+ using TBase = TRpcSchemeRequestActor<TCreateLogStoreRPC, TEvCreateLogStoreRequest>;
+
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
explicit TCreateLogStoreRPC(IRequestOpCtx* request)
: TBase(request)
- {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- Become(&TCreateLogStoreRPC::StateWork);
- }
-
-private:
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = GetProtoRequest();
- std::pair<TString, TString> destinationPathPair;
- try {
- destinationPathPair = SplitPath(req->path());
- } catch (const std::exception& ex) {
- Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
- return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
-
- const auto& workingDir = destinationPathPair.first;
- const auto& name = destinationPathPair.second;
-
- Ydb::StatusIds::StatusCode status;
- TString error;
-
- std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
- NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
+ {}
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ Become(&TCreateLogStoreRPC::StateWork);
+ }
+
+private:
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = GetProtoRequest();
+ std::pair<TString, TString> destinationPathPair;
+ try {
+ destinationPathPair = SplitPath(req->path());
+ } catch (const std::exception& ex) {
+ Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
+ return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+
+ const auto& workingDir = destinationPathPair.first;
+ const auto& name = destinationPathPair.second;
+
+ Ydb::StatusIds::StatusCode status;
+ TString error;
+
+ std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
+ NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
NKikimrSchemeOp::TModifyScheme* modifyScheme = record.MutableTransaction()->MutableModifyScheme();
- modifyScheme->SetWorkingDir(workingDir);
+ modifyScheme->SetWorkingDir(workingDir);
modifyScheme->SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpCreateColumnStore);
auto create = modifyScheme->MutableCreateColumnStore();
- create->SetName(name);
- create->SetColumnShardCount(req->column_shard_count());
- for (const auto& schemaPreset : req->schema_presets()) {
- auto* toSchemaPreset = create->AddSchemaPresets();
- toSchemaPreset->SetName(schemaPreset.name());
- if (!ConvertSchemaFromPublicToInternal(schemaPreset.schema(), *toSchemaPreset->MutableSchema(), status, error)) {
- LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogStore schema error: %s", error.c_str());
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- }
- ctx.Send(MakeTxProxyID(), proposeRequest.release());
- }
-};
-
-class TDescribeLogStoreRPC : public TRpcSchemeRequestActor<TDescribeLogStoreRPC, TEvDescribeLogStoreRequest> {
- using TBase = TRpcSchemeRequestActor<TDescribeLogStoreRPC, TEvDescribeLogStoreRequest>;
-
-public:
+ create->SetName(name);
+ create->SetColumnShardCount(req->column_shard_count());
+ for (const auto& schemaPreset : req->schema_presets()) {
+ auto* toSchemaPreset = create->AddSchemaPresets();
+ toSchemaPreset->SetName(schemaPreset.name());
+ if (!ConvertSchemaFromPublicToInternal(schemaPreset.schema(), *toSchemaPreset->MutableSchema(), status, error)) {
+ LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogStore schema error: %s", error.c_str());
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ }
+ ctx.Send(MakeTxProxyID(), proposeRequest.release());
+ }
+};
+
+class TDescribeLogStoreRPC : public TRpcSchemeRequestActor<TDescribeLogStoreRPC, TEvDescribeLogStoreRequest> {
+ using TBase = TRpcSchemeRequestActor<TDescribeLogStoreRPC, TEvDescribeLogStoreRequest>;
+
+public:
TDescribeLogStoreRPC(IRequestOpCtx* request)
: TBase(request) {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- Become(&TDescribeLogStoreRPC::StateWork);
- }
-
-private:
- void StateWork(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) {
- switch (ev->GetTypeRewrite()) {
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ Become(&TDescribeLogStoreRPC::StateWork);
+ }
+
+private:
+ void StateWork(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) {
+ switch (ev->GetTypeRewrite()) {
HFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, Handle);
- default: TBase::StateWork(ev, ctx);
- }
- }
-
+ default: TBase::StateWork(ev, ctx);
+ }
+ }
+
void Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev, const TActorContext& ctx) {
- const auto& record = ev->Get()->GetRecord();
- const auto status = record.GetStatus();
- if (record.HasReason()) {
- auto issue = NYql::TIssue(record.GetReason());
- Request_->RaiseIssue(issue);
- }
- Ydb::LogStore::DescribeLogStoreResult describeLogStoreResult;
- switch (status) {
+ const auto& record = ev->Get()->GetRecord();
+ const auto status = record.GetStatus();
+ if (record.HasReason()) {
+ auto issue = NYql::TIssue(record.GetReason());
+ Request_->RaiseIssue(issue);
+ }
+ Ydb::LogStore::DescribeLogStoreResult describeLogStoreResult;
+ switch (status) {
case NKikimrScheme::StatusSuccess: {
- const auto& pathDescription = record.GetPathDescription();
- Ydb::Scheme::Entry* selfEntry = describeLogStoreResult.mutable_self();
- selfEntry->set_name(pathDescription.GetSelf().GetName());
- selfEntry->set_type(static_cast<Ydb::Scheme::Entry::Type>(pathDescription.GetSelf().GetPathType()));
+ const auto& pathDescription = record.GetPathDescription();
+ Ydb::Scheme::Entry* selfEntry = describeLogStoreResult.mutable_self();
+ selfEntry->set_name(pathDescription.GetSelf().GetName());
+ selfEntry->set_type(static_cast<Ydb::Scheme::Entry::Type>(pathDescription.GetSelf().GetPathType()));
if (pathDescription.GetSelf().GetPathType() != NKikimrSchemeOp::EPathTypeColumnStore) {
- return Reply(Ydb::StatusIds::BAD_REQUEST, "Path is not LogStore", NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- ConvertDirectoryEntry(pathDescription.GetSelf(), selfEntry, true);
+ return Reply(Ydb::StatusIds::BAD_REQUEST, "Path is not LogStore", NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ ConvertDirectoryEntry(pathDescription.GetSelf(), selfEntry, true);
const auto& storeDescription = pathDescription.GetColumnStoreDescription();
- describeLogStoreResult.set_column_shard_count(storeDescription.GetColumnShardCount());
- for (const auto& schemaPreset : storeDescription.GetSchemaPresets()) {
- auto* toSchemaPreset = describeLogStoreResult.add_schema_presets();
- toSchemaPreset->set_name(schemaPreset.GetName());
- Ydb::StatusIds::StatusCode status;
- TString error;
- if (!ConvertSchemaFromInternalToPublic(schemaPreset.GetSchema(), *toSchemaPreset->mutable_schema(), status, error)) {
- LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogStore schema error: %s", error.c_str());
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- }
- return ReplyWithResult(Ydb::StatusIds::SUCCESS, describeLogStoreResult, ctx);
- }
-
+ describeLogStoreResult.set_column_shard_count(storeDescription.GetColumnShardCount());
+ for (const auto& schemaPreset : storeDescription.GetSchemaPresets()) {
+ auto* toSchemaPreset = describeLogStoreResult.add_schema_presets();
+ toSchemaPreset->set_name(schemaPreset.GetName());
+ Ydb::StatusIds::StatusCode status;
+ TString error;
+ if (!ConvertSchemaFromInternalToPublic(schemaPreset.GetSchema(), *toSchemaPreset->mutable_schema(), status, error)) {
+ LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogStore schema error: %s", error.c_str());
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ }
+ return ReplyWithResult(Ydb::StatusIds::SUCCESS, describeLogStoreResult, ctx);
+ }
+
case NKikimrScheme::StatusPathDoesNotExist:
case NKikimrScheme::StatusSchemeError: {
- return Reply(Ydb::StatusIds::SCHEME_ERROR, ctx);
- }
-
+ return Reply(Ydb::StatusIds::SCHEME_ERROR, ctx);
+ }
+
case NKikimrScheme::StatusAccessDenied: {
- return Reply(Ydb::StatusIds::UNAUTHORIZED, ctx);
- }
-
+ return Reply(Ydb::StatusIds::UNAUTHORIZED, ctx);
+ }
+
case NKikimrScheme::StatusNotAvailable: {
- return Reply(Ydb::StatusIds::UNAVAILABLE, ctx);
- }
-
- default: {
- return Reply(Ydb::StatusIds::GENERIC_ERROR, ctx);
- }
- }
- }
-
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = GetProtoRequest();
-
- std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate());
- SetAuthToken(navigateRequest, *Request_);
- SetDatabase(navigateRequest.get(), *Request_);
+ return Reply(Ydb::StatusIds::UNAVAILABLE, ctx);
+ }
+
+ default: {
+ return Reply(Ydb::StatusIds::GENERIC_ERROR, ctx);
+ }
+ }
+ }
+
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = GetProtoRequest();
+
+ std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate());
+ SetAuthToken(navigateRequest, *Request_);
+ SetDatabase(navigateRequest.get(), *Request_);
NKikimrSchemeOp::TDescribePath* record = navigateRequest->Record.MutableDescribePath();
- record->SetPath(req->path());
-
- ctx.Send(MakeTxProxyID(), navigateRequest.release());
- }
-};
-
+ record->SetPath(req->path());
+
+ ctx.Send(MakeTxProxyID(), navigateRequest.release());
+ }
+};
+
template <class TEv, NKikimrSchemeOp::EOperationType EOpType>
-class TDropLogRPC : public TRpcSchemeRequestActor<TDropLogRPC<TEv, EOpType>, TEv> {
- using TSelf = TDropLogRPC<TEv, EOpType>;
- using TBase = TRpcSchemeRequestActor<TSelf, TEv>;
-
-public:
- TDropLogRPC(IRequestOpCtx* msg)
- : TBase(msg) {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- this->Become(&TSelf::StateWork);
- }
-
-private:
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = this->GetProtoRequest();
- std::pair<TString, TString> pathPair;
- try {
- pathPair = SplitPath(req->path());
- } catch (const std::exception& ex) {
- this->Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
- return ReplyWithResult(StatusIds::BAD_REQUEST, ctx);
- }
-
- const auto& workingDir = pathPair.first;
- const auto& name = pathPair.second;
-
- std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = this->CreateProposeTransaction();
- NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
+class TDropLogRPC : public TRpcSchemeRequestActor<TDropLogRPC<TEv, EOpType>, TEv> {
+ using TSelf = TDropLogRPC<TEv, EOpType>;
+ using TBase = TRpcSchemeRequestActor<TSelf, TEv>;
+
+public:
+ TDropLogRPC(IRequestOpCtx* msg)
+ : TBase(msg) {}
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ this->Become(&TSelf::StateWork);
+ }
+
+private:
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = this->GetProtoRequest();
+ std::pair<TString, TString> pathPair;
+ try {
+ pathPair = SplitPath(req->path());
+ } catch (const std::exception& ex) {
+ this->Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
+ return ReplyWithResult(StatusIds::BAD_REQUEST, ctx);
+ }
+
+ const auto& workingDir = pathPair.first;
+ const auto& name = pathPair.second;
+
+ std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = this->CreateProposeTransaction();
+ NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
NKikimrSchemeOp::TModifyScheme* modifyScheme = record.MutableTransaction()->MutableModifyScheme();
- modifyScheme->SetWorkingDir(workingDir);
- modifyScheme->SetOperationType(EOpType);
- auto drop = modifyScheme->MutableDrop();
- drop->SetName(name);
- ctx.Send(MakeTxProxyID(), proposeRequest.release());
- }
-
- void ReplyWithResult(StatusIds::StatusCode status, const TActorContext &ctx) {
- this->Request_->ReplyWithYdbStatus(status);
- this->Die(ctx);
- }
-};
-
-class TCreateLogTableRPC : public TRpcSchemeRequestActor<TCreateLogTableRPC, TEvCreateLogTableRequest> {
- using TBase = TRpcSchemeRequestActor<TCreateLogTableRPC, TEvCreateLogTableRequest>;
-
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
+ modifyScheme->SetWorkingDir(workingDir);
+ modifyScheme->SetOperationType(EOpType);
+ auto drop = modifyScheme->MutableDrop();
+ drop->SetName(name);
+ ctx.Send(MakeTxProxyID(), proposeRequest.release());
+ }
+
+ void ReplyWithResult(StatusIds::StatusCode status, const TActorContext &ctx) {
+ this->Request_->ReplyWithYdbStatus(status);
+ this->Die(ctx);
+ }
+};
+
+class TCreateLogTableRPC : public TRpcSchemeRequestActor<TCreateLogTableRPC, TEvCreateLogTableRequest> {
+ using TBase = TRpcSchemeRequestActor<TCreateLogTableRPC, TEvCreateLogTableRequest>;
+
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
explicit TCreateLogTableRPC(IRequestOpCtx* request)
: TBase(request)
- {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- Become(&TCreateLogTableRPC::StateWork);
- }
-
-private:
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = GetProtoRequest();
- std::pair<TString, TString> destinationPathPair;
- try {
- destinationPathPair = SplitPath(req->path());
- } catch (const std::exception& ex) {
- Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
- return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
-
- const auto& workingDir = destinationPathPair.first;
- const auto& name = destinationPathPair.second;
-
- Ydb::StatusIds::StatusCode status;
- TString error;
-
- std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
- NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
+ {}
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ Become(&TCreateLogTableRPC::StateWork);
+ }
+
+private:
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = GetProtoRequest();
+ std::pair<TString, TString> destinationPathPair;
+ try {
+ destinationPathPair = SplitPath(req->path());
+ } catch (const std::exception& ex) {
+ Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
+ return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+
+ const auto& workingDir = destinationPathPair.first;
+ const auto& name = destinationPathPair.second;
+
+ Ydb::StatusIds::StatusCode status;
+ TString error;
+
+ std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
+ NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
NKikimrSchemeOp::TModifyScheme* modifyScheme = record.MutableTransaction()->MutableModifyScheme();
- modifyScheme->SetWorkingDir(workingDir);
+ modifyScheme->SetWorkingDir(workingDir);
modifyScheme->SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpCreateColumnTable);
auto create = modifyScheme->MutableCreateColumnTable();
- create->SetName(name);
- if (!req->schema_preset_name().empty()) {
- create->SetSchemaPresetName(req->schema_preset_name());
- }
- if (req->has_schema()) {
- if (!ConvertSchemaFromPublicToInternal(req->schema(), *create->MutableSchema(), status, error)) {
- LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogTable schema error: %s", error.c_str());
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- }
-
- if (req->has_ttl_settings()) {
- if (!FillTtlSettings(*create->MutableTtlSettings()->MutableEnabled(), req->ttl_settings(), status, error)) {
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- }
-
- create->SetColumnShardCount(req->column_shard_count());
- auto* sharding = create->MutableSharding()->MutableHashSharding();
+ create->SetName(name);
+ if (!req->schema_preset_name().empty()) {
+ create->SetSchemaPresetName(req->schema_preset_name());
+ }
+ if (req->has_schema()) {
+ if (!ConvertSchemaFromPublicToInternal(req->schema(), *create->MutableSchema(), status, error)) {
+ LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogTable schema error: %s", error.c_str());
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ }
+
+ if (req->has_ttl_settings()) {
+ if (!FillTtlSettings(*create->MutableTtlSettings()->MutableEnabled(), req->ttl_settings(), status, error)) {
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ }
+
+ create->SetColumnShardCount(req->column_shard_count());
+ auto* sharding = create->MutableSharding()->MutableHashSharding();
sharding->SetFunction(NKikimrSchemeOp::TColumnTableSharding::THashSharding::HASH_FUNCTION_CLOUD_LOGS);
- sharding->MutableColumns()->CopyFrom(req->sharding_columns());
- ctx.Send(MakeTxProxyID(), proposeRequest.release());
- }
-};
-
-class TDescribeLogTableRPC : public TRpcSchemeRequestActor<TDescribeLogTableRPC, TEvDescribeLogTableRequest> {
- using TBase = TRpcSchemeRequestActor<TDescribeLogTableRPC, TEvDescribeLogTableRequest>;
-
-public:
+ sharding->MutableColumns()->CopyFrom(req->sharding_columns());
+ ctx.Send(MakeTxProxyID(), proposeRequest.release());
+ }
+};
+
+class TDescribeLogTableRPC : public TRpcSchemeRequestActor<TDescribeLogTableRPC, TEvDescribeLogTableRequest> {
+ using TBase = TRpcSchemeRequestActor<TDescribeLogTableRPC, TEvDescribeLogTableRequest>;
+
+public:
TDescribeLogTableRPC(IRequestOpCtx* request)
: TBase(request) {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- Become(&TDescribeLogTableRPC::StateWork);
- }
-
-private:
- void StateWork(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) {
- switch (ev->GetTypeRewrite()) {
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ Become(&TDescribeLogTableRPC::StateWork);
+ }
+
+private:
+ void StateWork(TAutoPtr<IEventHandle>& ev, const TActorContext& ctx) {
+ switch (ev->GetTypeRewrite()) {
HFunc(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, Handle);
- default: TBase::StateWork(ev, ctx);
- }
- }
-
+ default: TBase::StateWork(ev, ctx);
+ }
+ }
+
void Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr& ev, const TActorContext& ctx) {
- const auto& record = ev->Get()->GetRecord();
- const auto status = record.GetStatus();
- if (record.HasReason()) {
- auto issue = NYql::TIssue(record.GetReason());
- Request_->RaiseIssue(issue);
- }
- Ydb::LogStore::DescribeLogTableResult describeLogTableResult;
- switch (status) {
+ const auto& record = ev->Get()->GetRecord();
+ const auto status = record.GetStatus();
+ if (record.HasReason()) {
+ auto issue = NYql::TIssue(record.GetReason());
+ Request_->RaiseIssue(issue);
+ }
+ Ydb::LogStore::DescribeLogTableResult describeLogTableResult;
+ switch (status) {
case NKikimrScheme::StatusSuccess: {
- const auto& pathDescription = record.GetPathDescription();
- Ydb::Scheme::Entry* selfEntry = describeLogTableResult.mutable_self();
- selfEntry->set_name(pathDescription.GetSelf().GetName());
- selfEntry->set_type(static_cast<Ydb::Scheme::Entry::Type>(pathDescription.GetSelf().GetPathType()));
+ const auto& pathDescription = record.GetPathDescription();
+ Ydb::Scheme::Entry* selfEntry = describeLogTableResult.mutable_self();
+ selfEntry->set_name(pathDescription.GetSelf().GetName());
+ selfEntry->set_type(static_cast<Ydb::Scheme::Entry::Type>(pathDescription.GetSelf().GetPathType()));
if (pathDescription.GetSelf().GetPathType() != NKikimrSchemeOp::EPathTypeColumnTable) {
- return Reply(Ydb::StatusIds::BAD_REQUEST, "Path is not LogTable", NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- ConvertDirectoryEntry(pathDescription.GetSelf(), selfEntry, true);
+ return Reply(Ydb::StatusIds::BAD_REQUEST, "Path is not LogTable", NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ ConvertDirectoryEntry(pathDescription.GetSelf(), selfEntry, true);
const auto& tableDescription = pathDescription.GetColumnTableDescription();
- describeLogTableResult.set_column_shard_count(tableDescription.GetColumnShardCount());
- Ydb::StatusIds::StatusCode status;
- TString error;
- if (!ConvertSchemaFromInternalToPublic(tableDescription.GetSchema(), *describeLogTableResult.mutable_schema(), status, error)) {
- LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogTable schema error: %s", error.c_str());
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- if (tableDescription.HasSchemaPresetName()) {
- describeLogTableResult.set_schema_preset_name(tableDescription.GetSchemaPresetName());
- }
-
- if (tableDescription.HasTtlSettings() && tableDescription.GetTtlSettings().HasEnabled()) {
- const auto& inTTL = tableDescription.GetTtlSettings().GetEnabled();
-
- switch (inTTL.GetColumnUnit()) {
+ describeLogTableResult.set_column_shard_count(tableDescription.GetColumnShardCount());
+ Ydb::StatusIds::StatusCode status;
+ TString error;
+ if (!ConvertSchemaFromInternalToPublic(tableDescription.GetSchema(), *describeLogTableResult.mutable_schema(), status, error)) {
+ LOG_DEBUG(ctx, NKikimrServices::GRPC_SERVER, "LogTable schema error: %s", error.c_str());
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ if (tableDescription.HasSchemaPresetName()) {
+ describeLogTableResult.set_schema_preset_name(tableDescription.GetSchemaPresetName());
+ }
+
+ if (tableDescription.HasTtlSettings() && tableDescription.GetTtlSettings().HasEnabled()) {
+ const auto& inTTL = tableDescription.GetTtlSettings().GetEnabled();
+
+ switch (inTTL.GetColumnUnit()) {
case NKikimrSchemeOp::TTTLSettings::UNIT_AUTO: {
- auto& outTTL = *describeLogTableResult.mutable_ttl_settings()->mutable_date_type_column();
- outTTL.set_column_name(inTTL.GetColumnName());
- outTTL.set_expire_after_seconds(inTTL.GetExpireAfterSeconds());
- break;
- }
-
+ auto& outTTL = *describeLogTableResult.mutable_ttl_settings()->mutable_date_type_column();
+ outTTL.set_column_name(inTTL.GetColumnName());
+ outTTL.set_expire_after_seconds(inTTL.GetExpireAfterSeconds());
+ break;
+ }
+
case NKikimrSchemeOp::TTTLSettings::UNIT_SECONDS:
case NKikimrSchemeOp::TTTLSettings::UNIT_MILLISECONDS:
case NKikimrSchemeOp::TTTLSettings::UNIT_MICROSECONDS:
case NKikimrSchemeOp::TTTLSettings::UNIT_NANOSECONDS: {
- auto& outTTL = *describeLogTableResult.mutable_ttl_settings()->mutable_value_since_unix_epoch();
- outTTL.set_column_name(inTTL.GetColumnName());
- outTTL.set_column_unit(static_cast<Ydb::Table::ValueSinceUnixEpochModeSettings::Unit>(inTTL.GetColumnUnit()));
- outTTL.set_expire_after_seconds(inTTL.GetExpireAfterSeconds());
- break;
- }
-
- default:
- break;
- }
- }
-
- return ReplyWithResult(Ydb::StatusIds::SUCCESS, describeLogTableResult, ctx);
- }
-
+ auto& outTTL = *describeLogTableResult.mutable_ttl_settings()->mutable_value_since_unix_epoch();
+ outTTL.set_column_name(inTTL.GetColumnName());
+ outTTL.set_column_unit(static_cast<Ydb::Table::ValueSinceUnixEpochModeSettings::Unit>(inTTL.GetColumnUnit()));
+ outTTL.set_expire_after_seconds(inTTL.GetExpireAfterSeconds());
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return ReplyWithResult(Ydb::StatusIds::SUCCESS, describeLogTableResult, ctx);
+ }
+
case NKikimrScheme::StatusPathDoesNotExist:
case NKikimrScheme::StatusSchemeError: {
- return Reply(Ydb::StatusIds::SCHEME_ERROR, ctx);
- }
-
+ return Reply(Ydb::StatusIds::SCHEME_ERROR, ctx);
+ }
+
case NKikimrScheme::StatusAccessDenied: {
- return Reply(Ydb::StatusIds::UNAUTHORIZED, ctx);
- }
-
+ return Reply(Ydb::StatusIds::UNAUTHORIZED, ctx);
+ }
+
case NKikimrScheme::StatusNotAvailable: {
- return Reply(Ydb::StatusIds::UNAVAILABLE, ctx);
- }
-
- default: {
- return Reply(Ydb::StatusIds::GENERIC_ERROR, ctx);
- }
- }
- }
-
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = GetProtoRequest();
-
- std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate());
- SetAuthToken(navigateRequest, *Request_);
- SetDatabase(navigateRequest.get(), *Request_);
+ return Reply(Ydb::StatusIds::UNAVAILABLE, ctx);
+ }
+
+ default: {
+ return Reply(Ydb::StatusIds::GENERIC_ERROR, ctx);
+ }
+ }
+ }
+
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = GetProtoRequest();
+
+ std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate());
+ SetAuthToken(navigateRequest, *Request_);
+ SetDatabase(navigateRequest.get(), *Request_);
NKikimrSchemeOp::TDescribePath* record = navigateRequest->Record.MutableDescribePath();
- record->SetPath(req->path());
-
- ctx.Send(MakeTxProxyID(), navigateRequest.release());
- }
-};
-
-class TAlterLogTableRPC : public TRpcSchemeRequestActor<TAlterLogTableRPC, TEvAlterLogTableRequest> {
- using TBase = TRpcSchemeRequestActor<TAlterLogTableRPC, TEvAlterLogTableRequest>;
-
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
+ record->SetPath(req->path());
+
+ ctx.Send(MakeTxProxyID(), navigateRequest.release());
+ }
+};
+
+class TAlterLogTableRPC : public TRpcSchemeRequestActor<TAlterLogTableRPC, TEvAlterLogTableRequest> {
+ using TBase = TRpcSchemeRequestActor<TAlterLogTableRPC, TEvAlterLogTableRequest>;
+
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
explicit TAlterLogTableRPC(IRequestOpCtx* request)
: TBase(request)
- {}
-
- void Bootstrap(const TActorContext &ctx) {
- TBase::Bootstrap(ctx);
-
- SendProposeRequest(ctx);
- Become(&TAlterLogTableRPC::StateWork);
- }
-
-private:
- void SendProposeRequest(const TActorContext &ctx) {
- const auto req = GetProtoRequest();
- std::pair<TString, TString> destinationPathPair;
- try {
- destinationPathPair = SplitPath(req->path());
- } catch (const std::exception& ex) {
- Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
- return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
-
- const auto& workingDir = destinationPathPair.first;
- const auto& name = destinationPathPair.second;
-
- std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
- NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
+ {}
+
+ void Bootstrap(const TActorContext &ctx) {
+ TBase::Bootstrap(ctx);
+
+ SendProposeRequest(ctx);
+ Become(&TAlterLogTableRPC::StateWork);
+ }
+
+private:
+ void SendProposeRequest(const TActorContext &ctx) {
+ const auto req = GetProtoRequest();
+ std::pair<TString, TString> destinationPathPair;
+ try {
+ destinationPathPair = SplitPath(req->path());
+ } catch (const std::exception& ex) {
+ Request_->RaiseIssue(NYql::ExceptionToIssue(ex));
+ return Reply(StatusIds::BAD_REQUEST, "Invalid path: " + req->path(), NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+
+ const auto& workingDir = destinationPathPair.first;
+ const auto& name = destinationPathPair.second;
+
+ std::unique_ptr<TEvTxUserProxy::TEvProposeTransaction> proposeRequest = CreateProposeTransaction();
+ NKikimrTxUserProxy::TEvProposeTransaction& record = proposeRequest->Record;
NKikimrSchemeOp::TModifyScheme* modifyScheme = record.MutableTransaction()->MutableModifyScheme();
- modifyScheme->SetWorkingDir(workingDir);
+ modifyScheme->SetWorkingDir(workingDir);
modifyScheme->SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpAlterColumnTable);
auto alter = modifyScheme->MutableAlterColumnTable();
- alter->SetName(name);
-
- Ydb::StatusIds::StatusCode status;
- TString error;
- if (req->has_set_ttl_settings()) {
- if (!FillTtlSettings(*alter->MutableAlterTtlSettings()->MutableEnabled(), req->set_ttl_settings(), status, error)) {
- return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
- }
- } else if (req->has_drop_ttl_settings()) {
- alter->MutableAlterTtlSettings()->MutableDisabled();
- }
-
- ctx.Send(MakeTxProxyID(), proposeRequest.release());
- }
-};
-
-
+ alter->SetName(name);
+
+ Ydb::StatusIds::StatusCode status;
+ TString error;
+ if (req->has_set_ttl_settings()) {
+ if (!FillTtlSettings(*alter->MutableAlterTtlSettings()->MutableEnabled(), req->set_ttl_settings(), status, error)) {
+ return Reply(status, error, NKikimrIssues::TIssuesIds::DEFAULT_ERROR, ctx);
+ }
+ } else if (req->has_drop_ttl_settings()) {
+ alter->MutableAlterTtlSettings()->MutableDisabled();
+ }
+
+ ctx.Send(MakeTxProxyID(), proposeRequest.release());
+ }
+};
+
+
using TDropLogStoreRPC = TDropLogRPC<TEvDropLogStoreRequest, NKikimrSchemeOp::EOperationType::ESchemeOpDropColumnStore>;
using TDropLogTableRPC = TDropLogRPC<TEvDropLogTableRequest, NKikimrSchemeOp::EOperationType::ESchemeOpDropColumnTable>;
-
+
void DoCreateLogStoreRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TCreateLogStoreRPC(p.release()));
-}
-
+}
+
void DoDescribeLogStoreRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TDescribeLogStoreRPC(p.release()));
-}
-
+}
+
void DoDropLogStoreRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TDropLogStoreRPC(p.release()));
-}
-
-
+}
+
+
void DoCreateLogTableRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TCreateLogTableRPC(p.release()));
-}
-
+}
+
void DoDescribeLogTableRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TDescribeLogTableRPC(p.release()));
-}
-
+}
+
void DoDropLogTableRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TDropLogTableRPC(p.release()));
-}
-
+}
+
void DoAlterLogTableRequest(std::unique_ptr<IRequestOpCtx> p, const IFacilityProvider&) {
TActivationContext::AsActorContext().Register(new TAlterLogTableRPC(p.release()));
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
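Editor's note: TDropLogRPC above is a single class template parameterized by the event type and the scheme operation, so dropping a LogStore and dropping a LogTable share one implementation and differ only in the two using-aliases. A standalone analogue of that pattern, with hypothetical names and none of the actor machinery, might look like this:

#include <iostream>
#include <string>

enum class EDropKind { Store, Table };

// One implementation, two instantiations: the operation kind is fixed at
// compile time and is the only behavioural difference.
template <EDropKind Kind>
struct TDropRequest {
    void Execute(const std::string& name) const {
        std::cout << "drop " << (Kind == EDropKind::Store ? "store " : "table ") << name << "\n";
    }
};

using TDropStore = TDropRequest<EDropKind::Store>;
using TDropTable = TDropRequest<EDropKind::Table>;

int main() {
    TDropStore{}.Execute("/Root/LogStore");
    TDropTable{}.Execute("/Root/LogStore/LogTable");
}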
diff --git a/ydb/core/grpc_services/rpc_long_tx.cpp b/ydb/core/grpc_services/rpc_long_tx.cpp
index c136616aa30..442972f9930 100644
--- a/ydb/core/grpc_services/rpc_long_tx.cpp
+++ b/ydb/core/grpc_services/rpc_long_tx.cpp
@@ -28,10 +28,10 @@ std::shared_ptr<arrow::Schema> ExtractArrowSchema(const NKikimrSchemeOp::TColumn
return NArrow::MakeArrowSchema(columns);
}
-THashMap<ui64, TString> SplitData(const std::shared_ptr<arrow::RecordBatch>& batch,
- const NKikimrSchemeOp::TColumnTableDescription& description)
-{
- Y_VERIFY(batch);
+THashMap<ui64, TString> SplitData(const std::shared_ptr<arrow::RecordBatch>& batch,
+ const NKikimrSchemeOp::TColumnTableDescription& description)
+{
+ Y_VERIFY(batch);
Y_VERIFY(description.HasSharding() && description.GetSharding().HasHashSharding());
auto& descSharding = description.GetSharding();
@@ -75,23 +75,23 @@ THashMap<ui64, TString> SplitData(const std::shared_ptr<arrow::RecordBatch>& bat
return out;
}
-// Deserializes arrow batch and splits it
-THashMap<ui64, TString> SplitData(const TString& data, const NKikimrSchemeOp::TColumnTableDescription& description) {
- Y_VERIFY(description.HasSchema());
- auto& olapSchema = description.GetSchema();
- Y_VERIFY(olapSchema.GetEngine() == NKikimrSchemeOp::COLUMN_ENGINE_REPLACING_TIMESERIES);
-
- std::shared_ptr<arrow::Schema> schema = ExtractArrowSchema(olapSchema);
- std::shared_ptr<arrow::RecordBatch> batch = NArrow::DeserializeBatch(data, schema);
- if (!batch || !batch->ValidateFull().ok()) {
- return {};
- }
-
- return SplitData(batch, description);
+// Deserializes arrow batch and splits it
+THashMap<ui64, TString> SplitData(const TString& data, const NKikimrSchemeOp::TColumnTableDescription& description) {
+ Y_VERIFY(description.HasSchema());
+ auto& olapSchema = description.GetSchema();
+ Y_VERIFY(olapSchema.GetEngine() == NKikimrSchemeOp::COLUMN_ENGINE_REPLACING_TIMESERIES);
+
+ std::shared_ptr<arrow::Schema> schema = ExtractArrowSchema(olapSchema);
+ std::shared_ptr<arrow::RecordBatch> batch = NArrow::DeserializeBatch(data, schema);
+ if (!batch || !batch->ValidateFull().ok()) {
+ return {};
+ }
+
+ return SplitData(batch, description);
+}
+
}
-}
-
namespace NGRpcService {
using namespace NLongTxService;
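Editor's note: the SplitData overloads in the hunk above deserialize an Arrow batch and distribute its rows across column shards; the bucketing itself sits in lines the hunk does not show. As a rough illustration of the idea only — not YDB's actual hash function or its Arrow handling — rows keyed by their sharding-column values can be bucketed like this:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Bucket row keys (the concatenated sharding-column values of each row) into
// shardCount buckets by hash. Illustrative stand-in for the real splitting.
std::unordered_map<std::uint64_t, std::vector<std::string>>
SplitRowsByHash(const std::vector<std::string>& rowKeys, std::uint64_t shardCount) {
    std::unordered_map<std::uint64_t, std::vector<std::string>> perShard;
    for (const auto& key : rowKeys) {
        perShard[std::hash<std::string>{}(key) % shardCount].push_back(key);
    }
    return perShard;
}

int main() {
    auto buckets = SplitRowsByHash({"host-1|app-a", "host-2|app-a", "host-1|app-b"}, 4);
    for (const auto& [shard, rows] : buckets)
        std::cout << "shard " << shard << ": " << rows.size() << " row(s)\n";
}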
@@ -310,43 +310,43 @@ private:
TLongTxId LongTxId;
};
-// Common logic of LongTx Write that takes care of splitting the data according to the sharding scheme,
-// sending it to shards and collecting their responses
-template <class TLongTxWriteImpl>
-class TLongTxWriteBase : public TActorBootstrapped<TLongTxWriteImpl> {
- using TBase = TActorBootstrapped<TLongTxWriteImpl>;
-protected:
- using TThis = typename TBase::TThis;
+// Common logic of LongTx Write that takes care of splitting the data according to the sharding scheme,
+// sending it to shards and collecting their responses
+template <class TLongTxWriteImpl>
+class TLongTxWriteBase : public TActorBootstrapped<TLongTxWriteImpl> {
+ using TBase = TActorBootstrapped<TLongTxWriteImpl>;
+protected:
+ using TThis = typename TBase::TThis;
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_REQ;
}
- TLongTxWriteBase(const TString& databaseName, const TString& path, const TString& token,
- const TLongTxId& longTxId, const TString& dedupId)
+ TLongTxWriteBase(const TString& databaseName, const TString& path, const TString& token,
+ const TLongTxId& longTxId, const TString& dedupId)
: TBase()
- , DatabaseName(databaseName)
- , Path(path)
- , DedupId(dedupId)
- , LongTxId(longTxId)
+ , DatabaseName(databaseName)
+ , Path(path)
+ , DedupId(dedupId)
+ , LongTxId(longTxId)
, LeaderPipeCache(MakePipePeNodeCacheID(false))
{
- if (token) {
- UserToken.emplace(token);
+ if (token) {
+ UserToken.emplace(token);
}
}
void PassAway() override {
- this->Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
+ this->Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
TBase::PassAway();
}
-protected:
- void SetLongTxId(const TLongTxId& longTxId) {
- LongTxId = longTxId;
+protected:
+ void SetLongTxId(const TLongTxId& longTxId) {
+ LongTxId = longTxId;
}
- void ProceedWithSchema(const NSchemeCache::TSchemeCacheNavigate* resp) {
+ void ProceedWithSchema(const NSchemeCache::TSchemeCacheNavigate* resp) {
if (resp->ErrorCount > 0) {
// TODO: map to a correct error
return ReplyError(Ydb::StatusIds::SCHEME_ERROR, "There was an error during table query");
@@ -357,7 +357,7 @@ protected:
if (UserToken && entry.SecurityObject) {
const ui32 access = NACLib::UpdateRow;
if (!entry.SecurityObject->CheckAccess(access, *UserToken)) {
- RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::ACCESS_DENIED, TStringBuilder()
+ RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::ACCESS_DENIED, TStringBuilder()
<< "User has no permission to perform writes to this table"
<< " user: " << UserToken->GetUserSID()
<< " path: " << Path));
@@ -391,29 +391,29 @@ protected:
if (sharding.HasRandomSharding()) {
ui64 shard = sharding.GetColumnShards(0);
- SendWriteRequest(shard, tableId, DedupId, GetSerializedData());
+ SendWriteRequest(shard, tableId, DedupId, GetSerializedData());
} else if (sharding.HasHashSharding()) {
-
- auto batches = HasDeserializedBatch() ?
- SplitData(GetDeserializedBatch(), description) :
- SplitData(GetSerializedData(), description);
+
+ auto batches = HasDeserializedBatch() ?
+ SplitData(GetDeserializedBatch(), description) :
+ SplitData(GetSerializedData(), description);
if (batches.empty()) {
return ReplyError(Ydb::StatusIds::SCHEME_ERROR, "Cannot deserialize or split input data");
}
for (auto& [shard, batch] : batches) {
- SendWriteRequest(shard, tableId, DedupId, batch);
+ SendWriteRequest(shard, tableId, DedupId, batch);
}
} else {
return ReplyError(Ydb::StatusIds::SCHEME_ERROR, "Sharding method is not supported");
}
- this->Become(&TThis::StateWrite);
+ this->Become(&TThis::StateWrite);
}
private:
void SendWriteRequest(ui64 shardId, ui64 tableId, const TString& dedupId, const TString& data) {
WaitShards.insert(shardId);
- SendToTablet(shardId, MakeHolder<TEvColumnShard::TEvWrite>(this->SelfId(), LongTxId, tableId, dedupId, data));
+ SendToTablet(shardId, MakeHolder<TEvColumnShard::TEvWrite>(this->SelfId(), LongTxId, tableId, dedupId, data));
}
STFUNC(StateWrite) {
@@ -424,37 +424,37 @@ private:
}
}
- // Expects NKikimrTxColumnShard::EResultStatus
- static Ydb::StatusIds::StatusCode ConvertToYdbStatus(ui32 columnShardStatus) {
- switch (columnShardStatus) {
- case NKikimrTxColumnShard::UNSPECIFIED:
- return Ydb::StatusIds::STATUS_CODE_UNSPECIFIED;
-
- case NKikimrTxColumnShard::PREPARED:
- case NKikimrTxColumnShard::SUCCESS:
- return Ydb::StatusIds::SUCCESS;
-
- case NKikimrTxColumnShard::ABORTED:
- return Ydb::StatusIds::ABORTED;
-
- case NKikimrTxColumnShard::ERROR:
- return Ydb::StatusIds::GENERIC_ERROR;
-
- case NKikimrTxColumnShard::TIMEOUT:
- return Ydb::StatusIds::TIMEOUT;
-
- case NKikimrTxColumnShard::SCHEMA_ERROR:
- case NKikimrTxColumnShard::SCHEMA_CHANGED:
- return Ydb::StatusIds::SCHEME_ERROR;
-
+ // Expects NKikimrTxColumnShard::EResultStatus
+ static Ydb::StatusIds::StatusCode ConvertToYdbStatus(ui32 columnShardStatus) {
+ switch (columnShardStatus) {
+ case NKikimrTxColumnShard::UNSPECIFIED:
+ return Ydb::StatusIds::STATUS_CODE_UNSPECIFIED;
+
+ case NKikimrTxColumnShard::PREPARED:
+ case NKikimrTxColumnShard::SUCCESS:
+ return Ydb::StatusIds::SUCCESS;
+
+ case NKikimrTxColumnShard::ABORTED:
+ return Ydb::StatusIds::ABORTED;
+
+ case NKikimrTxColumnShard::ERROR:
+ return Ydb::StatusIds::GENERIC_ERROR;
+
+ case NKikimrTxColumnShard::TIMEOUT:
+ return Ydb::StatusIds::TIMEOUT;
+
+ case NKikimrTxColumnShard::SCHEMA_ERROR:
+ case NKikimrTxColumnShard::SCHEMA_CHANGED:
+ return Ydb::StatusIds::SCHEME_ERROR;
+
case NKikimrTxColumnShard::OVERLOADED:
return Ydb::StatusIds::OVERLOADED;
- default:
- return Ydb::StatusIds::GENERIC_ERROR;
- }
- }
-
+ default:
+ return Ydb::StatusIds::GENERIC_ERROR;
+ }
+ }
+
void Handle(TEvColumnShard::TEvWriteResult::TPtr& ev) {
const auto* msg = ev->Get();
ui64 shardId = msg->Record.GetOrigin();
@@ -462,8 +462,8 @@ private:
auto status = msg->Record.GetStatus();
if (status != NKikimrTxColumnShard::SUCCESS) {
- auto ydbStatus = ConvertToYdbStatus(status);
- return ReplyError(ydbStatus, "Write error");
+ auto ydbStatus = ConvertToYdbStatus(status);
+ return ReplyError(ydbStatus, "Write error");
}
if (!WaitShards.count(shardId)) {
@@ -493,8 +493,8 @@ private:
for (auto& [shardId, writeId] : ShardsWrites) {
req->AddWrite(shardId, writeId);
}
- this->Send(MakeLongTxServiceID(this->SelfId().NodeId()), req.Release());
- this->Become(&TThis::StateAttachWrite);
+ this->Send(MakeLongTxServiceID(this->SelfId().NodeId()), req.Release());
+ this->Become(&TThis::StateAttachWrite);
}
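Editor's note: the write path above sends one TEvWrite per shard, waits until every shard has answered with a write id, and only then attaches the collected writes to the long transaction. The bookkeeping behind the WaitShards/ShardsWrites members can be sketched in isolation like this (hypothetical names, no actor plumbing):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

// Track outstanding shards and the write id each one reports back.
class TWriteFanOut {
public:
    void Sent(std::uint64_t shard) { Pending.insert(shard); }

    // Record one reply; returns true once the last outstanding shard answered.
    bool OnReply(std::uint64_t shard, std::uint64_t writeId) {
        Pending.erase(shard);
        Writes[shard] = writeId;
        return Pending.empty();
    }

    const std::map<std::uint64_t, std::uint64_t>& Results() const { return Writes; }

private:
    std::set<std::uint64_t> Pending;
    std::map<std::uint64_t, std::uint64_t> Writes;
};

int main() {
    TWriteFanOut fanOut;
    fanOut.Sent(1001);
    fanOut.Sent(1002);
    fanOut.OnReply(1001, 7);
    if (fanOut.OnReply(1002, 9))
        std::cout << "all " << fanOut.Results().size() << " shards answered, attach writes\n";
}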
@@ -511,122 +511,122 @@ private:
if (msg->Record.GetStatus() != Ydb::StatusIds::SUCCESS) {
NYql::TIssues issues;
NYql::IssuesFromMessage(msg->Record.GetIssues(), issues);
- for (auto& issue : issues) {
- RaiseIssue(issue);
+ for (auto& issue : issues) {
+ RaiseIssue(issue);
}
- ReplyError(msg->Record.GetStatus());
+ ReplyError(msg->Record.GetStatus());
return PassAway();
}
- ReplySuccess();
- }
-
-private:
- void SendToTablet(ui64 tabletId, THolder<IEventBase> event) {
- this->Send(LeaderPipeCache, new TEvPipeCache::TEvForward(event.Release(), tabletId, true),
- IEventHandle::FlagTrackDelivery);
- }
-
-protected:
- virtual bool HasDeserializedBatch() const {
- return false;
- }
-
- virtual std::shared_ptr<arrow::RecordBatch> GetDeserializedBatch() const {
- return nullptr;
- }
-
- virtual TString GetSerializedData() = 0;
- virtual void RaiseIssue(const NYql::TIssue& issue) = 0;
- virtual void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) = 0;
- virtual void ReplySuccess() = 0;
-
-protected:
- const TString DatabaseName;
- const TString Path;
- const TString DedupId;
-private:
- TLongTxId LongTxId;
- const TActorId LeaderPipeCache;
- std::optional<NACLib::TUserToken> UserToken;
- THashSet<ui64> WaitShards;
- THashMap<ui64, ui64> ShardsWrites;
-};
-
-
-// GRPC call implementation of LongTx Write
-class TLongTxWriteRPC : public TLongTxWriteBase<TLongTxWriteRPC> {
- using TBase = TLongTxWriteBase<TLongTxWriteRPC>;
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
- explicit TLongTxWriteRPC(TAutoPtr<IRequestOpCtx> request)
- : TBase(request->GetDatabaseName().GetOrElse(DatabaseFromDomain(AppData())),
- TEvLongTxWriteRequest::GetProtoRequest(request)->path(),
- request->GetInternalToken(),
- TLongTxId(),
- TEvLongTxWriteRequest::GetProtoRequest(request)->dedup_id())
- , Request(request.Release())
- , SchemeCache(MakeSchemeCacheID())
- {
- }
-
- void Bootstrap() {
- const auto* req = GetProtoRequest();
-
- TString errMsg;
- TLongTxId longTxId;
- if (!longTxId.ParseString(req->tx_id(), &errMsg)) {
- return ReplyError(Ydb::StatusIds::BAD_REQUEST, errMsg);
- }
- SetLongTxId(longTxId);
-
- if (GetProtoRequest()->data().format() != Ydb::LongTx::Data::APACHE_ARROW) {
- return ReplyError(Ydb::StatusIds::BAD_REQUEST, "Only APACHE_ARROW data format is supported");
- }
-
- SendNavigateRequest();
- }
-
- void SendNavigateRequest() {
- auto request = MakeHolder<NSchemeCache::TSchemeCacheNavigate>();
- request->DatabaseName = this->DatabaseName;
- auto& entry = request->ResultSet.emplace_back();
- entry.Path = ::NKikimr::SplitPath(Path);
- entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpPath;
- Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request.Release()));
- Become(&TThis::StateNavigate);
- }
-
- STFUNC(StateNavigate) {
- Y_UNUSED(ctx);
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
- }
- }
-
- void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) {
- NSchemeCache::TSchemeCacheNavigate* resp = ev->Get()->Request.Get();
- ProceedWithSchema(resp);
- }
-
+ ReplySuccess();
+ }
+
+private:
+ void SendToTablet(ui64 tabletId, THolder<IEventBase> event) {
+ this->Send(LeaderPipeCache, new TEvPipeCache::TEvForward(event.Release(), tabletId, true),
+ IEventHandle::FlagTrackDelivery);
+ }
+
+protected:
+ virtual bool HasDeserializedBatch() const {
+ return false;
+ }
+
+ virtual std::shared_ptr<arrow::RecordBatch> GetDeserializedBatch() const {
+ return nullptr;
+ }
+
+ virtual TString GetSerializedData() = 0;
+ virtual void RaiseIssue(const NYql::TIssue& issue) = 0;
+ virtual void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) = 0;
+ virtual void ReplySuccess() = 0;
+
+protected:
+ const TString DatabaseName;
+ const TString Path;
+ const TString DedupId;
+private:
+ TLongTxId LongTxId;
+ const TActorId LeaderPipeCache;
+ std::optional<NACLib::TUserToken> UserToken;
+ THashSet<ui64> WaitShards;
+ THashMap<ui64, ui64> ShardsWrites;
+};
+
+
+// GRPC call implementation of LongTx Write
+class TLongTxWriteRPC : public TLongTxWriteBase<TLongTxWriteRPC> {
+ using TBase = TLongTxWriteBase<TLongTxWriteRPC>;
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
+ explicit TLongTxWriteRPC(TAutoPtr<IRequestOpCtx> request)
+ : TBase(request->GetDatabaseName().GetOrElse(DatabaseFromDomain(AppData())),
+ TEvLongTxWriteRequest::GetProtoRequest(request)->path(),
+ request->GetInternalToken(),
+ TLongTxId(),
+ TEvLongTxWriteRequest::GetProtoRequest(request)->dedup_id())
+ , Request(request.Release())
+ , SchemeCache(MakeSchemeCacheID())
+ {
+ }
+
+ void Bootstrap() {
+ const auto* req = GetProtoRequest();
+
+ TString errMsg;
+ TLongTxId longTxId;
+ if (!longTxId.ParseString(req->tx_id(), &errMsg)) {
+ return ReplyError(Ydb::StatusIds::BAD_REQUEST, errMsg);
+ }
+ SetLongTxId(longTxId);
+
+ if (GetProtoRequest()->data().format() != Ydb::LongTx::Data::APACHE_ARROW) {
+ return ReplyError(Ydb::StatusIds::BAD_REQUEST, "Only APACHE_ARROW data format is supported");
+ }
+
+ SendNavigateRequest();
+ }
+
+ void SendNavigateRequest() {
+ auto request = MakeHolder<NSchemeCache::TSchemeCacheNavigate>();
+ request->DatabaseName = this->DatabaseName;
+ auto& entry = request->ResultSet.emplace_back();
+ entry.Path = ::NKikimr::SplitPath(Path);
+ entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpPath;
+ Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request.Release()));
+ Become(&TThis::StateNavigate);
+ }
+
+ STFUNC(StateNavigate) {
+ Y_UNUSED(ctx);
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
+ }
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) {
+ NSchemeCache::TSchemeCacheNavigate* resp = ev->Get()->Request.Get();
+ ProceedWithSchema(resp);
+ }
+
private:
- const TEvLongTxWriteRequest::TRequest* GetProtoRequest() const {
- return TEvLongTxWriteRequest::GetProtoRequest(Request);
- }
-
-protected:
- TString GetSerializedData() override {
- return GetProtoRequest()->data().data();
- }
-
- void RaiseIssue(const NYql::TIssue& issue) override {
- Request->RaiseIssue(issue);
- }
-
- void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) override {
+ const TEvLongTxWriteRequest::TRequest* GetProtoRequest() const {
+ return TEvLongTxWriteRequest::GetProtoRequest(Request);
+ }
+
+protected:
+ TString GetSerializedData() override {
+ return GetProtoRequest()->data().data();
+ }
+
+ void RaiseIssue(const NYql::TIssue& issue) override {
+ Request->RaiseIssue(issue);
+ }
+
+ void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) override {
if (!message.empty()) {
Request->RaiseIssue(NYql::TIssue(message));
}
@@ -634,100 +634,100 @@ protected:
PassAway();
}
- void ReplySuccess() override {
- Ydb::LongTx::WriteResult result;
- result.set_tx_id(GetProtoRequest()->tx_id());
- result.set_path(Path);
- result.set_dedup_id(DedupId);
-
+ void ReplySuccess() override {
+ Ydb::LongTx::WriteResult result;
+ result.set_tx_id(GetProtoRequest()->tx_id());
+ result.set_path(Path);
+ result.set_dedup_id(DedupId);
+
Request->SendResult(result, Ydb::StatusIds::SUCCESS);
PassAway();
}
private:
- std::unique_ptr<IRequestOpCtx> Request;
+ std::unique_ptr<IRequestOpCtx> Request;
TActorId SchemeCache;
};
-
-template<>
-IActor* TEvLongTxWriteRequest::CreateRpcActor(NKikimr::NGRpcService::IRequestOpCtx* msg) {
- return new TLongTxWriteRPC(msg);
-}
-
-// LongTx Write implementation called from the inside of YDB (e.g. as a part of BulkUpsert call)
-// NOTE: permission checks must have been done by the caller
-class TLongTxWriteInternal : public TLongTxWriteBase<TLongTxWriteInternal> {
- using TBase = TLongTxWriteBase<TLongTxWriteInternal>;
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::GRPC_REQ;
- }
-
- explicit TLongTxWriteInternal(const TActorId& replyTo, const TLongTxId& longTxId, const TString& dedupId,
- const TString& databaseName, const TString& path,
- const NSchemeCache::TSchemeCacheNavigate& navigateResult,
- std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues)
- : TBase(databaseName, path, TString(), longTxId, dedupId)
- , ReplyTo(replyTo)
- , NavigateResult(navigateResult)
- , Batch(batch)
- , Issues(issues)
- {
- }
-
- void Bootstrap() {
- ProceedWithSchema(&NavigateResult);
- }
-
-protected:
- bool HasDeserializedBatch() const override {
- return true;
- }
-
- std::shared_ptr<arrow::RecordBatch> GetDeserializedBatch() const override {
- return Batch;
- }
-
- TString GetSerializedData() override {
- return NArrow::SerializeBatchNoCompression(Batch);
- }
-
- void RaiseIssue(const NYql::TIssue& issue) override {
- Issues.AddIssue(issue);
- }
-
- void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) override {
- if (!message.empty()) {
- Issues.AddIssue(NYql::TIssue(message));
- }
- this->Send(ReplyTo, new TEvents::TEvCompleted(0, status));
- PassAway();
- }
-
- void ReplySuccess() override {
- this->Send(ReplyTo, new TEvents::TEvCompleted(0, Ydb::StatusIds::SUCCESS));
- PassAway();
- }
-
-private:
- const TActorId ReplyTo;
- const NSchemeCache::TSchemeCacheNavigate& NavigateResult;
- std::shared_ptr<arrow::RecordBatch> Batch;
- NYql::TIssues& Issues;
-};
-
-
-TActorId DoLongTxWriteSameMailbox(const TActorContext& ctx, const TActorId& replyTo,
- const NLongTxService::TLongTxId& longTxId, const TString& dedupId,
- const TString& databaseName, const TString& path, const NSchemeCache::TSchemeCacheNavigate& navigateResult,
- std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues)
-{
- return ctx.RegisterWithSameMailbox(
- new TLongTxWriteInternal(replyTo, longTxId, dedupId, databaseName, path, navigateResult, batch, issues));
-}
-
-
+
+template<>
+IActor* TEvLongTxWriteRequest::CreateRpcActor(NKikimr::NGRpcService::IRequestOpCtx* msg) {
+ return new TLongTxWriteRPC(msg);
+}
+
+// LongTx Write implementation called from the inside of YDB (e.g. as a part of BulkUpsert call)
+// NOTE: permission checks must have been done by the caller
+class TLongTxWriteInternal : public TLongTxWriteBase<TLongTxWriteInternal> {
+ using TBase = TLongTxWriteBase<TLongTxWriteInternal>;
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return NKikimrServices::TActivity::GRPC_REQ;
+ }
+
+ explicit TLongTxWriteInternal(const TActorId& replyTo, const TLongTxId& longTxId, const TString& dedupId,
+ const TString& databaseName, const TString& path,
+ const NSchemeCache::TSchemeCacheNavigate& navigateResult,
+ std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues)
+ : TBase(databaseName, path, TString(), longTxId, dedupId)
+ , ReplyTo(replyTo)
+ , NavigateResult(navigateResult)
+ , Batch(batch)
+ , Issues(issues)
+ {
+ }
+
+ void Bootstrap() {
+ ProceedWithSchema(&NavigateResult);
+ }
+
+protected:
+ bool HasDeserializedBatch() const override {
+ return true;
+ }
+
+ std::shared_ptr<arrow::RecordBatch> GetDeserializedBatch() const override {
+ return Batch;
+ }
+
+ TString GetSerializedData() override {
+ return NArrow::SerializeBatchNoCompression(Batch);
+ }
+
+ void RaiseIssue(const NYql::TIssue& issue) override {
+ Issues.AddIssue(issue);
+ }
+
+ void ReplyError(Ydb::StatusIds::StatusCode status, const TString& message = TString()) override {
+ if (!message.empty()) {
+ Issues.AddIssue(NYql::TIssue(message));
+ }
+ this->Send(ReplyTo, new TEvents::TEvCompleted(0, status));
+ PassAway();
+ }
+
+ void ReplySuccess() override {
+ this->Send(ReplyTo, new TEvents::TEvCompleted(0, Ydb::StatusIds::SUCCESS));
+ PassAway();
+ }
+
+private:
+ const TActorId ReplyTo;
+ const NSchemeCache::TSchemeCacheNavigate& NavigateResult;
+ std::shared_ptr<arrow::RecordBatch> Batch;
+ NYql::TIssues& Issues;
+};
+
+
+TActorId DoLongTxWriteSameMailbox(const TActorContext& ctx, const TActorId& replyTo,
+ const NLongTxService::TLongTxId& longTxId, const TString& dedupId,
+ const TString& databaseName, const TString& path, const NSchemeCache::TSchemeCacheNavigate& navigateResult,
+ std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues)
+{
+ return ctx.RegisterWithSameMailbox(
+ new TLongTxWriteInternal(replyTo, longTxId, dedupId, databaseName, path, navigateResult, batch, issues));
+}
+
+
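Editor's note: TLongTxWriteBase above drives the same split-and-send pipeline for two callers — the gRPC handler (TLongTxWriteRPC) and the internal path used during BulkUpsert (TLongTxWriteInternal). The variants differ only in where the data comes from and how the outcome is reported, which the base exposes as virtual hooks. A minimal standalone analogue of that shape, with hypothetical names and without the actual pipeline, is:

#include <iostream>
#include <memory>
#include <string>

// The shared pipeline lives in the base; each entry point only supplies its
// payload and its way of reporting the result.
class TWritePipelineBase {
public:
    virtual ~TWritePipelineBase() = default;
    void Run() {
        const std::string data = GetSerializedData();
        std::cout << "would split and send " << data.size() << " bytes\n";
        ReplySuccess();
    }
protected:
    virtual std::string GetSerializedData() = 0;
    virtual void ReplySuccess() = 0;
};

class TGrpcWrite : public TWritePipelineBase {
protected:
    std::string GetSerializedData() override { return "bytes taken from the gRPC request"; }
    void ReplySuccess() override { std::cout << "send WriteResult to the gRPC caller\n"; }
};

class TInternalWrite : public TWritePipelineBase {
protected:
    std::string GetSerializedData() override { return "bytes serialized from an in-memory batch"; }
    void ReplySuccess() override { std::cout << "notify the calling actor\n"; }
};

int main() {
    std::unique_ptr<TWritePipelineBase> write = std::make_unique<TGrpcWrite>();
    write->Run();
}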
class TLongTxReadRPC : public TActorBootstrapped<TLongTxReadRPC> {
using TBase = TActorBootstrapped<TLongTxReadRPC>;
diff --git a/ydb/core/grpc_services/rpc_long_tx.h b/ydb/core/grpc_services/rpc_long_tx.h
index 4cf7a129fd2..15edf2be380 100644
--- a/ydb/core/grpc_services/rpc_long_tx.h
+++ b/ydb/core/grpc_services/rpc_long_tx.h
@@ -1,13 +1,13 @@
-#pragma once
-
-#include <ydb/core/tx/scheme_cache/scheme_cache.h>
-#include <ydb/core/formats/arrow_helpers.h>
-
-namespace NKikimr::NGRpcService {
-
-TActorId DoLongTxWriteSameMailbox(const TActorContext& ctx, const TActorId& replyTo,
- const NLongTxService::TLongTxId& longTxId, const TString& dedupId,
- const TString& databaseName, const TString& path, const NSchemeCache::TSchemeCacheNavigate& navigateResult,
- std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues);
-
-}
+#pragma once
+
+#include <ydb/core/tx/scheme_cache/scheme_cache.h>
+#include <ydb/core/formats/arrow_helpers.h>
+
+namespace NKikimr::NGRpcService {
+
+TActorId DoLongTxWriteSameMailbox(const TActorContext& ctx, const TActorId& replyTo,
+ const NLongTxService::TLongTxId& longTxId, const TString& dedupId,
+ const TString& databaseName, const TString& path, const NSchemeCache::TSchemeCacheNavigate& navigateResult,
+ std::shared_ptr<arrow::RecordBatch> batch, NYql::TIssues& issues);
+
+}
diff --git a/ydb/core/grpc_services/rpc_read_columns.cpp b/ydb/core/grpc_services/rpc_read_columns.cpp
index ed13a174fb4..4baae606a97 100644
--- a/ydb/core/grpc_services/rpc_read_columns.cpp
+++ b/ydb/core/grpc_services/rpc_read_columns.cpp
@@ -1,9 +1,9 @@
-#include "grpc_request_proxy.h"
-
-#include "rpc_calls.h"
-#include "rpc_common.h"
+#include "grpc_request_proxy.h"
+
+#include "rpc_calls.h"
+#include "rpc_common.h"
#include "rpc_kh_snapshots.h"
-#include "resolve_local_db_table.h"
+#include "resolve_local_db_table.h"
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/base/kikimr_issue.h>
@@ -13,223 +13,223 @@
#include <ydb/core/sys_view/scan.h>
#include <ydb/core/formats/factory.h>
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
-
-#include <util/string/vector.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-using namespace NActors;
-using namespace Ydb;
-
-class TReadColumnsRPC : public TActorBootstrapped<TReadColumnsRPC> {
- using TBase = TActorBootstrapped<TReadColumnsRPC>;
-
-private:
-
- static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5*60;
-
- TAutoPtr<TEvReadColumnsRequest> Request;
+
+#include <util/string/vector.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
+using namespace NActors;
+using namespace Ydb;
+
+class TReadColumnsRPC : public TActorBootstrapped<TReadColumnsRPC> {
+ using TBase = TActorBootstrapped<TReadColumnsRPC>;
+
+private:
+
+ static constexpr ui32 DEFAULT_TIMEOUT_SEC = 5*60;
+
+ TAutoPtr<TEvReadColumnsRequest> Request;
TActorId SchemeCache;
TActorId LeaderPipeCache;
- TDuration Timeout;
+ TDuration Timeout;
TActorId TimeoutTimerActorId;
- bool WaitingResolveReply;
- bool Finished;
- Ydb::ClickhouseInternal::ScanResult Result;
-
- TAutoPtr<TKeyDesc> KeyRange;
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
- TVector<NScheme::TTypeId> KeyColumnTypes;
-
- // Positions of key and value fields in the request proto struct
- struct TFieldDescription {
- ui32 ColId;
- ui32 PositionInStruct;
- NScheme::TTypeId Type;
- };
- TVector<TFieldDescription> KeyColumnPositions;
- TVector<TFieldDescription> ValueColumnPositions;
-
- TSerializedCellVec MinKey;
- bool MinKeyInclusive;
- TSerializedCellVec MaxKey;
- bool MaxKeyInclusive;
-
- ui32 ShardRequestCount;
- ui32 ShardReplyCount;
-
+ bool WaitingResolveReply;
+ bool Finished;
+ Ydb::ClickhouseInternal::ScanResult Result;
+
+ TAutoPtr<TKeyDesc> KeyRange;
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
+ TVector<NScheme::TTypeId> KeyColumnTypes;
+
+ // Positions of key and value fields in the request proto struct
+ struct TFieldDescription {
+ ui32 ColId;
+ ui32 PositionInStruct;
+ NScheme::TTypeId Type;
+ };
+ TVector<TFieldDescription> KeyColumnPositions;
+ TVector<TFieldDescription> ValueColumnPositions;
+
+ TSerializedCellVec MinKey;
+ bool MinKeyInclusive;
+ TSerializedCellVec MaxKey;
+ bool MaxKeyInclusive;
+
+ ui32 ShardRequestCount;
+ ui32 ShardReplyCount;
+
TActorId SysViewScanActor;
- std::unique_ptr<IBlockBuilder> BlockBuilder;
- TVector<NScheme::TTypeId> ValueColumnTypes;
- ui64 SysViewMaxRows;
- ui64 SysViewMaxBytes;
- ui64 SysViewRowsReceived;
-
+ std::unique_ptr<IBlockBuilder> BlockBuilder;
+ TVector<NScheme::TTypeId> ValueColumnTypes;
+ ui64 SysViewMaxRows;
+ ui64 SysViewMaxBytes;
+ ui64 SysViewRowsReceived;
+
TKikhouseSnapshotId SnapshotId;
-public:
+public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_REQ;
- }
-
- explicit TReadColumnsRPC(TAutoPtr<TEvReadColumnsRequest> request)
- : TBase()
- , Request(request)
- , SchemeCache(MakeSchemeCacheID())
+ }
+
+ explicit TReadColumnsRPC(TAutoPtr<TEvReadColumnsRequest> request)
+ : TBase()
+ , Request(request)
+ , SchemeCache(MakeSchemeCacheID())
, LeaderPipeCache(MakePipePeNodeCacheID(false))
- , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
- , WaitingResolveReply(false)
- , Finished(false)
+ , Timeout(TDuration::Seconds(DEFAULT_TIMEOUT_SEC))
+ , WaitingResolveReply(false)
+ , Finished(false)
, MinKeyInclusive(0)
, MaxKeyInclusive(0)
- , ShardRequestCount(0)
- , ShardReplyCount(0)
- , SysViewMaxRows(100000)
+ , ShardRequestCount(0)
+ , ShardReplyCount(0)
+ , SysViewMaxRows(100000)
, SysViewMaxBytes(10*1024*1024)
- , SysViewRowsReceived(0)
- {}
-
- void Bootstrap(const NActors::TActorContext& ctx) {
+ , SysViewRowsReceived(0)
+ {}
+
+ void Bootstrap(const NActors::TActorContext& ctx) {
if (const auto& snapshotId = Request->GetProtoRequest()->snapshot_id()) {
if (!SnapshotId.Parse(snapshotId)) {
return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, "Invalid snapshot id specified", ctx);
}
}
- ResolveTable(Request->GetProtoRequest()->Gettable(), ctx);
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- Y_VERIFY(Finished);
- Y_VERIFY(!WaitingResolveReply);
+ ResolveTable(Request->GetProtoRequest()->Gettable(), ctx);
+ }
+
+ void Die(const NActors::TActorContext& ctx) override {
+ Y_VERIFY(Finished);
+ Y_VERIFY(!WaitingResolveReply);
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
- if (TimeoutTimerActorId) {
- ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
- }
- if (SysViewScanActor) {
- ctx.Send(SysViewScanActor, new TEvents::TEvPoisonPill());
- }
- TBase::Die(ctx);
- }
-
-private:
- STFUNC(StateWaitResolveTable) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTablet::TEvLocalSchemeTxResponse, Handle);
- HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
- HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
- // TODO: check all params;
- // Cerr << *Request->GetProtoRequest() << Endl;
-
- auto path = ::NKikimr::SplitPath(table);
- TMaybe<ui64> tabletId = TryParseLocalDbPath(path);
- if (tabletId) {
- if (Request->GetInternalToken().empty() || !IsSuperUser(Request->GetInternalToken(), *AppData(ctx))) {
- return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
- }
-
- std::unique_ptr<TEvTablet::TEvLocalSchemeTx> ev(new TEvTablet::TEvLocalSchemeTx());
- ctx.Send(MakePipePeNodeCacheID(true), new TEvPipeCache::TEvForward(ev.release(), *tabletId, true), IEventHandle::FlagTrackDelivery);
-
- TBase::Become(&TThis::StateWaitResolveTable);
- WaitingResolveReply = true;
- } else {
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
- NSchemeCache::TSchemeCacheNavigate::TEntry entry;
- entry.Path = std::move(path);
- if (entry.Path.empty()) {
- return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
- }
- entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
- request->ResultSet.emplace_back(entry);
-
- ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
-
- TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
- new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
-
- TBase::Become(&TThis::StateWaitResolveTable);
- WaitingResolveReply = true;
- }
- }
-
- void HandleTimeout(const TActorContext& ctx) {
- return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
- }
-
- void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- ResolveNamesResult = ev->Get()->Request;
-
- return ProceedWithSchema(ctx);
- }
-
- void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- ResolveNamesResult = new NSchemeCache::TSchemeCacheNavigate();
- auto &record = ev->Get()->Record;
-
- const TString table = Request->GetProtoRequest()->table();
- auto path = ::NKikimr::SplitPath(table);
- FillLocalDbTableSchema(*ResolveNamesResult, record.GetFullScheme(), path.back());
- ResolveNamesResult->ResultSet.back().Path = path;
-
- return ProceedWithSchema(ctx);
- }
-
- void ProceedWithSchema(const TActorContext& ctx) {
- Y_VERIFY(ResolveNamesResult->ResultSet.size() == 1);
- const auto& entry = ResolveNamesResult->ResultSet.front();
- if (entry.Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
+ if (TimeoutTimerActorId) {
+ ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
+ }
+ if (SysViewScanActor) {
+ ctx.Send(SysViewScanActor, new TEvents::TEvPoisonPill());
+ }
+ TBase::Die(ctx);
+ }
+
+private:
+ STFUNC(StateWaitResolveTable) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTablet::TEvLocalSchemeTxResponse, Handle);
+ HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
+ HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
+ // TODO: check all params;
+ // Cerr << *Request->GetProtoRequest() << Endl;
+
+ auto path = ::NKikimr::SplitPath(table);
+ TMaybe<ui64> tabletId = TryParseLocalDbPath(path);
+ if (tabletId) {
+ if (Request->GetInternalToken().empty() || !IsSuperUser(Request->GetInternalToken(), *AppData(ctx))) {
+ return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
+ }
+
+ std::unique_ptr<TEvTablet::TEvLocalSchemeTx> ev(new TEvTablet::TEvLocalSchemeTx());
+ ctx.Send(MakePipePeNodeCacheID(true), new TEvPipeCache::TEvForward(ev.release(), *tabletId, true), IEventHandle::FlagTrackDelivery);
+
+ TBase::Become(&TThis::StateWaitResolveTable);
+ WaitingResolveReply = true;
+ } else {
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
+ NSchemeCache::TSchemeCacheNavigate::TEntry entry;
+ entry.Path = std::move(path);
+ if (entry.Path.empty()) {
+ return ReplyWithError(Ydb::StatusIds::NOT_FOUND, "Invalid table path specified", ctx);
+ }
+ entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
+ request->ResultSet.emplace_back(entry);
+
+ ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
+
+ TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
+ new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
+
+ TBase::Become(&TThis::StateWaitResolveTable);
+ WaitingResolveReply = true;
+ }
+ }
+
+ void HandleTimeout(const TActorContext& ctx) {
+ return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ ResolveNamesResult = ev->Get()->Request;
+
+ return ProceedWithSchema(ctx);
+ }
+
+ void Handle(TEvTablet::TEvLocalSchemeTxResponse::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ ResolveNamesResult = new NSchemeCache::TSchemeCacheNavigate();
+ auto &record = ev->Get()->Record;
+
+ const TString table = Request->GetProtoRequest()->table();
+ auto path = ::NKikimr::SplitPath(table);
+ FillLocalDbTableSchema(*ResolveNamesResult, record.GetFullScheme(), path.back());
+ ResolveNamesResult->ResultSet.back().Path = path;
+
+ return ProceedWithSchema(ctx);
+ }
+
+ void ProceedWithSchema(const TActorContext& ctx) {
+ Y_VERIFY(ResolveNamesResult->ResultSet.size() == 1);
+ const auto& entry = ResolveNamesResult->ResultSet.front();
+ if (entry.Status != NSchemeCache::TSchemeCacheNavigate::EStatus::Ok) {
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, ToString(entry.Status), ctx);
- }
-
- TString errorMessage;
- if (!CheckAccess(errorMessage)) {
- return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
- }
-
- if (!BuildSchema(ctx)) {
- return;
- }
-
- if (!ExtractAllKeys(errorMessage)) {
- return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, errorMessage, ctx);
- }
-
- if (Request->GetProtoRequest()->columns().empty()) {
- return ReplyWithError(Ydb::StatusIds::BAD_REQUEST,
- TStringBuilder() << "Empty column list",
- ctx);
- }
-
- if (ResolveNamesResult->ResultSet.front().TableId.IsSystemView()) {
- return ScanSystemView(ctx);
- } else if (TryParseLocalDbPath(ResolveNamesResult->ResultSet.front().Path)) {
- return ScanLocalDbTable(ctx);
- } else {
- return ResolveShards(ctx);
- }
- }
-
- void ScanSystemView(const NActors::TActorContext& ctx) {
+ }
+
+ TString errorMessage;
+ if (!CheckAccess(errorMessage)) {
+ return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, errorMessage, ctx);
+ }
+
+ if (!BuildSchema(ctx)) {
+ return;
+ }
+
+ if (!ExtractAllKeys(errorMessage)) {
+ return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, errorMessage, ctx);
+ }
+
+ if (Request->GetProtoRequest()->columns().empty()) {
+ return ReplyWithError(Ydb::StatusIds::BAD_REQUEST,
+ TStringBuilder() << "Empty column list",
+ ctx);
+ }
+
+ if (ResolveNamesResult->ResultSet.front().TableId.IsSystemView()) {
+ return ScanSystemView(ctx);
+ } else if (TryParseLocalDbPath(ResolveNamesResult->ResultSet.front().Path)) {
+ return ScanLocalDbTable(ctx);
+ } else {
+ return ResolveShards(ctx);
+ }
+ }
+
+ void ScanSystemView(const NActors::TActorContext& ctx) {
if (SnapshotId) {
Request->RaiseIssue(
MakeIssue(
@@ -239,195 +239,195 @@ private:
if (auto maxRows = Request->GetProtoRequest()->max_rows(); maxRows && maxRows <= 100)
SysViewMaxRows = maxRows;
-
+
if (auto maxBytes = Request->GetProtoRequest()->max_bytes(); maxBytes && maxBytes <= 10000)
SysViewMaxBytes = maxBytes;
-
- // List of columns requested by user
- TVector<std::pair<TString, NScheme::TTypeId>> valueColumnNamesAndTypes;
-
- // This list of columns will be requested from the sys view scan actor.
- // It starts with all key columns, followed by all the columns requested by the user (possibly including key columns again).
- TSmallVec<NMiniKQL::TKqpComputeContextBase::TColumn> columns;
-
- {
- auto& entry = ResolveNamesResult->ResultSet.front();
- THashMap<TString, ui32> columnsByName;
- for (const auto& ci : entry.Columns) {
- columnsByName[ci.second.Name] = ci.second.Id;
- if (ci.second.KeyOrder != -1) {
- KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), ci.second.KeyOrder + 1));
- KeyColumnTypes[ci.second.KeyOrder] = ci.second.PType;
-
- columns.resize(Max<size_t>(columns.size(), ci.second.KeyOrder + 1));
- columns[ci.second.KeyOrder] = {ci.second.Id, (NScheme::TTypeId)ci.second.PType};
- }
- }
-
- for (TString col : Request->GetProtoRequest()->columns()) {
- auto id = columnsByName.find(col);
- if (id == columnsByName.end()) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- TStringBuilder() << "Unknown column: " << col,
- ctx);
- }
-
- auto ci = entry.Columns.find(id->second);
- columns.push_back({ci->second.Id, (NScheme::TTypeId)ci->second.PType});
-
- valueColumnNamesAndTypes.push_back({ci->second.Name, (NScheme::TTypeId)ci->second.PType});
- ValueColumnTypes.push_back((NScheme::TTypeId)ci->second.PType);
- }
- }
-
- Y_VERIFY_DEBUG(columns.size() == KeyColumnTypes.size() + ValueColumnTypes.size());
-
- {
- TString format = "clickhouse_native";
- BlockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
- if (!BlockBuilder) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- TStringBuilder() << "Unsupported block format: " << format.data(),
- ctx);
- }
-
- ui64 rowsPerBlock = 64000;
- ui64 bytesPerBlock = 64000;
-
- TString err;
- if (!BlockBuilder->Start(valueColumnNamesAndTypes, rowsPerBlock, bytesPerBlock, err)) {
- return ReplyWithError(Ydb::StatusIds::BAD_REQUEST,
- TStringBuilder() << "Block format error: " << err.data(),
- ctx);
- }
- }
-
- {
- TTableRange range(MinKey.GetCells(), MinKeyInclusive, MaxKey.GetCells(), MaxKeyInclusive);
- auto tableScanActor = NSysView::CreateSystemViewScan(ctx.SelfID, 0,
- ResolveNamesResult->ResultSet.front().TableId,
- range,
- columns);
-
- if (!tableScanActor) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- TStringBuilder() << "Failed to create system view scan, table id: " << ResolveNamesResult->ResultSet.front().TableId,
- ctx);
- }
-
+
+ // List of columns requested by user
+ TVector<std::pair<TString, NScheme::TTypeId>> valueColumnNamesAndTypes;
+
+ // This list of columns will be requested from the sys view scan actor.
+ // It starts with all key columns, followed by all the columns requested by the user (possibly including key columns again).
+ TSmallVec<NMiniKQL::TKqpComputeContextBase::TColumn> columns;
+
+ {
+ auto& entry = ResolveNamesResult->ResultSet.front();
+ THashMap<TString, ui32> columnsByName;
+ for (const auto& ci : entry.Columns) {
+ columnsByName[ci.second.Name] = ci.second.Id;
+ if (ci.second.KeyOrder != -1) {
+ KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), ci.second.KeyOrder + 1));
+ KeyColumnTypes[ci.second.KeyOrder] = ci.second.PType;
+
+ columns.resize(Max<size_t>(columns.size(), ci.second.KeyOrder + 1));
+ columns[ci.second.KeyOrder] = {ci.second.Id, (NScheme::TTypeId)ci.second.PType};
+ }
+ }
+
+ for (TString col : Request->GetProtoRequest()->columns()) {
+ auto id = columnsByName.find(col);
+ if (id == columnsByName.end()) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ TStringBuilder() << "Unknown column: " << col,
+ ctx);
+ }
+
+ auto ci = entry.Columns.find(id->second);
+ columns.push_back({ci->second.Id, (NScheme::TTypeId)ci->second.PType});
+
+ valueColumnNamesAndTypes.push_back({ci->second.Name, (NScheme::TTypeId)ci->second.PType});
+ ValueColumnTypes.push_back((NScheme::TTypeId)ci->second.PType);
+ }
+ }
+
+ Y_VERIFY_DEBUG(columns.size() == KeyColumnTypes.size() + ValueColumnTypes.size());
+
+ {
+ TString format = "clickhouse_native";
+ BlockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
+ if (!BlockBuilder) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ TStringBuilder() << "Unsupported block format: " << format.data(),
+ ctx);
+ }
+
+ ui64 rowsPerBlock = 64000;
+ ui64 bytesPerBlock = 64000;
+
+ TString err;
+ if (!BlockBuilder->Start(valueColumnNamesAndTypes, rowsPerBlock, bytesPerBlock, err)) {
+ return ReplyWithError(Ydb::StatusIds::BAD_REQUEST,
+ TStringBuilder() << "Block format error: " << err.data(),
+ ctx);
+ }
+ }
+
+ {
+ TTableRange range(MinKey.GetCells(), MinKeyInclusive, MaxKey.GetCells(), MaxKeyInclusive);
+ auto tableScanActor = NSysView::CreateSystemViewScan(ctx.SelfID, 0,
+ ResolveNamesResult->ResultSet.front().TableId,
+ range,
+ columns);
+
+ if (!tableScanActor) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ TStringBuilder() << "Failed to create system view scan, table id: " << ResolveNamesResult->ResultSet.front().TableId,
+ ctx);
+ }
+
SysViewScanActor = ctx.Register(tableScanActor.Release());
-
+
auto ackEv = MakeHolder<NKqp::TEvKqpCompute::TEvScanDataAck>(0);
- ctx.Send(SysViewScanActor, ackEv.Release());
- }
-
- TBase::Become(&TThis::StateSysViewScan);
- }
-
- void ScanLocalDbTable(const NActors::TActorContext& ctx) {
- Y_VERIFY(ResolveNamesResult);
-
- ui64 tabletId = -1;
- TString tabletIdStr = ResolveNamesResult->ResultSet.front().Path[2];
- try {
- tabletId = FromString<ui64>(tabletIdStr);
- } catch (...) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- TStringBuilder() << "Invalid tabletId: " << tabletIdStr,
- ctx);
- }
-
- TString tableName = ResolveNamesResult->ResultSet.front().Path.back();
-
- // Send request to the first shard
- std::unique_ptr<TEvTablet::TEvLocalReadColumns> ev =
- std::make_unique<TEvTablet::TEvLocalReadColumns>();
- ev->Record.SetTableName(tableName);
- for (TString col : Request->GetProtoRequest()->columns()) {
- ev->Record.AddColumns(col);
- }
- ev->Record.SetFromKey(MinKey.GetBuffer());
- ev->Record.SetFromKeyInclusive(MinKeyInclusive);
- ev->Record.SetMaxRows(Request->GetProtoRequest()->max_rows());
- ev->Record.SetMaxBytes(Request->GetProtoRequest()->max_bytes());
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to tablet " << tabletId);
-
+ ctx.Send(SysViewScanActor, ackEv.Release());
+ }
+
+ TBase::Become(&TThis::StateSysViewScan);
+ }
+
+ void ScanLocalDbTable(const NActors::TActorContext& ctx) {
+ Y_VERIFY(ResolveNamesResult);
+
+ ui64 tabletId = -1;
+ TString tabletIdStr = ResolveNamesResult->ResultSet.front().Path[2];
+ try {
+ tabletId = FromString<ui64>(tabletIdStr);
+ } catch (...) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ TStringBuilder() << "Invalid tabletId: " << tabletIdStr,
+ ctx);
+ }
+
+ TString tableName = ResolveNamesResult->ResultSet.front().Path.back();
+
+ // Send request to the first shard
+ std::unique_ptr<TEvTablet::TEvLocalReadColumns> ev =
+ std::make_unique<TEvTablet::TEvLocalReadColumns>();
+ ev->Record.SetTableName(tableName);
+ for (TString col : Request->GetProtoRequest()->columns()) {
+ ev->Record.AddColumns(col);
+ }
+ ev->Record.SetFromKey(MinKey.GetBuffer());
+ ev->Record.SetFromKeyInclusive(MinKeyInclusive);
+ ev->Record.SetMaxRows(Request->GetProtoRequest()->max_rows());
+ ev->Record.SetMaxBytes(Request->GetProtoRequest()->max_bytes());
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to tablet " << tabletId);
+
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvForward(ev.release(), tabletId, true), IEventHandle::FlagTrackDelivery);
-
- ++ShardRequestCount;
-
- TBase::Become(&TThis::StateWaitResults);
- }
-
- void Handle(NKqp::TEvKqpCompute::TEvScanData::TPtr &ev, const TActorContext &ctx) {
- auto scanId = ev->Get()->ScanId;
- Y_UNUSED(scanId);
-
- size_t keyColumnCount = KeyColumnTypes.size();
-
- TString lastKey;
- size_t rowsExtracted = 0;
- bool skippedBeforeMinKey = false;
+
+ ++ShardRequestCount;
+
+ TBase::Become(&TThis::StateWaitResults);
+ }
+
+ void Handle(NKqp::TEvKqpCompute::TEvScanData::TPtr &ev, const TActorContext &ctx) {
+ auto scanId = ev->Get()->ScanId;
+ Y_UNUSED(scanId);
+
+ size_t keyColumnCount = KeyColumnTypes.size();
+
+ TString lastKey;
+ size_t rowsExtracted = 0;
+ bool skippedBeforeMinKey = false;
if (ev->Get()->GetDataFormat() == NKikimrTxDataShard::ARROW) {
return ReplyWithError(Ydb::StatusIds::INTERNAL_ERROR, "Arrow format not supported yet", ctx);
}
for (auto&& row : ev->Get()->Rows) {
- ++rowsExtracted;
+ ++rowsExtracted;
if (row.size() != keyColumnCount + ValueColumnTypes.size()) {
- return ReplyWithError(Ydb::StatusIds::INTERNAL_ERROR,
- "System view row format doesn't match the schema",
- ctx);
- }
-
+ return ReplyWithError(Ydb::StatusIds::INTERNAL_ERROR,
+ "System view row format doesn't match the schema",
+ ctx);
+ }
+
TDbTupleRef rowKey(KeyColumnTypes.data(), row.data(), keyColumnCount);
-
- if (!skippedBeforeMinKey) {
- int cmp = CompareTypedCellVectors(MinKey.GetCells().data(), rowKey.Cells().data(),
- KeyColumnTypes.data(),
- MinKey.GetCells().size(), rowKey.Cells().size());
-
+
+ if (!skippedBeforeMinKey) {
+ int cmp = CompareTypedCellVectors(MinKey.GetCells().data(), rowKey.Cells().data(),
+ KeyColumnTypes.data(),
+ MinKey.GetCells().size(), rowKey.Cells().size());
+
// Skip rows before MinKey just in case (because currently sys view scan ignores key range)
- if (cmp > 0 || (cmp == 0 && !MinKeyInclusive)) {
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Skipped rows by sys view scan");
- continue;
- } else {
- skippedBeforeMinKey = true;
- }
- }
-
+ if (cmp > 0 || (cmp == 0 && !MinKeyInclusive)) {
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Skipped rows by sys view scan");
+ continue;
+ } else {
+ skippedBeforeMinKey = true;
+ }
+ }
+
TDbTupleRef rowValues(ValueColumnTypes.data(), row.data() + keyColumnCount, row.size() - keyColumnCount);
- BlockBuilder->AddRow(rowKey, rowValues);
- ++SysViewRowsReceived;
-
- if (SysViewRowsReceived >= SysViewMaxRows || BlockBuilder->Bytes() >= SysViewMaxBytes) {
- lastKey = TSerializedCellVec::Serialize(rowKey.Cells());
- break;
- }
- }
-
+ BlockBuilder->AddRow(rowKey, rowValues);
+ ++SysViewRowsReceived;
+
+ if (SysViewRowsReceived >= SysViewMaxRows || BlockBuilder->Bytes() >= SysViewMaxBytes) {
+ lastKey = TSerializedCellVec::Serialize(rowKey.Cells());
+ break;
+ }
+ }
+
auto ackEv = MakeHolder<NKqp::TEvKqpCompute::TEvScanDataAck>(0);
- ctx.Send(ev->Sender, ackEv.Release());
-
- bool done =
- ev->Get()->Finished ||
- SysViewRowsReceived >= SysViewMaxRows ||
- BlockBuilder->Bytes() >= SysViewMaxBytes;
-
- if (done) {
- TString buffer = BlockBuilder->Finish();
- buffer.resize(BlockBuilder->Bytes());
-
- Result.add_blocks(buffer);
- Result.set_last_key(lastKey);
- Result.set_last_key_inclusive(true);
- Result.set_eos(ev->Get()->Finished && rowsExtracted == ev->Get()->Rows.size());
- return ReplySuccess(ctx);
- }
- }
-
+ ctx.Send(ev->Sender, ackEv.Release());
+
+ bool done =
+ ev->Get()->Finished ||
+ SysViewRowsReceived >= SysViewMaxRows ||
+ BlockBuilder->Bytes() >= SysViewMaxBytes;
+
+ if (done) {
+ TString buffer = BlockBuilder->Finish();
+ buffer.resize(BlockBuilder->Bytes());
+
+ Result.add_blocks(buffer);
+ Result.set_last_key(lastKey);
+ Result.set_last_key_inclusive(true);
+ Result.set_eos(ev->Get()->Finished && rowsExtracted == ev->Get()->Rows.size());
+ return ReplySuccess(ctx);
+ }
+ }
+
void Handle(NKqp::TEvKqpCompute::TEvScanError::TPtr& ev, const TActorContext& ctx) {
NYql::TIssues issues;
Ydb::StatusIds::StatusCode status = ev->Get()->Record.GetStatus();
@@ -436,324 +436,324 @@ private:
ReplyWithError(status, issues, ctx);
}
- STFUNC(StateSysViewScan) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NKqp::TEvKqpCompute::TEvScanData, Handle);
+ STFUNC(StateSysViewScan) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(NKqp::TEvKqpCompute::TEvScanData, Handle);
HFunc(NKqp::TEvKqpCompute::TEvScanError, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- bool CheckAccess(TString& errorMessage) {
- if (Request->GetInternalToken().empty())
- return true;
-
- NACLib::TUserToken userToken(Request->GetInternalToken());
-
- const ui32 access = NACLib::EAccessRights::SelectRow;
- for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : ResolveNamesResult->ResultSet) {
- if (access != 0 && entry.SecurityObject != nullptr &&
- !entry.SecurityObject->CheckAccess(access, userToken))
- {
- TStringStream explanation;
- explanation << "Access denied for " << userToken.GetUserSID()
- << " with access " << NACLib::AccessRightsToString(access)
- << " to table [" << Request->GetProtoRequest()->Gettable() << "]";
-
- errorMessage = explanation.Str();
- return false;
- }
- }
- return true;
- }
-
- bool BuildSchema(const NActors::TActorContext& ctx) {
- Y_UNUSED(ctx);
-
- auto& entry = ResolveNamesResult->ResultSet.front();
-
- TVector<ui32> keyColumnIds;
- THashMap<TString, ui32> columnByName;
- for (const auto& ci : entry.Columns) {
- columnByName[ci.second.Name] = ci.second.Id;
- i32 keyOrder = ci.second.KeyOrder;
- if (keyOrder != -1) {
- Y_VERIFY(keyOrder >= 0);
- KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
- KeyColumnTypes[keyOrder] = ci.second.PType;
- keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
- keyColumnIds[keyOrder] = ci.second.Id;
- }
- }
-
- KeyColumnPositions.resize(KeyColumnTypes.size());
-
- return true;
- }
-
- bool CheckCellSizes(const TConstArrayRef<TCell>& cells, const TConstArrayRef<NScheme::TTypeId>& types) {
- if (cells.size() > types.size())
- return false;
-
- for (size_t i = 0; i < cells.size(); ++i) {
- if (!cells[i].IsNull() &&
- NScheme::GetFixedSize(types[i]) != 0 &&
- NScheme::GetFixedSize(types[i]) != cells[i].Size())
- {
- return false;
- }
- }
-
- return true;
- }
-
- bool ExtractAllKeys(TString& errorMessage) {
- if (!Request->GetProtoRequest()->from_key().empty()) {
- if (!TSerializedCellVec::TryParse(Request->GetProtoRequest()->from_key(), MinKey) ||
- !CheckCellSizes(MinKey.GetCells(), KeyColumnTypes))
- {
- errorMessage = "Invalid from key";
- return false;
- }
- MinKeyInclusive = Request->GetProtoRequest()->from_key_inclusive();
- } else {
- TVector<TCell> allNulls(KeyColumnTypes.size());
- MinKey.Parse(TSerializedCellVec::Serialize(allNulls));
- MinKeyInclusive = true;
- }
-
- if (!Request->GetProtoRequest()->to_key().empty()) {
- if (!TSerializedCellVec::TryParse(Request->GetProtoRequest()->to_key(), MaxKey) ||
- !CheckCellSizes(MaxKey.GetCells(), KeyColumnTypes))
- {
- errorMessage = "Invalid to key";
- return false;
- }
- MaxKeyInclusive = Request->GetProtoRequest()->to_key_inclusive();
- } else {
- TVector<TCell> infinity;
- MaxKey.Parse(TSerializedCellVec::Serialize(infinity));
- MaxKeyInclusive = false;
- }
-
- return true;
- }
-
- void ResolveShards(const NActors::TActorContext& ctx) {
- auto& entry = ResolveNamesResult->ResultSet.front();
-
- // We are going to set all columns
- TVector<TKeyDesc::TColumnOp> columns;
- for (const auto& ci : entry.Columns) {
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ bool CheckAccess(TString& errorMessage) {
+ if (Request->GetInternalToken().empty())
+ return true;
+
+ NACLib::TUserToken userToken(Request->GetInternalToken());
+
+ const ui32 access = NACLib::EAccessRights::SelectRow;
+ for (const NSchemeCache::TSchemeCacheNavigate::TEntry& entry : ResolveNamesResult->ResultSet) {
+ if (access != 0 && entry.SecurityObject != nullptr &&
+ !entry.SecurityObject->CheckAccess(access, userToken))
+ {
+ TStringStream explanation;
+ explanation << "Access denied for " << userToken.GetUserSID()
+ << " with access " << NACLib::AccessRightsToString(access)
+ << " to table [" << Request->GetProtoRequest()->Gettable() << "]";
+
+ errorMessage = explanation.Str();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool BuildSchema(const NActors::TActorContext& ctx) {
+ Y_UNUSED(ctx);
+
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
+ TVector<ui32> keyColumnIds;
+ THashMap<TString, ui32> columnByName;
+ for (const auto& ci : entry.Columns) {
+ columnByName[ci.second.Name] = ci.second.Id;
+ i32 keyOrder = ci.second.KeyOrder;
+ if (keyOrder != -1) {
+ Y_VERIFY(keyOrder >= 0);
+ KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
+ KeyColumnTypes[keyOrder] = ci.second.PType;
+ keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
+ keyColumnIds[keyOrder] = ci.second.Id;
+ }
+ }
+
+ KeyColumnPositions.resize(KeyColumnTypes.size());
+
+ return true;
+ }
+
+ bool CheckCellSizes(const TConstArrayRef<TCell>& cells, const TConstArrayRef<NScheme::TTypeId>& types) {
+ if (cells.size() > types.size())
+ return false;
+
+ for (size_t i = 0; i < cells.size(); ++i) {
+ if (!cells[i].IsNull() &&
+ NScheme::GetFixedSize(types[i]) != 0 &&
+ NScheme::GetFixedSize(types[i]) != cells[i].Size())
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool ExtractAllKeys(TString& errorMessage) {
+ if (!Request->GetProtoRequest()->from_key().empty()) {
+ if (!TSerializedCellVec::TryParse(Request->GetProtoRequest()->from_key(), MinKey) ||
+ !CheckCellSizes(MinKey.GetCells(), KeyColumnTypes))
+ {
+ errorMessage = "Invalid from key";
+ return false;
+ }
+ MinKeyInclusive = Request->GetProtoRequest()->from_key_inclusive();
+ } else {
+ TVector<TCell> allNulls(KeyColumnTypes.size());
+ MinKey.Parse(TSerializedCellVec::Serialize(allNulls));
+ MinKeyInclusive = true;
+ }
+
+ if (!Request->GetProtoRequest()->to_key().empty()) {
+ if (!TSerializedCellVec::TryParse(Request->GetProtoRequest()->to_key(), MaxKey) ||
+ !CheckCellSizes(MaxKey.GetCells(), KeyColumnTypes))
+ {
+ errorMessage = "Invalid to key";
+ return false;
+ }
+ MaxKeyInclusive = Request->GetProtoRequest()->to_key_inclusive();
+ } else {
+ TVector<TCell> infinity;
+ MaxKey.Parse(TSerializedCellVec::Serialize(infinity));
+ MaxKeyInclusive = false;
+ }
+
+ return true;
+ }
+
+ void ResolveShards(const NActors::TActorContext& ctx) {
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
+ // We are going to set all columns
+ TVector<TKeyDesc::TColumnOp> columns;
+ for (const auto& ci : entry.Columns) {
TKeyDesc::TColumnOp op = { ci.second.Id, TKeyDesc::EColumnOperation::Set, ci.second.PType, 0, 0 };
- columns.push_back(op);
- }
-
- // Set MaxKey = MinKey to touch only 1 shard in request
- TTableRange range(MinKey.GetCells(), true, MinKey.GetCells(), true, false);
- KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Resolving range: "
- << " fromKey: " << PrintKey(MinKey.GetBuffer(), *AppData(ctx)->TypeRegistry)
- << " fromInclusive: " << true);
-
- TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
-
+ columns.push_back(op);
+ }
+
+ // Set MaxKey = MinKey to touch only 1 shard in request
+ TTableRange range(MinKey.GetCells(), true, MinKey.GetCells(), true, false);
+ KeyRange.Reset(new TKeyDesc(entry.TableId, range, TKeyDesc::ERowOperation::Read, KeyColumnTypes, columns));
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Resolving range: "
+ << " fromKey: " << PrintKey(MinKey.GetBuffer(), *AppData(ctx)->TypeRegistry)
+ << " fromInclusive: " << true);
+
+ TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
+
request->ResultSet.emplace_back(std::move(KeyRange));
-
- TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
- ctx.Send(SchemeCache, resolveReq.Release());
-
- TBase::Become(&TThis::StateWaitResolveShards);
- WaitingResolveReply = true;
- }
-
- STFUNC(StateWaitResolveShards) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
+
+ TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
+ ctx.Send(SchemeCache, resolveReq.Release());
+
+ TBase::Become(&TThis::StateWaitResolveShards);
+ WaitingResolveReply = true;
+ }
+
+ STFUNC(StateWaitResolveShards) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
Y_VERIFY(msg->Request->ResultSet.size() == 1);
KeyRange = std::move(msg->Request->ResultSet[0].KeyDescription);
-
+
if (msg->Request->ErrorCount > 0) {
- return ReplyWithError(Ydb::StatusIds::NOT_FOUND, Sprintf("Unknown table '%s'", Request->GetProtoRequest()->Gettable().data()), ctx);
- }
-
- auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
- TVector<ui64> shards;
- shards.reserve(partitions.size());
- for (auto& partition : partitions) {
- shards.push_back(partition.ShardId);
- }
-
- return JoinVectorIntoString(shards, ", ");
- };
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Range shards: " << getShardsString(KeyRange->Partitions));
-
- MakeShardRequests(ctx);
- }
-
- void MakeShardRequests(const NActors::TActorContext& ctx) {
- Y_VERIFY(!KeyRange->Partitions.empty());
-
- // Send request to the first shard
- std::unique_ptr<TEvDataShard::TEvReadColumnsRequest> ev =
- std::make_unique<TEvDataShard::TEvReadColumnsRequest>();
+ return ReplyWithError(Ydb::StatusIds::NOT_FOUND, Sprintf("Unknown table '%s'", Request->GetProtoRequest()->Gettable().data()), ctx);
+ }
+
+ auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
+ TVector<ui64> shards;
+ shards.reserve(partitions.size());
+ for (auto& partition : partitions) {
+ shards.push_back(partition.ShardId);
+ }
+
+ return JoinVectorIntoString(shards, ", ");
+ };
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Range shards: " << getShardsString(KeyRange->Partitions));
+
+ MakeShardRequests(ctx);
+ }
+
+ void MakeShardRequests(const NActors::TActorContext& ctx) {
+ Y_VERIFY(!KeyRange->Partitions.empty());
+
+ // Send request to the first shard
+ std::unique_ptr<TEvDataShard::TEvReadColumnsRequest> ev =
+ std::make_unique<TEvDataShard::TEvReadColumnsRequest>();
ev->Record.SetTableId(KeyRange->TableId.PathId.LocalPathId);
- for (TString col : Request->GetProtoRequest()->columns()) {
- ev->Record.AddColumns(col);
- }
- ev->Record.SetFromKey(MinKey.GetBuffer());
- ev->Record.SetFromKeyInclusive(MinKeyInclusive);
- ev->Record.SetMaxRows(Request->GetProtoRequest()->max_rows());
- ev->Record.SetMaxBytes(Request->GetProtoRequest()->max_bytes());
+ for (TString col : Request->GetProtoRequest()->columns()) {
+ ev->Record.AddColumns(col);
+ }
+ ev->Record.SetFromKey(MinKey.GetBuffer());
+ ev->Record.SetFromKeyInclusive(MinKeyInclusive);
+ ev->Record.SetMaxRows(Request->GetProtoRequest()->max_rows());
+ ev->Record.SetMaxBytes(Request->GetProtoRequest()->max_bytes());
if (SnapshotId) {
ev->Record.SetSnapshotStep(SnapshotId.Step);
ev->Record.SetSnapshotTxId(SnapshotId.TxId);
}
-
- ui64 shardId = KeyRange->Partitions[0].ShardId;
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shard " << shardId);
-
+
+ ui64 shardId = KeyRange->Partitions[0].ShardId;
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shard " << shardId);
+
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvForward(ev.release(), shardId, true), IEventHandle::FlagTrackDelivery);
-
- ++ShardRequestCount;
-
- TBase::Become(&TThis::StateWaitResults);
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
- Y_UNUSED(ev);
- ReplyWithError(Ydb::StatusIds::INTERNAL_ERROR, "Internal error: pipe cache is not available, the cluster might not be configured properly", ctx);
- }
-
- void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
- ReplyWithError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Failed to connect to shard %lu", ev->Get()->TabletId), ctx);
- }
-
- STFUNC(StateWaitResults) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvDataShard::TEvReadColumnsResponse, Handle);
- HFunc(TEvTablet::TEvLocalReadColumnsResponse, Handle);
- HFunc(TEvents::TEvUndelivered, Handle);
- HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
-
- TString PrintKey(const TString& serialized, const NScheme::TTypeRegistry& typeRegistry) {
- TSerializedCellVec lastKeyCells(serialized);
- TDbTupleRef lastKeyTuple(KeyColumnTypes.data(), lastKeyCells.GetCells().data(), std::min(KeyColumnTypes.size(), lastKeyCells.GetCells().size()));
- return DbgPrintTuple(lastKeyTuple, typeRegistry);
- }
-
- void Handle(TEvDataShard::TEvReadColumnsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
- const auto& shardResponse = ev->Get()->Record;
-
- // Notify the cache that we are done with the pipe
+
+ ++ShardRequestCount;
+
+ TBase::Become(&TThis::StateWaitResults);
+ }
+
+ void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
+ Y_UNUSED(ev);
+ ReplyWithError(Ydb::StatusIds::INTERNAL_ERROR, "Internal error: pipe cache is not available, the cluster might not be configured properly", ctx);
+ }
+
+ void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
+ ReplyWithError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Failed to connect to shard %lu", ev->Get()->TabletId), ctx);
+ }
+
+ STFUNC(StateWaitResults) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvDataShard::TEvReadColumnsResponse, Handle);
+ HFunc(TEvTablet::TEvLocalReadColumnsResponse, Handle);
+ HFunc(TEvents::TEvUndelivered, Handle);
+ HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+
+ TString PrintKey(const TString& serialized, const NScheme::TTypeRegistry& typeRegistry) {
+ TSerializedCellVec lastKeyCells(serialized);
+ TDbTupleRef lastKeyTuple(KeyColumnTypes.data(), lastKeyCells.GetCells().data(), std::min(KeyColumnTypes.size(), lastKeyCells.GetCells().size()));
+ return DbgPrintTuple(lastKeyTuple, typeRegistry);
+ }
+
+ void Handle(TEvDataShard::TEvReadColumnsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
+ const auto& shardResponse = ev->Get()->Record;
+
+ // Notify the cache that we are done with the pipe
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(shardResponse.GetTabletID()));
-
- if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
- StatusIds::StatusCode status = Ydb::StatusIds::GENERIC_ERROR;
- switch (shardResponse.GetStatus()) {
- case NKikimrTxDataShard::TError::WRONG_SHARD_STATE:
- status = Ydb::StatusIds::UNAVAILABLE;
- break;
- case NKikimrTxDataShard::TError::BAD_ARGUMENT:
- status = Ydb::StatusIds::BAD_REQUEST;
- break;
- case NKikimrTxDataShard::TError::SCHEME_ERROR:
- status = Ydb::StatusIds::SCHEME_ERROR;
- break;
+
+ if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
+ StatusIds::StatusCode status = Ydb::StatusIds::GENERIC_ERROR;
+ switch (shardResponse.GetStatus()) {
+ case NKikimrTxDataShard::TError::WRONG_SHARD_STATE:
+ status = Ydb::StatusIds::UNAVAILABLE;
+ break;
+ case NKikimrTxDataShard::TError::BAD_ARGUMENT:
+ status = Ydb::StatusIds::BAD_REQUEST;
+ break;
+ case NKikimrTxDataShard::TError::SCHEME_ERROR:
+ status = Ydb::StatusIds::SCHEME_ERROR;
+ break;
case NKikimrTxDataShard::TError::SNAPSHOT_NOT_EXIST:
status = Ydb::StatusIds::NOT_FOUND;
break;
- }
-
- ReplyWithError(status, shardResponse.GetErrorDescription(), ctx);
- return;
- }
-
- ++ShardReplyCount;
-
- Result.add_blocks(shardResponse.GetBlocks());
- Result.set_last_key(shardResponse.GetLastKey());
- Result.set_last_key_inclusive(shardResponse.GetLastKeyInclusive());
- Result.set_eos(shardResponse.GetLastKey().empty()); // TODO: ??
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got reply from shard: " << shardResponse.GetTabletID()
- << " lastKey: " << PrintKey(shardResponse.GetLastKey(), *AppData(ctx)->TypeRegistry)
- << " inclusive: " << shardResponse.GetLastKeyInclusive());
-
- if (ShardReplyCount == ShardRequestCount)
- ReplySuccess(ctx);
- }
-
- void Handle(TEvTablet::TEvLocalReadColumnsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
- const auto& shardResponse = ev->Get()->Record;
-
- // Notify the cache that we are done with the pipe
+ }
+
+ ReplyWithError(status, shardResponse.GetErrorDescription(), ctx);
+ return;
+ }
+
+ ++ShardReplyCount;
+
+ Result.add_blocks(shardResponse.GetBlocks());
+ Result.set_last_key(shardResponse.GetLastKey());
+ Result.set_last_key_inclusive(shardResponse.GetLastKeyInclusive());
+ Result.set_eos(shardResponse.GetLastKey().empty()); // TODO: ??
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got reply from shard: " << shardResponse.GetTabletID()
+ << " lastKey: " << PrintKey(shardResponse.GetLastKey(), *AppData(ctx)->TypeRegistry)
+ << " inclusive: " << shardResponse.GetLastKeyInclusive());
+
+ if (ShardReplyCount == ShardRequestCount)
+ ReplySuccess(ctx);
+ }
+
+ void Handle(TEvTablet::TEvLocalReadColumnsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
+ const auto& shardResponse = ev->Get()->Record;
+
+ // Notify the cache that we are done with the pipe
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(shardResponse.GetTabletID()));
-
- if (shardResponse.GetStatus() != Ydb::StatusIds::SUCCESS) {
- ReplyWithError((StatusIds::StatusCode)shardResponse.GetStatus(), shardResponse.GetErrorDescription(), ctx);
- return;
- }
-
- ++ShardReplyCount;
-
- Result.add_blocks(shardResponse.GetBlocks());
- Result.set_last_key(shardResponse.GetLastKey());
- Result.set_last_key_inclusive(shardResponse.GetLastKeyInclusive());
- Result.set_eos(shardResponse.GetLastKey().empty()); // TODO: ??
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got reply from shard: " << shardResponse.GetTabletID()
- << " lastKey: " << PrintKey(shardResponse.GetLastKey(), *AppData(ctx)->TypeRegistry)
- << " inclusive: " << shardResponse.GetLastKeyInclusive());
-
- if (ShardReplyCount == ShardRequestCount)
- ReplySuccess(ctx);
- }
-
- void ReplySuccess(const NActors::TActorContext& ctx) {
- Finished = true;
- ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
- }
-
- void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
- Finished = true;
- Request->RaiseIssue(NYql::TIssue(message));
+
+ if (shardResponse.GetStatus() != Ydb::StatusIds::SUCCESS) {
+ ReplyWithError((StatusIds::StatusCode)shardResponse.GetStatus(), shardResponse.GetErrorDescription(), ctx);
+ return;
+ }
+
+ ++ShardReplyCount;
+
+ Result.add_blocks(shardResponse.GetBlocks());
+ Result.set_last_key(shardResponse.GetLastKey());
+ Result.set_last_key_inclusive(shardResponse.GetLastKeyInclusive());
+ Result.set_eos(shardResponse.GetLastKey().empty()); // TODO: ??
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Got reply from shard: " << shardResponse.GetTabletID()
+ << " lastKey: " << PrintKey(shardResponse.GetLastKey(), *AppData(ctx)->TypeRegistry)
+ << " inclusive: " << shardResponse.GetLastKeyInclusive());
+
+ if (ShardReplyCount == ShardRequestCount)
+ ReplySuccess(ctx);
+ }
+
+ void ReplySuccess(const NActors::TActorContext& ctx) {
+ Finished = true;
+ ReplyWithResult(Ydb::StatusIds::SUCCESS, Result, ctx);
+ }
+
+ void ReplyWithError(StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
+ Finished = true;
+ Request->RaiseIssue(NYql::TIssue(message));
Request->ReplyWithYdbStatus(status);
-
- // We cannot Die() while a scheme cache request is in flight, because that request holds a pointer to
- // the KeyRange member, so we must not destroy it before we get the response.
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-
+
+ // We cannot Die() while a scheme cache request is in flight, because that request holds a pointer to
+ // the KeyRange member, so we must not destroy it before we get the response.
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+
void ReplyWithError(StatusIds::StatusCode status, const NYql::TIssues& issues, const TActorContext& ctx) {
Finished = true;
Request->RaiseIssues(issues);
@@ -764,19 +764,19 @@ private:
}
}
- void ReplyWithResult(StatusIds::StatusCode status,
- const Ydb::ClickhouseInternal::ScanResult& result,
- const TActorContext& ctx) {
- Request->SendResult(result, status);
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-};
-
-void TGRpcRequestProxy::Handle(TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx) {
- ctx.Register(new TReadColumnsRPC(ev->Release().Release()));
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+ void ReplyWithResult(StatusIds::StatusCode status,
+ const Ydb::ClickhouseInternal::ScanResult& result,
+ const TActorContext& ctx) {
+ Request->SendResult(result, status);
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+};
+
+void TGRpcRequestProxy::Handle(TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx) {
+ ctx.Register(new TReadColumnsRPC(ev->Release().Release()));
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
diff --git a/ydb/core/grpc_services/rpc_read_table.cpp b/ydb/core/grpc_services/rpc_read_table.cpp
index 6db9c43c583..bbb87907289 100644
--- a/ydb/core/grpc_services/rpc_read_table.cpp
+++ b/ydb/core/grpc_services/rpc_read_table.cpp
@@ -105,8 +105,8 @@ class TReadTableRPC : public TActorBootstrapped<TReadTableRPC> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::GRPC_STREAM_REQ;
- }
-
+ }
+
TReadTableRPC(TEvReadTableRequest* msg)
: Request_(msg)
, QuotaLimit_(10)
diff --git a/ydb/core/grpc_services/rpc_s3_listing.cpp b/ydb/core/grpc_services/rpc_s3_listing.cpp
index 470e4f50e80..db2f23ac652 100644
--- a/ydb/core/grpc_services/rpc_s3_listing.cpp
+++ b/ydb/core/grpc_services/rpc_s3_listing.cpp
@@ -1,14 +1,14 @@
-#include "grpc_request_proxy.h"
-#include "rpc_calls.h"
-
-namespace NKikimr {
-namespace NGRpcService {
-
-IActor* CreateGrpcS3ListingRequest(TAutoPtr<TEvS3ListingRequest> request);
-
-void TGRpcRequestProxy::Handle(TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx) {
- ctx.Register(CreateGrpcS3ListingRequest(ev->Release().Release()));
-}
-
-} // namespace NKikimr
-} // namespace NGRpcService
+#include "grpc_request_proxy.h"
+#include "rpc_calls.h"
+
+namespace NKikimr {
+namespace NGRpcService {
+
+IActor* CreateGrpcS3ListingRequest(TAutoPtr<TEvS3ListingRequest> request);
+
+void TGRpcRequestProxy::Handle(TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx) {
+ ctx.Register(CreateGrpcS3ListingRequest(ev->Release().Release()));
+}
+
+} // namespace NKikimr
+} // namespace NGRpcService
diff --git a/ydb/core/grpc_services/ya.make b/ydb/core/grpc_services/ya.make
index c8224601564..05b156bf043 100644
--- a/ydb/core/grpc_services/ya.make
+++ b/ydb/core/grpc_services/ya.make
@@ -8,12 +8,12 @@ OWNER(
SRCS(
grpc_endpoint_publish_actor.cpp
grpc_helper.cpp
- grpc_mon.cpp
+ grpc_mon.cpp
grpc_publisher_service_actor.cpp
grpc_request_proxy.cpp
local_rate_limiter.cpp
operation_helpers.cpp
- resolve_local_db_table.cpp
+ resolve_local_db_table.cpp
rpc_alter_coordination_node.cpp
rpc_alter_table.cpp
rpc_begin_transaction.cpp
@@ -42,29 +42,29 @@ SRCS(
rpc_explain_data_query.cpp
rpc_forget_operation.cpp
rpc_get_operation.cpp
- rpc_get_shard_locations.cpp
+ rpc_get_shard_locations.cpp
rpc_import.cpp
rpc_import_data.cpp
rpc_keep_alive.cpp
- rpc_kh_describe.cpp
+ rpc_kh_describe.cpp
rpc_kh_snapshots.cpp
rpc_kqp_base.cpp
rpc_list_operations.cpp
rpc_login.cpp
- rpc_load_rows.cpp
- rpc_log_store.cpp
+ rpc_load_rows.cpp
+ rpc_log_store.cpp
rpc_long_tx.cpp
rpc_make_directory.cpp
rpc_modify_permissions.cpp
rpc_monitoring.cpp
rpc_prepare_data_query.cpp
rpc_rate_limiter_api.cpp
- rpc_read_columns.cpp
+ rpc_read_columns.cpp
rpc_read_table.cpp
rpc_remove_directory.cpp
rpc_rename_tables.cpp
rpc_rollback_transaction.cpp
- rpc_s3_listing.cpp
+ rpc_s3_listing.cpp
rpc_scheme_base.cpp
rpc_stream_execute_scan_query.cpp
rpc_stream_execute_yql_script.cpp
diff --git a/ydb/core/grpc_streaming/grpc_streaming.h b/ydb/core/grpc_streaming/grpc_streaming.h
index e8b4538143a..ecac6fcf171 100644
--- a/ydb/core/grpc_streaming/grpc_streaming.h
+++ b/ydb/core/grpc_streaming/grpc_streaming.h
@@ -246,7 +246,7 @@ private:
Flags |= FlagRegistered;
- if (IncRequest()) {
+ if (IncRequest()) {
if (Counters) {
Counters->StartProcessing(0);
}
@@ -591,7 +591,7 @@ private:
if (Counters) {
Counters->FinishProcessing(0, 0, Status->ok(), 0, TDuration::Seconds(RequestTimer.Passed()));
}
- DecRequest();
+ DecRequest();
flags = Flags.load(std::memory_order_acquire);
}
@@ -610,20 +610,20 @@ private:
}
private:
- bool IncRequest() {
- if (Limiter) {
- return Limiter->IncRequest();
- }
- return true;
- }
-
- void DecRequest() {
- if (Limiter) {
- Limiter->DecRequest();
- }
- }
-
-private:
+ bool IncRequest() {
+ if (Limiter) {
+ return Limiter->IncRequest();
+ }
+ return true;
+ }
+
+ void DecRequest() {
+ if (Limiter) {
+ Limiter->DecRequest();
+ }
+ }
+
+private:
class TFacade : public IContext {
public:
explicit TFacade(TSelf* self)
diff --git a/ydb/core/kesus/proxy/proxy.cpp b/ydb/core/kesus/proxy/proxy.cpp
index 5efc999fa2a..9633816e60a 100644
--- a/ydb/core/kesus/proxy/proxy.cpp
+++ b/ydb/core/kesus/proxy/proxy.cpp
@@ -242,8 +242,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::KESUS_RESOLVE_ACTOR;
- }
-
+ }
+
private:
void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) {
const auto& ctx = TActivationContext::AsActorContext();
diff --git a/ydb/core/keyvalue/keyvalue_flat_impl.h b/ydb/core/keyvalue/keyvalue_flat_impl.h
index a66c03443bb..292f2feff93 100644
--- a/ydb/core/keyvalue/keyvalue_flat_impl.h
+++ b/ydb/core/keyvalue/keyvalue_flat_impl.h
@@ -61,7 +61,7 @@ protected:
alter.AddColumn(TABLE_ID, "key", KEY_TAG, NScheme::TSmallBoundedString::TypeId, false);
alter.AddColumnToKey(TABLE_ID, KEY_TAG);
alter.AddColumn(TABLE_ID, "value", VALUE_TAG, NScheme::TString::TypeId, false);
- // Init log batching settings
+ // Init log batching settings
alter.SetExecutorAllowLogBatching(true);
alter.SetExecutorLogFlushPeriod(TDuration::MicroSeconds(500));
Self.State.Clear();
@@ -467,7 +467,7 @@ public:
STFUNC(StateWork) {
if (HandleHook(ev, ctx))
return;
- RestoreActorActivity();
+ RestoreActorActivity();
switch (ev->GetTypeRewrite()) {
hFunc(TEvKeyValue::TEvRead, Handle);
hFunc(TEvKeyValue::TEvReadRange, Handle);
diff --git a/ydb/core/keyvalue/keyvalue_state.cpp b/ydb/core/keyvalue/keyvalue_state.cpp
index 0b9a25aa492..6b31c463f83 100644
--- a/ydb/core/keyvalue/keyvalue_state.cpp
+++ b/ydb/core/keyvalue/keyvalue_state.cpp
@@ -13,7 +13,7 @@
#include <ydb/core/util/stlog.h>
#include <library/cpp/monlib/service/pages/templates.h>
#include <library/cpp/json/writer/json_value.h>
-#include <util/string/escape.h>
+#include <util/string/escape.h>
#include <util/charset/utf8.h>
// Set to 1 in order for tablet to reboot instead of failing a Y_VERIFY on database damage
@@ -1041,10 +1041,10 @@ void TKeyValueState::ProcessCmd(TIntermediate::TWrite &request,
} else {
// all blobs from the same write must be within the same channel
Y_VERIFY(channel == (int)logoBlobId.Channel());
- }
+ }
}
storage_channel = channel + MainStorageChannelInPublicApi;
-
+
ctx.Send(ChannelBalancerActorId, new TChannelBalancer::TEvReportWriteLatency(channel, request.Latency));
}
diff --git a/ydb/core/keyvalue/keyvalue_ut.cpp b/ydb/core/keyvalue/keyvalue_ut.cpp
index a43d13d3fbf..4ae1fcf56a6 100644
--- a/ydb/core/keyvalue/keyvalue_ut.cpp
+++ b/ydb/core/keyvalue/keyvalue_ut.cpp
@@ -1318,34 +1318,34 @@ Y_UNIT_TEST(TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi) {
Y_UNIT_TEST(TestEmptyWriteReadDeleteWithRestartsThenResponseOk) {
- TTestContext tc;
- RunTestWithReboots(tc.TabletIds, [&]() {
- return tc.InitialEventsFilter.Prepare();
+ TTestContext tc;
+ RunTestWithReboots(tc.TabletIds, [&]() {
+ return tc.InitialEventsFilter.Prepare();
}, [&](const TString &dispatchName, std::function<void(TTestActorRuntime&)> setup, bool &activeZone) {
- TFinalizer finalizer(tc);
- tc.Prepare(dispatchName, setup, activeZone);
+ TFinalizer finalizer(tc);
+ tc.Prepare(dispatchName, setup, activeZone);
CmdWrite("key", "", NKikimrClient::TKeyValueRequest::MAIN,
NKikimrClient::TKeyValueRequest::REALTIME, tc);
CmdRead({"key"}, NKikimrClient::TKeyValueRequest::REALTIME,
- {""}, {}, tc);
-
+ {""}, {}, tc);
+
TDeque<TString> expectedKeys;
TDeque<TString> expectedValues;
- expectedKeys.push_back("key");
- expectedValues.push_back("");
+ expectedKeys.push_back("key");
+ expectedValues.push_back("");
{
TDesiredPair<TEvKeyValue::TEvRequest> dp;
AddCmdReadRange("a", true, "z", false, true, Max<ui64>(), NKikimrClient::TKeyValueRequest::REALTIME,
expectedKeys, expectedValues, NKikimrProto::OK, tc, dp);
RunRequest(dp, tc, __LINE__);
}
-
- CmdDeleteRange("key", true, "key", true, tc);
+
+ CmdDeleteRange("key", true, "key", true, tc);
CmdRead({"key"}, NKikimrClient::TKeyValueRequest::REALTIME,
- {""}, {true}, tc);
- });
-}
-
+ {""}, {true}, tc);
+ });
+}
+
Y_UNIT_TEST(TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi) {
TTestContext tc;
diff --git a/ydb/core/kqp/common/kqp_resolve.h b/ydb/core/kqp/common/kqp_resolve.h
index 5c987cb3845..517d1bc451d 100644
--- a/ydb/core/kqp/common/kqp_resolve.h
+++ b/ydb/core/kqp/common/kqp_resolve.h
@@ -12,13 +12,13 @@
namespace NKikimr {
namespace NKqp {
-enum class ETableKind {
- Unknown = 0,
- Datashard,
- SysView,
- Olap
-};
-
+enum class ETableKind {
+ Unknown = 0,
+ Datashard,
+ SysView,
+ Olap
+};
+
class TKqpTableKeys {
public:
struct TColumn {
@@ -31,7 +31,7 @@ public:
TMap<TString, TColumn> Columns;
TVector<TString> KeyColumns;
TVector<NUdf::TDataTypeId> KeyColumnTypes;
- ETableKind TableKind = ETableKind::Unknown;
+ ETableKind TableKind = ETableKind::Unknown;
};
TTable* FindTablePtr(const TTableId& id) {
diff --git a/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp b/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp
index dadded0cbb8..f8e3cd0c772 100644
--- a/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp
+++ b/ydb/core/kqp/compute_actor/kqp_scan_compute_actor.cpp
@@ -81,7 +81,7 @@ public:
{
YQL_ENSURE(GetTask().GetMeta().UnpackTo(&Meta), "Invalid task meta: " << GetTask().GetMeta().DebugString());
YQL_ENSURE(!Meta.GetReads().empty());
- YQL_ENSURE(Meta.GetTable().GetTableKind() != (ui32)ETableKind::SysView);
+ YQL_ENSURE(Meta.GetTable().GetTableKind() != (ui32)ETableKind::SysView);
KeyColumnTypes.assign(Meta.GetKeyColumnTypes().begin(), Meta.GetKeyColumnTypes().end());
}
diff --git a/ydb/core/kqp/counters/kqp_counters.h b/ydb/core/kqp/counters/kqp_counters.h
index f0c0ef2cac4..53a8f7d2c23 100644
--- a/ydb/core/kqp/counters/kqp_counters.h
+++ b/ydb/core/kqp/counters/kqp_counters.h
@@ -355,7 +355,7 @@ public:
NMonitoring::THistogramPtr DataTxTotalTimeHistogram;
NMonitoring::THistogramPtr ScanTxTotalTimeHistogram;
- TAlignedPagePoolCounters AllocCounters;
+ TAlignedPagePoolCounters AllocCounters;
// db counters
TConcurrentRWHashMap<TString, TKqpDbCountersPtr, 256> DbCounters;
diff --git a/ydb/core/kqp/executer/kqp_executer_impl.h b/ydb/core/kqp/executer/kqp_executer_impl.h
index f9221f673cf..6ecf78ab059 100644
--- a/ydb/core/kqp/executer/kqp_executer_impl.h
+++ b/ydb/core/kqp/executer/kqp_executer_impl.h
@@ -445,7 +445,7 @@ protected:
meta->MutableTableId()->SetOwnerId(stageInfo.Meta.TableId.PathId.OwnerId);
meta->SetSchemaVersion(stageInfo.Meta.TableId.SchemaVersion);
meta->SetSysViewInfo(stageInfo.Meta.TableId.SysViewInfo);
- meta->SetTableKind((ui32)stageInfo.Meta.TableKind);
+ meta->SetTableKind((ui32)stageInfo.Meta.TableKind);
}
void ExtractItemsLimit(const TStageInfo& stageInfo, const NKqpProto::TKqpPhyParamValue& paramValue,
diff --git a/ydb/core/kqp/executer/kqp_partition_helper.cpp b/ydb/core/kqp/executer/kqp_partition_helper.cpp
index 0baeb70b86f..ac786f6c24d 100644
--- a/ydb/core/kqp/executer/kqp_partition_helper.cpp
+++ b/ydb/core/kqp/executer/kqp_partition_helper.cpp
@@ -425,8 +425,8 @@ TSerializedTableRange MakeKeyRange(const TVector<NUdf::TDataTypeId>& keyColumnTy
return serialized;
}
-namespace {
-
+namespace {
+
void FillFullRange(const TStageInfo& stageInfo, THashMap<ui64, TShardInfo>& shardInfoMap, bool read) {
for (ui64 i = 0; i < stageInfo.Meta.ShardKey->Partitions.size(); ++i) {
auto& partition = stageInfo.Meta.ShardKey->Partitions[i];
diff --git a/ydb/core/kqp/executer/kqp_partition_helper.h b/ydb/core/kqp/executer/kqp_partition_helper.h
index 5cf2f45253f..f01f25ab4a5 100644
--- a/ydb/core/kqp/executer/kqp_partition_helper.h
+++ b/ydb/core/kqp/executer/kqp_partition_helper.h
@@ -22,10 +22,10 @@ struct TShardInfo {
TString ToString(const TVector<NScheme::TTypeId>& keyTypes, const NScheme::TTypeRegistry& typeRegistry) const;
};
-TSerializedTableRange MakeKeyRange(const TVector<NUdf::TDataTypeId>& keyColumnTypes,
- const NKqpProto::TKqpPhyKeyRange& range, const TStageInfo& stageInfo, const NMiniKQL::THolderFactory& holderFactory,
- const NMiniKQL::TTypeEnvironment& typeEnv);
-
+TSerializedTableRange MakeKeyRange(const TVector<NUdf::TDataTypeId>& keyColumnTypes,
+ const NKqpProto::TKqpPhyKeyRange& range, const TStageInfo& stageInfo, const NMiniKQL::THolderFactory& holderFactory,
+ const NMiniKQL::TTypeEnvironment& typeEnv);
+
TVector<TSerializedPointOrRange> FillReadRanges(const TVector<NUdf::TDataTypeId>& keyColumnTypes,
const NKqpProto::TKqpPhyOpReadOlapRanges& readRange, const TStageInfo& stageInfo,
const NMiniKQL::THolderFactory& holderFactory, const NMiniKQL::TTypeEnvironment& typeEnv);
diff --git a/ydb/core/kqp/executer/kqp_planner.cpp b/ydb/core/kqp/executer/kqp_planner.cpp
index 53b41f3bd8f..6bcfa85c66f 100644
--- a/ydb/core/kqp/executer/kqp_planner.cpp
+++ b/ydb/core/kqp/executer/kqp_planner.cpp
@@ -145,11 +145,11 @@ void TKqpPlanner::Process(const TVector<NKikimrKqp::TKqpNodeResources>& snapshot
planner->SetLogFunc([TxId = TxId](TStringBuf msg) { LOG_D(msg); });
}
- THashMap<ui64, size_t> nodeIdtoIdx;
- for (size_t idx = 0; idx < snapshot.size(); ++idx) {
- nodeIdtoIdx[snapshot[idx].nodeid()] = idx;
- }
-
+ THashMap<ui64, size_t> nodeIdtoIdx;
+ for (size_t idx = 0; idx < snapshot.size(); ++idx) {
+ nodeIdtoIdx[snapshot[idx].nodeid()] = idx;
+ }
+
auto plan = planner->Plan(snapshot, std::move(est));
if (!plan.empty()) {
@@ -206,11 +206,11 @@ void TKqpPlanner::RunLocal(const TVector<NKikimrKqp::TKqpNodeResources>& snapsho
}
THashMap<ui64, size_t> nodeIdToIdx;
- for (size_t idx = 0; idx < snapshot.size(); ++idx) {
+ for (size_t idx = 0; idx < snapshot.size(); ++idx) {
nodeIdToIdx[snapshot[idx].nodeid()] = idx;
LOG_D("snapshot #" << idx << ": " << snapshot[idx].ShortDebugString());
- }
-
+ }
+
for (auto nodeId: nodes) {
auto ev = PrepareKqpNodeRequest({});
AddScansToKqpNodeRequest(ev, nodeId);
diff --git a/ydb/core/kqp/executer/kqp_planner_strategy.cpp b/ydb/core/kqp/executer/kqp_planner_strategy.cpp
index db5fa4d992d..b0f323dbc24 100644
--- a/ydb/core/kqp/executer/kqp_planner_strategy.cpp
+++ b/ydb/core/kqp/executer/kqp_planner_strategy.cpp
@@ -36,7 +36,7 @@ public:
struct TComp {
bool operator ()(const TNodeDesc& l, const TNodeDesc& r) {
- return r.RemainsMemory > l.RemainsMemory && l.RemainsComputeActors > 0;
+ return r.RemainsMemory > l.RemainsMemory && l.RemainsComputeActors > 0;
}
};
diff --git a/ydb/core/kqp/executer/kqp_scan_executer.cpp b/ydb/core/kqp/executer/kqp_scan_executer.cpp
index e99586f5ff0..07c29f64e21 100644
--- a/ydb/core/kqp/executer/kqp_scan_executer.cpp
+++ b/ydb/core/kqp/executer/kqp_scan_executer.cpp
@@ -546,85 +546,85 @@ private:
}
}
- // Returns the list of ColumnShards that can store rows from the specified range
- // NOTE: Unlike OLTP tables that store data in DataShards, data in OLAP tables is not range
- // partitioned and multiple ColumnShards store data from the same key range
- THashMap<ui64, TShardInfo> ListColumnshadsForRange(const TKqpTableKeys& tableKeys,
+ // Returns the list of ColumnShards that can store rows from the specified range
+ // NOTE: Unlike OLTP tables that store data in DataShards, data in OLAP tables is not range
+ // partitioned and multiple ColumnShards store data from the same key range
+ THashMap<ui64, TShardInfo> ListColumnshadsForRange(const TKqpTableKeys& tableKeys,
const NKqpProto::TKqpPhyOpReadOlapRanges& readRanges, const TStageInfo& stageInfo,
- const NMiniKQL::THolderFactory& holderFactory, const NMiniKQL::TTypeEnvironment& typeEnv)
- {
- const auto* table = tableKeys.FindTablePtr(stageInfo.Meta.TableId);
- YQL_ENSURE(table);
- YQL_ENSURE(table->TableKind == ETableKind::Olap);
- YQL_ENSURE(stageInfo.Meta.TableKind == ETableKind::Olap);
-
+ const NMiniKQL::THolderFactory& holderFactory, const NMiniKQL::TTypeEnvironment& typeEnv)
+ {
+ const auto* table = tableKeys.FindTablePtr(stageInfo.Meta.TableId);
+ YQL_ENSURE(table);
+ YQL_ENSURE(table->TableKind == ETableKind::Olap);
+ YQL_ENSURE(stageInfo.Meta.TableKind == ETableKind::Olap);
+
const auto& keyColumnTypes = table->KeyColumnTypes;
auto ranges = FillReadRanges(keyColumnTypes, readRanges, stageInfo, holderFactory, typeEnv);
-
- THashMap<ui64, TShardInfo> shardInfoMap;
- for (const auto& partition : stageInfo.Meta.ShardKey->Partitions) {
- auto& shardInfo = shardInfoMap[partition.ShardId];
-
- YQL_ENSURE(!shardInfo.KeyReadRanges);
- shardInfo.KeyReadRanges.ConstructInPlace();
+
+ THashMap<ui64, TShardInfo> shardInfoMap;
+ for (const auto& partition : stageInfo.Meta.ShardKey->Partitions) {
+ auto& shardInfo = shardInfoMap[partition.ShardId];
+
+ YQL_ENSURE(!shardInfo.KeyReadRanges);
+ shardInfo.KeyReadRanges.ConstructInPlace();
shardInfo.KeyReadRanges->CopyFrom(ranges);
- }
-
- return shardInfoMap;
- }
-
- // Creates scan tasks for reading OLAP table range
- void BuildColumnshardScanTasks(TStageInfo& stageInfo, const NMiniKQL::THolderFactory& holderFactory,
- const NMiniKQL::TTypeEnvironment& typeEnv)
- {
- YQL_ENSURE(stageInfo.Meta.TableKind == ETableKind::Olap);
- auto& stage = GetStage(stageInfo);
-
- const auto& table = TableKeys.GetTable(stageInfo.Meta.TableId);
+ }
+
+ return shardInfoMap;
+ }
+
+ // Creates scan tasks for reading OLAP table range
+ void BuildColumnshardScanTasks(TStageInfo& stageInfo, const NMiniKQL::THolderFactory& holderFactory,
+ const NMiniKQL::TTypeEnvironment& typeEnv)
+ {
+ YQL_ENSURE(stageInfo.Meta.TableKind == ETableKind::Olap);
+ auto& stage = GetStage(stageInfo);
+
+ const auto& table = TableKeys.GetTable(stageInfo.Meta.TableId);
const auto& keyTypes = table.KeyColumnTypes;
-
- TMap<ui64, TKeyDesc::TPartitionRangeInfo> shard2range;
- for (const auto& part: stageInfo.Meta.ShardKey->Partitions) {
- shard2range[part.ShardId] = part.Range.GetRef();
- }
-
- ui64 taskCount = 0;
-
- for (auto& op : stage.GetTableOps()) {
- Y_VERIFY_DEBUG(stageInfo.Meta.TablePath == op.GetTable().GetPath());
-
+
+ TMap<ui64, TKeyDesc::TPartitionRangeInfo> shard2range;
+ for (const auto& part: stageInfo.Meta.ShardKey->Partitions) {
+ shard2range[part.ShardId] = part.Range.GetRef();
+ }
+
+ ui64 taskCount = 0;
+
+ for (auto& op : stage.GetTableOps()) {
+ Y_VERIFY_DEBUG(stageInfo.Meta.TablePath == op.GetTable().GetPath());
+
auto columns = BuildKqpColumns(op, table);
-
+
YQL_ENSURE(
op.GetTypeCase() == NKqpProto::TKqpPhyTableOperation::kReadOlapRange,
"Unexpected OLAP table scan operation: " << (ui32) op.GetTypeCase()
);
const auto& readRange = op.GetReadOlapRange();
-
+
auto allShards = ListColumnshadsForRange(TableKeys, readRange, stageInfo, holderFactory, typeEnv);
-
+
bool reverse = readRange.GetReverse();
ui64 itemsLimit = 0;
TString itemsLimitParamName;
NDqProto::TData itemsLimitBytes;
NKikimr::NMiniKQL::TType* itemsLimitType;
-
+
ExtractItemsLimit(stageInfo, op.GetReadOlapRange().GetItemsLimit(), holderFactory, typeEnv,
itemsLimit, itemsLimitParamName, itemsLimitBytes, itemsLimitType);
for (auto& [shardId, shardInfo] : allShards) {
YQL_ENSURE(!shardInfo.KeyWriteRanges);
-
+
if (shardInfo.KeyReadRanges->GetRanges().empty()) {
continue;
}
-
+
ui64 nodeId = ShardIdToNodeId.at(shardId);
auto& task = TasksGraph.AddTask(stageInfo);
task.Meta.NodeId = nodeId;
++taskCount;
-
+
for (auto& [name, value] : shardInfo.Params) {
auto ret = task.Meta.Params.emplace(name, std::move(value));
YQL_ENSURE(ret.second);
@@ -633,31 +633,31 @@ private:
auto retType = task.Meta.ParamTypes.emplace(name, typeIterator->second);
YQL_ENSURE(retType.second);
}
-
+
TTaskMeta::TShardReadInfo readInfo = {
.Ranges = std::move(*shardInfo.KeyReadRanges),
.Columns = columns,
.ShardId = shardId,
};
-
+
FillReadInfo(task.Meta, itemsLimit, reverse, readRange);
-
+
if (itemsLimit) {
task.Meta.Params.emplace(itemsLimitParamName, itemsLimitBytes);
task.Meta.ParamTypes.emplace(itemsLimitParamName, itemsLimitType);
- }
-
+ }
+
task.Meta.Reads.ConstructInPlace();
task.Meta.Reads->emplace_back(std::move(readInfo));
LOG_D("Stage " << stageInfo.Id << " create columnshard scan task at node: " << nodeId
<< ", meta: " << task.Meta.ToString(keyTypes, *AppData()->TypeRegistry));
- }
- }
-
- LOG_D("Stage " << stageInfo.Id << " will be executed using " << taskCount << " tasks.");
- }
-
+ }
+ }
+
+ LOG_D("Stage " << stageInfo.Id << " will be executed using " << taskCount << " tasks.");
+ }
+
void BuildComputeTasks(TStageInfo& stageInfo) {
auto& stage = GetStage(stageInfo);
@@ -735,8 +735,8 @@ private:
BuildComputeTasks(stageInfo);
} else if (stageInfo.Meta.IsSysView()) {
BuildSysViewScanTasks(stageInfo, holderFactory, typeEnv);
- } else if (stageInfo.Meta.IsOlap()) {
- BuildColumnshardScanTasks(stageInfo, holderFactory, typeEnv);
+ } else if (stageInfo.Meta.IsOlap()) {
+ BuildColumnshardScanTasks(stageInfo, holderFactory, typeEnv);
} else if (stageInfo.Meta.IsDatashard()) {
BuildDatashardScanTasks(stageInfo, holderFactory, typeEnv);
} else {
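// Editor's note (not part of the original patch): the hunks above restore the ColumnShard
// scan-task builder. Unlike DataShard reads, OLAP data is not range partitioned, so every
// ColumnShard listed in stageInfo.Meta.ShardKey->Partitions receives a copy of the full range
// set and gets one scan task pinned to that shard's node. A condensed, hypothetical sketch of
// that dispatch loop, assuming the TShardInfo/TasksGraph shapes shown in the hunks above:
//
//   auto allShards = ListColumnshadsForRange(TableKeys, readRange, stageInfo, holderFactory, typeEnv);
//   for (auto& [shardId, shardInfo] : allShards) {
//       if (shardInfo.KeyReadRanges->GetRanges().empty()) {
//           continue;                                    // nothing to scan on this shard
//       }
//       auto& task = TasksGraph.AddTask(stageInfo);      // one scan task per ColumnShard
//       task.Meta.NodeId = ShardIdToNodeId.at(shardId);  // run the scan next to the data
//   }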
diff --git a/ydb/core/kqp/executer/kqp_table_resolver.cpp b/ydb/core/kqp/executer/kqp_table_resolver.cpp
index 1c1e256068d..6d02c3d07c4 100644
--- a/ydb/core/kqp/executer/kqp_table_resolver.cpp
+++ b/ydb/core/kqp/executer/kqp_table_resolver.cpp
@@ -81,16 +81,16 @@ private:
return;
}
- if (entry.Kind == NSchemeCache::TSchemeCacheNavigate::KindOlapTable) {
- YQL_ENSURE(entry.OlapTableInfo || entry.OlapStoreInfo);
- // NOTE: entry.SysViewInfo might not be empty for OLAP stats virtual tables
- table->TableKind = ETableKind::Olap;
- } else if (entry.TableId.IsSystemView()) {
- table->TableKind = ETableKind::SysView;
- } else {
- table->TableKind = ETableKind::Datashard;
- }
-
+ if (entry.Kind == NSchemeCache::TSchemeCacheNavigate::KindOlapTable) {
+ YQL_ENSURE(entry.OlapTableInfo || entry.OlapStoreInfo);
+ // NOTE: entry.SysViewInfo might not be empty for OLAP stats virtual tables
+ table->TableKind = ETableKind::Olap;
+ } else if (entry.TableId.IsSystemView()) {
+ table->TableKind = ETableKind::SysView;
+ } else {
+ table->TableKind = ETableKind::Datashard;
+ }
+
// TODO: Resolve columns by id
TMap<TStringBuf, ui32> columnsMap;
for (auto& [columnId, column] : entry.Columns) {
@@ -266,9 +266,9 @@ private:
YQL_ENSURE(stageInfo.Meta.ShardOperations.size() == 1);
auto operation = *stageInfo.Meta.ShardOperations.begin();
- const TKqpTableKeys::TTable* table = TableKeys.FindTablePtr(stageInfo.Meta.TableId);
- stageInfo.Meta.TableKind = table->TableKind;
-
+ const TKqpTableKeys::TTable* table = TableKeys.FindTablePtr(stageInfo.Meta.TableId);
+ stageInfo.Meta.TableKind = table->TableKind;
+
stageInfo.Meta.ShardKey = ExtractKey(stageInfo.Meta.TableId, operation);
auto& entry = request->ResultSet.emplace_back(std::move(stageInfo.Meta.ShardKey));
diff --git a/ydb/core/kqp/executer/kqp_tasks_graph.cpp b/ydb/core/kqp/executer/kqp_tasks_graph.cpp
index 8d27e58925d..1ccfa260471 100644
--- a/ydb/core/kqp/executer/kqp_tasks_graph.cpp
+++ b/ydb/core/kqp/executer/kqp_tasks_graph.cpp
@@ -57,7 +57,7 @@ void FillKqpTasksGraphStages(TKqpTasksGraph& tasksGraph, const TVector<IKqpGatew
YQL_ENSURE(!stageInfo.Meta.TablePath);
stageInfo.Meta.TableId = MakeTableId(op.GetTable());
stageInfo.Meta.TablePath = op.GetTable().GetPath();
- stageInfo.Meta.TableKind = ETableKind::Unknown;
+ stageInfo.Meta.TableKind = ETableKind::Unknown;
tables.insert(MakeTableId(op.GetTable()));
} else {
YQL_ENSURE(stageInfo.Meta.TableId == MakeTableId(op.GetTable()));
diff --git a/ydb/core/kqp/executer/kqp_tasks_graph.h b/ydb/core/kqp/executer/kqp_tasks_graph.h
index 0b53ac9602c..7c356ab623f 100644
--- a/ydb/core/kqp/executer/kqp_tasks_graph.h
+++ b/ydb/core/kqp/executer/kqp_tasks_graph.h
@@ -31,7 +31,7 @@ struct TStageInfoMeta {
TTableId TableId;
TString TablePath;
- ETableKind TableKind;
+ ETableKind TableKind;
TVector<bool> SkipNullKeys;
@@ -40,26 +40,26 @@ struct TStageInfoMeta {
NSchemeCache::TSchemeCacheRequest::EKind ShardKind = NSchemeCache::TSchemeCacheRequest::EKind::KindUnknown;
explicit TStageInfoMeta(const IKqpGateway::TPhysicalTxData& tx)
- : Tx(tx)
- , TableKind(ETableKind::Unknown)
- {}
-
- bool IsDatashard() const {
- return TableKind == ETableKind::Datashard;
- }
-
- bool IsSysView() const {
+ : Tx(tx)
+ , TableKind(ETableKind::Unknown)
+ {}
+
+ bool IsDatashard() const {
+ return TableKind == ETableKind::Datashard;
+ }
+
+ bool IsSysView() const {
if (!ShardKey) {
return false;
}
- YQL_ENSURE((TableKind == ETableKind::SysView) == ShardKey->IsSystemView());
- return TableKind == ETableKind::SysView;
- }
-
- bool IsOlap() const {
- return TableKind == ETableKind::Olap;
- }
-
+ YQL_ENSURE((TableKind == ETableKind::SysView) == ShardKey->IsSystemView());
+ return TableKind == ETableKind::SysView;
+ }
+
+ bool IsOlap() const {
+ return TableKind == ETableKind::Olap;
+ }
+
};
struct TTaskInputMeta {};
diff --git a/ydb/core/kqp/executer/kqp_tasks_validate.cpp b/ydb/core/kqp/executer/kqp_tasks_validate.cpp
index 834e2e9df46..dea7056ff09 100644
--- a/ydb/core/kqp/executer/kqp_tasks_validate.cpp
+++ b/ydb/core/kqp/executer/kqp_tasks_validate.cpp
@@ -69,17 +69,17 @@ private:
for (auto& output : task.Outputs) {
ValidateOutput(output);
}
-
+
if (task.Meta.Writes) {
YQL_ENSURE(task.Outputs.size() == 1, "Read-write tasks should have single output.");
}
- const auto& stageInfo = TasksGraph.GetStageInfo(task.StageId);
- if (stageInfo.Meta.TableKind == ETableKind::Olap) {
- YQL_ENSURE(task.Meta.Reads->size() == 1,
- "OLAP scan task must read exactly 1 range from 1 columnshard");
- YQL_ENSURE(!task.Meta.Writes, "OLAP writes are not supported yet");
- }
+ const auto& stageInfo = TasksGraph.GetStageInfo(task.StageId);
+ if (stageInfo.Meta.TableKind == ETableKind::Olap) {
+ YQL_ENSURE(task.Meta.Reads->size() == 1,
+ "OLAP scan task must read exactly 1 range from 1 columnshard");
+ YQL_ENSURE(!task.Meta.Writes, "OLAP writes are not supported yet");
+ }
}
bool IsDataExec() {
diff --git a/ydb/core/kqp/host/kqp_run_prepared.cpp b/ydb/core/kqp/host/kqp_run_prepared.cpp
index b5854451bb0..e71a3bacee2 100644
--- a/ydb/core/kqp/host/kqp_run_prepared.cpp
+++ b/ydb/core/kqp/host/kqp_run_prepared.cpp
@@ -39,7 +39,7 @@ public:
Promise = NewPromise();
- if (!Execute(mkql, MkqlExecuteResult.Future)) {
+ if (!Execute(mkql, MkqlExecuteResult.Future)) {
return TStatus::Error;
}
@@ -137,7 +137,7 @@ private:
}
if (TransformCtx->QueryCtx->StatsMode == EKikimrStatsMode::Profile) {
- MkqlExecuteResult.Program = mkql.GetProgramText();
+ MkqlExecuteResult.Program = mkql.GetProgramText();
}
future = Gateway->ExecuteMkqlPrepared(Cluster, mkql.GetProgram(), std::move(execParams),
@@ -193,7 +193,7 @@ private:
ui32 CurrentMkqlIndex;
bool AcquireLocks;
- TMkqlExecuteResult MkqlExecuteResult;
+ TMkqlExecuteResult MkqlExecuteResult;
TPromise<void> Promise;
};
diff --git a/ydb/core/kqp/kqp_ic_gateway.cpp b/ydb/core/kqp/kqp_ic_gateway.cpp
index f1fad812ce3..47489a9bd54 100644
--- a/ydb/core/kqp/kqp_ic_gateway.cpp
+++ b/ydb/core/kqp/kqp_ic_gateway.cpp
@@ -498,10 +498,10 @@ public:
: TBase(request, promise, callback)
, ParamsMap(std::move(paramsMap))
, CompilationPending(false)
- , CompilationRetried(false)
- , AllocCounters(allocCounters)
+ , CompilationRetried(false)
+ , AllocCounters(allocCounters)
, MiniKqlComplileServiceActorId(miniKqlComplileServiceActorId)
- {}
+ {}
void Bootstrap(const TActorContext& ctx) {
auto& mkqlTx = *Request->Record.MutableTransaction()->MutableMiniKQLTransaction();
@@ -653,7 +653,7 @@ private:
bool CompilationRetried;
TString MkqlProgramText;
THashMap<TString, ui64> CompileResolveCookies;
- TAlignedPagePoolCounters AllocCounters;
+ TAlignedPagePoolCounters AllocCounters;
TActorId MiniKqlComplileServiceActorId;
};
@@ -988,7 +988,7 @@ public:
: Cluster(cluster)
, Database(database)
, ActorSystem(actorSystem)
- , NodeId(nodeId)
+ , NodeId(nodeId)
, Counters(counters)
, MetadataLoader(std::move(metadataLoader))
, MkqlComplileService(mkqlComplileService)
@@ -1851,7 +1851,7 @@ public:
}
TFuture<TKqpSnapshotHandle> CreatePersistentSnapshot(const TVector<TString>& tablePaths, TDuration queryTimeout) override {
- auto* snapMgr = CreateKqpSnapshotManager(Database, queryTimeout);
+ auto* snapMgr = CreateKqpSnapshotManager(Database, queryTimeout);
auto snapMgrActorId = RegisterActor(snapMgr);
auto ev = MakeHolder<TEvKqpSnapshot::TEvCreateSnapshotRequest>(tablePaths);
@@ -2101,10 +2101,10 @@ private:
}
}
- if (settings.CollectStats) {
- mkqlTx.SetCollectStats(true);
- }
-
+ if (settings.CollectStats) {
+ mkqlTx.SetCollectStats(true);
+ }
+
return SendMkqlRequest(ev.Release(), std::move(params),
[compileOnly] (TPromise<TMkqlResult> promise, TTransactionResponse&& response) {
try {
@@ -2182,7 +2182,7 @@ private:
response.MiniKQLCompileResults = ev.GetMiniKQLCompileResults();
response.ExecutionEngineEvaluatedResponse.Swap(ev.MutableExecutionEngineEvaluatedResponse());
- response.TxStats = ev.GetTxStats();
+ response.TxStats = ev.GetTxStats();
return response;
}
@@ -2520,19 +2520,19 @@ private:
}
if (metadata->TableSettings.AutoPartitioningByLoad) {
- auto& partitioningSettings = *proto.mutable_partitioning_settings();
- TString value = to_lower(metadata->TableSettings.AutoPartitioningByLoad.GetRef());
- if (value == "enabled") {
- partitioningSettings.set_partitioning_by_load(Ydb::FeatureFlag::ENABLED);
- } else if (value == "disabled") {
- partitioningSettings.set_partitioning_by_load(Ydb::FeatureFlag::DISABLED);
- } else {
- code = Ydb::StatusIds::BAD_REQUEST;
- error = TStringBuilder() << "Unknown feature flag '"
- << metadata->TableSettings.AutoPartitioningByLoad.GetRef()
- << "' for auto partitioning by load";
- return false;
- }
+ auto& partitioningSettings = *proto.mutable_partitioning_settings();
+ TString value = to_lower(metadata->TableSettings.AutoPartitioningByLoad.GetRef());
+ if (value == "enabled") {
+ partitioningSettings.set_partitioning_by_load(Ydb::FeatureFlag::ENABLED);
+ } else if (value == "disabled") {
+ partitioningSettings.set_partitioning_by_load(Ydb::FeatureFlag::DISABLED);
+ } else {
+ code = Ydb::StatusIds::BAD_REQUEST;
+ error = TStringBuilder() << "Unknown feature flag '"
+ << metadata->TableSettings.AutoPartitioningByLoad.GetRef()
+ << "' for auto partitioning by load";
+ return false;
+ }
}
if (metadata->TableSettings.MinPartitions) {
@@ -2642,7 +2642,7 @@ private:
TActorSystem* ActorSystem;
ui32 NodeId;
TKqpRequestCounters::TPtr Counters;
- TAlignedPagePoolCounters AllocCounters;
+ TAlignedPagePoolCounters AllocCounters;
TMaybe<TUserTokenData> UserToken;
std::shared_ptr<IKqpTableMetadataLoader> MetadataLoader;
TActorId MkqlComplileService;
diff --git a/ydb/core/kqp/kqp_metadata_loader.cpp b/ydb/core/kqp/kqp_metadata_loader.cpp
index ecbfb4c6733..167b8df3c42 100644
--- a/ydb/core/kqp/kqp_metadata_loader.cpp
+++ b/ydb/core/kqp/kqp_metadata_loader.cpp
@@ -121,12 +121,12 @@ TTableMetadataResult GetLoadTableMetadataResult(const NSchemeCache::TSchemeCache
tableMeta->SchemaVersion = entry.TableId.SchemaVersion;
if (!tableMeta->SysView.empty()) {
- if (entry.Kind == EKind::KindOlapTable) {
- // NOTE: OLAP sys views for stats are themselves represented by OLAP tables
- tableMeta->Kind = NYql::EKikimrTableKind::Olap;
- } else {
- tableMeta->Kind = NYql::EKikimrTableKind::SysView;
- }
+ if (entry.Kind == EKind::KindOlapTable) {
+ // NOTE: OLAP sys views for stats are themselves represented by OLAP tables
+ tableMeta->Kind = NYql::EKikimrTableKind::Olap;
+ } else {
+ tableMeta->Kind = NYql::EKikimrTableKind::SysView;
+ }
} else {
switch (entry.Kind) {
case EKind::KindTable:
diff --git a/ydb/core/kqp/node/kqp_node.cpp b/ydb/core/kqp/node/kqp_node.cpp
index a652ff93563..24f8f2ac79a 100644
--- a/ydb/core/kqp/node/kqp_node.cpp
+++ b/ydb/core/kqp/node/kqp_node.cpp
@@ -296,20 +296,20 @@ private:
actorSystem->Send(rm, new TEvKqpNode::TEvFinishKqpTask(txId, taskId, success, message));
};
- ETableKind tableKind = ETableKind::Unknown;
- {
- NKikimrTxDataShard::TKqpTransaction::TScanTaskMeta meta;
- if (dqTask.GetMeta().UnpackTo(&meta)) {
- tableKind = (ETableKind)meta.GetTable().GetTableKind();
- if (tableKind == ETableKind::Unknown) {
- // For backward compatibility
- tableKind = meta.GetTable().GetSysViewInfo().empty() ? ETableKind::Datashard : ETableKind::SysView;
- }
- }
- }
-
+ ETableKind tableKind = ETableKind::Unknown;
+ {
+ NKikimrTxDataShard::TKqpTransaction::TScanTaskMeta meta;
+ if (dqTask.GetMeta().UnpackTo(&meta)) {
+ tableKind = (ETableKind)meta.GetTable().GetTableKind();
+ if (tableKind == ETableKind::Unknown) {
+ // For backward compatibility
+ tableKind = meta.GetTable().GetSysViewInfo().empty() ? ETableKind::Datashard : ETableKind::SysView;
+ }
+ }
+ }
+
IActor* computeActor;
- if (tableKind == ETableKind::Datashard || tableKind == ETableKind::Olap) {
+ if (tableKind == ETableKind::Datashard || tableKind == ETableKind::Olap) {
computeActor = CreateKqpScanComputeActor(msg.GetSnapshot(), request.Executer, txId, std::move(dqTask),
nullptr, nullptr, runtimeSettings, memoryLimits, Counters);
taskCtx.ComputeActorId = Register(computeActor);
diff --git a/ydb/core/kqp/prepare/kqp_query_exec.cpp b/ydb/core/kqp/prepare/kqp_query_exec.cpp
index 8e9fc142958..bc2d4e94ce9 100644
--- a/ydb/core/kqp/prepare/kqp_query_exec.cpp
+++ b/ydb/core/kqp/prepare/kqp_query_exec.cpp
@@ -397,39 +397,39 @@ void ExtractQueryStats(NKqpProto::TKqpStatsQuery& dst, const NKikimrQueryStats::
dstComputeTime.SetSum(txStats.GetComputeCpuTimeUsec());
dstComputeTime.SetCnt(1);
- {
- i64 cnt = 0;
- ui64 minCpu = Max<ui64>();
- ui64 maxCpu = 0;
- ui64 sumCpu = 0;
+ {
+ i64 cnt = 0;
+ ui64 minCpu = Max<ui64>();
+ ui64 maxCpu = 0;
+ ui64 sumCpu = 0;
ui64 sumReadSets = 0;
ui64 maxProgramSize = 0;
ui64 maxReplySize = 0;
- for (const auto& perShard : txStats.GetPerShardStats()) {
- ui64 cpu = perShard.GetCpuTimeUsec();
- minCpu = Min(minCpu, cpu);
- maxCpu = Max(maxCpu, cpu);
- sumCpu += cpu;
+ for (const auto& perShard : txStats.GetPerShardStats()) {
+ ui64 cpu = perShard.GetCpuTimeUsec();
+ minCpu = Min(minCpu, cpu);
+ maxCpu = Max(maxCpu, cpu);
+ sumCpu += cpu;
sumReadSets += perShard.GetOutgoingReadSetsCount();
maxProgramSize = Max(maxProgramSize, perShard.GetProgramSize());
maxReplySize = Max(maxReplySize, perShard.GetReplySize());
- ++cnt;
- }
- if (cnt) {
+ ++cnt;
+ }
+ if (cnt) {
auto& dstShardTime = *executionExtraStats.MutableShardsCpuTimeUs();
- dstShardTime.SetMin(minCpu);
- dstShardTime.SetMax(maxCpu);
- dstShardTime.SetSum(sumCpu);
- dstShardTime.SetCnt(cnt);
+ dstShardTime.SetMin(minCpu);
+ dstShardTime.SetMax(maxCpu);
+ dstShardTime.SetSum(sumCpu);
+ dstShardTime.SetCnt(cnt);
dst.SetReadSetsCount(dst.GetReadSetsCount() + sumReadSets);
dst.SetMaxShardProgramSize(Max(dst.GetMaxShardProgramSize(), maxProgramSize));
dst.SetMaxShardReplySize(Max(dst.GetMaxShardReplySize(), maxReplySize));
dstExec.SetCpuTimeUs(dstExec.GetCpuTimeUs() + sumCpu);
- }
- }
-
+ }
+ }
+
ui32 affectedShards = 0;
for (auto& table : txStats.GetTableAccessStats()) {
auto& dstTable = *dstExec.AddTables();
diff --git a/ydb/core/kqp/provider/yql_kikimr_exec.cpp b/ydb/core/kqp/provider/yql_kikimr_exec.cpp
index c68298beb39..ad360fcaebe 100644
--- a/ydb/core/kqp/provider/yql_kikimr_exec.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_exec.cpp
@@ -1020,8 +1020,8 @@ private:
settings.StatsMode = SessionCtx->Query().StatsMode;
auto profile = config->Profile.Get(cluster);
- if (profile && *profile) {
- // Do not disable profiling if it was enabled at request level
+ if (profile && *profile) {
+ // Do not disable profiling if it was enabled at request level
settings.StatsMode = EKikimrStatsMode::Profile;
}
diff --git a/ydb/core/kqp/provider/yql_kikimr_results.cpp b/ydb/core/kqp/provider/yql_kikimr_results.cpp
index bb695b362be..8ec93f416d1 100644
--- a/ydb/core/kqp/provider/yql_kikimr_results.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_results.cpp
@@ -995,80 +995,80 @@ void TransformerStatsFromProto(const NKikimrKqp::TTransformProfile& proto, IGrap
}
void KikimrReadOpStatsToYson(const NKikimrQueryStats::TReadOpStats& opStats, NYson::TYsonWriter& writer) {
- writer.OnBeginMap();
- writer.OnKeyedItem("Count");
- writer.OnUint64Scalar(opStats.GetCount());
- writer.OnKeyedItem("Rows");
- writer.OnUint64Scalar(opStats.GetRows());
- writer.OnKeyedItem("Bytes");
- writer.OnUint64Scalar(opStats.GetBytes());
- writer.OnEndMap();
-}
-
+ writer.OnBeginMap();
+ writer.OnKeyedItem("Count");
+ writer.OnUint64Scalar(opStats.GetCount());
+ writer.OnKeyedItem("Rows");
+ writer.OnUint64Scalar(opStats.GetRows());
+ writer.OnKeyedItem("Bytes");
+ writer.OnUint64Scalar(opStats.GetBytes());
+ writer.OnEndMap();
+}
+
void KikimrWriteOpStatsToYson(const NKikimrQueryStats::TWriteOpStats& opStats, NYson::TYsonWriter& writer) {
- writer.OnBeginMap();
- writer.OnKeyedItem("Count");
- writer.OnUint64Scalar(opStats.GetCount());
- writer.OnKeyedItem("Rows");
- writer.OnUint64Scalar(opStats.GetRows());
- writer.OnKeyedItem("Bytes");
- writer.OnUint64Scalar(opStats.GetBytes());
- writer.OnEndMap();
-}
-
+ writer.OnBeginMap();
+ writer.OnKeyedItem("Count");
+ writer.OnUint64Scalar(opStats.GetCount());
+ writer.OnKeyedItem("Rows");
+ writer.OnUint64Scalar(opStats.GetRows());
+ writer.OnKeyedItem("Bytes");
+ writer.OnUint64Scalar(opStats.GetBytes());
+ writer.OnEndMap();
+}
+
void KikimrTxStatsToYson(const NKikimrQueryStats::TTxStats& txStats, NYson::TYsonWriter& writer) {
- writer.OnBeginMap();
- writer.OnKeyedItem("TableAccessStats");
-
- writer.OnBeginList();
- for (auto& tableStats : txStats.GetTableAccessStats()) {
- writer.OnListItem();
- writer.OnBeginMap();
-
- writer.OnKeyedItem("TableInfo");
- {
- writer.OnBeginMap();
- writer.OnKeyedItem("SchemeshardId");
- writer.OnUint64Scalar(tableStats.GetTableInfo().GetSchemeshardId());
- writer.OnKeyedItem("PathId");
- writer.OnUint64Scalar(tableStats.GetTableInfo().GetPathId());
- writer.OnKeyedItem("Name");
- writer.OnStringScalar(tableStats.GetTableInfo().GetName());
- writer.OnEndMap();
- }
-
- if (tableStats.HasSelectRow()) {
- writer.OnKeyedItem("SelectRow");
- KikimrReadOpStatsToYson(tableStats.GetSelectRow(), writer);
- }
-
- if (tableStats.HasSelectRange()) {
- writer.OnKeyedItem("SelectRange");
- KikimrReadOpStatsToYson(tableStats.GetSelectRange(), writer);
- }
-
- if (tableStats.HasUpdateRow()) {
- writer.OnKeyedItem("UpdateRow");
- KikimrWriteOpStatsToYson(tableStats.GetUpdateRow(), writer);
- }
-
- if (tableStats.HasEraseRow()) {
- writer.OnKeyedItem("EraseRow");
- KikimrWriteOpStatsToYson(tableStats.GetEraseRow(), writer);
- }
-
- writer.OnEndMap();
- }
- writer.OnEndList();
-
- if (txStats.HasDurationUs()) {
- writer.OnKeyedItem("DurationUs");
- writer.OnUint64Scalar(txStats.GetDurationUs());
- }
-
- writer.OnEndMap();
-}
-
+ writer.OnBeginMap();
+ writer.OnKeyedItem("TableAccessStats");
+
+ writer.OnBeginList();
+ for (auto& tableStats : txStats.GetTableAccessStats()) {
+ writer.OnListItem();
+ writer.OnBeginMap();
+
+ writer.OnKeyedItem("TableInfo");
+ {
+ writer.OnBeginMap();
+ writer.OnKeyedItem("SchemeshardId");
+ writer.OnUint64Scalar(tableStats.GetTableInfo().GetSchemeshardId());
+ writer.OnKeyedItem("PathId");
+ writer.OnUint64Scalar(tableStats.GetTableInfo().GetPathId());
+ writer.OnKeyedItem("Name");
+ writer.OnStringScalar(tableStats.GetTableInfo().GetName());
+ writer.OnEndMap();
+ }
+
+ if (tableStats.HasSelectRow()) {
+ writer.OnKeyedItem("SelectRow");
+ KikimrReadOpStatsToYson(tableStats.GetSelectRow(), writer);
+ }
+
+ if (tableStats.HasSelectRange()) {
+ writer.OnKeyedItem("SelectRange");
+ KikimrReadOpStatsToYson(tableStats.GetSelectRange(), writer);
+ }
+
+ if (tableStats.HasUpdateRow()) {
+ writer.OnKeyedItem("UpdateRow");
+ KikimrWriteOpStatsToYson(tableStats.GetUpdateRow(), writer);
+ }
+
+ if (tableStats.HasEraseRow()) {
+ writer.OnKeyedItem("EraseRow");
+ KikimrWriteOpStatsToYson(tableStats.GetEraseRow(), writer);
+ }
+
+ writer.OnEndMap();
+ }
+ writer.OnEndList();
+
+ if (txStats.HasDurationUs()) {
+ writer.OnKeyedItem("DurationUs");
+ writer.OnUint64Scalar(txStats.GetDurationUs());
+ }
+
+ writer.OnEndMap();
+}
+
void KikimrProfileToYson(const NKikimrKqp::TKqlProfile& kqlProfile, NYson::TYsonWriter& writer) {
writer.OnBeginMap();
if (kqlProfile.HasQuery()) {
@@ -1080,16 +1080,16 @@ void KikimrProfileToYson(const NKikimrKqp::TKqlProfile& kqlProfile, NYson::TYson
writer.OnBeginList();
for (auto& mkql : kqlProfile.GetMkqlProfiles()) {
writer.OnListItem();
- writer.OnBeginMap();
- writer.OnKeyedItem("Query");
+ writer.OnBeginMap();
+ writer.OnKeyedItem("Query");
writer.OnStringScalar(mkql.HasQuery() ? mkql.GetQuery() : "");
-
- if (mkql.HasTxStats()) {
- writer.OnKeyedItem("TxStats");
- KikimrTxStatsToYson(mkql.GetTxStats(), writer);
- }
-
- writer.OnEndMap();
+
+ if (mkql.HasTxStats()) {
+ writer.OnKeyedItem("TxStats");
+ KikimrTxStatsToYson(mkql.GetTxStats(), writer);
+ }
+
+ writer.OnEndMap();
}
writer.OnEndList();
diff --git a/ydb/core/kqp/rm/kqp_snapshot_manager.cpp b/ydb/core/kqp/rm/kqp_snapshot_manager.cpp
index 1e28227df0a..e461d85ab90 100644
--- a/ydb/core/kqp/rm/kqp_snapshot_manager.cpp
+++ b/ydb/core/kqp/rm/kqp_snapshot_manager.cpp
@@ -18,9 +18,9 @@ namespace {
class TSnapshotManagerActor: public TActorBootstrapped<TSnapshotManagerActor> {
public:
- TSnapshotManagerActor(const TString& database, TDuration queryTimeout)
- : Database(database)
- , RequestTimeout(queryTimeout)
+ TSnapshotManagerActor(const TString& database, TDuration queryTimeout)
+ : Database(database)
+ , RequestTimeout(queryTimeout)
{}
void Bootstrap() {
@@ -216,7 +216,7 @@ private:
}
void HandleUnexpectedEvent(const TString& state, ui32 eventType) {
- LOG_E("KqpSnapshotManager: unexpected event, state: " << state
+ LOG_E("KqpSnapshotManager: unexpected event, state: " << state
<< ", event type: " << eventType);
ReplyErrorAndDie(NKikimrIssues::TStatusIds::INTERNAL_ERROR, {});
}
@@ -232,7 +232,7 @@ private:
}
private:
- const TString Database;
+ const TString Database;
TVector<TString> Tables;
TActorId ClientActorId;
IKqpGateway::TKqpSnapshot Snapshot;
@@ -252,8 +252,8 @@ private:
} // anonymous namespace
-IActor* CreateKqpSnapshotManager(const TString& database, TDuration queryTimeout) {
- return new TSnapshotManagerActor(database, queryTimeout);
+IActor* CreateKqpSnapshotManager(const TString& database, TDuration queryTimeout) {
+ return new TSnapshotManagerActor(database, queryTimeout);
}
} // namespace NKqp
diff --git a/ydb/core/kqp/rm/kqp_snapshot_manager.h b/ydb/core/kqp/rm/kqp_snapshot_manager.h
index e54be804751..ee56469308b 100644
--- a/ydb/core/kqp/rm/kqp_snapshot_manager.h
+++ b/ydb/core/kqp/rm/kqp_snapshot_manager.h
@@ -47,7 +47,7 @@ struct TEvKqpSnapshot {
};
};
-NActors::IActor* CreateKqpSnapshotManager(const TString& database, TDuration queryTimeout);
+NActors::IActor* CreateKqpSnapshotManager(const TString& database, TDuration queryTimeout);
} // namespace NKqp
} // namespace NKikimr
diff --git a/ydb/core/kqp/ut/common/kqp_ut_common.cpp b/ydb/core/kqp/ut/common/kqp_ut_common.cpp
index 5e91c5fb40a..9bc7889f92f 100644
--- a/ydb/core/kqp/ut/common/kqp_ut_common.cpp
+++ b/ydb/core/kqp/ut/common/kqp_ut_common.cpp
@@ -122,8 +122,8 @@ TKikimrRunner::TKikimrRunner(const TKikimrSettings& settings) {
DriverConfig = NYdb::TDriverConfig()
.SetEndpoint(Endpoint)
- .SetDatabase("/" + settings.DomainRoot)
- .SetDiscoveryMode(NYdb::EDiscoveryMode::Async)
+ .SetDatabase("/" + settings.DomainRoot)
+ .SetDiscoveryMode(NYdb::EDiscoveryMode::Async)
.SetAuthToken(settings.AuthToken);
Driver.Reset(MakeHolder<NYdb::TDriver>(DriverConfig));
@@ -196,16 +196,16 @@ void TKikimrRunner::CreateSampleTables() {
)");
Client->CreateTable("/Root", R"(
- Name: "Logs"
- Columns { Name: "App", Type: "Utf8" }
- Columns { Name: "Message", Type: "Utf8" }
- Columns { Name: "Ts", Type: "Int64" }
- Columns { Name: "Host", Type: "Utf8" }
- KeyColumnNames: ["App", "Ts", "Host"],
- SplitBoundary { KeyPrefix { Tuple { Optional { Text: "a" } } } }
- SplitBoundary { KeyPrefix { Tuple { Optional { Text: "b" } } } }
+ Name: "Logs"
+ Columns { Name: "App", Type: "Utf8" }
+ Columns { Name: "Message", Type: "Utf8" }
+ Columns { Name: "Ts", Type: "Int64" }
+ Columns { Name: "Host", Type: "Utf8" }
+ KeyColumnNames: ["App", "Ts", "Host"],
+ SplitBoundary { KeyPrefix { Tuple { Optional { Text: "a" } } } }
+ SplitBoundary { KeyPrefix { Tuple { Optional { Text: "b" } } } }
)");
-
+
Client->CreateTable("/Root", R"(
Name: "BatchUpload"
Columns {
@@ -237,27 +237,27 @@ void TKikimrRunner::CreateSampleTables() {
)");
// TODO: Reuse driver (YDB-626)
- NYdb::TDriver driver(NYdb::TDriverConfig().SetEndpoint(Endpoint).SetDatabase("/Root"));
+ NYdb::TDriver driver(NYdb::TDriverConfig().SetEndpoint(Endpoint).SetDatabase("/Root"));
NYdb::NTable::TTableClient client(driver);
auto session = client.CreateSession().GetValueSync().GetSession();
AssertSuccessResult(session.ExecuteSchemeQuery(R"(
--!syntax_v1
- CREATE TABLE `KeyValue` (
+ CREATE TABLE `KeyValue` (
Key Uint64,
Value String,
PRIMARY KEY (Key)
);
- CREATE TABLE `KeyValue2` (
+ CREATE TABLE `KeyValue2` (
Key String,
Value String,
PRIMARY KEY (Key)
);
- CREATE TABLE `Test` (
+ CREATE TABLE `Test` (
Group Uint32,
Name String,
Amount Uint64,
@@ -265,7 +265,7 @@ void TKikimrRunner::CreateSampleTables() {
PRIMARY KEY (Group, Name)
);
- CREATE TABLE `Join1` (
+ CREATE TABLE `Join1` (
Key Int32,
Fk21 Uint32,
Fk22 String,
@@ -276,7 +276,7 @@ void TKikimrRunner::CreateSampleTables() {
PARTITION_AT_KEYS = (5)
);
- CREATE TABLE `Join2` (
+ CREATE TABLE `Join2` (
Key1 Uint32,
Key2 String,
Name String,
@@ -291,7 +291,7 @@ void TKikimrRunner::CreateSampleTables() {
AssertSuccessResult(session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO `TwoShard` (Key, Value1, Value2) VALUES
+ REPLACE INTO `TwoShard` (Key, Value1, Value2) VALUES
(1u, "One", -1),
(2u, "Two", 0),
(3u, "Three", 1),
@@ -299,7 +299,7 @@ void TKikimrRunner::CreateSampleTables() {
(4000000002u, "BigTwo", 0),
(4000000003u, "BigThree", 1);
- REPLACE INTO `EightShard` (Key, Text, Data) VALUES
+ REPLACE INTO `EightShard` (Key, Text, Data) VALUES
(101u, "Value1", 1),
(201u, "Value1", 2),
(301u, "Value1", 3),
@@ -325,31 +325,31 @@ void TKikimrRunner::CreateSampleTables() {
(703u, "Value3", 2),
(803u, "Value3", 3);
- REPLACE INTO `KeyValue` (Key, Value) VALUES
+ REPLACE INTO `KeyValue` (Key, Value) VALUES
(1u, "One"),
(2u, "Two");
- REPLACE INTO `KeyValue2` (Key, Value) VALUES
+ REPLACE INTO `KeyValue2` (Key, Value) VALUES
("1", "One"),
("2", "Two");
- REPLACE INTO `Test` (Group, Name, Amount, Comment) VALUES
+ REPLACE INTO `Test` (Group, Name, Amount, Comment) VALUES
(1u, "Anna", 3500ul, "None"),
(1u, "Paul", 300ul, "None"),
(2u, "Tony", 7200ul, "None");
-
- REPLACE INTO `Logs` (App, Ts, Host, Message) VALUES
- ("apache", 0, "front-42", " GET /index.html HTTP/1.1"),
- ("nginx", 1, "nginx-10", "GET /index.html HTTP/1.1"),
- ("nginx", 2, "nginx-23", "PUT /form HTTP/1.1"),
- ("nginx", 3, "nginx-23", "GET /cat.jpg HTTP/1.1"),
+
+ REPLACE INTO `Logs` (App, Ts, Host, Message) VALUES
+ ("apache", 0, "front-42", " GET /index.html HTTP/1.1"),
+ ("nginx", 1, "nginx-10", "GET /index.html HTTP/1.1"),
+ ("nginx", 2, "nginx-23", "PUT /form HTTP/1.1"),
+ ("nginx", 3, "nginx-23", "GET /cat.jpg HTTP/1.1"),
("kikimr-db", 1, "kikimr-db-10", "Write Data"),
("kikimr-db", 2, "kikimr-db-21", "Read Data"),
("kikimr-db", 3, "kikimr-db-21", "Stream Read Data"),
("kikimr-db", 4, "kikimr-db-53", "Discover"),
- ("ydb", 0, "ydb-1000", "some very very very very long string");
+ ("ydb", 0, "ydb-1000", "some very very very very long string");
- REPLACE INTO `Join1` (Key, Fk21, Fk22, Value) VALUES
+ REPLACE INTO `Join1` (Key, Fk21, Fk22, Value) VALUES
(1, 101, "One", "Value1"),
(2, 102, "Two", "Value1"),
(3, 103, "One", "Value2"),
@@ -360,7 +360,7 @@ void TKikimrRunner::CreateSampleTables() {
(8, 108, "One", "Value5"),
(9, 101, "Two", "Value1");
- REPLACE INTO `Join2` (Key1, Key2, Name, Value2) VALUES
+ REPLACE INTO `Join2` (Key1, Key2, Name, Value2) VALUES
(101, "One", "Name1", "Value21"),
(101, "Two", "Name1", "Value22"),
(101, "Three", "Name3", "Value23"),
@@ -402,8 +402,8 @@ void TKikimrRunner::Initialize(const TKikimrSettings& settings) {
Client->InitRootScheme(settings.DomainRoot);
- NKikimr::NKqp::WaitForKqpProxyInit(GetDriver());
-
+ NKikimr::NKqp::WaitForKqpProxyInit(GetDriver());
+
if (settings.WithSampleTables) {
CreateSampleTables();
}
@@ -820,13 +820,13 @@ void CreateSampleTablesWithIndex(TSession& session) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO `KeyValue` (Key, Value) VALUES
+ REPLACE INTO `KeyValue` (Key, Value) VALUES
(3u, "Three"),
(4u, "Four"),
(10u, "Ten"),
(NULL, "Null Value");
- REPLACE INTO `Test` (Group, Name, Amount, Comment) VALUES
+ REPLACE INTO `Test` (Group, Name, Amount, Comment) VALUES
(1u, "Jack", 100500ul, "Just Jack"),
(3u, "Harry", 5600ul, "Not Potter"),
(3u, "Joshua", 8202ul, "Very popular name in GB"),
@@ -857,25 +857,25 @@ void CreateSampleTablesWithIndex(TSession& session) {
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
}
-void WaitForKqpProxyInit(const NYdb::TDriver& driver) {
- NYdb::NTable::TTableClient client(driver);
-
- while (true) {
+void WaitForKqpProxyInit(const NYdb::TDriver& driver) {
+ NYdb::NTable::TTableClient client(driver);
+
+ while (true) {
auto it = client.RetryOperationSync([=](TSession session) {
return session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
SELECT 1;
)",
- TTxControl::BeginTx().CommitTx()
- ).GetValueSync();
- });
-
- if (it.IsSuccess()) {
- break;
- }
- Sleep(TDuration::MilliSeconds(100));
- }
-}
-
+ TTxControl::BeginTx().CommitTx()
+ ).GetValueSync();
+ });
+
+ if (it.IsSuccess()) {
+ break;
+ }
+ Sleep(TDuration::MilliSeconds(100));
+ }
+}
+
 } // namespace NKqp
} // namespace NKikimr
diff --git a/ydb/core/kqp/ut/common/kqp_ut_common.h b/ydb/core/kqp/ut/common/kqp_ut_common.h
index 6257596d6fb..c1d64b65c3e 100644
--- a/ydb/core/kqp/ut/common/kqp_ut_common.h
+++ b/ydb/core/kqp/ut/common/kqp_ut_common.h
@@ -138,7 +138,7 @@ public:
private:
void Initialize(const TKikimrSettings& settings);
- void WaitForKqpProxyInit();
+ void WaitForKqpProxyInit();
void CreateSampleTables();
private:
@@ -233,10 +233,10 @@ inline void AssertSuccessResult(const NYdb::TStatus& result) {
void CreateSampleTablesWithIndex(NYdb::NTable::TSession& session);
-// KQP proxy needs to asynchronously receive tenants info before it is able to serve requests that have
-// database name specified. Before that it returns errors.
-// This method retries a simple query until it succeeds.
-void WaitForKqpProxyInit(const NYdb::TDriver& driver);
-
+// KQP proxy needs to asynchronously receive tenants info before it is able to serve requests that have
+// database name specified. Before that it returns errors.
+// This method retries a simple query until it succeeds.
+void WaitForKqpProxyInit(const NYdb::TDriver& driver);
+
} // namespace NKqp
} // namespace NKikimr
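// Editor's note (not part of the original patch): WaitForKqpProxyInit() declared above is the
// helper the comment describes -- it retries `SELECT 1` until the KQP proxy has received tenant
// info and stops returning errors (see its definition in kqp_ut_common.cpp earlier in this
// patch). A hypothetical caller that constructs its own driver, rather than going through
// TKikimrRunner (which already calls the helper from Initialize()), could use it the same way;
// `endpoint` below is a placeholder:
//
//   NYdb::TDriver driver(NYdb::TDriverConfig().SetEndpoint(endpoint).SetDatabase("/Root"));
//   NKikimr::NKqp::WaitForKqpProxyInit(driver);   // block until the proxy can serve queries
//   NYdb::NTable::TTableClient client(driver);    // safe to issue queries from here on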
diff --git a/ydb/core/kqp/ut/kqp_explain_ut.cpp b/ydb/core/kqp/ut/kqp_explain_ut.cpp
index f051f6314ad..c3a06836367 100644
--- a/ydb/core/kqp/ut/kqp_explain_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_explain_ut.cpp
@@ -26,7 +26,7 @@ void CreateSampleTables(TKikimrRunner& kikimr) {
auto session = tableClient.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
+ REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
(1u, "Value-001", "1"),
(2u, "Value-002", "2"),
(101u, "Value-101", "101"),
@@ -53,7 +53,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
+ SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -80,7 +80,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
+ SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -106,7 +106,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- --SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
+ --SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
PRAGMA Kikimr.UseNewEngine = "false";
DECLARE $value as Utf8;
SELECT $value as value;
@@ -123,7 +123,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT min(Message), max(Message) FROM `/Root/Logs` WHERE Ts > 1 and Ts <= 4 or App="ydb" GROUP BY App;
+ SELECT min(Message), max(Message) FROM `/Root/Logs` WHERE Ts > 1 and Ts <= 4 or App="ydb" GROUP BY App;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -159,14 +159,14 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
auto it = db.StreamExecuteScanQuery(R"(
$join = (
SELECT l.Key as Key, l.Text as Text, l.Data as Data, r.Value1 as Value1, r.Value2 as Value2
- FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
);
SELECT Key, COUNT(*) AS Cnt
FROM $join
WHERE Cast(Data As Int64) < (Key - 100) and Value1 != 'Value-101'
GROUP BY Key
UNION ALL
- (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
+ (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -195,7 +195,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Data LIMIT 4;
+ SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Data LIMIT 4;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -224,7 +224,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
auto it = db.StreamExecuteScanQuery(R"(
PRAGMA Kikimr.OptEnablePredicateExtract = "false";
- SELECT * FROM `/Root/Logs` WHERE App = "new_app_1" AND Host < "xyz" AND Ts = (42+7) Limit 10;
+ SELECT * FROM `/Root/Logs` WHERE App = "new_app_1" AND Host < "xyz" AND Ts = (42+7) Limit 10;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -251,7 +251,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Text;
+ SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Text;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -273,7 +273,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/EightShard` ORDER BY Text LIMIT 10 OFFSET 15;
+ SELECT * FROM `/Root/EightShard` ORDER BY Text LIMIT 10 OFFSET 15;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -299,14 +299,14 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
auto it = db.StreamExecuteScanQuery(R"(
$foo = (
SELECT t1.Key AS Key
- FROM `/Root/KeyValue` AS t1
- JOIN `/Root/KeyValue` AS t2
+ FROM `/Root/KeyValue` AS t1
+ JOIN `/Root/KeyValue` AS t2
ON t1.Key = t2.Key
GROUP BY t1.Key
);
SELECT t1.Key AS Key
FROM $foo AS Foo
- JOIN `/Root/KeyValue` AS t1
+ JOIN `/Root/KeyValue` AS t1
ON t1.Key = Foo.Key
ORDER BY Key
)", settings).GetValueSync();
@@ -359,7 +359,7 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
settings.Explain(true);
auto it = db.StreamExecuteScanQuery(R"(
- select count(*) from `/Root/KeyValue` AS t1 join `/Root/KeyValue` AS t2 on t1.Key = t2.Key;
+ select count(*) from `/Root/KeyValue` AS t1 join `/Root/KeyValue` AS t2 on t1.Key = t2.Key;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -457,9 +457,9 @@ Y_UNIT_TEST_SUITE(KqpExplain) {
auto result = session.ExplainDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPDATE `/Root/EightShard` SET Data=Data+1;
- UPDATE `/Root/EightShard` SET Data=Data-1 WHERE Key In (100,200,300);
- DELETE FROM `/Root/EightShard` WHERE Key > 350;
+ UPDATE `/Root/EightShard` SET Data=Data+1;
+ UPDATE `/Root/EightShard` SET Data=Data-1 WHERE Key In (100,200,300);
+ DELETE FROM `/Root/EightShard` WHERE Key > 350;
)").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_flip_join_ut.cpp b/ydb/core/kqp/ut/kqp_flip_join_ut.cpp
index 00833183bae..593e1a1876d 100644
--- a/ydb/core/kqp/ut/kqp_flip_join_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_flip_join_ut.cpp
@@ -7,39 +7,39 @@ using namespace NYdb::NTable;
static void CreateSampleTables(TSession session) {
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/FJ_Table_1] (
+ CREATE TABLE [/Root/FJ_Table_1] (
Key Int32, Fk2 Int32, Fk3 Int32, Value String,
PRIMARY KEY (Key)
);
- CREATE TABLE [/Root/FJ_Table_2] (
+ CREATE TABLE [/Root/FJ_Table_2] (
Key Int32, Fk3 Int32, Fk1 Int32, Value String,
PRIMARY KEY (Key)
);
- CREATE TABLE [/Root/FJ_Table_3] (
+ CREATE TABLE [/Root/FJ_Table_3] (
Key Int32, Fk1 Int32, Fk2 Int32, Value String,
PRIMARY KEY (Key)
);
- CREATE TABLE [/Root/FJ_Table_4] (
+ CREATE TABLE [/Root/FJ_Table_4] (
Key Int32, Value String,
PRIMARY KEY (Key)
);
)").GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/FJ_Table_1] (Key, Fk2, Fk3, Value) VALUES
+ REPLACE INTO [/Root/FJ_Table_1] (Key, Fk2, Fk3, Value) VALUES
(1, 101, 1001, "Value11"),
(2, 102, 1002, "Value12"),
(3, 103, 1003, "Value13"),
(4, 104, 1004, "Value14");
- REPLACE INTO [/Root/FJ_Table_2] (Key, Fk3, Fk1, Value) VALUES
+ REPLACE INTO [/Root/FJ_Table_2] (Key, Fk3, Fk1, Value) VALUES
(101, 1001, 1, "Value21"),
(102, 1002, 2, "Value22");
- REPLACE INTO [/Root/FJ_Table_3] (Key, Fk1, Fk2, Value) VALUES
+ REPLACE INTO [/Root/FJ_Table_3] (Key, Fk1, Fk2, Value) VALUES
(1001, 1, 101, "Value31"),
(1002, 2, 102, "Value32"),
(1003, 3, 103, "Value33"),
(1005, 5, 105, "Value35");
- REPLACE INTO [/Root/FJ_Table_4] (Key, Value) VALUES
+ REPLACE INTO [/Root/FJ_Table_4] (Key, Value) VALUES
(1, "Value4_1"),
(101, "Value4_101"),
(1001, "Value4_1001");
@@ -81,8 +81,8 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
PRAGMA Kikimr.UseNewEngine = 'false';
%s
SELECT t1.Value, t2.Value
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
ORDER BY t1.Value, t2.Value
)", FormatPragma(disableFlip));
@@ -114,9 +114,9 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
const TString query = Sprintf(R"(
%s
SELECT t1.Value, t2.Value, t3.Value
- FROM `/Root/FJ_Table_3` AS t1
- INNER JOIN `/Root/FJ_Table_2` AS t2 ON t1.Key = t2.Fk3
- INNER JOIN `/Root/FJ_Table_4` AS t3 ON t2.Key = t3.Key
+ FROM `/Root/FJ_Table_3` AS t1
+ INNER JOIN `/Root/FJ_Table_2` AS t2 ON t1.Key = t2.Fk3
+ INNER JOIN `/Root/FJ_Table_4` AS t3 ON t2.Key = t3.Key
ORDER BY t1.Value, t2.Value, t3.Value
)", FormatPragma(disableFlip));
@@ -151,11 +151,11 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t1.Value AS Value1, t2.Value AS Value2, t1.Fk3 AS Fk
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
);
SELECT t.Value1, t.Value2, t3.Value
- FROM [/Root/FJ_Table_3] AS t3
+ FROM [/Root/FJ_Table_3] AS t3
INNER JOIN $join AS t ON t3.Key = t.Fk
ORDER BY t.Value1, t.Value2, t3.Value
)", FormatPragma(disableFlip)));
@@ -190,8 +190,8 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
const TString query = Q_(Sprintf(R"(
%s
SELECT t1.Value
- FROM [/Root/FJ_Table_1] AS t1
- LEFT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ LEFT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
ORDER BY t1.Value
)", FormatLeftSemiPragma(disableFlip)));
@@ -222,9 +222,9 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
const TString query = Q_(Sprintf(R"(
%s
SELECT t1.Key, t1.Value
- FROM [/Root/FJ_Table_1] AS t1
- LEFT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
- LEFT SEMI JOIN [/Root/FJ_Table_3] AS t3 ON t1.Key = t3.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ LEFT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ LEFT SEMI JOIN [/Root/FJ_Table_3] AS t3 ON t1.Key = t3.Fk1
ORDER BY t1.Key, t1.Value
)", FormatLeftSemiPragma(disableFlip)));
@@ -258,11 +258,11 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t1.Value AS Value1, t2.Value AS Value2, t1.Fk3 AS Fk
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
);
SELECT t3.Value
- FROM [/Root/FJ_Table_3] AS t3
+ FROM [/Root/FJ_Table_3] AS t3
LEFT SEMI JOIN $join AS t ON t3.Key = t.Fk
ORDER BY t3.Value
)", FormatLeftSemiPragma(disableFlip)));
@@ -297,8 +297,8 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
const TString query = Q_(Sprintf(R"(
%s
SELECT t2.Value
- FROM [/Root/FJ_Table_1] AS t1
- RIGHT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ RIGHT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
ORDER BY t2.Value
)", FormatPragma(disableFlip)));
@@ -329,9 +329,9 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
const TString query = Q_(Sprintf(R"(
%s
SELECT t3.Key, t3.Value
- FROM [/Root/FJ_Table_1] AS t1
- RIGHT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
- RIGHT SEMI JOIN [/Root/FJ_Table_3] AS t3 ON t2.Key = t3.Fk2
+ FROM [/Root/FJ_Table_1] AS t1
+ RIGHT SEMI JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ RIGHT SEMI JOIN [/Root/FJ_Table_3] AS t3 ON t2.Key = t3.Fk2
ORDER BY t3.Key, t3.Value
)", FormatPragma(disableFlip)));
@@ -366,11 +366,11 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t1.Value AS Value1, t2.Value AS Value2, t1.Fk3 AS Fk3
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
);
SELECT t.Value1, t.Value2
- FROM [/Root/FJ_Table_3] AS t3
+ FROM [/Root/FJ_Table_3] AS t3
RIGHT SEMI JOIN $join AS t ON t3.Key = t.Fk3
ORDER BY t.Value1, t.Value2
)", FormatPragma(disableFlip)));
@@ -406,8 +406,8 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
PRAGMA Kikimr.UseNewEngine = 'false';
%s
SELECT t2.Value
- FROM [/Root/FJ_Table_1] AS t1
- RIGHT JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ RIGHT JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
ORDER BY t2.Value
)", FormatPragma(disableFlip));
@@ -439,9 +439,9 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
PRAGMA Kikimr.UseNewEngine = 'false';
%s
SELECT t3.Key, t3.Value
- FROM [/Root/FJ_Table_1] AS t1
- RIGHT JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
- RIGHT JOIN [/Root/FJ_Table_3] AS t3 ON t2.Key = t3.Fk2
+ FROM [/Root/FJ_Table_1] AS t1
+ RIGHT JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk1
+ RIGHT JOIN [/Root/FJ_Table_3] AS t3 ON t2.Key = t3.Fk2
ORDER BY t3.Key, t3.Value
)", FormatPragma(disableFlip));
@@ -477,11 +477,11 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t1.Value AS Value1, t2.Value AS Value2, t1.Fk3 AS Fk3
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
);
SELECT t.Value1, t.Value2
- FROM [/Root/FJ_Table_3] AS t3
+ FROM [/Root/FJ_Table_3] AS t3
RIGHT JOIN $join AS t ON t3.Key = t.Fk3
ORDER BY t.Value1, t.Value2
)", FormatPragma(disableFlip));
@@ -517,8 +517,8 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
PRAGMA Kikimr.UseNewEngine = 'false';
%s
SELECT t2.Value
- FROM [/Root/FJ_Table_3] AS t1
- RIGHT ONLY JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk3
+ FROM [/Root/FJ_Table_3] AS t1
+ RIGHT ONLY JOIN [/Root/FJ_Table_2] AS t2 ON t1.Key = t2.Fk3
ORDER BY t2.Value
)", FormatPragma(disableFlip));
@@ -551,12 +551,12 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t2.Key AS Key, t2.Fk1 AS Fk1, t2.Fk2 AS Fk2, t2.Value AS Value
- FROM [/Root/FJ_Table_1] AS t1
- RIGHT ONLY JOIN [/Root/FJ_Table_3] AS t2 ON t1.Key = t2.Fk1
+ FROM [/Root/FJ_Table_1] AS t1
+ RIGHT ONLY JOIN [/Root/FJ_Table_3] AS t2 ON t1.Key = t2.Fk1
);
SELECT t3.Key, t3.Value
FROM $join AS t
- INNER JOIN [/Root/FJ_Table_2] AS t3 ON t.Fk2 = t3.Key
+ INNER JOIN [/Root/FJ_Table_2] AS t3 ON t.Fk2 = t3.Key
ORDER BY t3.Key, t3.Value
)", FormatPragma(disableFlip));
@@ -591,11 +591,11 @@ Y_UNIT_TEST_SUITE(KqpFlipJoin) {
%s
$join = (
SELECT t1.Value AS Value1, t2.Value AS Value2, t1.Fk3 AS Fk3
- FROM [/Root/FJ_Table_1] AS t1
- INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
+ FROM [/Root/FJ_Table_1] AS t1
+ INNER JOIN [/Root/FJ_Table_2] AS t2 ON t1.Fk2 = t2.Key
);
SELECT t.Value1, t.Value2
- FROM [/Root/FJ_Table_4] AS t3
+ FROM [/Root/FJ_Table_4] AS t3
RIGHT ONLY JOIN $join AS t ON t3.Key = t.Fk3
ORDER BY t.Value1, t.Value2
)", FormatPragma(disableFlip));
diff --git a/ydb/core/kqp/ut/kqp_join_ut.cpp b/ydb/core/kqp/ut/kqp_join_ut.cpp
index b5ff1e7197e..8fa59d31b36 100644
--- a/ydb/core/kqp/ut/kqp_join_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_join_ut.cpp
@@ -821,10 +821,10 @@ Y_UNIT_TEST_SUITE(KqpJoin) {
)"), TTxControl::BeginTx().CommitTx()).GetValueSync().IsSuccess());
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT t1.Value, t2.Value, t3.Value FROM `/Root/Join1_1` AS t1
- INNER JOIN `/Root/Join1_2` AS t2
+ SELECT t1.Value, t2.Value, t3.Value FROM `/Root/Join1_1` AS t1
+ INNER JOIN `/Root/Join1_2` AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
- LEFT JOIN `/Root/Join1_3_ui64` AS t3
+ LEFT JOIN `/Root/Join1_3_ui64` AS t3
ON t2.Key1 = t3.Value
WHERE t1.Value == "Value5";
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
diff --git a/ydb/core/kqp/ut/kqp_locks_ut.cpp b/ydb/core/kqp/ut/kqp_locks_ut.cpp
index 23d4dab8106..bd9d9ede5b2 100644
--- a/ydb/core/kqp/ut/kqp_locks_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_locks_ut.cpp
@@ -39,7 +39,7 @@ Y_UNIT_TEST_SUITE(KqpLocks) {
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(HasIssue(result.GetIssues(), NYql::TIssuesIds::KIKIMR_LOCKS_INVALIDATED,
[] (const NYql::TIssue& issue) {
- return issue.Message.Contains("/Root/Test");
+ return issue.Message.Contains("/Root/Test");
}));
result = session2.ExecuteDataQuery(Q_(R"(
@@ -78,7 +78,7 @@ Y_UNIT_TEST_SUITE(KqpLocks) {
commitResult.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(HasIssue(commitResult.GetIssues(), NYql::TIssuesIds::KIKIMR_LOCKS_INVALIDATED,
[] (const NYql::TIssue& issue) {
- return issue.Message.Contains("/Root/Test");
+ return issue.Message.Contains("/Root/Test");
}));
result = session2.ExecuteDataQuery(Q_(R"(
@@ -96,7 +96,7 @@ Y_UNIT_TEST_SUITE(KqpLocks) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
auto result = session1.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Test` WHERE Group = 1;
+ SELECT * FROM `/Root/Test` WHERE Group = 1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -104,7 +104,7 @@ Y_UNIT_TEST_SUITE(KqpLocks) {
UNIT_ASSERT(tx1);
result = session2.ExecuteDataQuery(Q_(R"(
- UPSERT INTO `/Root/Test` (Group, Name, Comment)
+ UPSERT INTO `/Root/Test` (Group, Name, Comment)
VALUES (2U, "Paul", "Changed");
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
diff --git a/ydb/core/kqp/ut/kqp_newengine_flowcontrol_ut.cpp b/ydb/core/kqp/ut/kqp_newengine_flowcontrol_ut.cpp
index be704509c03..16c89eceb58 100644
--- a/ydb/core/kqp/ut/kqp_newengine_flowcontrol_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_newengine_flowcontrol_ut.cpp
@@ -29,7 +29,7 @@ void CreateSampleTables(TKikimrRunner& kikimr) {
auto session = tableClient.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
+ REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
(1u, "Value-001", "1"),
(2u, "Value-002", "2"),
(101u, "Value-101", "101"),
@@ -75,10 +75,10 @@ void DoFlowControlTest(ui64 limit, bool hasBlockedByCapacity) {
.ProfileMode(NExperimental::EStreamQueryProfileMode::Full);
auto result = db.ExecuteStreamQuery(R"(
- $r = (select * from `/Root/FourShard` where Key > 201);
+ $r = (select * from `/Root/FourShard` where Key > 201);
SELECT l.Key as key, l.Text as text, r.Value1 as value
- FROM `/Root/EightShard` AS l JOIN $r AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN $r AS r ON l.Key = r.Key
ORDER BY key, text, value
)", settings).GetValueSync();
@@ -137,7 +137,7 @@ void SlowClient() {
for (int q = 0; q < 100; ++q) {
TStringBuilder query;
- query << "REPLACE INTO [/Root/KeyValue] (Key, Value) VALUES (" << q << ", \"" << value << "\")";
+ query << "REPLACE INTO [/Root/KeyValue] (Key, Value) VALUES (" << q << ", \"" << value << "\")";
auto result = session.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -146,7 +146,7 @@ void SlowClient() {
NExperimental::TStreamQueryClient db(kikimr.GetDriver());
- auto it = db.ExecuteStreamQuery("SELECT Key, Value FROM `/Root/KeyValue`").GetValueSync();
+ auto it = db.ExecuteStreamQuery("SELECT Key, Value FROM `/Root/KeyValue`").GetValueSync();
auto part = it.ReadNext().GetValueSync();
auto counters = kikimr.GetTestServer().GetRuntime()->GetAppData(0).Counters;
diff --git a/ydb/core/kqp/ut/kqp_newengine_ut.cpp b/ydb/core/kqp/ut/kqp_newengine_ut.cpp
index bf6e6ac6ea8..3737687d8d6 100644
--- a/ydb/core/kqp/ut/kqp_newengine_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_newengine_ut.cpp
@@ -26,7 +26,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT Value1, Value2, Key FROM [/Root/TwoShard] WHERE Value2 != 0 ORDER BY Key DESC;
+ SELECT Value1, Value2, Key FROM [/Root/TwoShard] WHERE Value2 != 0 ORDER BY Key DESC;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -60,7 +60,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $key AS Uint64;
- SELECT * FROM `/Root/EightShard` WHERE Key = $key;
+ SELECT * FROM `/Root/EightShard` WHERE Key = $key;
)";
auto explainResult = session.ExplainDataQuery(query).GetValueSync();
@@ -115,7 +115,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $group AS Uint32?;
DECLARE $name AS String?;
- SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
+ SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
)";
auto explainResult = session.ExplainDataQuery(query).GetValueSync();
@@ -162,7 +162,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $low AS Uint64;
DECLARE $high AS Uint64;
- SELECT * FROM [/Root/EightShard] WHERE Key > $low AND Key < $high ORDER BY Key;
+ SELECT * FROM [/Root/EightShard] WHERE Key > $low AND Key < $high ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -196,7 +196,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $low AS Uint64;
DECLARE $high AS Uint64;
- SELECT * FROM [/Root/EightShard] WHERE Key >= $low AND Key <= $high ORDER BY Key;
+ SELECT * FROM [/Root/EightShard] WHERE Key >= $low AND Key <= $high ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -228,7 +228,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $low AS Uint64;
- SELECT * FROM [/Root/EightShard] WHERE Key > $low ORDER BY Key;
+ SELECT * FROM [/Root/EightShard] WHERE Key > $low ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -260,7 +260,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $high AS Uint64;
- SELECT * FROM [/Root/EightShard] WHERE Key < $high ORDER BY Key;
+ SELECT * FROM [/Root/EightShard] WHERE Key < $high ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -280,8 +280,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value2 = 0 ORDER BY Value1 DESC, Key;
- SELECT * FROM [/Root/Test] WHERE Group = 1 ORDER BY Amount, Group, Name;
+ SELECT * FROM [/Root/TwoShard] WHERE Value2 = 0 ORDER BY Value1 DESC, Key;
+ SELECT * FROM [/Root/Test] WHERE Group = 1 ORDER BY Amount, Group, Name;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -297,7 +297,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/Test] WHERE Group = 2 ORDER BY Amount, Group, Name;
+ SELECT * FROM [/Root/Test] WHERE Group = 2 ORDER BY Amount, Group, Name;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -313,8 +313,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- $left = (select Key, Value1, Value2 from [/Root/TwoShard] where Value2 = 1);
- $right = (select Key, Value1, Value2 from [/Root/TwoShard] where Value2 = -1);
+ $left = (select Key, Value1, Value2 from [/Root/TwoShard] where Value2 = 1);
+ $right = (select Key, Value1, Value2 from [/Root/TwoShard] where Value2 = -1);
select Key, Value1, Value2 from $left order by Key;
select Key, Value1, Value2 from $right order by Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -338,14 +338,14 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard]
- SELECT Key, Value1, Value2 + 1 AS Value2 FROM [/Root/TwoShard];
+ UPSERT INTO [/Root/TwoShard]
+ SELECT Key, Value1, Value2 + 1 AS Value2 FROM [/Root/TwoShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -368,14 +368,14 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard]
- SELECT Key - 3u AS Key, Value1, Value2 + 100 AS Value2 FROM [/Root/TwoShard];
+ UPSERT INTO [/Root/TwoShard]
+ SELECT Key - 3u AS Key, Value1, Value2 + 100 AS Value2 FROM [/Root/TwoShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value2 > 10 ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Value2 > 10 ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -478,7 +478,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
+ UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
(10u, "One", -10),
(20u, "Two", -20);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -486,7 +486,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value2 <= -10 ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Value2 <= -10 ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -526,7 +526,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $key2 AS Uint32;
DECLARE $value2 AS String;
- UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES
+ UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES
($key1, $value1),
($key2, $value2);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(),
@@ -536,7 +536,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value1 = "New" ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Value1 = "New" ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -579,14 +579,14 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $items AS 'List<Struct<Key:Uint32?, Value1:String?>>';
- UPSERT INTO [/Root/TwoShard]
+ UPSERT INTO [/Root/TwoShard]
SELECT * FROM AS_TABLE($items);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), std::move(params)).ExtractValueSync();
AssertSuccessResult(result);
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value1 = "New" ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Value1 = "New" ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -681,7 +681,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT Data, SUM(Key) AS Total FROM [/Root/EightShard] GROUP BY Data ORDER BY Data;
+ SELECT Data, SUM(Key) AS Total FROM [/Root/EightShard] GROUP BY Data ORDER BY Data;
)", TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -697,7 +697,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT Data, Text, COUNT(Key) AS Total FROM [/Root/EightShard] GROUP BY Data, Text ORDER BY Data, Text;
+ SELECT Data, Text, COUNT(Key) AS Total FROM [/Root/EightShard] GROUP BY Data, Text ORDER BY Data, Text;
)", TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
AssertSuccessResult(result);
@@ -761,9 +761,9 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
SELECT 1;
- SELECT Key FROM [/Root/TwoShard] ORDER BY Key DESC LIMIT 1;
+ SELECT Key FROM [/Root/TwoShard] ORDER BY Key DESC LIMIT 1;
SELECT 2;
- SELECT Key FROM [/Root/EightShard] ORDER BY Key ASC LIMIT 1;
+ SELECT Key FROM [/Root/EightShard] ORDER BY Key ASC LIMIT 1;
)", TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
CompareYson(R"([[1]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -779,7 +779,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key = 1;
+ SELECT * FROM [/Root/TwoShard] WHERE Key = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
AssertSuccessResult(result);
@@ -788,13 +788,13 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(1, "NewValue");
+ UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(1, "NewValue");
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync();
AssertSuccessResult(result);
result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key = 2;
+ SELECT * FROM [/Root/TwoShard] WHERE Key = 2;
)", TTxControl::Tx(*tx).CommitTx()).GetValueSync();
if (kikimr.IsUsingSnapshotReads()) {
@@ -814,7 +814,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard];
+ SELECT * FROM [/Root/TwoShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
AssertSuccessResult(result);
@@ -823,13 +823,13 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(101, "NewValue");
+ UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(101, "NewValue");
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync();
AssertSuccessResult(result);
result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
)", TTxControl::Tx(*tx).CommitTx()).GetValueSync();
if (kikimr.IsUsingSnapshotReads()) {
@@ -849,7 +849,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard];
+ SELECT * FROM [/Root/TwoShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
AssertSuccessResult(result);
@@ -857,7 +857,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
)", TTxControl::Tx(*tx).CommitTx()).GetValueSync();
AssertSuccessResult(result);
}
@@ -869,7 +869,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key = 1;
+ SELECT * FROM [/Root/TwoShard] WHERE Key = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
AssertSuccessResult(result);
@@ -878,13 +878,13 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(1, "NewValue");
+ UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES(1, "NewValue");
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync();
AssertSuccessResult(result);
result = session1.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard] (Key,Value1) VALUES(2, "NewValue");
+ UPSERT INTO [/Root/TwoShard] (Key,Value1) VALUES(2, "NewValue");
)", TTxControl::Tx(*tx).CommitTx()).GetValueSync();
UNIT_ASSERT(!result.IsSuccess());
result.GetIssues().PrintTo(Cerr);
@@ -893,7 +893,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key <= 2;
+ SELECT * FROM [/Root/TwoShard] WHERE Key <= 2;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync();
AssertSuccessResult(result);
@@ -908,7 +908,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM `/Root/KeyValue`
+ SELECT * FROM `/Root/KeyValue`
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -930,7 +930,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard]
+ SELECT * FROM [/Root/TwoShard]
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -952,7 +952,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM `/Root/KeyValue`
+ SELECT * FROM `/Root/KeyValue`
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -962,7 +962,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO `/Root/KeyValue` (Key, Value)
+ UPSERT INTO `/Root/KeyValue` (Key, Value)
VALUES (3u, "Three")
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -989,7 +989,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM `/Root/TwoShard`
+ SELECT * FROM `/Root/TwoShard`
)", TTxControl::BeginTx(TTxSettings::SerializableRW())).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -999,7 +999,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2)
+ UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2)
VALUES (4u, "Four", 4)
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1027,9 +1027,9 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM `/Root/TwoShard` WHERE Key = 4000000001u; -- read 2nd shard
+ SELECT * FROM `/Root/TwoShard` WHERE Key = 4000000001u; -- read 2nd shard
- UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2) VALUES -- write 1st shard
+ UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2) VALUES -- write 1st shard
(11u, "Eleven", 11);
)", TTxControl::BeginTx()).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -1040,7 +1040,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto session2 = db.CreateSession().GetValueSync().GetSession();
result = session2.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2) VALUES -- write 2nd shard
+ UPSERT INTO `/Root/TwoShard` (Key, Value1, Value2) VALUES -- write 2nd shard
(4000000001u, "XXX", -101)
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1052,7 +1052,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT Key, Value1, Value2 FROM `/Root/TwoShard` WHERE Key = 11u
+ SELECT Key, Value1, Value2 FROM `/Root/TwoShard` WHERE Key = 11u
)", TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
CompareYson(R"([])", FormatResultSetYson(result.GetResultSet(0)));
@@ -1066,8 +1066,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPSERT INTO [/Root/TwoShard]
- SELECT Key + 1u AS Key, Value1 FROM [/Root/TwoShard];
+ UPSERT INTO [/Root/TwoShard]
+ SELECT Key + 1u AS Key, Value1 FROM [/Root/TwoShard];
)", TTxControl::BeginTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1089,14 +1089,14 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $key AS Uint32;
DECLARE $value AS String;
- UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES
+ UPSERT INTO [/Root/TwoShard] (Key, Value1) VALUES
($key, $value);
)", TTxControl::Tx(*tx), std::move(params)).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT COUNT(*) FROM [/Root/TwoShard];
+ SELECT COUNT(*) FROM [/Root/TwoShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([[6u]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -1106,7 +1106,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1133,7 +1133,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/EightShard] WHERE Key = 101 OR Key = 301
+ SELECT * FROM [/Root/EightShard] WHERE Key = 101 OR Key = 301
ORDER BY Key;
)", TTxControl::BeginTx().CommitTx(), execSettings).GetValueSync();
@@ -1168,7 +1168,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
DECLARE $key AS Uint64;
- SELECT * FROM [/Root/EightShard] WHERE Key = $key + 1;
+ SELECT * FROM [/Root/EightShard] WHERE Key = $key + 1;
)", TTxControl::BeginTx().CommitTx(), params, execSettings).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1207,7 +1207,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
PRAGMA kikimr.UseNewEngine = "true";
DECLARE $Key AS UInt32;
- UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
+ UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
($Key, "One", -10)
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params, execSettings).ExtractValueSync();
AssertSuccessResult(result);
@@ -1227,7 +1227,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Value2 <= -10 ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Value2 <= -10 ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
AssertSuccessResult(result);
@@ -1249,7 +1249,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1266,7 +1266,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO [/Root/TwoShard] (Value1, Key) VALUES
+ REPLACE INTO [/Root/TwoShard] (Value1, Key) VALUES
("Newvalue 1", 1u),
("Newvalue 5", 5u);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -1275,7 +1275,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key <= 5 ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Key <= 5 ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -1289,7 +1289,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO `/Root/Logs` (App, Ts, Host, Message) VALUES
+ REPLACE INTO `/Root/Logs` (App, Ts, Host, Message) VALUES
("new_app_1", 100, "new_app_host_1.search.yandex.net", "Initialize"),
("new_app_1", 200, "new_app_host_2.search.yandex.net", "Initialized");
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -1298,7 +1298,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM `/Root/Logs` WHERE App = "new_app_1" ORDER BY App, Ts, Host;
+ SELECT * FROM `/Root/Logs` WHERE App = "new_app_1" ORDER BY App, Ts, Host;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -1310,7 +1310,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO `/Root/Logs` (App, Host, Message) VALUES
+ REPLACE INTO `/Root/Logs` (App, Host, Message) VALUES
("new_app_2", "host_2_1", "Empty"),
("new_app_2", "host_2_2", "Empty");
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -1326,8 +1326,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
PRAGMA kikimr.UseNewEngine = "true";
SELECT t1.Key AS Key, t2.Value2 AS Value
- FROM [/Root/KeyValue] AS t1
- INNER JOIN [/Root/Join2] AS t2
+ FROM [/Root/KeyValue] AS t1
+ INNER JOIN [/Root/Join2] AS t2
ON t1.Value = t2.Key2
WHERE t2.Name == "Name1"
ORDER BY Key, Value;
@@ -1386,12 +1386,12 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
$input = (
SELECT Key, CAST(Fk21 AS Uint32) AS Fk21
- FROM [/Root/Join1] WHERE Value == "Value1"
+ FROM [/Root/Join1] WHERE Value == "Value1"
);
SELECT t1.Key AS Key, t2.Value2 AS Value
FROM $input AS t1
- INNER JOIN [/Root/Join2] AS t2
+ INNER JOIN [/Root/Join2] AS t2
ON t1.Fk21 = t2.Key1
ORDER BY Key, Value;
)", TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
@@ -1428,8 +1428,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
// add nulls
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES (4u, "Four"), (NULL, "Null");
- REPLACE INTO `/Root/Join2` (Key1, Key2, Name, Value2) VALUES (1, NULL, "Name Null", "Value Null");
+ REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES (4u, "Four"), (NULL, "Null");
+ REPLACE INTO `/Root/Join2` (Key1, Key2, Name, Value2) VALUES (1, NULL, "Name Null", "Value Null");
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1437,8 +1437,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
PRAGMA kikimr.UseNewEngine = "true";
SELECT Key1, Key2, Name, Value2
- FROM `/Root/Join2` AS t1
- LEFT SEMI JOIN `/Root/KeyValue` AS t2
+ FROM `/Root/Join2` AS t1
+ LEFT SEMI JOIN `/Root/KeyValue` AS t2
ON t1.Key2 == t2.Value
ORDER BY Key1, Key2, Name;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
@@ -1556,7 +1556,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPDATE [/Root/TwoShard]
+ UPDATE [/Root/TwoShard]
SET Value1 = "Updated"
WHERE Value2 = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), execSettings).ExtractValueSync();
@@ -1577,7 +1577,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1603,7 +1603,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
PRAGMA kikimr.UseNewEngine = "true";
PRAGMA kikimr.OptEnableInplaceUpdate = "true";
- UPDATE [/Root/TwoShard]
+ UPDATE [/Root/TwoShard]
SET Value1 = "Updated"
WHERE Value2 = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), execSettings).ExtractValueSync();
@@ -1620,7 +1620,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1645,7 +1645,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- DELETE FROM [/Root/TwoShard]
+ DELETE FROM [/Root/TwoShard]
WHERE Value2 = -1;
)", TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1668,7 +1668,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1691,7 +1691,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- DELETE FROM [/Root/TwoShard] ON
+ DELETE FROM [/Root/TwoShard] ON
SELECT * FROM [/Root/TwoShard] WHERE Value2 = 1;
)", TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1714,7 +1714,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1737,8 +1737,8 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- UPDATE [/Root/TwoShard] SET Value1 = "Updated" WHERE Value2 = 1;
- UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
+ UPDATE [/Root/TwoShard] SET Value1 = "Updated" WHERE Value2 = 1;
+ UPSERT INTO [/Root/TwoShard] (Key, Value1, Value2) VALUES
(4u, "Four", 4);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), execSettings).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1746,7 +1746,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1792,7 +1792,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
DECLARE $data AS List<Struct<Key: Uint32, Value1: String, Value2: Int32>>;
- UPSERT INTO `/Root/TwoShard`
+ UPSERT INTO `/Root/TwoShard`
SELECT Key, Value1, Value2 FROM AS_TABLE($data)
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params, execSettings).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1800,7 +1800,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UseNewEngine = "true";
- SELECT * FROM [/Root/TwoShard] WHERE Key > 5 AND Key < 12 ORDER BY Key;
+ SELECT * FROM [/Root/TwoShard] WHERE Key > 5 AND Key < 12 ORDER BY Key;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -1822,7 +1822,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
declare $key as UInt64;
declare $text as String;
- update `/Root/EightShard` set Text = $text where Key = $key
+ update `/Root/EightShard` set Text = $text where Key = $key
)";
auto params = TParamsBuilder()
@@ -1856,7 +1856,7 @@ Y_UNIT_TEST_SUITE(KqpNewEngine) {
it = session.ExecuteDataQuery(R"(
pragma kikimr.UseNewEngine = "true";
- select Key, Text, Data from `/Root/EightShard` where Text = "foo" order by Key
+ select Key, Text, Data from `/Root/EightShard` where Text = "foo" order by Key
)",TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_olap_ut.cpp b/ydb/core/kqp/ut/kqp_olap_ut.cpp
index e084b58fbaf..908fc34a13d 100644
--- a/ydb/core/kqp/ut/kqp_olap_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_olap_ut.cpp
@@ -1,271 +1,271 @@
#include <ydb/core/kqp/ut/common/kqp_ut_common.h>
#include <ydb/public/sdk/cpp/client/draft/ydb_long_tx.h>
-
+
#include <ydb/core/sys_view/service/query_history.h>
#include <ydb/core/tx/columnshard/columnshard_ut_common.h>
-
+
#include <contrib/libs/apache/arrow/cpp/src/arrow/api.h>
#include <contrib/libs/apache/arrow/cpp/src/arrow/ipc/writer.h>
-
-namespace NKikimr {
-namespace NKqp {
-
-using namespace NYdb;
-using namespace NYdb::NTable;
-using namespace NYdb::NScheme;
-
-Y_UNIT_TEST_SUITE(KqpOlap) {
- void EnableDebugLogging(TKikimrRunner& kikimr) {
- // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY_SCHEME_CACHE, NActors::NLog::PRI_DEBUG);
- // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::SCHEME_BOARD_REPLICA, NActors::NLog::PRI_DEBUG);
- // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
- // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPUTE, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_GATEWAY, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_RESOURCE_MANAGER, NActors::NLog::PRI_DEBUG);
+
+namespace NKikimr {
+namespace NKqp {
+
+using namespace NYdb;
+using namespace NYdb::NTable;
+using namespace NYdb::NScheme;
+
+Y_UNIT_TEST_SUITE(KqpOlap) {
+ void EnableDebugLogging(TKikimrRunner& kikimr) {
+ // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY_SCHEME_CACHE, NActors::NLog::PRI_DEBUG);
+ // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::SCHEME_BOARD_REPLICA, NActors::NLog::PRI_DEBUG);
+ // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_PROXY, NActors::NLog::PRI_DEBUG);
+ // kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPUTE, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_GATEWAY, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_RESOURCE_MANAGER, NActors::NLog::PRI_DEBUG);
kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::LONG_TX_SERVICE, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD_SCAN, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD_SCAN, NActors::NLog::PRI_DEBUG);
kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_OLAPSHARD, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
- }
-
- void CreateTestOlapTable(TKikimrRunner& kikimr, TString tableName = "olapTable") {
- auto& legacyClient = kikimr.GetTestClient();
-
- legacyClient.CreateOlapStore("/Root", R"(
- Name: "olapStore"
- ColumnShardCount: 4
- SchemaPresets {
- Name: "default"
- Schema {
- Columns { Name: "timestamp" Type: "Timestamp" }
- #Columns { Name: "resource_type" Type: "Utf8" }
- Columns { Name: "resource_id" Type: "Utf8" }
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
+ }
+
+ void CreateTestOlapTable(TKikimrRunner& kikimr, TString tableName = "olapTable") {
+ auto& legacyClient = kikimr.GetTestClient();
+
+ legacyClient.CreateOlapStore("/Root", R"(
+ Name: "olapStore"
+ ColumnShardCount: 4
+ SchemaPresets {
+ Name: "default"
+ Schema {
+ Columns { Name: "timestamp" Type: "Timestamp" }
+ #Columns { Name: "resource_type" Type: "Utf8" }
+ Columns { Name: "resource_id" Type: "Utf8" }
Columns { Name: "uid" Type: "Utf8" }
Columns { Name: "level" Type: "Int32" }
- Columns { Name: "message" Type: "Utf8" }
- #Columns { Name: "json_payload" Type: "Json" }
- #Columns { Name: "ingested_at" Type: "Timestamp" }
- #Columns { Name: "saved_at" Type: "Timestamp" }
- #Columns { Name: "request_id" Type: "Utf8" }
- KeyColumnNames: "timestamp"
+ Columns { Name: "message" Type: "Utf8" }
+ #Columns { Name: "json_payload" Type: "Json" }
+ #Columns { Name: "ingested_at" Type: "Timestamp" }
+ #Columns { Name: "saved_at" Type: "Timestamp" }
+ #Columns { Name: "request_id" Type: "Utf8" }
+ KeyColumnNames: "timestamp"
Engine: COLUMN_ENGINE_REPLACING_TIMESERIES
- }
- }
- )");
- legacyClient.CreateOlapTable("/Root/olapStore", Sprintf(R"(
- Name: "%s"
- ColumnShardCount: 3
+ }
+ }
+ )");
+ legacyClient.CreateOlapTable("/Root/olapStore", Sprintf(R"(
+ Name: "%s"
+ ColumnShardCount: 3
Sharding {
HashSharding {
Function: HASH_FUNCTION_CLOUD_LOGS
Columns: ["timestamp", "uid"]
}
- })", tableName.c_str()));
-
- legacyClient.Ls("/Root");
- legacyClient.Ls("/Root/olapStore");
- legacyClient.Ls("/Root/olapStore/" + tableName);
- }
-
- std::shared_ptr<arrow::RecordBatch> TestArrowBatch(ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
- auto schema = std::make_shared<arrow::Schema>(
- std::vector<std::shared_ptr<arrow::Field>>{
- arrow::field("timestamp", arrow::timestamp(arrow::TimeUnit::TimeUnit::MICRO)),
- arrow::field("resource_id", arrow::utf8()),
+ })", tableName.c_str()));
+
+ legacyClient.Ls("/Root");
+ legacyClient.Ls("/Root/olapStore");
+ legacyClient.Ls("/Root/olapStore/" + tableName);
+ }
+
+ std::shared_ptr<arrow::RecordBatch> TestArrowBatch(ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
+ auto schema = std::make_shared<arrow::Schema>(
+ std::vector<std::shared_ptr<arrow::Field>>{
+ arrow::field("timestamp", arrow::timestamp(arrow::TimeUnit::TimeUnit::MICRO)),
+ arrow::field("resource_id", arrow::utf8()),
arrow::field("uid", arrow::utf8()),
arrow::field("level", arrow::int32()),
- arrow::field("message", arrow::utf8())
- });
-
+ arrow::field("message", arrow::utf8())
+ });
+
arrow::TimestampBuilder b1(arrow::timestamp(arrow::TimeUnit::TimeUnit::MICRO), arrow::default_memory_pool());
arrow::StringBuilder b2;
- arrow::StringBuilder b3;
+ arrow::StringBuilder b3;
arrow::Int32Builder b4;
arrow::StringBuilder b5;
-
- for (size_t i = 0; i < rowCount; ++i) {
- std::string uid("uid_" + std::to_string(tsBegin + i));
- std::string message("some prefix " + std::string(1024 + i % 200, 'x'));
+
+ for (size_t i = 0; i < rowCount; ++i) {
+ std::string uid("uid_" + std::to_string(tsBegin + i));
+ std::string message("some prefix " + std::string(1024 + i % 200, 'x'));
Y_VERIFY(b1.Append(tsBegin + i).ok());
Y_VERIFY(b2.Append(std::to_string(pathIdBegin + i)).ok());
- Y_VERIFY(b3.Append(uid).ok());
+ Y_VERIFY(b3.Append(uid).ok());
Y_VERIFY(b4.Append(i % 5).ok());
Y_VERIFY(b5.Append(message).ok());
- }
-
+ }
+
std::shared_ptr<arrow::TimestampArray> a1;
std::shared_ptr<arrow::StringArray> a2;
- std::shared_ptr<arrow::StringArray> a3;
+ std::shared_ptr<arrow::StringArray> a3;
std::shared_ptr<arrow::Int32Array> a4;
std::shared_ptr<arrow::StringArray> a5;
-
- Y_VERIFY(b1.Finish(&a1).ok());
- Y_VERIFY(b2.Finish(&a2).ok());
- Y_VERIFY(b3.Finish(&a3).ok());
+
+ Y_VERIFY(b1.Finish(&a1).ok());
+ Y_VERIFY(b2.Finish(&a2).ok());
+ Y_VERIFY(b3.Finish(&a3).ok());
Y_VERIFY(b4.Finish(&a4).ok());
Y_VERIFY(b5.Finish(&a5).ok());
-
+
return arrow::RecordBatch::Make(schema, rowCount, { a1, a2, a3, a4, a5 });
- }
-
- TString TestBlob(ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
- auto batch = TestArrowBatch(pathIdBegin, tsBegin, rowCount);
- int64_t size;
- auto status = arrow::ipc::GetRecordBatchSize(*batch, &size);
- Y_VERIFY(status.ok());
-
- TString buf;
- buf.resize(size);
- auto writer = arrow::Buffer::GetWriter(arrow::MutableBuffer::Wrap(&buf[0], size));
- Y_VERIFY(writer.ok());
-
- // UNCOMPRESSED
- status = SerializeRecordBatch(*batch, arrow::ipc::IpcWriteOptions::Defaults(), (*writer).get());
- Y_VERIFY(status.ok());
- return buf;
- }
-
- void WriteTestData(TKikimrRunner& kikimr, TString testTable, ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
- NYdb::NLongTx::TClient client(kikimr.GetDriver());
-
+ }
+
+ TString TestBlob(ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
+ auto batch = TestArrowBatch(pathIdBegin, tsBegin, rowCount);
+ int64_t size;
+ auto status = arrow::ipc::GetRecordBatchSize(*batch, &size);
+ Y_VERIFY(status.ok());
+
+ TString buf;
+ buf.resize(size);
+ auto writer = arrow::Buffer::GetWriter(arrow::MutableBuffer::Wrap(&buf[0], size));
+ Y_VERIFY(writer.ok());
+
+ // UNCOMPRESSED
+ status = SerializeRecordBatch(*batch, arrow::ipc::IpcWriteOptions::Defaults(), (*writer).get());
+ Y_VERIFY(status.ok());
+ return buf;
+ }
+
+ void WriteTestData(TKikimrRunner& kikimr, TString testTable, ui64 pathIdBegin, ui64 tsBegin, size_t rowCount) {
+ NYdb::NLongTx::TClient client(kikimr.GetDriver());
+
NLongTx::TLongTxBeginResult resBeginTx = client.BeginWriteTx().GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(resBeginTx.Status().GetStatus(), EStatus::SUCCESS, resBeginTx.Status().GetIssues().ToString());
-
+ UNIT_ASSERT_VALUES_EQUAL_C(resBeginTx.Status().GetStatus(), EStatus::SUCCESS, resBeginTx.Status().GetIssues().ToString());
+
auto txId = resBeginTx.GetResult().tx_id();
- TString data = TestBlob(pathIdBegin, tsBegin, rowCount);
-
- NLongTx::TLongTxWriteResult resWrite =
- client.Write(txId, testTable, txId, data, Ydb::LongTx::Data::APACHE_ARROW).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(resWrite.Status().GetStatus(), EStatus::SUCCESS, resWrite.Status().GetIssues().ToString());
-
+ TString data = TestBlob(pathIdBegin, tsBegin, rowCount);
+
+ NLongTx::TLongTxWriteResult resWrite =
+ client.Write(txId, testTable, txId, data, Ydb::LongTx::Data::APACHE_ARROW).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(resWrite.Status().GetStatus(), EStatus::SUCCESS, resWrite.Status().GetIssues().ToString());
+
NLongTx::TLongTxCommitResult resCommitTx = client.CommitTx(txId).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(resCommitTx.Status().GetStatus(), EStatus::SUCCESS, resCommitTx.Status().GetIssues().ToString());
- }
-
- TVector<THashMap<TString, NYdb::TValue>> CollectRows(NYdb::NTable::TScanQueryPartIterator& it) {
- TVector<THashMap<TString, NYdb::TValue>> rows;
-
- for (;;) {
- auto streamPart = it.ReadNext().GetValueSync();
- if (!streamPart.IsSuccess()) {
- UNIT_ASSERT_C(streamPart.EOS(), streamPart.GetIssues().ToString());
- break;
- }
-
- UNIT_ASSERT_C(streamPart.HasResultSet() || streamPart.HasQueryStats(),
- "Unexpected empty scan query response.");
-
- if (streamPart.HasResultSet()) {
- auto resultSet = streamPart.ExtractResultSet();
- NYdb::TResultSetParser rsParser(resultSet);
- while (rsParser.TryNextRow()) {
- THashMap<TString, NYdb::TValue> row;
- for (size_t ci = 0; ci < resultSet.ColumnsCount(); ++ci) {
- row.emplace(resultSet.GetColumnsMeta()[ci].Name, rsParser.GetValue(ci));
- }
- rows.emplace_back(std::move(row));
- }
- }
- }
- return rows;
- }
-
- void PrintValue(IOutputStream& out, const NYdb::TValue& v) {
- NYdb::TValueParser value(v);
-
- while (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
- if (value.IsNull()) {
- out << "<NULL>";
- return;
- } else {
- value.OpenOptional();
- }
- }
-
- switch (value.GetPrimitiveType()) {
- case NYdb::EPrimitiveType::Uint32: {
- out << value.GetUint32();
- break;
- }
- case NYdb::EPrimitiveType::Uint64: {
- out << value.GetUint64();
- break;
- }
- case NYdb::EPrimitiveType::Utf8: {
- out << value.GetUtf8();
- break;
- }
- case NYdb::EPrimitiveType::Timestamp: {
- out << value.GetTimestamp();
- break;
- }
- default: {
-                UNIT_ASSERT_C(false, "PrintValue not implemented for this type");
- }
- }
- }
-
- void PrintRow(IOutputStream& out, const THashMap<TString, NYdb::TValue>& fields) {
- for (const auto& f : fields) {
- out << f.first << ": ";
- PrintValue(out, f.second);
- out << " ";
- }
- }
-
- void PrintRows(IOutputStream& out, const TVector<THashMap<TString, NYdb::TValue>>& rows) {
- for (const auto& r : rows) {
- PrintRow(out, r);
- out << "\n";
- }
- }
-
- TVector<THashMap<TString, NYdb::TValue>> ExecuteScanQuery(NYdb::NTable::TTableClient& tableClient, const TString& query) {
- Cerr << "====================================\n"
- << "QUERY:\n" << query
- << "\n\nRESULT:\n";
-
- TStreamExecScanQuerySettings scanSettings;
- auto it = tableClient.StreamExecuteScanQuery(query, scanSettings).GetValueSync();
- auto rows = CollectRows(it);
-
- PrintRows(Cerr, rows);
- Cerr << "\n";
-
- return rows;
- }
-
- ui64 GetUint32(const NYdb::TValue& v) {
- NYdb::TValueParser value(v);
- if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
- return *value.GetOptionalUint32();
- } else {
- return value.GetUint32();
- }
- }
-
- ui64 GetUint64(const NYdb::TValue& v) {
- NYdb::TValueParser value(v);
- if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
- return *value.GetOptionalUint64();
- } else {
- return value.GetUint64();
- }
- }
-
- TInstant GetTimestamp(const NYdb::TValue& v) {
- NYdb::TValueParser value(v);
- if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
- return *value.GetOptionalTimestamp();
- } else {
- return value.GetTimestamp();
- }
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(resCommitTx.Status().GetStatus(), EStatus::SUCCESS, resCommitTx.Status().GetIssues().ToString());
+ }
+
+ TVector<THashMap<TString, NYdb::TValue>> CollectRows(NYdb::NTable::TScanQueryPartIterator& it) {
+ TVector<THashMap<TString, NYdb::TValue>> rows;
+
+ for (;;) {
+ auto streamPart = it.ReadNext().GetValueSync();
+ if (!streamPart.IsSuccess()) {
+ UNIT_ASSERT_C(streamPart.EOS(), streamPart.GetIssues().ToString());
+ break;
+ }
+
+ UNIT_ASSERT_C(streamPart.HasResultSet() || streamPart.HasQueryStats(),
+ "Unexpected empty scan query response.");
+
+ if (streamPart.HasResultSet()) {
+ auto resultSet = streamPart.ExtractResultSet();
+ NYdb::TResultSetParser rsParser(resultSet);
+ while (rsParser.TryNextRow()) {
+ THashMap<TString, NYdb::TValue> row;
+ for (size_t ci = 0; ci < resultSet.ColumnsCount(); ++ci) {
+ row.emplace(resultSet.GetColumnsMeta()[ci].Name, rsParser.GetValue(ci));
+ }
+ rows.emplace_back(std::move(row));
+ }
+ }
+ }
+ return rows;
+ }
+
+ void PrintValue(IOutputStream& out, const NYdb::TValue& v) {
+ NYdb::TValueParser value(v);
+
+ while (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
+ if (value.IsNull()) {
+ out << "<NULL>";
+ return;
+ } else {
+ value.OpenOptional();
+ }
+ }
+
+ switch (value.GetPrimitiveType()) {
+ case NYdb::EPrimitiveType::Uint32: {
+ out << value.GetUint32();
+ break;
+ }
+ case NYdb::EPrimitiveType::Uint64: {
+ out << value.GetUint64();
+ break;
+ }
+ case NYdb::EPrimitiveType::Utf8: {
+ out << value.GetUtf8();
+ break;
+ }
+ case NYdb::EPrimitiveType::Timestamp: {
+ out << value.GetTimestamp();
+ break;
+ }
+ default: {
+                UNIT_ASSERT_C(false, "PrintValue not implemented for this type");
+ }
+ }
+ }
+
+ void PrintRow(IOutputStream& out, const THashMap<TString, NYdb::TValue>& fields) {
+ for (const auto& f : fields) {
+ out << f.first << ": ";
+ PrintValue(out, f.second);
+ out << " ";
+ }
+ }
+
+ void PrintRows(IOutputStream& out, const TVector<THashMap<TString, NYdb::TValue>>& rows) {
+ for (const auto& r : rows) {
+ PrintRow(out, r);
+ out << "\n";
+ }
+ }
+
+ TVector<THashMap<TString, NYdb::TValue>> ExecuteScanQuery(NYdb::NTable::TTableClient& tableClient, const TString& query) {
+ Cerr << "====================================\n"
+ << "QUERY:\n" << query
+ << "\n\nRESULT:\n";
+
+ TStreamExecScanQuerySettings scanSettings;
+ auto it = tableClient.StreamExecuteScanQuery(query, scanSettings).GetValueSync();
+ auto rows = CollectRows(it);
+
+ PrintRows(Cerr, rows);
+ Cerr << "\n";
+
+ return rows;
+ }
+
+ ui64 GetUint32(const NYdb::TValue& v) {
+ NYdb::TValueParser value(v);
+ if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
+ return *value.GetOptionalUint32();
+ } else {
+ return value.GetUint32();
+ }
+ }
+
+ ui64 GetUint64(const NYdb::TValue& v) {
+ NYdb::TValueParser value(v);
+ if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
+ return *value.GetOptionalUint64();
+ } else {
+ return value.GetUint64();
+ }
+ }
+
+ TInstant GetTimestamp(const NYdb::TValue& v) {
+ NYdb::TValueParser value(v);
+ if (value.GetKind() == NYdb::TTypeParser::ETypeKind::Optional) {
+ return *value.GetOptionalTimestamp();
+ } else {
+ return value.GetTimestamp();
+ }
+ }
+
void CreateTableOfAllTypes(TKikimrRunner& kikimr) {
auto& legacyClient = kikimr.GetTestClient();
@@ -399,101 +399,101 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
};
}
- Y_UNIT_TEST(SimpleQueryOlap) {
+ Y_UNIT_TEST(SimpleQueryOlap) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- // EnableDebugLogging(kikimr);
-
- CreateTestOlapTable(kikimr);
-
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
-
- auto client = kikimr.GetTableClient();
-
- // EnableDebugLogging(kikimr);
-
- {
- auto it = client.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT `resource_id`, `timestamp`
- FROM `/Root/olapStore/olapTable`
- ORDER BY `resource_id`, `timestamp`
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
- CompareYson(result, R"([[["0"];[1000000u]];[["1"];[1000001u]]])");
- }
- }
-
- Y_UNIT_TEST(SimpleLookupOlap) {
+
+ // EnableDebugLogging(kikimr);
+
+ CreateTestOlapTable(kikimr);
+
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
+
+ auto client = kikimr.GetTableClient();
+
+ // EnableDebugLogging(kikimr);
+
+ {
+ auto it = client.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT `resource_id`, `timestamp`
+ FROM `/Root/olapStore/olapTable`
+ ORDER BY `resource_id`, `timestamp`
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
+ CompareYson(result, R"([[["0"];[1000000u]];[["1"];[1000001u]]])");
+ }
+ }
+
+ Y_UNIT_TEST(SimpleLookupOlap) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- // EnableDebugLogging(kikimr);
-
- CreateTestOlapTable(kikimr);
-
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
-
- auto client = kikimr.GetTableClient();
-
- {
- auto it = client.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT `resource_id`, `timestamp`
- FROM `/Root/olapStore/olapTable`
- WHERE `timestamp` == CAST(1000000 AS Timestamp)
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
+
+ // EnableDebugLogging(kikimr);
+
+ CreateTestOlapTable(kikimr);
+
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
+
+ auto client = kikimr.GetTableClient();
+
+ {
+ auto it = client.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT `resource_id`, `timestamp`
+ FROM `/Root/olapStore/olapTable`
+ WHERE `timestamp` == CAST(1000000 AS Timestamp)
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
CompareYson(result, R"([[["0"];[1000000u]]])");
- }
- }
-
- Y_UNIT_TEST(SimpleRangeOlap) {
+ }
+ }
+
+ Y_UNIT_TEST(SimpleRangeOlap) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- // EnableDebugLogging(kikimr);
-
- CreateTestOlapTable(kikimr);
-
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
-
- auto client = kikimr.GetTableClient();
-
- {
- auto it = client.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT `resource_id`, `timestamp`
- FROM `/Root/olapStore/olapTable`
- WHERE `timestamp` >= CAST(1000000 AS Timestamp)
- AND `timestamp` <= CAST(2000000 AS Timestamp)
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
+
+ // EnableDebugLogging(kikimr);
+
+ CreateTestOlapTable(kikimr);
+
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
+
+ auto client = kikimr.GetTableClient();
+
+ {
+ auto it = client.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT `resource_id`, `timestamp`
+ FROM `/Root/olapStore/olapTable`
+ WHERE `timestamp` >= CAST(1000000 AS Timestamp)
+ AND `timestamp` <= CAST(2000000 AS Timestamp)
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
CompareYson(result, R"([[["0"];[1000000u]];[["1"];[1000001u]]])");
- }
- }
-
+ }
+ }
+
Y_UNIT_TEST(CompositeRangeOlap) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
@@ -628,64 +628,64 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
}
}
- void CreateSampleOltpTable(TKikimrRunner& kikimr) {
- kikimr.GetTestClient().CreateTable("/Root", R"(
- Name: "OltpTable"
- Columns { Name: "Key", Type: "Uint64" }
- Columns { Name: "Value1", Type: "String" }
- Columns { Name: "Value2", Type: "String" }
- KeyColumnNames: ["Key"]
- )");
-
- TTableClient tableClient{kikimr.GetDriver()};
- auto session = tableClient.CreateSession().GetValueSync().GetSession();
-
- auto result = session.ExecuteDataQuery(R"(
- REPLACE INTO `/Root/OltpTable` (Key, Value1, Value2) VALUES
- (1u, "Value-001", "1"),
- (2u, "Value-002", "2"),
- (42u, "Value-002", "2"),
- (101u, "Value-101", "101")
- )", TTxControl::BeginTx().CommitTx()).GetValueSync();
-
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
-
- session.Close();
- }
-
- Y_UNIT_TEST(QueryOltpAndOlap) {
+ void CreateSampleOltpTable(TKikimrRunner& kikimr) {
+ kikimr.GetTestClient().CreateTable("/Root", R"(
+ Name: "OltpTable"
+ Columns { Name: "Key", Type: "Uint64" }
+ Columns { Name: "Value1", Type: "String" }
+ Columns { Name: "Value2", Type: "String" }
+ KeyColumnNames: ["Key"]
+ )");
+
+ TTableClient tableClient{kikimr.GetDriver()};
+ auto session = tableClient.CreateSession().GetValueSync().GetSession();
+
+ auto result = session.ExecuteDataQuery(R"(
+ REPLACE INTO `/Root/OltpTable` (Key, Value1, Value2) VALUES
+ (1u, "Value-001", "1"),
+ (2u, "Value-002", "2"),
+ (42u, "Value-002", "2"),
+ (101u, "Value-101", "101")
+ )", TTxControl::BeginTx().CommitTx()).GetValueSync();
+
+ UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
+
+ session.Close();
+ }
+
+ Y_UNIT_TEST(QueryOltpAndOlap) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- // EnableDebugLogging(kikimr);
-
- auto client = kikimr.GetTableClient();
-
- CreateTestOlapTable(kikimr);
-
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 3);
-
- CreateSampleOltpTable(kikimr);
-
- {
- auto it = client.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT a.`resource_id`, a.`timestamp`, t.*
- FROM `/Root/OltpTable` AS t
- JOIN `/Root/olapStore/olapTable` AS a ON CAST(t.Key AS Utf8) = a.resource_id
- ORDER BY a.`resource_id`, a.`timestamp`
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
- CompareYson(result, R"([[[1u];["Value-001"];["1"];["1"];[1000001u]];[[2u];["Value-002"];["2"];["2"];[1000002u]]])");
- }
- }
-
+
+ // EnableDebugLogging(kikimr);
+
+ auto client = kikimr.GetTableClient();
+
+ CreateTestOlapTable(kikimr);
+
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 3);
+
+ CreateSampleOltpTable(kikimr);
+
+ {
+ auto it = client.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT a.`resource_id`, a.`timestamp`, t.*
+ FROM `/Root/OltpTable` AS t
+ JOIN `/Root/olapStore/olapTable` AS a ON CAST(t.Key AS Utf8) = a.resource_id
+ ORDER BY a.`resource_id`, a.`timestamp`
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
+ CompareYson(result, R"([[[1u];["Value-001"];["1"];["1"];[1000001u]];[[2u];["Value-002"];["2"];["2"];[1000002u]]])");
+ }
+ }
+
Y_UNIT_TEST(EmptyRange) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
@@ -709,79 +709,79 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
CompareYson(StreamResultToYson(it), "[]");
}
- Y_UNIT_TEST(Aggregation) {
+ Y_UNIT_TEST(Aggregation) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- // EnableDebugLogging(kikimr);
-
- CreateTestOlapTable(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
-
- // EnableDebugLogging(kikimr);
-
- {
- auto it = tableClient.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT
- COUNT(*)
- FROM `/Root/olapStore/olapTable`
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
- CompareYson(result, R"([[0u;]])");
- }
-
- // EnableDebugLogging(kikimr);
-
- {
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 10000, 3000000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 11000, 3001000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 12000, 3002000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 13000, 3003000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 14000, 3004000, 1000);
+
+ // EnableDebugLogging(kikimr);
+
+ CreateTestOlapTable(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+
+ // EnableDebugLogging(kikimr);
+
+ {
+ auto it = tableClient.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT
+ COUNT(*)
+ FROM `/Root/olapStore/olapTable`
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
+ CompareYson(result, R"([[0u;]])");
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 10000, 3000000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 11000, 3001000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 12000, 3002000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 13000, 3003000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 14000, 3004000, 1000);
WriteTestData(kikimr, "/Root/olapStore/olapTable", 20000, 2000000, 7000);
WriteTestData(kikimr, "/Root/olapStore/olapTable", 30000, 1000000, 11000);
- }
-
- // EnableDebugLogging(kikimr);
-
- {
- auto it = tableClient.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT
- COUNT(*), MAX(`resource_id`), MAX(`timestamp`), MIN(LENGTH(`message`))
- FROM `/Root/olapStore/olapTable`
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
- CompareYson(result, R"([[23000u;["40999"];[3004999u];[1036u]]])");
- }
-
- {
- auto it = tableClient.StreamExecuteScanQuery(R"(
- --!syntax_v1
-
- SELECT
- COUNT(*)
- FROM `/Root/olapStore/olapTable`
- )").GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- TString result = StreamResultToYson(it);
- Cout << result << Endl;
- CompareYson(result, R"([[23000u;]])");
- }
- }
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ {
+ auto it = tableClient.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT
+ COUNT(*), MAX(`resource_id`), MAX(`timestamp`), MIN(LENGTH(`message`))
+ FROM `/Root/olapStore/olapTable`
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
+ CompareYson(result, R"([[23000u;["40999"];[3004999u];[1036u]]])");
+ }
+
+ {
+ auto it = tableClient.StreamExecuteScanQuery(R"(
+ --!syntax_v1
+
+ SELECT
+ COUNT(*)
+ FROM `/Root/olapStore/olapTable`
+ )").GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ TString result = StreamResultToYson(it);
+ Cout << result << Endl;
+ CompareYson(result, R"([[23000u;]])");
+ }
+ }
Y_UNIT_TEST(PushdownFilter) {
static bool enableLog = false;
@@ -967,45 +967,45 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
ORDER BY `timestamp`;
)");
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- TInstant tsPrev = TInstant::MicroSeconds(1000000);
- for (const auto& r : rows) {
- TInstant ts = GetTimestamp(r.at("timestamp"));
- UNIT_ASSERT_GE_C(ts, tsPrev, "result is not sorted in ASC order");
- tsPrev = ts;
- }
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ TInstant tsPrev = TInstant::MicroSeconds(1000000);
+ for (const auto& r : rows) {
+ TInstant ts = GetTimestamp(r.at("timestamp"));
+ UNIT_ASSERT_GE_C(ts, tsPrev, "result is not sorted in ASC order");
+ tsPrev = ts;
+ }
}
- Y_UNIT_TEST(ExtractRangesReverse) {
+ Y_UNIT_TEST(ExtractRangesReverse) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr);
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2000);
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
- auto selectQuery = TString(R"(
- SELECT `timestamp` FROM `/Root/olapStore/olapTable`
- WHERE
- (`timestamp` < CAST(1000100 AS Timestamp) AND `timestamp` > CAST(1000095 AS Timestamp)) OR
- (`timestamp` <= CAST(1001000 AS Timestamp) AND `timestamp` >= CAST(1000999 AS Timestamp)) OR
- (`timestamp` > CAST(1002000 AS Timestamp))
- ORDER BY `timestamp` DESC;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- TInstant tsPrev = TInstant::MicroSeconds(2000000);
- for (const auto& r : rows) {
- TInstant ts = GetTimestamp(r.at("timestamp"));
- UNIT_ASSERT_LE_C(ts, tsPrev, "result is not sorted in DESC order");
- tsPrev = ts;
- }
- }
-
+
+ CreateTestOlapTable(kikimr);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2000);
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+ auto selectQuery = TString(R"(
+ SELECT `timestamp` FROM `/Root/olapStore/olapTable`
+ WHERE
+ (`timestamp` < CAST(1000100 AS Timestamp) AND `timestamp` > CAST(1000095 AS Timestamp)) OR
+ (`timestamp` <= CAST(1001000 AS Timestamp) AND `timestamp` >= CAST(1000999 AS Timestamp)) OR
+ (`timestamp` > CAST(1002000 AS Timestamp))
+ ORDER BY `timestamp` DESC;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ TInstant tsPrev = TInstant::MicroSeconds(2000000);
+ for (const auto& r : rows) {
+ TInstant ts = GetTimestamp(r.at("timestamp"));
+ UNIT_ASSERT_LE_C(ts, tsPrev, "result is not sorted in DESC order");
+ tsPrev = ts;
+ }
+ }
+
Y_UNIT_TEST(PredicatePushdown) {
constexpr bool logQueries = false;
auto settings = TKikimrSettings()
@@ -1129,501 +1129,501 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
}
}
- Y_UNIT_TEST(StatsSysView) {
+ Y_UNIT_TEST(StatsSysView) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr);
- for (ui64 i = 0; i < 100; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 1000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
- auto selectQuery = TString(R"(
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- ORDER BY PathId, Kind, TabletId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 4*3);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
- UNIT_ASSERT_GE(GetUint64(rows[0].at("TabletId")), 72075186224037888ull);
- UNIT_ASSERT_GE(GetUint64(rows[1].at("TabletId")), GetUint64(rows[0].at("TabletId")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[2].at("Kind")), 1ull);
- UNIT_ASSERT_GE(GetUint64(rows[2].at("TabletId")), GetUint64(rows[1].at("TabletId")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[6].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[6].at("Kind")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[6].at("TabletId")), GetUint64(rows[0].at("TabletId")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[7].at("Kind")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[7].at("TabletId")), GetUint64(rows[1].at("TabletId")));
- UNIT_ASSERT_GE(
- GetUint64(rows[0].at("Rows")) + GetUint64(rows[1].at("Rows")) + GetUint64(rows[2].at("Rows")) +
- GetUint64(rows[3].at("Rows")) + GetUint64(rows[4].at("Rows")) + GetUint64(rows[5].at("Rows")) +
- GetUint64(rows[6].at("Rows")) + GetUint64(rows[7].at("Rows")) + GetUint64(rows[8].at("Rows")),
- 0.3*0.9*100*1000); // >= 90% of 100K inserted rows
- }
-
- Y_UNIT_TEST(StatsSysViewTable) {
+
+ CreateTestOlapTable(kikimr);
+ for (ui64 i = 0; i < 100; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 1000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+ auto selectQuery = TString(R"(
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, TabletId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 4*3);
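+ // expected: 3 shards x 4 stat kinds for the single table (shard/kind counts as in the comments below)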
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
+ UNIT_ASSERT_GE(GetUint64(rows[0].at("TabletId")), 72075186224037888ull);
+ UNIT_ASSERT_GE(GetUint64(rows[1].at("TabletId")), GetUint64(rows[0].at("TabletId")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[2].at("Kind")), 1ull);
+ UNIT_ASSERT_GE(GetUint64(rows[2].at("TabletId")), GetUint64(rows[1].at("TabletId")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[6].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[6].at("Kind")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[6].at("TabletId")), GetUint64(rows[0].at("TabletId")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[7].at("Kind")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[7].at("TabletId")), GetUint64(rows[1].at("TabletId")));
+ UNIT_ASSERT_GE(
+ GetUint64(rows[0].at("Rows")) + GetUint64(rows[1].at("Rows")) + GetUint64(rows[2].at("Rows")) +
+ GetUint64(rows[3].at("Rows")) + GetUint64(rows[4].at("Rows")) + GetUint64(rows[5].at("Rows")) +
+ GetUint64(rows[6].at("Rows")) + GetUint64(rows[7].at("Rows")) + GetUint64(rows[8].at("Rows")),
+ 0.3*0.9*100*1000); // >= 90% of 100K inserted rows
+ }
+
+ Y_UNIT_TEST(StatsSysViewTable) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr, "olapTable_1");
- CreateTestOlapTable(kikimr, "olapTable_2");
- for (ui64 i = 0; i < 10; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 2000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
- {
- auto selectQuery = TString(R"(
- SELECT *
- FROM `/Root/olapStore/olapTable_1/.sys/primary_index_stats`
- ORDER BY PathId, Kind, TabletId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GT(rows.size(), 1*4);
- UNIT_ASSERT_LE(rows.size(), 3*4);
- UNIT_ASSERT_VALUES_EQUAL(rows.size() % 4, 0); // 4 Kinds
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.front().at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.back().at("PathId")), 3ull);
- }
- {
- auto selectQuery = TString(R"(
- SELECT *
- FROM `/Root/olapStore/olapTable_2/.sys/primary_index_stats`
- ORDER BY PathId, Kind, TabletId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GT(rows.size(), 1*4);
- UNIT_ASSERT_LE(rows.size(), 3*4);
- UNIT_ASSERT_VALUES_EQUAL(rows.size() % 4, 0); // 4 Kinds
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.front().at("PathId")), 4ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.back().at("PathId")), 4ull);
- }
- {
- auto selectQuery = TString(R"(
- SELECT *
- FROM `/Root/olapStore/olapTable_1/.sys/primary_index_stats`
- WHERE
+
+ CreateTestOlapTable(kikimr, "olapTable_1");
+ CreateTestOlapTable(kikimr, "olapTable_2");
+ for (ui64 i = 0; i < 10; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 2000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+ {
+ auto selectQuery = TString(R"(
+ SELECT *
+ FROM `/Root/olapStore/olapTable_1/.sys/primary_index_stats`
+ ORDER BY PathId, Kind, TabletId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GT(rows.size(), 1*4);
+ UNIT_ASSERT_LE(rows.size(), 3*4);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size() % 4, 0); // 4 Kinds
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.front().at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.back().at("PathId")), 3ull);
+ }
+ {
+ auto selectQuery = TString(R"(
+ SELECT *
+ FROM `/Root/olapStore/olapTable_2/.sys/primary_index_stats`
+ ORDER BY PathId, Kind, TabletId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GT(rows.size(), 1*4);
+ UNIT_ASSERT_LE(rows.size(), 3*4);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size() % 4, 0); // 4 Kinds
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.front().at("PathId")), 4ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows.back().at("PathId")), 4ull);
+ }
+ {
+ auto selectQuery = TString(R"(
+ SELECT *
+ FROM `/Root/olapStore/olapTable_1/.sys/primary_index_stats`
+ WHERE
PathId > UInt64("3")
- ORDER BY PathId, Kind, TabletId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 0);
- }
- }
-
- Y_UNIT_TEST(StatsSysViewColumns) {
+ ORDER BY PathId, Kind, TabletId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 0);
+ }
+ }
+
+ Y_UNIT_TEST(StatsSysViewColumns) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr);
- for (ui64 i = 0; i < 10; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 2000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
-
- {
- auto selectQuery = TString(R"(
- SELECT TabletId, PathId, Kind
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- ORDER BY PathId, Kind, TabletId
- LIMIT 4;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[3].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[3].at("Kind")), 2ull);
- }
- {
- auto selectQuery = TString(R"(
- SELECT Bytes, Rows
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- ORDER BY Bytes
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*4);
- UNIT_ASSERT_LE(GetUint64(rows[0].at("Bytes")), GetUint64(rows[1].at("Bytes")));
- }
- {
- auto selectQuery = TString(R"(
- SELECT Rows, Kind, RawBytes, Rows as Rows2, Rows as Rows3, PathId
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- ORDER BY PathId, Kind, Rows3
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("Rows2")), GetUint64(rows[0].at("Rows3")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("Rows")), GetUint64(rows[1].at("Rows3")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("Rows")), GetUint64(rows[2].at("Rows2")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[5].at("Rows")), GetUint64(rows[5].at("Rows3")));
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[11].at("Rows")), GetUint64(rows[11].at("Rows2")));
- }
- }
-
- Y_UNIT_TEST(StatsSysViewRanges) {
+
+ CreateTestOlapTable(kikimr);
+ for (ui64 i = 0; i < 10; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 2000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+
+ {
+ auto selectQuery = TString(R"(
+ SELECT TabletId, PathId, Kind
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, TabletId
+ LIMIT 4;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 4);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[3].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[3].at("Kind")), 2ull);
+ }
+ {
+ auto selectQuery = TString(R"(
+ SELECT Bytes, Rows
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY Bytes
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*4);
+ UNIT_ASSERT_LE(GetUint64(rows[0].at("Bytes")), GetUint64(rows[1].at("Bytes")));
+ }
+ {
+ auto selectQuery = TString(R"(
+ SELECT Rows, Kind, RawBytes, Rows as Rows2, Rows as Rows3, PathId
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, Rows3
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*4);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("Rows2")), GetUint64(rows[0].at("Rows3")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("Rows")), GetUint64(rows[1].at("Rows3")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("Rows")), GetUint64(rows[2].at("Rows2")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[5].at("Rows")), GetUint64(rows[5].at("Rows3")));
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[11].at("Rows")), GetUint64(rows[11].at("Rows2")));
+ }
+ }
+
+ Y_UNIT_TEST(StatsSysViewRanges) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr, "olapTable_1");
- CreateTestOlapTable(kikimr, "olapTable_2");
- CreateTestOlapTable(kikimr, "olapTable_3");
-
- for (ui64 i = 0; i < 10; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 2000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 3000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable_3", 0, 1000000 + i*10000, 5000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+
+ CreateTestOlapTable(kikimr, "olapTable_1");
+ CreateTestOlapTable(kikimr, "olapTable_2");
+ CreateTestOlapTable(kikimr, "olapTable_3");
+
+ for (ui64 i = 0; i < 10; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 2000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 3000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_3", 0, 1000000 + i*10000, 5000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
PathId == UInt64("3") AND Kind < UInt32("4")
- ORDER BY PathId, Kind, TabletId;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*3);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[2].at("Kind")), 1ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[8].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[8].at("Kind")), 3ull);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- ORDER BY
- PathId DESC, Kind DESC, TabletId DESC
- ;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*3*4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 4ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[35].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[35].at("Kind")), 1ull);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+ ORDER BY PathId, Kind, TabletId;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*3);
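+ // PathId 3 only with Kind < 4: 3 kinds x 3 shards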
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 1ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[2].at("Kind")), 1ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[8].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[8].at("Kind")), 3ull);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY
+ PathId DESC, Kind DESC, TabletId DESC
+ ;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3*3*4);
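+ // no filter: 3 tables x 3 shards x 4 stat kinds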
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 4ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[35].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[35].at("Kind")), 1ull);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
PathId > UInt64("0") AND PathId < UInt32("4")
OR PathId > UInt64("4") AND PathId <= UInt64("5")
- ORDER BY
- PathId DESC, Kind DESC, TabletId DESC
- ;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 2*3*4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 4ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[23].at("PathId")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[23].at("Kind")), 1ull);
- }
- }
-
- Y_UNIT_TEST(StatsSysViewFilter) {
+ ORDER BY
+ PathId DESC, Kind DESC, TabletId DESC
+ ;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 2*3*4);
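+ // only PathId 3 and PathId 5 match the ranges: 2 tables x 3 shards x 4 kinds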
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[0].at("Kind")), 4ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[23].at("PathId")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint32(rows[23].at("Kind")), 1ull);
+ }
+ }
+
+ Y_UNIT_TEST(StatsSysViewFilter) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr);
- for (ui64 i = 0; i < 10; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 2000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
+
+ CreateTestOlapTable(kikimr);
+ for (ui64 i = 0; i < 10; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000 + i*10000, 2000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
WHERE Bytes > UInt64("0")
- ORDER BY PathId, Kind, TabletId;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GE(rows.size(), 3);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT PathId, Kind, TabletId
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, TabletId;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GE(rows.size(), 3);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT PathId, Kind, TabletId
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
WHERE Bytes > UInt64("0")
- ORDER BY PathId, Kind, TabletId;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GE(rows.size(), 3);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, TabletId;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GE(rows.size(), 3);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
WHERE Kind == UInt32("5")
- ORDER BY PathId, Kind, TabletId;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GE(rows.size(), 0);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
- SELECT *
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ ORDER BY PathId, Kind, TabletId;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GE(rows.size(), 0);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+ SELECT *
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
WHERE Kind >= UInt32("3")
- ORDER BY PathId, Kind, TabletId;
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
-
- UNIT_ASSERT_GE(rows.size(), 3*2);
- }
- }
-
- Y_UNIT_TEST(StatsSysViewAggregation) {
+ ORDER BY PathId, Kind, TabletId;
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+
+ UNIT_ASSERT_GE(rows.size(), 3*2);
+ }
+ }
+
+ Y_UNIT_TEST(StatsSysViewAggregation) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false)
.SetEnableOlapSchemaOperations(true);
TKikimrRunner kikimr(settings);
-
- CreateTestOlapTable(kikimr, "olapTable_1");
- CreateTestOlapTable(kikimr, "olapTable_2");
- CreateTestOlapTable(kikimr, "olapTable_3");
-
- for (ui64 i = 0; i < 100; ++i) {
- WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 1000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 2000);
- WriteTestData(kikimr, "/Root/olapStore/olapTable_3", 0, 1000000 + i*10000, 3000);
- }
-
- // EnableDebugLogging(kikimr);
-
- auto tableClient = kikimr.GetTableClient();
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT
- SUM(Rows) as rows,
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+
+ CreateTestOlapTable(kikimr, "olapTable_1");
+ CreateTestOlapTable(kikimr, "olapTable_2");
+ CreateTestOlapTable(kikimr, "olapTable_3");
+
+ for (ui64 i = 0; i < 100; ++i) {
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_1", 0, 1000000 + i*10000, 1000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_2", 0, 1000000 + i*10000, 2000);
+ WriteTestData(kikimr, "/Root/olapStore/olapTable_3", 0, 1000000 + i*10000, 3000);
+ }
+
+ // EnableDebugLogging(kikimr);
+
+ auto tableClient = kikimr.GetTableClient();
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT
+ SUM(Rows) as rows,
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
Kind != UInt32("4") -- not INACTIVE
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 1ull);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT
- PathId,
- SUM(Rows) as rows,
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 1ull);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT
+ PathId,
+ SUM(Rows) as rows,
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
Kind != UInt32("4") -- not INACTIVE
- GROUP BY
- PathId
- ORDER BY
- PathId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("PathId")), 5);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT
- PathId,
- SUM(Rows) as rows,
- SUM(Bytes) as bytes,
- SUM(RawBytes) as bytes_raw,
- SUM(Portions) as portions,
- SUM(Blobs) as blobs
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+ GROUP BY
+ PathId
+ ORDER BY
+ PathId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("PathId")), 5);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT
+ PathId,
+ SUM(Rows) as rows,
+ SUM(Bytes) as bytes,
+ SUM(RawBytes) as bytes_raw,
+ SUM(Portions) as portions,
+ SUM(Blobs) as blobs
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
Kind < UInt32("4")
- GROUP BY PathId
- ORDER BY rows DESC
- LIMIT 10
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("PathId")), 3);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT
- PathId,
- SUM(Rows) as rows,
- SUM(Bytes) as bytes,
- SUM(RawBytes) as bytes_raw,
- SUM(Portions) as portions,
- SUM(Blobs) as blobs
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+ GROUP BY PathId
+ ORDER BY rows DESC
+ LIMIT 10
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[2].at("PathId")), 3);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT
+ PathId,
+ SUM(Rows) as rows,
+ SUM(Bytes) as bytes,
+ SUM(RawBytes) as bytes_raw,
+ SUM(Portions) as portions,
+ SUM(Blobs) as blobs
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
PathId == UInt64("3") AND Kind < UInt32("4")
- GROUP BY PathId
- ORDER BY rows DESC
- LIMIT 10
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 1ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3);
- }
-
- {
- auto selectQuery = TString(R"(
- PRAGMA Kikimr.KqpPushOlapProcess = "true";
-
- SELECT
- PathId,
- SUM(Rows) as rows,
- SUM(Bytes) as bytes,
- SUM(RawBytes) as bytes_raw,
- SUM(Portions) as portions,
- SUM(Blobs) as blobs
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- WHERE
+ GROUP BY PathId
+ ORDER BY rows DESC
+ LIMIT 10
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 1ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 3);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ PRAGMA Kikimr.KqpPushOlapProcess = "true";
+
+ SELECT
+ PathId,
+ SUM(Rows) as rows,
+ SUM(Bytes) as bytes,
+ SUM(RawBytes) as bytes_raw,
+ SUM(Portions) as portions,
+ SUM(Blobs) as blobs
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ WHERE
PathId >= UInt64("4") AND Kind < UInt32("4")
- GROUP BY PathId
- ORDER BY rows DESC
- LIMIT 10
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 2ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
- }
-
- {
- auto selectQuery = TString(R"(
- SELECT count(*)
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- // 3 Tables with 3 Shards each and 4 KindId-s of stats
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column0")), 3*3*4);
- }
-
- {
- auto selectQuery = TString(R"(
- SELECT
- count(distinct(PathId)),
- count(distinct(Kind)),
- count(distinct(TabletId))
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column0")), 3ull);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column1")), 4ull);
- UNIT_ASSERT_GE(GetUint64(rows[0].at("column2")), 3ull);
- }
-
- {
- auto selectQuery = TString(R"(
- SELECT PathId, count(*), sum(Rows), sum(Bytes), sum(RawBytes)
- FROM `/Root/olapStore/.sys/store_primary_index_stats`
- GROUP BY PathId
- ORDER BY PathId
- )");
-
- auto rows = ExecuteScanQuery(tableClient, selectQuery);
- UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
- for (ui64 pathId = 3, row = 0; pathId <= 5; ++pathId, ++row) {
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[row].at("PathId")), pathId);
- UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[row].at("column1")), 12);
- }
- }
- }
-
+ GROUP BY PathId
+ ORDER BY rows DESC
+ LIMIT 10
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 2ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("PathId")), 5);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[1].at("PathId")), 4);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ SELECT count(*)
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ // 3 tables with 3 shards each and 4 stat Kinds
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column0")), 3*3*4);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ SELECT
+ count(distinct(PathId)),
+ count(distinct(Kind)),
+ count(distinct(TabletId))
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column0")), 3ull);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[0].at("column1")), 4ull);
+ UNIT_ASSERT_GE(GetUint64(rows[0].at("column2")), 3ull);
+ }
+
+ {
+ auto selectQuery = TString(R"(
+ SELECT PathId, count(*), sum(Rows), sum(Bytes), sum(RawBytes)
+ FROM `/Root/olapStore/.sys/store_primary_index_stats`
+ GROUP BY PathId
+ ORDER BY PathId
+ )");
+
+ auto rows = ExecuteScanQuery(tableClient, selectQuery);
+ UNIT_ASSERT_VALUES_EQUAL(rows.size(), 3ull);
+ for (ui64 pathId = 3, row = 0; pathId <= 5; ++pathId, ++row) {
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[row].at("PathId")), pathId);
+ UNIT_ASSERT_VALUES_EQUAL(GetUint64(rows[row].at("column1")), 12);
+ }
+ }
+ }
+
Y_UNIT_TEST(PredicatePushdownWithParameters) {
constexpr bool logQueries = true;
auto settings = TKikimrSettings()
@@ -1917,7 +1917,7 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
b << "------------------------------------------------" << Endl;
UNIT_ASSERT_C(falsePositive.empty() && falseNegative.empty(), b);
}
-}
-
+}
+
} // namespace NKqp
-} // namespace NKikimr
+} // namespace NKikimr
diff --git a/ydb/core/kqp/ut/kqp_params_ut.cpp b/ydb/core/kqp/ut/kqp_params_ut.cpp
index 00061742499..1eee1113292 100644
--- a/ydb/core/kqp/ut/kqp_params_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_params_ut.cpp
@@ -71,7 +71,7 @@ Y_UNIT_TEST_SUITE(KqpParams) {
DECLARE $group AS Uint32;
DECLARE $name AS String;
- SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
+ SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::BAD_REQUEST, result.GetIssues().ToString());
@@ -95,7 +95,7 @@ Y_UNIT_TEST_SUITE(KqpParams) {
DECLARE $group AS Uint32;
DECLARE $name AS String;
- SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
+ SELECT * FROM `/Root/Test` WHERE Group = $group AND Name = $name;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), params).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::BAD_REQUEST, result.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_pragma_ut.cpp b/ydb/core/kqp/ut/kqp_pragma_ut.cpp
index b00b13d48e4..cdf99e40430 100644
--- a/ydb/core/kqp/ut/kqp_pragma_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_pragma_ut.cpp
@@ -16,7 +16,7 @@ Y_UNIT_TEST_SUITE(KqpPragma) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UnwrapReadTableValues = "true";
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -58,13 +58,13 @@ Y_UNIT_TEST_SUITE(KqpPragma) {
auto result = session.ExecuteDataQuery(R"(
PRAGMA kikimr.UnwrapReadTableValues = "true";
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([[1u;"One"]])", FormatResultSetYson(result.GetResultSet(0)));
result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([[[1u];["One"]]])", FormatResultSetYson(result.GetResultSet(0)));
diff --git a/ydb/core/kqp/ut/kqp_query_ut.cpp b/ydb/core/kqp/ut/kqp_query_ut.cpp
index 426d6a3aaa8..5ef6d18f381 100644
--- a/ydb/core/kqp/ut/kqp_query_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_query_ut.cpp
@@ -33,7 +33,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto result = query.Execute(TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- auto alterResult = session.AlterTable("/Root/Test",
+ auto alterResult = session.AlterTable("/Root/Test",
TAlterTableSettings()
.AppendDropColumns("Comment")
).GetValueSync();
@@ -221,7 +221,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- SELECT * FROM `/Root/TwoShard`;
+ SELECT * FROM `/Root/TwoShard`;
)");
auto txControl = TTxControl::BeginTx().CommitTx();
@@ -251,7 +251,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- SELECT * FROM `/Root/TwoShard` WHERE Key == 1;
+ SELECT * FROM `/Root/TwoShard` WHERE Key == 1;
)");
auto txControl = TTxControl::BeginTx().CommitTx();
@@ -336,7 +336,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- SELECT * FROM `/Root/TwoShard`
+ SELECT * FROM `/Root/TwoShard`
)");
auto txControl = TTxControl::BeginTx().CommitTx();
@@ -364,7 +364,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- SELECT * FROM `/Root/TwoShard` WHERE Key == 1;
+ SELECT * FROM `/Root/TwoShard` WHERE Key == 1;
)");
auto txControl = TTxControl::BeginTx().CommitTx();
@@ -392,7 +392,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- UPSERT INTO `/Root/TwoShard` (Key, Value1) VALUES
+ UPSERT INTO `/Root/TwoShard` (Key, Value1) VALUES
(4, "Four"),
(4000000000u, "BigZero");
)");
@@ -416,7 +416,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- UPSERT INTO `/Root/TwoShard` (Key, Value1) VALUES
+ UPSERT INTO `/Root/TwoShard` (Key, Value1) VALUES
(4, "Four");
)");
@@ -439,7 +439,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE `/Root/Tmp` (
+ CREATE TABLE `/Root/Tmp` (
Key Uint64,
Value String,
PRIMARY KEY (Key)
@@ -482,7 +482,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
}
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Tmp`;
+ SELECT * FROM `/Root/Tmp`;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
UNIT_ASSERT(!result.GetResultSet(0).Truncated());
@@ -509,7 +509,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
}
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Tmp`;
+ SELECT * FROM `/Root/Tmp`;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
UNIT_ASSERT(result.GetResultSet(0).Truncated());
@@ -539,7 +539,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Tmp`;
+ SELECT * FROM `/Root/Tmp`;
)"), TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
UNIT_ASSERT(result.GetResultSet(0).Truncated());
@@ -568,19 +568,19 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
result = session.ExecuteDataQuery(R"(
--!syntax_v1
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(!result.IsSuccess());
result = session.ExecuteDataQuery(R"(
- SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
+ SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
}
@@ -595,19 +595,19 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(!result.IsSuccess());
result = session.ExecuteDataQuery(R"(
--!syntax_v0
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
result = session.ExecuteDataQuery(R"(
- SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
+ SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
}
@@ -642,19 +642,19 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(!result.IsSuccess());
result = session.ExecuteDataQuery(R"(
--!syntax_v0
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
result = session.ExecuteDataQuery(R"(
- SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
+ SELECT * FROM `/Root/KeyValue` WHERE Key = 1;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
}
@@ -666,7 +666,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto result = session.ExplainDataQuery(R"(
PRAGMA Kikimr.UseNewEngine = 'false';
- SELECT * FROM [/Root/Test] WHERE Group = 1 AND Name > "Name";
+ SELECT * FROM [/Root/Test] WHERE Group = 1 AND Name > "Name";
)").GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -697,11 +697,11 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
declare $text as String;
declare $data as Int32;
- update [/Root/EightShard]
+ update [/Root/EightShard]
set Text = $text, Data = $data
where Length(Text) != 7 and Data = $data and Key = $key;
- upsert into [/Root/EightShard] (Key, Text, Data) values
+ upsert into [/Root/EightShard] (Key, Text, Data) values
($key, $text || "_10", $data + 100);
)");
@@ -1158,7 +1158,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- CREATE TABLE `/Root/Tmp` (
+ CREATE TABLE `/Root/Tmp` (
Key Uint64,
Value String,
PRIMARY KEY (Key)
@@ -1171,7 +1171,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
}));
result = session.ExecuteDataQuery(Q_(R"(
- DROP TABLE `/Root/KeyValue`;
+ DROP TABLE `/Root/KeyValue`;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -1180,7 +1180,7 @@ Y_UNIT_TEST_SUITE(KqpQuery) {
}));
result = session.ExecuteDataQuery(Q_(R"(
- ALTER TABLE `/Root/KeyValue` DROP COLUMN Value;
+ ALTER TABLE `/Root/KeyValue` DROP COLUMN Value;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
diff --git a/ydb/core/kqp/ut/kqp_scan_ut.cpp b/ydb/core/kqp/ut/kqp_scan_ut.cpp
index 9fc25b12dbc..347ca354412 100644
--- a/ydb/core/kqp/ut/kqp_scan_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_scan_ut.cpp
@@ -43,7 +43,7 @@ void CreateSampleTables(TKikimrRunner& kikimr) {
auto session = tableClient.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
+ REPLACE INTO `/Root/FourShard` (Key, Value1, Value2) VALUES
(1u, "Value-001", "1"),
(2u, "Value-002", "2"),
(101u, "Value-101", "101"),
@@ -64,7 +64,7 @@ void CreateNullSampleTables(TKikimrRunner& kikimr) {
auto session = db.CreateSession().GetValueSync().GetSession();
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/TestNulls] (
+ CREATE TABLE [/Root/TestNulls] (
Key1 Uint32,
Key2 Uint32,
Value String,
@@ -73,7 +73,7 @@ void CreateNullSampleTables(TKikimrRunner& kikimr) {
)").GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/TestNulls] (Key1, Key2, Value) VALUES
+ REPLACE INTO [/Root/TestNulls] (Key1, Key2, Value) VALUES
(NULL, NULL, "One"),
(NULL, 100u, "Two"),
(NULL, 200u, "Three"),
@@ -99,7 +99,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
CreateNullSampleTables(kikimr);
auto it = kikimr.GetTableClient().StreamExecuteScanQuery(R"(
- SELECT Value FROM `/Root/TestNulls`
+ SELECT Value FROM `/Root/TestNulls`
WHERE Key1 IS NULL AND Key2 IS NULL
)").GetValueSync();
@@ -112,7 +112,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
CreateNullSampleTables(kikimr);
auto it = kikimr.GetTableClient().StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/Test`
+ SELECT * FROM `/Root/Test`
WHERE Group == 1 AND Name IS NULL
)").GetValueSync();
@@ -125,7 +125,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
CreateNullSampleTables(kikimr);
auto it = kikimr.GetTableClient().StreamExecuteScanQuery(R"(
- SELECT Value FROM `/Root/TestNulls`
+ SELECT Value FROM `/Root/TestNulls`
WHERE Key1 <= 1
ORDER BY Value
)").GetValueSync();
@@ -140,7 +140,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = kikimr.GetTableClient().StreamExecuteScanQuery(R"(
DECLARE $key1 AS Uint32?;
- SELECT Value FROM `/Root/TestNulls`
+ SELECT Value FROM `/Root/TestNulls`
WHERE Key1 > 1
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -202,7 +202,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT Key, Text, Data FROM `/Root/EightShard` ORDER BY Key LIMIT 3 OFFSET 6
+ SELECT Key, Text, Data FROM `/Root/EightShard` ORDER BY Key LIMIT 3 OFFSET 6
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -218,7 +218,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/KeyValue` LIMIT 10
+ SELECT * FROM `/Root/KeyValue` LIMIT 10
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -230,23 +230,23 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/KeyValue` ORDER BY Key LIMIT 1
+ SELECT * FROM `/Root/KeyValue` ORDER BY Key LIMIT 1
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
CompareYson(R"([[[1u];["One"]]])", StreamResultToYson(it));
}
- Y_UNIT_TEST(Grep) {
+ Y_UNIT_TEST(Grep) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
-
+
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/EightShard` WHERE Data == 1 ORDER BY Key;
+ SELECT * FROM `/Root/EightShard` WHERE Data == 1 ORDER BY Key;
)").GetValueSync();
-
- UNIT_ASSERT(it.IsSuccess());
-
+
+ UNIT_ASSERT(it.IsSuccess());
+
CompareYson(R"([
[[1];[101u];["Value1"]];
[[1];[202u];["Value2"]];
@@ -257,21 +257,21 @@ Y_UNIT_TEST_SUITE(KqpScan) {
[[1];[701u];["Value1"]];
[[1];[802u];["Value2"]]])", StreamResultToYson(it));
}
-
- Y_UNIT_TEST(GrepByString) {
+
+ Y_UNIT_TEST(GrepByString) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
-
+
auto it = db.StreamExecuteScanQuery(R"(
- $value = 'some very very very very long string';
- SELECT * FROM `/Root/Logs` WHERE Message == $value ORDER BY App, Ts;
- )").GetValueSync();;
-
- UNIT_ASSERT(it.IsSuccess());
-
- CompareYson(R"([
- [["ydb"];["ydb-1000"];["some very very very very long string"];[0]]])", StreamResultToYson(it));
- }
+ $value = 'some very very very very long string';
+ SELECT * FROM `/Root/Logs` WHERE Message == $value ORDER BY App, Ts;
+ )").GetValueSync();;
+
+ UNIT_ASSERT(it.IsSuccess());
+
+ CompareYson(R"([
+ [["ydb"];["ydb-1000"];["some very very very very long string"];[0]]])", StreamResultToYson(it));
+ }
Y_UNIT_TEST(Order) {
TKikimrRunner kikimr(AppCfg());
@@ -317,7 +317,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
DECLARE $low AS Uint64;
DECLARE $high AS Uint64;
- SELECT * FROM `/Root/EightShard` WHERE Key >= $low AND Key <= $high AND Data == 1 ORDER BY Key;
+ SELECT * FROM `/Root/EightShard` WHERE Key >= $low AND Key <= $high AND Data == 1 ORDER BY Key;
)", params).GetValueSync();
UNIT_ASSERT(it.IsSuccess());
@@ -329,7 +329,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
[[1];[502u];["Value2"]]
])", StreamResultToYson(it));
}
-
+
Y_UNIT_TEST(GrepLimit) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
@@ -352,7 +352,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
DECLARE $tsTo AS Int64;
SELECT *
- FROM `/Root/Logs`
+ FROM `/Root/Logs`
WHERE
App == $app
AND Ts > $tsFrom
@@ -371,36 +371,36 @@ Y_UNIT_TEST_SUITE(KqpScan) {
Y_UNIT_TEST(GrepNonKeyColumns) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
-
- TParamsBuilder params;
- params.AddParam("$app").Utf8("nginx").Build();
- params.AddParam("$tsFrom").Uint64(0).Build();
- params.AddParam("$tsTo").Uint64(5).Build();
-
+
+ TParamsBuilder params;
+ params.AddParam("$app").Utf8("nginx").Build();
+ params.AddParam("$tsFrom").Uint64(0).Build();
+ params.AddParam("$tsTo").Uint64(5).Build();
+
auto it = db.StreamExecuteScanQuery(R"(
- DECLARE $app AS Utf8;
- DECLARE $tsFrom AS Uint64;
- DECLARE $tsTo AS Uint64;
-
- SELECT
+ DECLARE $app AS Utf8;
+ DECLARE $tsFrom AS Uint64;
+ DECLARE $tsTo AS Uint64;
+
+ SELECT
Message,
Ts
- FROM `/Root/Logs`
- WHERE
+ FROM `/Root/Logs`
+ WHERE
App == $app
AND Ts > $tsFrom
AND Ts <= $tsTo
ORDER BY Ts;
- )", params.Build()).GetValueSync();;
-
- UNIT_ASSERT(it.IsSuccess());
-
- CompareYson(R"([
+ )", params.Build()).GetValueSync();;
+
+ UNIT_ASSERT(it.IsSuccess());
+
+ CompareYson(R"([
[["GET /index.html HTTP/1.1"];[1]];
[["PUT /form HTTP/1.1"];[2]];
[["GET /cat.jpg HTTP/1.1"];[3]]
- ])", StreamResultToYson(it));
- }
+ ])", StreamResultToYson(it));
+ }
Y_UNIT_TEST(SingleKey) {
TKikimrRunner kikimr(AppCfg());
@@ -415,7 +415,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
DECLARE $key AS Uint64;
- SELECT * FROM `/Root/EightShard` WHERE Key = $key;
+ SELECT * FROM `/Root/EightShard` WHERE Key = $key;
)", params).GetValueSync();
UNIT_ASSERT(it.IsSuccess());
@@ -430,7 +430,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT Text, SUM(Key) AS Total FROM `/Root/EightShard`
+ SELECT Text, SUM(Key) AS Total FROM `/Root/EightShard`
GROUP BY Text
ORDER BY Total DESC;
)").GetValueSync();
@@ -450,7 +450,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT SUM(Data), AVG(Data), COUNT(*), MAX(Data), MIN(Data), SUM(Data * 3 + Key * 2) as foo
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Key > 300
)").GetValueSync();
@@ -465,7 +465,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT SUM(Data), AVG(Data), COUNT(*)
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Key > 300
)").GetValueSync();
@@ -480,7 +480,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT (SUM(Data) * 100) / (MIN(Data) + 10)
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -492,7 +492,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
- auto it = db.StreamExecuteScanQuery("SELECT COUNT(*) FROM `/Root/EightShard`").GetValueSync();
+ auto it = db.StreamExecuteScanQuery("SELECT COUNT(*) FROM `/Root/EightShard`").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
CompareYson(R"([[24u]])", StreamResultToYson(it));
}
@@ -501,7 +501,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
- auto it = db.StreamExecuteScanQuery("SELECT COUNT(*) FROM `/Root/EightShard` WHERE Key < 10").GetValueSync();
+ auto it = db.StreamExecuteScanQuery("SELECT COUNT(*) FROM `/Root/EightShard` WHERE Key < 10").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
CompareYson(R"([[0u]])", StreamResultToYson(it));
}
@@ -510,7 +510,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
TKikimrRunner kikimr(AppCfg());
auto db = kikimr.GetTableClient();
- auto it = db.StreamExecuteScanQuery("SELECT SUM(Data) FROM `/Root/EightShard` WHERE Key < 10").GetValueSync();
+ auto it = db.StreamExecuteScanQuery("SELECT SUM(Data) FROM `/Root/EightShard` WHERE Key < 10").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
CompareYson(R"([[#]])", StreamResultToYson(it));
}
@@ -522,7 +522,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT *
- FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
ORDER BY Key, Text, Data, Value1, Value2
)").GetValueSync();
@@ -546,10 +546,10 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto test = [&](bool simpleColumns) {
auto it = db.StreamExecuteScanQuery(Sprintf(R"(
PRAGMA %sSimpleColumns;
- $r = (select * from `/Root/FourShard` where Key > 201);
+ $r = (select * from `/Root/FourShard` where Key > 201);
SELECT l.Key as key, l.Text as text, r.Value1 as value
- FROM `/Root/EightShard` AS l JOIN $r AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN $r AS r ON l.Key = r.Key
ORDER BY key, text, value
)", simpleColumns ? "" : "Disable")).GetValueSync();
@@ -573,8 +573,8 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT *
- FROM `/Root/EightShard` AS l
- JOIN `/Root/FourShard` AS r
+ FROM `/Root/EightShard` AS l
+ JOIN `/Root/FourShard` AS r
ON l.Key = r.Key
WHERE l.Text != "Value1" AND r.Value2 > "1"
ORDER BY Key, Text, Data, Value1, Value2
@@ -599,7 +599,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
PRAGMA %sSimpleColumns;
$join = (
SELECT l.Key as Key, l.Text as Text, l.Data as Data, r.Value1 as Value1, r.Value2 as Value2
- FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
);
SELECT Key, COUNT(*) AS Cnt
@@ -630,7 +630,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT *
- FROM `/Root/FourShard` AS l LEFT SEMI JOIN `/Root/EightShard` AS r ON l.Key = r.Key
+ FROM `/Root/FourShard` AS l LEFT SEMI JOIN `/Root/EightShard` AS r ON l.Key = r.Key
ORDER BY Key
)").GetValueSync();
@@ -652,7 +652,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT l.Key, l.Text, l.Data, r.Key, r.Value1, r.Value2
- FROM `/Root/EightShard` AS l RIGHT JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l RIGHT JOIN `/Root/FourShard` AS r ON l.Key = r.Key
WHERE r.Key < 200
ORDER BY l.Key, l.Text, l.Data, r.Key, r.Value1, r.Value2
)").GetValueSync();
@@ -674,7 +674,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT Key, Value1, Value2
- FROM `/Root/EightShard` AS l RIGHT ONLY JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l RIGHT ONLY JOIN `/Root/FourShard` AS r ON l.Key = r.Key
WHERE Key < 200
ORDER BY Key, Value1, Value2
)").GetValueSync();
@@ -694,7 +694,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT Key, Value1, Value2
- FROM `/Root/EightShard` AS l RIGHT SEMI JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l RIGHT SEMI JOIN `/Root/FourShard` AS r ON l.Key = r.Key
WHERE Key < 200
ORDER BY Key, Value1, Value2
)").GetValueSync();
@@ -719,13 +719,13 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto query1 = R"(
declare $in as List<Struct<key: UInt64>>;
select l.Key, l.Value
- from `/Root/KeyValue` as l join AS_TABLE($in) as r on l.Key = r.key
+ from `/Root/KeyValue` as l join AS_TABLE($in) as r on l.Key = r.key
)";
// params join table
auto query2 = R"(
declare $in as List<Struct<key: UInt64>>;
select r.Key, r.Value
- from AS_TABLE($in) as l join `/Root/KeyValue` as r on l.key = r.Key
+ from AS_TABLE($in) as l join `/Root/KeyValue` as r on l.key = r.Key
)";
for (auto& query : {query1, query2}) {
auto it = db.StreamExecuteScanQuery(query, params).GetValueSync();
@@ -740,7 +740,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto session = db.CreateSession().GetValueSync().GetSession();
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE `/Root/Tmp` (
+ CREATE TABLE `/Root/Tmp` (
Key Uint64,
Value String,
PRIMARY KEY (Key)
@@ -815,10 +815,10 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
PRAGMA DisableSimpleColumns;
SELECT *
- FROM `/Root/EightShard` AS t8
- JOIN `/Root/FourShard` AS t4
+ FROM `/Root/EightShard` AS t8
+ JOIN `/Root/FourShard` AS t4
ON t8.Key = t4.Key
- JOIN `/Root/TwoShard` AS t2
+ JOIN `/Root/TwoShard` AS t2
ON t8.Data = t2.Key
ORDER BY t8.Key, t8.Text, t8.Data, t4.Value1, t4.Value2, t2.Value1, t2.Value2
)").GetValueSync();
@@ -845,10 +845,10 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(Sprintf(R"(
PRAGMA %sSimpleColumns;
SELECT t8.Key as key, t8.Text as text, t4.Value1, t2.Value2
- FROM `/Root/EightShard` AS t8
- JOIN `/Root/FourShard` AS t4
+ FROM `/Root/EightShard` AS t8
+ JOIN `/Root/FourShard` AS t4
ON t8.Key = t4.Key
- JOIN `/Root/TwoShard` AS t2
+ JOIN `/Root/TwoShard` AS t2
ON t8.Data = t2.Key
WHERE t8.Key > 200 AND t2.Value2 >= 0
ORDER BY key, text, t4.Value1, t2.Value2
@@ -877,8 +877,8 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(Sprintf(R"(
PRAGMA %sSimpleColumns;
SELECT *
- FROM `/Root/EightShard` AS l
- LEFT ONLY JOIN `/Root/FourShard` AS r
+ FROM `/Root/EightShard` AS l
+ LEFT ONLY JOIN `/Root/FourShard` AS r
ON l.Key = r.Key
WHERE Data = 1
ORDER BY Key, Text, Data
@@ -908,7 +908,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT l.Key, r.Key, l.Text, r.Value1
- FROM `/Root/EightShard` AS l CROSS JOIN `/Root/FourShard` AS r
+ FROM `/Root/EightShard` AS l CROSS JOIN `/Root/FourShard` AS r
WHERE l.Key > r.Key AND l.Data = 1 AND r.Value2 > "200"
ORDER BY l.Key, l.Text, r.Value1
)").GetValueSync();
@@ -952,15 +952,15 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
$foo = (
SELECT t1.Key AS Key
- FROM `/Root/KeyValue` AS t1
- JOIN `/Root/KeyValue` AS t2
+ FROM `/Root/KeyValue` AS t1
+ JOIN `/Root/KeyValue` AS t2
ON t1.Key = t2.Key
GROUP BY t1.Key
);
SELECT t1.Key AS Key
FROM $foo AS Foo
- JOIN `/Root/KeyValue` AS t1
+ JOIN `/Root/KeyValue` AS t1
ON t1.Key = Foo.Key
ORDER BY Key
)").GetValueSync();
@@ -978,15 +978,15 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
$foo = (
SELECT t1.Key AS Key
- FROM `/Root/KeyValue` AS t1
- JOIN `/Root/KeyValue` AS t2
+ FROM `/Root/KeyValue` AS t1
+ JOIN `/Root/KeyValue` AS t2
ON t1.Key = t2.Key
GROUP BY t1.Key
);
SELECT t3.Key AS Key
FROM $foo AS Foo
- JOIN `/Root/KeyValue` AS t3
+ JOIN `/Root/KeyValue` AS t3
ON t3.Key = Foo.Key
ORDER BY Key
)").GetValueSync();
@@ -1009,7 +1009,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
DECLARE $in AS 'List<Struct<k: Uint64, v: String>>';
SELECT *
- FROM `/Root/KeyValue` AS l
+ FROM `/Root/KeyValue` AS l
JOIN AS_TABLE($in) AS r
ON l.Key = r.k
)", params).GetValueSync();
@@ -1041,7 +1041,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
FROM AS_TABLE($in1) AS l
JOIN AS_TABLE($in2) AS r
ON l.k = r.k;
- UPSERT INTO [/Root/KeyValue] (Key, Value) Values (1, "test");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) Values (1, "test");
)", params).GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1091,7 +1091,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
.ProfileMode(NExperimental::EStreamQueryProfileMode::Basic);
auto it = db.ExecuteStreamQuery(R"(
- SELECT * FROM `/Root/EightShard` WHERE Key = 301;
+ SELECT * FROM `/Root/EightShard` WHERE Key = 301;
)", settings).GetValueSync();
UNIT_ASSERT(it.IsSuccess());
@@ -1135,7 +1135,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.ExecuteStreamQuery(R"(
DECLARE $key AS Uint64;
- SELECT * FROM `/Root/EightShard` WHERE Key = $key + 1;
+ SELECT * FROM `/Root/EightShard` WHERE Key = $key + 1;
)", params, settings).GetValueSync();
UNIT_ASSERT(it.IsSuccess());
@@ -1180,7 +1180,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
$join = (
SELECT l.Key as Key, l.Text as Text, l.Data as Data, r.Value1 as Value1, r.Value2 as Value2
- FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
+ FROM `/Root/EightShard` AS l JOIN `/Root/FourShard` AS r ON l.Key = r.Key
);
SELECT Key, COUNT(*) AS Cnt
@@ -1219,7 +1219,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
for (int i = 0; i < 100; ++i) {
AssertSuccessResult(session.ExecuteDataQuery(
Sprintf(R"(
- REPLACE INTO `/Root/EightShard` (Key, Text, Data) VALUES
+ REPLACE INTO `/Root/EightShard` (Key, Text, Data) VALUES
(%d, "Value1", 0),
(%d, "Value2", 1),
(%d, "Value3", 2),
@@ -1233,7 +1233,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
}
auto db = kikimr.GetTableClient();
- auto it = db.StreamExecuteScanQuery("SELECT * FROM `/Root/EightShard` LIMIT 2").GetValueSync();
+ auto it = db.StreamExecuteScanQuery("SELECT * FROM `/Root/EightShard` LIMIT 2").GetValueSync();
Cerr << StreamResultToYson(it) << Endl;
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1244,8 +1244,8 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/KeyValue`;
- SELECT * FROM `/Root/EightShard`;
+ SELECT * FROM `/Root/KeyValue`;
+ SELECT * FROM `/Root/EightShard`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1259,10 +1259,10 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- UPSERT INTO `/Root/KeyValue`
- SELECT Key, Text AS Value FROM `/Root/EightShard`;
+ UPSERT INTO `/Root/KeyValue`
+ SELECT Key, Text AS Value FROM `/Root/EightShard`;
- SELECT * FROM `/Root/EightShard`;
+ SELECT * FROM `/Root/EightShard`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1302,7 +1302,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1;
+ SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1;
SELECT 2;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1316,9 +1316,9 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
+ (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
UNION ALL
- (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1);
+ (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1);
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1332,7 +1332,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT 42
UNION ALL
- (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1);
+ (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1);
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1344,11 +1344,11 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
+ (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
UNION ALL
- (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1)
+ (SELECT Key FROM `/Root/EightShard` ORDER BY Key LIMIT 1)
UNION ALL
- (SELECT Key FROM `/Root/TwoShard` ORDER BY Key DESC LIMIT 1);
+ (SELECT Key FROM `/Root/TwoShard` ORDER BY Key DESC LIMIT 1);
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1360,9 +1360,9 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
+ (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1)
UNION ALL
- (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1);
+ (SELECT Key FROM `/Root/KeyValue` ORDER BY Key LIMIT 1);
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1374,11 +1374,11 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT COUNT(*) FROM `/Root/KeyValue`
+ SELECT COUNT(*) FROM `/Root/KeyValue`
UNION ALL
- SELECT COUNT(*) FROM `/Root/EightShard`
+ SELECT COUNT(*) FROM `/Root/EightShard`
UNION ALL
- SELECT SUM(Amount) FROM `/Root/Test`;
+ SELECT SUM(Amount) FROM `/Root/Test`;
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1391,7 +1391,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT count(*) AS c1, count(DISTINCT Value) AS c2
- FROM `/Root/KeyValue` GROUP BY Key;
+ FROM `/Root/KeyValue` GROUP BY Key;
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1405,7 +1405,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT group_total, count(*) FROM (
SELECT Key, Text, SUM(Data) OVER w1 AS group_total
- FROM `/Root/EightShard` WINDOW w1 AS (partition by Text)
+ FROM `/Root/EightShard` WINDOW w1 AS (partition by Text)
) GROUP BY group_total ORDER BY group_total;
)").GetValueSync();
auto res = StreamResultToYson(it);
@@ -1420,7 +1420,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT Text, running_total FROM (
SELECT Key, Text, SUM(Data) OVER w1 AS running_total
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Text = 'Value2'
WINDOW w1 AS (partition by Text order by Key)
) ORDER BY running_total;
@@ -1447,7 +1447,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT tot, avg, count(*) FROM (
SELECT Key, Text, SUM(Data) OVER w1 AS tot, avg(Data) OVER w1 AS avg
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Text = 'Value3'
WINDOW w1 AS (partition by Text)
) GROUP BY tot, avg
@@ -1463,7 +1463,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT Key, Text, sum(Data) OVER w1 AS tot, sum(Data) OVER w2 AS avg
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Text = 'Value2'
WINDOW w1 AS (partition by Text),
w2 AS (partition by Text order by Key)
@@ -1490,7 +1490,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
SELECT Key, sum_short_win FROM (
SELECT Key, Text, SUM(Data) OVER w1 AS sum_short_win
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Text = 'Value2'
WINDOW w1 AS (partition by Text order by Key ROWS BETWEEN CURRENT ROW and 2 FOLLOWING)
) ORDER BY Key;
@@ -1514,7 +1514,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT Key FROM `/Root/EightShard` WHERE false;
+ SELECT Key FROM `/Root/EightShard` WHERE false;
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1527,7 +1527,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto it = db.StreamExecuteScanQuery(R"(
--!syntax_v0
- SELECT * FROM [/Root/EightShard] WHERE Key = 1;
+ SELECT * FROM [/Root/EightShard] WHERE Key = 1;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
auto part = it.ReadNext().GetValueSync();
@@ -1541,7 +1541,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT min(Message), max(Message) FROM `/Root/Logs`;
+ SELECT min(Message), max(Message) FROM `/Root/Logs`;
)").GetValueSync();
auto res = StreamResultToYson(it);
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -1553,7 +1553,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto result = db.CreateSession().GetValueSync().GetSession().ExecuteDataQuery(R"(
- REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES
+ REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES
(3u, "Three"),
(4u, "Four"),
(10u, "Ten"),
@@ -1587,7 +1587,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto result = db.CreateSession().GetValueSync().GetSession().ExecuteDataQuery(R"(
- REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES
+ REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES
(3u, "Three"),
(4u, "Four"),
(10u, "Ten"),
@@ -1718,7 +1718,7 @@ Y_UNIT_TEST_SUITE(KqpScan) {
auto db = kikimr.GetTableClient();
auto it = db.StreamExecuteScanQuery(R"(
- SELECT * FROM `/Root/KeyValue` WHERE Key IN (1,2,3) LIMIT 10;
+ SELECT * FROM `/Root/KeyValue` WHERE Key IN (1,2,3) LIMIT 10;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_scheme_ut.cpp b/ydb/core/kqp/ut/kqp_scheme_ut.cpp
index c4c0dc9302f..ca7ffa42a4f 100644
--- a/ydb/core/kqp/ut/kqp_scheme_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_scheme_ut.cpp
@@ -21,14 +21,14 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
bool useSchemeCacheMeta = tableSettings.GetUseSchemeCacheMetadata();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue];
+ SELECT * FROM [/Root/KeyValue];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(),
useSchemeCacheMeta ? EStatus::SCHEME_ERROR : EStatus::UNAUTHORIZED, result.GetIssues().ToString());
result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/NonExistent];
+ SELECT * FROM [/Root/NonExistent];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(),
@@ -41,12 +41,12 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue];
+ SELECT * FROM [/Root/KeyValue];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/NonExistent];
+ SELECT * FROM [/Root/NonExistent];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SCHEME_ERROR);
@@ -58,17 +58,17 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue];
+ SELECT * FROM [/Root/KeyValue];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
auto schemeResult = session.ExecuteSchemeQuery(R"(
- DROP TABLE [/Root/KeyValue];
+ DROP TABLE [/Root/KeyValue];
)").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(schemeResult.GetStatus(), EStatus::SUCCESS);
result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue];
+ SELECT * FROM [/Root/KeyValue];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SCHEME_ERROR);
@@ -80,12 +80,12 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto schemeResult = session.ExecuteSchemeQuery(R"(
- DROP TABLE [/Root/KeyValue];
+ DROP TABLE [/Root/KeyValue];
)").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(schemeResult.GetStatus(), EStatus::SUCCESS);
schemeResult = session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/KeyValue] (
+ CREATE TABLE [/Root/KeyValue] (
Key Uint32,
Value String,
PRIMARY KEY(Key)
@@ -94,7 +94,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
UNIT_ASSERT_VALUES_EQUAL(schemeResult.GetStatus(), EStatus::SUCCESS);
auto result = session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/KeyValue];
+ SELECT * FROM [/Root/KeyValue];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
}
@@ -107,7 +107,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
const size_t limit = 1000;
const static TString createTableQuery = R"(
- CREATE TABLE [/Root/Test1234/KeyValue] (
+ CREATE TABLE [/Root/Test1234/KeyValue] (
Key Uint32,
Value String,
PRIMARY KEY(Key)
@@ -115,7 +115,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
)";
const static TString dropTableQuery = R"(
- DROP TABLE [/Root/Test1234/KeyValue];
+ DROP TABLE [/Root/Test1234/KeyValue];
)";
NPar::LocalExecutor().RunAdditionalThreads(inflight);
@@ -154,7 +154,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
const size_t inflight = 4;
const size_t limit = 1000;
- const static TString tableName = "/Root/Test1234/KeyValue";
+ const static TString tableName = "/Root/Test1234/KeyValue";
NPar::LocalExecutor().RunAdditionalThreads(inflight);
NPar::LocalExecutor().ExecRange([=, &db](int /*id*/) mutable {
@@ -206,7 +206,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto status = db.RetryOperationSync([](TSession session) {
return session.ExecuteSchemeQuery(R"(
- ALTER TABLE [/Root/EightShard] DROP COLUMN Data;
+ ALTER TABLE [/Root/EightShard] DROP COLUMN Data;
)").ExtractValueSync();
});
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
@@ -224,7 +224,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
// Immediate
auto status = db.RetryOperationSync([](TSession session) {
return session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/EightShard] WHERE Key = 501u;
+ SELECT * FROM [/Root/EightShard] WHERE Key = 501u;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
}, retrySettings);
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
@@ -232,7 +232,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
// Planned
auto status = db.RetryOperationSync([](TSession session) {
return session.ExecuteDataQuery(R"(
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
}, retrySettings);
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
@@ -248,7 +248,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto session = db.CreateSession().GetValueSync().GetSession();
const TString query = Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)");
NYdb::NTable::TExecDataQuerySettings execSettings;
@@ -278,7 +278,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
.EndOptional()
.Build();
- auto result = session.AlterTable("/Root/KeyValue", TAlterTableSettings()
+ auto result = session.AlterTable("/Root/KeyValue", TAlterTableSettings()
.AppendAddColumns(TColumn{"NewColumn", type})).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
}
@@ -298,7 +298,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
- const TString sql = "UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES(1, \"One\")";
+ const TString sql = "UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES(1, \"One\")";
NYdb::NTable::TExecDataQuerySettings execSettings;
execSettings.KeepInQueryCache(true);
@@ -321,7 +321,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
}
{
- auto result = session.DropTable("/Root/KeyValue").ExtractValueSync();
+ auto result = session.DropTable("/Root/KeyValue").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
}
@@ -340,7 +340,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
.SetPrimaryKeyColumns({"Key", "Value"})
.Build();
- auto result = session.CreateTable("/Root/KeyValue",
+ auto result = session.CreateTable("/Root/KeyValue",
std::move(desc)).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
@@ -379,8 +379,8 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto db = kikimr.GetTableClient();
const TString sql = select
- ? "SELECT * FROM [/Root/KeyValue];"
- : "UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES(1, \"One\")";
+ ? "SELECT * FROM [/Root/KeyValue];"
+ : "UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES(1, \"One\")";
auto action = [db, sql, multistageTx]() mutable {
return db.RetryOperationSync(
@@ -416,7 +416,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
}
{
- auto result = db.GetSession().GetValueSync().GetSession().DropTable("/Root/KeyValue").ExtractValueSync();
+ auto result = db.GetSession().GetValueSync().GetSession().DropTable("/Root/KeyValue").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
}
@@ -429,7 +429,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
.SetPrimaryKeyColumns({"Key"})
.Build();
- auto result = db.GetSession().GetValueSync().GetSession().CreateTable("/Root/KeyValue",
+ auto result = db.GetSession().GetValueSync().GetSession().CreateTable("/Root/KeyValue",
std::move(desc)).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
@@ -883,31 +883,31 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
AUTO_PARTITIONING_BY_LOAD = ENABLED
);)";
auto result = session.ExecuteSchemeQuery(query).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
}
{
- auto describeResult = kikimr.GetTestClient().Ls(tableName);
- bool enabled = describeResult->Record.GetPathDescription().GetTable()
- .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
- UNIT_ASSERT_VALUES_EQUAL(enabled, true);
- }
-
+ auto describeResult = kikimr.GetTestClient().Ls(tableName);
+ bool enabled = describeResult->Record.GetPathDescription().GetTable()
+ .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
+ UNIT_ASSERT_VALUES_EQUAL(enabled, true);
+ }
+
AlterTableSetttings(session, tableName, {{"AUTO_PARTITIONING_BY_LOAD", "DISABLED"}}, compat);
- {
- auto describeResult = kikimr.GetTestClient().Ls(tableName);
- bool enabled = describeResult->Record.GetPathDescription().GetTable()
- .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
- UNIT_ASSERT_VALUES_EQUAL(enabled, false);
- }
-
+ {
+ auto describeResult = kikimr.GetTestClient().Ls(tableName);
+ bool enabled = describeResult->Record.GetPathDescription().GetTable()
+ .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
+ UNIT_ASSERT_VALUES_EQUAL(enabled, false);
+ }
+
AlterTableSetttings(session, tableName, {{"AUTO_PARTITIONING_BY_LOAD", "ENABLED"}}, compat);
- {
- auto describeResult = kikimr.GetTestClient().Ls(tableName);
- bool enabled = describeResult->Record.GetPathDescription().GetTable()
- .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
- UNIT_ASSERT_VALUES_EQUAL(enabled, true);
- }
+ {
+ auto describeResult = kikimr.GetTestClient().Ls(tableName);
+ bool enabled = describeResult->Record.GetPathDescription().GetTable()
+ .GetPartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
+ UNIT_ASSERT_VALUES_EQUAL(enabled, true);
+ }
}
Y_UNIT_TEST(CreateAndAlterTableWithPartitioningByLoadUncompat) {
@@ -1670,7 +1670,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
auto result = session.ExecuteSchemeQuery(R"(
--!syntax_v1
- CREATE TABLE `/TablePathWithNoRoot` (
+ CREATE TABLE `/TablePathWithNoRoot` (
Id Uint32,
Value String,
PRIMARY KEY (Id)
@@ -1754,13 +1754,13 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
{
auto status = session.ExecuteSchemeQuery(Sprintf(R"(
--!syntax_v1
- ALTER TABLE `/Root/Test` ADD INDEX NameIndex %s ON (Name);
+ ALTER TABLE `/Root/Test` ADD INDEX NameIndex %s ON (Name);
)", typeStr.data())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), expectedStatus, status.GetIssues().ToString());
}
{
- TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
+ TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
UNIT_ASSERT_EQUAL(describe.GetStatus(), EStatus::SUCCESS);
auto indexDesc = describe.GetTableDescription().GetIndexDescriptions();
@@ -1779,13 +1779,13 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
{
auto status = session.ExecuteSchemeQuery(R"(
--!syntax_v1
- ALTER TABLE `/Root/Test` DROP INDEX NameIndex;
+ ALTER TABLE `/Root/Test` DROP INDEX NameIndex;
)").ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
}
{
- TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
+ TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
UNIT_ASSERT_EQUAL(describe.GetStatus(), EStatus::SUCCESS);
auto indexDesc = describe.GetTableDescription().GetIndexDescriptions();
UNIT_ASSERT_VALUES_EQUAL(indexDesc.size(), 0);
@@ -1794,13 +1794,13 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
{
auto status = session.ExecuteSchemeQuery(Sprintf(R"(
--!syntax_v1
- ALTER TABLE `/Root/Test` ADD INDEX NameIndex %s ON (Name) COVER (Amount);
+ ALTER TABLE `/Root/Test` ADD INDEX NameIndex %s ON (Name) COVER (Amount);
)", typeStr.data())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
}
{
- TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
+ TDescribeTableResult describe = session.DescribeTable("/Root/Test").GetValueSync();
UNIT_ASSERT_EQUAL(describe.GetStatus(), EStatus::SUCCESS);
auto indexDesc = describe.GetTableDescription().GetIndexDescriptions();
UNIT_ASSERT_VALUES_EQUAL(indexDesc.size(), 1);
diff --git a/ydb/core/kqp/ut/kqp_scripting_ut.cpp b/ydb/core/kqp/ut/kqp_scripting_ut.cpp
index 3c9a7b6b6cd..65e75ac303e 100644
--- a/ydb/core/kqp/ut/kqp_scripting_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_scripting_ut.cpp
@@ -101,10 +101,10 @@ Y_UNIT_TEST_SUITE(KqpScripting) {
result = client.ExecuteYqlScript(R"(
PRAGMA kikimr.ScanQuery = "true";
- UPSERT INTO [/Root/KeyValue]
- SELECT Key, Text AS Value FROM [/Root/EightShard];
+ UPSERT INTO [/Root/KeyValue]
+ SELECT Key, Text AS Value FROM [/Root/EightShard];
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::PRECONDITION_FAILED, result.GetIssues().ToString());
@@ -201,24 +201,24 @@ Y_UNIT_TEST_SUITE(KqpScripting) {
TScriptingClient client(kikimr.GetDriver());
auto result = client.ExecuteYqlScript(R"(
- SELECT * FROM `/Root/.sys/partition_stats`;
+ SELECT * FROM `/Root/.sys/partition_stats`;
COMMIT;
- SELECT * FROM `/Root/.sys/partition_stats`;
+ SELECT * FROM `/Root/.sys/partition_stats`;
)").GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
UNIT_ASSERT_VALUES_EQUAL(result.GetResultSets().size(), 2);
result = client.ExecuteYqlScript(R"(
- SELECT * FROM `/Root/.sys/partition_stats`;
- SELECT * FROM `/Root/.sys/partition_stats`;
+ SELECT * FROM `/Root/.sys/partition_stats`;
+ SELECT * FROM `/Root/.sys/partition_stats`;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
result = client.ExecuteYqlScript(R"(
SELECT *
- FROM `/Root/TwoShard` AS ts
- JOIN `/Root/.sys/partition_stats`AS ps
+ FROM `/Root/TwoShard` AS ts
+ JOIN `/Root/.sys/partition_stats`AS ps
ON ts.Key = ps.PartIdx;
)").GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -250,7 +250,7 @@ Y_UNIT_TEST_SUITE(KqpScripting) {
TScriptingClient client(kikimr.GetDriver());
auto result = client.ExecuteYqlScript(R"(
- CREATE TABLE `/Root/TestTable` (
+ CREATE TABLE `/Root/TestTable` (
Key Uint64,
Value String,
PRIMARY KEY (Key)
@@ -260,7 +260,7 @@ Y_UNIT_TEST_SUITE(KqpScripting) {
result = client.ExecuteYqlScript(R"(
- REPLACE INTO `/Root/TestTable` (Key, Value) VALUES
+ REPLACE INTO `/Root/TestTable` (Key, Value) VALUES
(1u, "One"),
(2u, "Two");
)").GetValueSync();
diff --git a/ydb/core/kqp/ut/kqp_service_ut.cpp b/ydb/core/kqp/ut/kqp_service_ut.cpp
index 96e7e415adc..37e178c8108 100644
--- a/ydb/core/kqp/ut/kqp_service_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_service_ut.cpp
@@ -44,9 +44,9 @@ Y_UNIT_TEST_SUITE(KqpService) {
DECLARE $key AS Uint32;
DECLARE $value AS Int32;
- SELECT * FROM [/Root/EightShard];
+ SELECT * FROM [/Root/EightShard];
- UPSERT INTO [/Root/TwoShard] (Key, Value2) VALUES
+ UPSERT INTO [/Root/TwoShard] (Key, Value2) VALUES
($key, $value);
)", TTxControl::BeginTx().CommitTx(), params).GetValueSync();
@@ -107,14 +107,14 @@ Y_UNIT_TEST_SUITE(KqpService) {
}
auto query = Sprintf(R"(
- SELECT Key, Text, Data FROM [/Root/EightShard] WHERE Key=%1$d + 0;
- SELECT Key, Data, Text FROM [/Root/EightShard] WHERE Key=%1$d + 1;
- SELECT Text, Key, Data FROM [/Root/EightShard] WHERE Key=%1$d + 2;
- SELECT Text, Data, Key FROM [/Root/EightShard] WHERE Key=%1$d + 3;
- SELECT Data, Key, Text FROM [/Root/EightShard] WHERE Key=%1$d + 4;
- SELECT Data, Text, Key FROM [/Root/EightShard] WHERE Key=%1$d + 5;
-
- UPSERT INTO [/Root/EightShard] (Key, Text) VALUES
+ SELECT Key, Text, Data FROM [/Root/EightShard] WHERE Key=%1$d + 0;
+ SELECT Key, Data, Text FROM [/Root/EightShard] WHERE Key=%1$d + 1;
+ SELECT Text, Key, Data FROM [/Root/EightShard] WHERE Key=%1$d + 2;
+ SELECT Text, Data, Key FROM [/Root/EightShard] WHERE Key=%1$d + 3;
+ SELECT Data, Key, Text FROM [/Root/EightShard] WHERE Key=%1$d + 4;
+ SELECT Data, Text, Key FROM [/Root/EightShard] WHERE Key=%1$d + 5;
+
+ UPSERT INTO [/Root/EightShard] (Key, Text) VALUES
(%2$dul, "New");
)", RandomNumber<ui32>(), RandomNumber<ui32>());
@@ -132,7 +132,7 @@ Y_UNIT_TEST_SUITE(KqpService) {
TVector<TAsyncDataQueryResult> futures;
for (ui32 i = 0; i < count; ++i) {
auto query = Sprintf(R"(
- SELECT * FROM [/Root/EightShard] WHERE Key=%1$d;
+ SELECT * FROM [/Root/EightShard] WHERE Key=%1$d;
)", i);
auto future = session.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx());
diff --git a/ydb/core/kqp/ut/kqp_sort_ut.cpp b/ydb/core/kqp/ut/kqp_sort_ut.cpp
index 0c3850ee5bb..0b7c65a3d5c 100644
--- a/ydb/core/kqp/ut/kqp_sort_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_sort_ut.cpp
@@ -14,7 +14,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
ORDER BY Group DESC, Name DESC;
)");
@@ -41,7 +41,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
ORDER BY Group DESC, Name DESC;
)");
@@ -85,7 +85,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
WHERE Group < 2u
ORDER BY Group DESC, Name DESC;
)");
@@ -128,7 +128,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM [/Root/Test]
+ FROM [/Root/Test]
ORDER BY Group DESC;
)");
@@ -173,7 +173,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
ORDER BY Group DESC, Name ASC;
)");
@@ -216,7 +216,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
WHERE Group < 2
ORDER BY Group DESC, Name DESC;
)");
@@ -261,7 +261,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
ORDER BY Group DESC, Name DESC
LIMIT 1;
)");
@@ -310,7 +310,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Group, Name, Amount, Comment
- FROM `/Root/Test`
+ FROM `/Root/Test`
WHERE Group < 2
ORDER BY Group DESC, Name DESC
LIMIT 1;
@@ -360,7 +360,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
TString query = Q_(R"(
SELECT Key, Text, Data
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
ORDER BY Key DESC
LIMIT 8;
)");
@@ -413,7 +413,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $minKey AS Uint64;
SELECT *
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Key >= $minKey
ORDER BY Data, Key DESC
LIMIT $limit OFFSET $offset;
@@ -471,7 +471,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $minKey AS Uint64;
SELECT *
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Key >= $minKey
ORDER BY Data, Key DESC
LIMIT $limit + 1 OFFSET $offset - 1;
@@ -529,7 +529,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $minKey AS Uint64;
SELECT *
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
WHERE Key >= $minKey
ORDER BY Key
LIMIT $limit + 1 OFFSET $offset - 1;
@@ -579,7 +579,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $y AS "String?";
SELECT *
- FROM `/Root/Join2`
+ FROM `/Root/Join2`
WHERE Key1 = $x AND Key2 > $y
ORDER BY Key1, Key2
LIMIT 10;
@@ -629,7 +629,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $y AS "String?";
SELECT *
- FROM `/Root/Join2`
+ FROM `/Root/Join2`
WHERE Key1 = $x AND Key2 < $y
ORDER BY Key1, Key2
LIMIT 10;
@@ -685,7 +685,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $y AS "String?";
SELECT *
- FROM `/Root/Join2`
+ FROM `/Root/Join2`
WHERE Key1 = $x AND Key2 >= $y
ORDER BY Key1, Key2
LIMIT 10;
@@ -735,7 +735,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $y AS "String?";
SELECT *
- FROM `/Root/Join2`
+ FROM `/Root/Join2`
WHERE Key1 = $x AND Key2 <= $y
ORDER BY Key1, Key2
LIMIT 10;
@@ -794,7 +794,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
);
SELECT *
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
ORDER BY Data DESC, Key
LIMIT CAST($fetch AS Uint64) ?? 0;
)");
@@ -832,12 +832,12 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $key AS Uint32;
$fetch = (
- SELECT Value2 + 1 AS Take FROM `/Root/TwoShard`
+ SELECT Value2 + 1 AS Take FROM `/Root/TwoShard`
WHERE Key = $key
);
SELECT *
- FROM `/Root/EightShard`
+ FROM `/Root/EightShard`
ORDER BY Data DESC, Key
LIMIT 2 OFFSET CAST($fetch AS Uint64) ?? 0;
)");
@@ -977,7 +977,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $limit AS Uint64;
SELECT *
- FROM `/Root/TwoShard`
+ FROM `/Root/TwoShard`
ORDER BY Key
LIMIT $limit;
)");
@@ -1025,7 +1025,7 @@ Y_UNIT_TEST_SUITE(KqpSort) {
DECLARE $value AS Int32;
SELECT *
- FROM `/Root/TwoShard`
+ FROM `/Root/TwoShard`
WHERE Value2 != $value
LIMIT $limit;
)");
diff --git a/ydb/core/kqp/ut/kqp_stats_ut.cpp b/ydb/core/kqp/ut/kqp_stats_ut.cpp
index e0c6e01ace5..f7815c016ee 100644
--- a/ydb/core/kqp/ut/kqp_stats_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_stats_ut.cpp
@@ -21,7 +21,7 @@ Y_UNIT_TEST(MultiTxStatsFullExp) {
settings.ProfileMode(NYdb::NExperimental::EStreamQueryProfileMode::Full);
auto it = db.ExecuteStreamQuery(R"(
- SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Data LIMIT 4;
+ SELECT * FROM `/Root/EightShard` WHERE Key BETWEEN 150 AND 266 ORDER BY Data LIMIT 4;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
@@ -42,7 +42,7 @@ Y_UNIT_TEST(JoinNoStats) {
settings.CollectQueryStats(ECollectQueryStatsMode::None);
auto it = db.StreamExecuteScanQuery(R"(
- SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
+ SELECT count(*) FROM `/Root/EightShard` AS t JOIN `/Root/KeyValue` AS kv ON t.Data = kv.Key;
)", settings).GetValueSync();
auto res = CollectStreamResult(it);
diff --git a/ydb/core/kqp/ut/kqp_sys_col_ut.cpp b/ydb/core/kqp/ut/kqp_sys_col_ut.cpp
index 60a9de420c5..13fb8fa8ecc 100644
--- a/ydb/core/kqp/ut/kqp_sys_col_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_sys_col_ut.cpp
@@ -102,7 +102,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- REPLACE INTO [/Root/TwoShard] (Key, Value1, Value2)
+ REPLACE INTO [/Root/TwoShard] (Key, Value1, Value2)
VALUES (4u, "Four", -4);
)";
auto result = ExecuteDataQuery(kikimr, query);
@@ -110,7 +110,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM [/Root/TwoShard]
+ SELECT COUNT(*) FROM [/Root/TwoShard]
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -4;
)";
auto result = ExecuteDataQuery(kikimr, query);
@@ -121,7 +121,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- UPDATE [/Root/TwoShard] SET Value2 = -44
+ UPDATE [/Root/TwoShard] SET Value2 = -44
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -4;
)";
ExecuteDataQuery(kikimr, query);
@@ -129,7 +129,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM [/Root/TwoShard]
+ SELECT COUNT(*) FROM [/Root/TwoShard]
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)";
auto result = ExecuteDataQuery(kikimr, query);
@@ -140,7 +140,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- DELETE FROM [/Root/TwoShard]
+ DELETE FROM [/Root/TwoShard]
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)";
auto result = ExecuteDataQuery(kikimr, query);
@@ -148,7 +148,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
{
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM [/Root/TwoShard]
+ SELECT COUNT(*) FROM [/Root/TwoShard]
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)";
auto result = ExecuteDataQuery(kikimr, query);
@@ -161,8 +161,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
Y_UNIT_TEST(InnerJoinTables) {
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM [/Root/Join1] AS t1
- INNER JOIN [/Root/Join2] AS t2
+ SELECT * FROM [/Root/Join1] AS t1
+ INNER JOIN [/Root/Join2] AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
@@ -175,8 +175,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
Y_UNIT_TEST(InnerJoinSelect) {
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM [/Root/Join1] AS t1
- INNER JOIN (SELECT Key1, Key2, Value2 FROM [/Root/Join2]) AS t2
+ SELECT * FROM [/Root/Join1] AS t1
+ INNER JOIN (SELECT Key1, Key2, Value2 FROM [/Root/Join2]) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
@@ -189,8 +189,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV0) {
Y_UNIT_TEST(InnerJoinSelectAsterisk) {
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM [/Root/Join1] AS t1
- INNER JOIN (SELECT * FROM [/Root/Join2]) AS t2
+ SELECT * FROM [/Root/Join1] AS t1
+ INNER JOIN (SELECT * FROM [/Root/Join2]) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
@@ -205,7 +205,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(SelectRowAsterisk) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/TwoShard` WHERE Key = 1;
+ SELECT * FROM `/Root/TwoShard` WHERE Key = 1;
)");
auto result = ExecuteDataQuery(query);
UNIT_ASSERT(result.GetResultSets().size());
@@ -216,7 +216,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(SelectRowById) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/TwoShard` WHERE _yql_partition_id = 72075186224037888ul;
+ SELECT * FROM `/Root/TwoShard` WHERE _yql_partition_id = 72075186224037888ul;
)");
auto result = ExecuteDataQuery(query);
UNIT_ASSERT(result.GetResultSets().size());
@@ -227,7 +227,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(SelectRange) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT _yql_partition_id FROM `/Root/TwoShard` WHERE Key < 3;
+ SELECT _yql_partition_id FROM `/Root/TwoShard` WHERE Key < 3;
)");
auto result = ExecuteDataQuery(query);
UNIT_ASSERT(result.GetResultSets().size());
@@ -240,7 +240,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- REPLACE INTO `/Root/TwoShard` (Key, Value1, Value2)
+ REPLACE INTO `/Root/TwoShard` (Key, Value1, Value2)
VALUES (4u, "Four", -4);
)");
auto result = ExecuteDataQuery(kikimr, query);
@@ -248,7 +248,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM `/Root/TwoShard`
+ SELECT COUNT(*) FROM `/Root/TwoShard`
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -4;
)");
auto result = ExecuteDataQuery(kikimr, query);
@@ -259,7 +259,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- UPDATE `/Root/TwoShard` SET Value2 = -44
+ UPDATE `/Root/TwoShard` SET Value2 = -44
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -4;
)");
ExecuteDataQuery(kikimr, query);
@@ -267,7 +267,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM `/Root/TwoShard`
+ SELECT COUNT(*) FROM `/Root/TwoShard`
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)");
auto result = ExecuteDataQuery(kikimr, query);
@@ -278,7 +278,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- DELETE FROM `/Root/TwoShard`
+ DELETE FROM `/Root/TwoShard`
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)");
auto result = ExecuteDataQuery(kikimr, query);
@@ -286,7 +286,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
{
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT COUNT(*) FROM `/Root/TwoShard`
+ SELECT COUNT(*) FROM `/Root/TwoShard`
WHERE _yql_partition_id = 72075186224037888ul AND Value2 = -44;
)");
auto result = ExecuteDataQuery(kikimr, query);
@@ -299,8 +299,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(InnerJoinTables) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN `/Root/Join2` AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN `/Root/Join2` AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)");
@@ -313,8 +313,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(InnerJoinSelect) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN (SELECT Key1, Key2, Value2 FROM `/Root/Join2`) AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN (SELECT Key1, Key2, Value2 FROM `/Root/Join2`) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)");
@@ -327,8 +327,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
Y_UNIT_TEST_NEW_ENGINE(InnerJoinSelectAsterisk) {
auto query = Q_(R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN (SELECT * FROM `/Root/Join2`) AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN (SELECT * FROM `/Root/Join2`) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)");
@@ -342,7 +342,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/TwoShard` WHERE Key = 1;
+ SELECT * FROM `/Root/TwoShard` WHERE Key = 1;
)";
auto it = ExecuteStreamQuery(kikimr, query);
CompareYson(R"([[[1u];["One"];[-1]]])", StreamResultToYson(it));
@@ -352,7 +352,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/TwoShard` WHERE _yql_partition_id = 72075186224037888ul;
+ SELECT * FROM `/Root/TwoShard` WHERE _yql_partition_id = 72075186224037888ul;
)";
auto it = ExecuteStreamQuery(kikimr, query);
CompareYson(R"([[[1u];["One"];[-1]];[[2u];["Two"];[0]];[[3u];["Three"];[1]]])", StreamResultToYson(it));
@@ -362,7 +362,7 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT _yql_partition_id FROM `/Root/TwoShard` WHERE Key < 3;
+ SELECT _yql_partition_id FROM `/Root/TwoShard` WHERE Key < 3;
)";
auto it = ExecuteStreamQuery(kikimr, query);
CompareYson(R"([[[72075186224037888u]];[[72075186224037888u]]])", StreamResultToYson(it));
@@ -372,8 +372,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN `/Root/Join2` AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN `/Root/Join2` AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
@@ -385,8 +385,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN (SELECT Key1, Key2, Value2 FROM `/Root/Join2`) AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN (SELECT Key1, Key2, Value2 FROM `/Root/Join2`) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
@@ -398,8 +398,8 @@ Y_UNIT_TEST_SUITE(KqpSysColV1) {
TKikimrRunner kikimr;
auto query = R"(
PRAGMA kikimr.EnableSystemColumns = "true";
- SELECT * FROM `/Root/Join1` AS t1
- INNER JOIN (SELECT * FROM `/Root/Join2`) AS t2
+ SELECT * FROM `/Root/Join1` AS t1
+ INNER JOIN (SELECT * FROM `/Root/Join2`) AS t2
ON t1.Fk21 == t2.Key1 AND t1.Fk22 == t2.Key2
WHERE t1.Value == "Value5" AND t2.Value2 == "Value31";
)";
diff --git a/ydb/core/kqp/ut/kqp_sys_view_ut.cpp b/ydb/core/kqp/ut/kqp_sys_view_ut.cpp
index 47049b80a09..1f8988517c4 100644
--- a/ydb/core/kqp/ut/kqp_sys_view_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_sys_view_ut.cpp
@@ -50,7 +50,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto it = client.StreamExecuteScanQuery(R"(
SELECT OwnerId, PartIdx, Path, PathId
- FROM `/Root/.sys/partition_stats`
+ FROM `/Root/.sys/partition_stats`
ORDER BY PathId, PartIdx;
)").GetValueSync();
@@ -100,7 +100,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
TString query = R"(
SELECT OwnerId, PathId, PartIdx, Path
- FROM `/Root/.sys/partition_stats`
+ FROM `/Root/.sys/partition_stats`
WHERE OwnerId = 72057594046644480ul AND PathId > 5u AND PathId <= 9u
ORDER BY PathId, PartIdx;
)";
@@ -131,7 +131,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
TString query = R"(
SELECT OwnerId, PathId, PartIdx, Path
- FROM `/Root/.sys/partition_stats`
+ FROM `/Root/.sys/partition_stats`
WHERE OwnerId = 72057594046644480ul AND PathId >= 6u AND PathId < 9u
ORDER BY PathId, PartIdx;
)";
@@ -157,7 +157,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto it = client.StreamExecuteScanQuery(R"(
SELECT OwnerId, PathId, PartIdx, Path
- FROM `/Root/.sys/partition_stats`
+ FROM `/Root/.sys/partition_stats`
WHERE OwnerId = 72057594046644480ul AND PathId = 5u AND PartIdx > 1u AND PartIdx < 7u
ORDER BY PathId, PartIdx;
)").GetValueSync();
@@ -183,7 +183,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto it = client.StreamExecuteScanQuery(R"(
SELECT NodeId, Host
- FROM `/Root/.sys/nodes`;
+ FROM `/Root/.sys/nodes`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -207,7 +207,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto query = Sprintf(R"(
SELECT NodeId, Host
- FROM `/Root/.sys/nodes`
+ FROM `/Root/.sys/nodes`
WHERE NodeId >= %du AND NodeId <= %du
)", offset + 1, offset + 3);
@@ -233,7 +233,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto query = Sprintf(R"(
SELECT NodeId, Host
- FROM `/Root/.sys/nodes`
+ FROM `/Root/.sys/nodes`
WHERE NodeId > %du AND NodeId < %du
)", offset + 1, offset + 3);
@@ -254,19 +254,19 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto client = kikimr.GetTableClient();
auto session = client.CreateSession().GetValueSync().GetSession();
{
- auto result = session.ExecuteDataQuery("SELECT 1;",
- TTxControl::BeginTx().CommitTx()).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
- {
+ auto result = session.ExecuteDataQuery("SELECT 1;",
+ TTxControl::BeginTx().CommitTx()).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+ {
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/TwoShard`
+ SELECT * FROM `/Root/TwoShard`
)"), TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
}
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/EightShard`
+ SELECT * FROM `/Root/EightShard`
)"), TTxControl::BeginTx().CommitTx()).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
}
@@ -303,12 +303,12 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
UNIT_ASSERT(readBytesSet.contains(432)); // EightShard
};
- checkTable("`/Root/.sys/top_queries_by_read_bytes_one_minute`");
- checkTable("`/Root/.sys/top_queries_by_read_bytes_one_hour`");
- checkTable("`/Root/.sys/top_queries_by_duration_one_minute`");
- checkTable("`/Root/.sys/top_queries_by_duration_one_hour`");
- checkTable("`/Root/.sys/top_queries_by_cpu_time_one_minute`");
- checkTable("`/Root/.sys/top_queries_by_cpu_time_one_hour`");
+ checkTable("`/Root/.sys/top_queries_by_read_bytes_one_minute`");
+ checkTable("`/Root/.sys/top_queries_by_read_bytes_one_hour`");
+ checkTable("`/Root/.sys/top_queries_by_duration_one_minute`");
+ checkTable("`/Root/.sys/top_queries_by_duration_one_hour`");
+ checkTable("`/Root/.sys/top_queries_by_cpu_time_one_minute`");
+ checkTable("`/Root/.sys/top_queries_by_cpu_time_one_hour`");
}
Y_UNIT_TEST(FailNavigate) {
@@ -316,7 +316,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
auto client = kikimr.GetTableClient();
auto it = client.StreamExecuteScanQuery(R"(
- SELECT PathId FROM `/Root/.sys/partition_stats`;
+ SELECT PathId FROM `/Root/.sys/partition_stats`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -344,7 +344,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
TTableClient client(driver);
auto it = client.StreamExecuteScanQuery(R"(
- SELECT PathId FROM `/Root/.sys/partition_stats`;
+ SELECT PathId FROM `/Root/.sys/partition_stats`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
@@ -373,7 +373,7 @@ Y_UNIT_TEST_SUITE(KqpSystemView) {
TTableClient client(driver);
auto it = client.StreamExecuteScanQuery(R"(
- SELECT PathId FROM `/Root/.sys/partition_stats`;
+ SELECT PathId FROM `/Root/.sys/partition_stats`;
)").GetValueSync();
UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_table_predicate_ut.cpp b/ydb/core/kqp/ut/kqp_table_predicate_ut.cpp
index 8daa5ac1cc7..5ab5108968f 100644
--- a/ydb/core/kqp/ut/kqp_table_predicate_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_table_predicate_ut.cpp
@@ -17,7 +17,7 @@ using namespace NYdb::NTable;
static void CreateSampleTables(TSession session) {
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/TestNulls] (
+ CREATE TABLE [/Root/TestNulls] (
Key1 Uint32,
Key2 Uint32,
Value String,
@@ -26,7 +26,7 @@ static void CreateSampleTables(TSession session) {
)").GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/TestNulls] (Key1, Key2, Value) VALUES
+ REPLACE INTO [/Root/TestNulls] (Key1, Key2, Value) VALUES
(NULL, NULL, "One"),
(NULL, 100u, "Two"),
(NULL, 200u, "Three"),
@@ -44,7 +44,7 @@ static void CreateSampleTables(TSession session) {
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/TestDate] (
+ CREATE TABLE [/Root/TestDate] (
Key Date,
Value String,
PRIMARY KEY (Key)
@@ -52,7 +52,7 @@ static void CreateSampleTables(TSession session) {
)").GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/TestDate] (Key, Value) VALUES
+ REPLACE INTO [/Root/TestDate] (Key, Value) VALUES
(NULL, "One"),
(Date("2019-05-08"), "Two"),
(Date("2019-07-01"), "Three");
@@ -65,7 +65,7 @@ static void CreateSampleTables(TSession session) {
.AddNullableColumn("ValueInt", EPrimitiveType::Int32)
.SetPrimaryKeyColumn("Key");
- UNIT_ASSERT(session.CreateTable("/Root/MultiShardTable",
+ UNIT_ASSERT(session.CreateTable("/Root/MultiShardTable",
builder.Build(),
TCreateTableSettings()
.PartitioningPolicy(
@@ -75,7 +75,7 @@ static void CreateSampleTables(TSession session) {
).GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/MultiShardTable] (Key, Value) VALUES
+ REPLACE INTO [/Root/MultiShardTable] (Key, Value) VALUES
(1, "One"),
(2, "Two"),
(3, "Three"),
@@ -84,7 +84,7 @@ static void CreateSampleTables(TSession session) {
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/MultiShardTable] (Key, ValueInt) VALUES
+ REPLACE INTO [/Root/MultiShardTable] (Key, ValueInt) VALUES
(10, 10);
)", TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).GetValueSync().IsSuccess());
}
@@ -96,7 +96,7 @@ static void CreateSampleTables(TSession session) {
.AddNullableColumn("ValueInt", EPrimitiveType::Int32)
.SetPrimaryKeyColumns({"Key1", "Key2"});
- UNIT_ASSERT(session.CreateTable("/Root/MultiShardTableCk",
+ UNIT_ASSERT(session.CreateTable("/Root/MultiShardTableCk",
builder.Build(),
TCreateTableSettings()
.PartitioningPolicy(
@@ -106,7 +106,7 @@ static void CreateSampleTables(TSession session) {
).GetValueSync().IsSuccess());
UNIT_ASSERT(session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/MultiShardTableCk] (Key1, Key2, ValueInt) VALUES
+ REPLACE INTO [/Root/MultiShardTableCk] (Key1, Key2, ValueInt) VALUES
(1, "One", NULL),
(2, "Two", NULL),
(3, "Three", NULL),
@@ -350,7 +350,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Value FROM [/Root/TestNulls] WHERE
+ SELECT Value FROM [/Root/TestNulls] WHERE
Key1 IS NULL AND Key2 IS NULL
)"),
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -367,7 +367,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/Test]
+ SELECT * FROM [/Root/Test]
WHERE Group == 1 AND Name IS NULL
)"),
TTxControl::BeginTx().CommitTx()).ExtractValueSync();
@@ -384,7 +384,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Value FROM [/Root/TestNulls] WHERE
+ SELECT Value FROM [/Root/TestNulls] WHERE
Key1 <= 1
ORDER BY Value
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -401,7 +401,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Value FROM [/Root/TestNulls] WHERE Key1 > 1
+ SELECT Value FROM [/Root/TestNulls] WHERE Key1 > 1
)"),
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -427,7 +427,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto result = session.ExecuteDataQuery(Q_(R"(
DECLARE $key1 AS 'Uint32?';
DECLARE $key2 AS 'Uint32?';
- SELECT Value FROM [/Root/TestNulls] WHERE
+ SELECT Value FROM [/Root/TestNulls] WHERE
Key1 = $key1 AND Key2 >= $key2
)"),
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), std::move(params)).ExtractValueSync();
@@ -452,7 +452,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto result = session.ExecuteDataQuery(Q_(R"(
DECLARE $key1 AS 'Uint32?';
DECLARE $key2 AS 'Uint32?';
- SELECT Value FROM [/Root/TestNulls] WHERE
+ SELECT Value FROM [/Root/TestNulls] WHERE
Key1 = $key1 AND Key2 == $key2
)"),
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), std::move(params)).ExtractValueSync();
@@ -480,7 +480,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
DECLARE $key1 AS 'Uint32?';
DECLARE $key2 AS 'Uint32?';
- SELECT Value FROM [/Root/TestNulls] WHERE
+ SELECT Value FROM [/Root/TestNulls] WHERE
Key1 = $key1 AND Key2 <= $key2
)",
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx(), std::move(params)).ExtractValueSync();
@@ -503,8 +503,8 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto result = session.ExecuteDataQuery(Q_(R"(
DECLARE $name AS String;
- $groups = (SELECT Group FROM `/Root/Test` WHERE Name = $name);
- SELECT * FROM `/Root/TwoShard` WHERE Key in $groups;
+ $groups = (SELECT Group FROM `/Root/Test` WHERE Name = $name);
+ SELECT * FROM `/Root/TwoShard` WHERE Key in $groups;
)"), TTxControl::BeginTx().CommitTx(), std::move(params)).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(result.IsSuccess());
@@ -518,14 +518,14 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto session = db.CreateSession().GetValueSync().GetSession();
{
- auto query = Q_(R"(UPDATE `/Root/KeyValue` SET Value = Value || "_updated" WHERE Key IN (1, 2, 3, 4))");
+ auto query = Q_(R"(UPDATE `/Root/KeyValue` SET Value = Value || "_updated" WHERE Key IN (1, 2, 3, 4))");
auto result = session.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
}
{
- auto query = Q_(R"(SELECT Key, Value FROM `/Root/KeyValue` ORDER BY Key)");
+ auto query = Q_(R"(SELECT Key, Value FROM `/Root/KeyValue` ORDER BY Key)");
auto result = session.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
@@ -542,7 +542,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
- const TString query = Q_("UPDATE [/Root/MultiShardTable] SET Value = 'aaaaa' WHERE Key IN (1, 500)");
+ const TString query = Q_("UPDATE [/Root/MultiShardTable] SET Value = 'aaaaa' WHERE Key IN (1, 500)");
if (!UseNewEngine) {
auto result = session.ExplainDataQuery(query).GetValueSync();
@@ -594,7 +594,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
const TString query = Q_(R"(
- UPDATE [/Root/MultiShardTable] SET ValueInt = ValueInt + 1
+ UPDATE [/Root/MultiShardTable] SET ValueInt = ValueInt + 1
WHERE Key IN (1,2,3,4,NULL))");
{
auto result = session.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx()).GetValueSync();
@@ -604,7 +604,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key, ValueInt FROM [/Root/MultiShardTable] ORDER BY Key;
+ SELECT Key, ValueInt FROM [/Root/MultiShardTable] ORDER BY Key;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -622,7 +622,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- UPDATE `/Root/MultiShardTable` SET ValueInt = ValueInt + 1 WHERE Key IN
+ UPDATE `/Root/MultiShardTable` SET ValueInt = ValueInt + 1 WHERE Key IN
(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,
61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,
@@ -632,7 +632,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key, ValueInt FROM `/Root/MultiShardTable` ORDER BY Key
+ SELECT Key, ValueInt FROM `/Root/MultiShardTable` ORDER BY Key
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -649,7 +649,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- UPDATE `/Root/MultiShardTableCk` SET ValueInt = ValueInt + 1 WHERE Key1 IN
+ UPDATE `/Root/MultiShardTableCk` SET ValueInt = ValueInt + 1 WHERE Key1 IN
(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,
61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,
@@ -660,7 +660,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key1, ValueInt FROM `/Root/MultiShardTableCk` ORDER BY Key1;
+ SELECT Key1, ValueInt FROM `/Root/MultiShardTableCk` ORDER BY Key1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -679,8 +679,8 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
const TString query = Q_(R"(
- UPDATE `/Root/MultiShardTable` SET ValueInt = ValueInt + 1 WHERE Key IN (1,2,10);
- UPDATE `/Root/TestNulls` SET Value = 'qq' WHERE Key1 IN (1,2);
+ UPDATE `/Root/MultiShardTable` SET ValueInt = ValueInt + 1 WHERE Key IN (1,2,10);
+ UPDATE `/Root/TestNulls` SET Value = 'qq' WHERE Key1 IN (1,2);
)");
{
@@ -690,7 +690,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key, ValueInt FROM `/Root/MultiShardTable` ORDER BY Key;
+ SELECT Key, ValueInt FROM `/Root/MultiShardTable` ORDER BY Key;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -700,7 +700,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key1, Value FROM `/Root/TestNulls` ORDER BY Key1;
+ SELECT Key1, Value FROM `/Root/TestNulls` ORDER BY Key1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -732,7 +732,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
const TString query = "PRAGMA Kikimr.UseNewEngine = 'false'; "
"SELECT Key, ValueInt FROM [/Root/MultiShardTable] WHERE ValueInt = 1 "
- "UNION ALL SELECT Key, ValueInt FROM [/Root/MultiShardTable] WHERE ValueInt = 1;";
+ "UNION ALL SELECT Key, ValueInt FROM [/Root/MultiShardTable] WHERE ValueInt = 1;";
auto result = session.ExplainDataQuery(query).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
@@ -763,7 +763,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
- const TString query = Q_("UPDATE `/Root/MultiShardTable` SET Value = 'aaaaa' WHERE Value IN ('One', 'www')");
+ const TString query = Q_("UPDATE `/Root/MultiShardTable` SET Value = 'aaaaa' WHERE Value IN ('One', 'www')");
if (!UseNewEngine) {
auto result = session.ExplainDataQuery(query).GetValueSync();
@@ -819,7 +819,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
NYdb::NTable::TExecDataQuerySettings execSettings;
execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Value FROM [/Root/TestDate]
+ SELECT Value FROM [/Root/TestDate]
WHERE Key = Date("2019-07-01")
)"), TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -841,7 +841,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key FROM [/Root/EightShard] WHERE
+ SELECT Key FROM [/Root/EightShard] WHERE
Key > 200 AND Key >= 301 AND
Key < 600 AND Key <= 501
ORDER BY Key;
@@ -870,7 +870,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
DECLARE $key_to_1 AS Uint64;
DECLARE $key_to_2 AS Uint64;
- SELECT Key FROM [/Root/EightShard] WHERE
+ SELECT Key FROM [/Root/EightShard] WHERE
Key > $key_from_1 AND Key >= $key_from_2 AND
Key < $key_to_1 AND Key <= $key_to_2
ORDER BY Key;
@@ -896,7 +896,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
DECLARE $key_from_1 AS Uint64;
DECLARE $key_to_1 AS Uint64;
- SELECT Key FROM [/Root/EightShard] WHERE
+ SELECT Key FROM [/Root/EightShard] WHERE
Key > $key_from_1 AND Key >= 301 AND
Key < $key_to_1 AND Key <= 501
ORDER BY Key;
@@ -915,7 +915,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
CreateSampleTables(session);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
+ SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
Key2 >= 100 AND Key2 > 200 AND
Key2 <= 600 AND Key2 < 500
ORDER BY Key2;
@@ -938,7 +938,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
DECLARE $key_to_1 AS 'Uint32?';
DECLARE $key_to_2 AS 'Uint32?';
- SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
+ SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
Key2 >= $key_from_1 AND Key2 > $key_from_2 AND
Key2 <= $key_to_1 AND Key2 < $key_to_2
ORDER BY Key2;
@@ -957,7 +957,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
DECLARE $key_from_2 AS 'Uint32?';
DECLARE $key_to_2 AS 'Uint32?';
- SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
+ SELECT Key2 FROM [/Root/TestNulls] WHERE Key1 = 3 AND
Key2 >= 100 AND Key2 > $key_from_2 AND
Key2 <= 600 AND Key2 < $key_to_2
ORDER BY Key2;
@@ -977,7 +977,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
PRAGMA Kikimr.UseNewEngine = 'false';
DECLARE $key1_from AS Uint32;
DECLARE $name AS String;
- SELECT * FROM [/Root/Join2] WHERE Key1 > $key1_from AND Name = $name;
+ SELECT * FROM [/Root/Join2] WHERE Key1 > $key1_from AND Name = $name;
)";
auto result = session.ExplainDataQuery(query).GetValueSync();
@@ -987,7 +987,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
UNIT_ASSERT(NJson::ReadJsonTree(result.GetPlan(), &plan));
UNIT_ASSERT_VALUES_EQUAL(plan["tables"].GetArray().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(plan["tables"][0]["name"], "/Root/Join2");
+ UNIT_ASSERT_VALUES_EQUAL(plan["tables"][0]["name"], "/Root/Join2");
UNIT_ASSERT_VALUES_EQUAL(plan["tables"][0]["reads"].GetArray().size(), 1);
auto& read = plan["tables"][0]["reads"][0];
UNIT_ASSERT(!read.Has("lookup_by"));
@@ -1000,7 +1000,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = Q_(R"(
- DELETE FROM `/Root/Join2`
+ DELETE FROM `/Root/Join2`
WHERE Key1 = 102 AND Key2 = "One" OR
Key1 = 101 AND Key2 = "Two" OR
Key1 = 101 AND Key2 = "Three"
@@ -1050,7 +1050,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
NYdb::NTable::TExecDataQuerySettings execSettings;
execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/EightShard`
+ SELECT * FROM `/Root/EightShard`
WHERE Key = 101 OR Key = 302 OR Key = 403 OR Key = 705
ORDER BY Key;
)"), TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
@@ -1079,7 +1079,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
NYdb::NTable::TExecDataQuerySettings execSettings;
execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Logs`
+ SELECT * FROM `/Root/Logs`
WHERE
App = "apache" AND Ts = 0 OR
App = "nginx" AND Ts = 2 OR
@@ -1120,7 +1120,7 @@ Y_UNIT_TEST_SUITE(KqpTablePredicate) {
NYdb::NTable::TExecDataQuerySettings execSettings;
execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/Logs`
+ SELECT * FROM `/Root/Logs`
WHERE
App = "apache" AND Ts = 0 OR
App = "nginx" AND Ts = 2 OR
diff --git a/ydb/core/kqp/ut/kqp_tx_ut.cpp b/ydb/core/kqp/ut/kqp_tx_ut.cpp
index 2229325c833..5837331604c 100644
--- a/ydb/core/kqp/ut/kqp_tx_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_tx_ut.cpp
@@ -15,16 +15,16 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/Test]
+ UPSERT INTO [/Root/Test]
SELECT Group, "Sergey" AS Name
- FROM [/Root/Test];
+ FROM [/Root/Test];
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
auto tx = result.GetTransaction();
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/Test] WHERE Group = 1;
+ SELECT * FROM [/Root/Test] WHERE Group = 1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([
@@ -36,7 +36,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT_VALUES_EQUAL_C(commitResult.GetStatus(), EStatus::SUCCESS, commitResult.GetIssues().ToString());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/Test] WHERE Group = 1;
+ SELECT * FROM [/Root/Test] WHERE Group = 1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([
@@ -57,12 +57,12 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(tx.IsActive());
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)"), TTxControl::Tx(tx)).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([])", FormatResultSetYson(result.GetResultSet(0)));
@@ -71,7 +71,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT_C(commitResult.IsSuccess(), commitResult.GetIssues().ToString());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([[[10u];["New"]]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -87,7 +87,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
+ SELECT * FROM [/Root/KeyValue] WHERE Key = 1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -105,13 +105,13 @@ Y_UNIT_TEST_SUITE(KqpTx) {
result = session.ExecuteDataQuery(Q_(R"(
DECLARE $name AS String;
- UPSERT INTO [/Root/Test] (Group, Name, Amount) VALUES
+ UPSERT INTO [/Root/Test] (Group, Name, Amount) VALUES
(10, $name, 500);
)"), TTxControl::Tx(*tx).CommitTx(), params).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/Test] WHERE Group = 10;
+ SELECT * FROM [/Root/Test] WHERE Group = 10;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([[[500u];#;[10u];["One"]]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -135,12 +135,12 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::OnlineRO())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::BAD_REQUEST);
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::StaleRO())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::BAD_REQUEST);
}
@@ -152,7 +152,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
// with effects, without locks
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -164,7 +164,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(rollbackResult.IsSuccess());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([])", FormatResultSetYson(result.GetResultSet(0)));
@@ -181,7 +181,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
// with effects, with locks
auto result = session.ExecuteDataQuery(Q_(R"(
- UPDATE [/Root/KeyValue] SET Value = "New" WHERE Key = 1;
+ UPDATE [/Root/KeyValue] SET Value = "New" WHERE Key = 1;
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -193,7 +193,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(rollbackResult.IsSuccess());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([])", FormatResultSetYson(result.GetResultSet(0)));
@@ -259,7 +259,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
{
result = session.ExecuteDataQuery(Q_(R"(
- UPDATE `/Root/KeyValue` SET Value = "New" WHERE Key = 1;
+ UPDATE `/Root/KeyValue` SET Value = "New" WHERE Key = 1;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
}
@@ -279,7 +279,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)"), TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
@@ -351,7 +351,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)"), TTxControl::BeginTx(TTxSettings::SerializableRW())).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -376,7 +376,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
auto session = db.CreateSession().GetValueSync().GetSession();
auto query = session.PrepareDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)")).ExtractValueSync().GetQuery();
auto result = query.Execute(TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
@@ -386,7 +386,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(!tx->IsActive());
result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
+ SELECT * FROM [/Root/KeyValue] WHERE Value = "New";
)"), TTxControl::BeginTx(TTxSettings::OnlineRO()).CommitTx()).ExtractValueSync();
UNIT_ASSERT(result.IsSuccess());
CompareYson(R"([[[10u];["New"]]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -403,13 +403,13 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(tx.IsActive());
auto result = session.ExecuteDataQuery(Q_(R"(
- INSERT INTO [/Root/KeyValue] (Key, Value) VALUES (1u, "New");
+ INSERT INTO [/Root/KeyValue] (Key, Value) VALUES (1u, "New");
)"), TTxControl::Tx(tx)).ExtractValueSync();
// result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::PRECONDITION_FAILED, result.GetIssues().ToString());
result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (1u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (1u, "New");
)"), TTxControl::Tx(tx)).ExtractValueSync();
// result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::NOT_FOUND, result.GetIssues().ToString());
@@ -426,7 +426,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT(tx.IsActive());
auto result = session.ExecuteDataQuery(Q_(R"(
- UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
+ UPSERT INTO [/Root/KeyValue] (Key, Value) VALUES (10u, "New");
)"), TTxControl::Tx(tx)).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -466,7 +466,7 @@ Y_UNIT_TEST_SUITE(KqpTx) {
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
result = session.ExecuteDataQuery(R"(
- SELECT * FROM `/Root/KeyValue`
+ SELECT * FROM `/Root/KeyValue`
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([
diff --git a/ydb/core/kqp/ut/kqp_write_ut.cpp b/ydb/core/kqp/ut/kqp_write_ut.cpp
index 7328747bece..d260211f583 100644
--- a/ydb/core/kqp/ut/kqp_write_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_write_ut.cpp
@@ -32,7 +32,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/KeyValue`;
+ SELECT * FROM `/Root/KeyValue`;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(result.IsSuccess());
@@ -54,7 +54,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/KeyValue`;
+ SELECT * FROM `/Root/KeyValue`;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(result.IsSuccess());
@@ -76,7 +76,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
{
auto result = session.ExecuteDataQuery(Q_(R"(
- SELECT * FROM `/Root/KeyValue`;
+ SELECT * FROM `/Root/KeyValue`;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT(result.IsSuccess());
@@ -103,11 +103,11 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
DECLARE $key AS Uint64;
- $data = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key);
+ $data = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key);
$newData = COALESCE($data, 0u) + 1;
$tuple = (SELECT $key AS Key, $newData AS Data);
- UPSERT INTO [/Root/EightShard]
+ UPSERT INTO [/Root/EightShard]
SELECT * FROM $tuple;
)", TTxControl::BeginTx().CommitTx(), params, execSettings).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -137,13 +137,13 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
)";
auto readTemplate = R"(
- $data%1$d = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key%1$d);
+ $data%1$d = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key%1$d);
$newData%1$d = COALESCE($data%1$d, 0u) + 1;
$tuple%1$d = (SELECT $key%1$d AS Key, $newData%1$d AS Data);
)";
auto writeTemplate = R"(
- UPSERT INTO [/Root/EightShard]
+ UPSERT INTO [/Root/EightShard]
SELECT * FROM $tuple%1$d;
)";
@@ -194,7 +194,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
auto session = db.CreateSession().GetValueSync().GetSession();
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/Temp] (
+ CREATE TABLE [/Root/Temp] (
Key Uint32,
Value1 String,
Value2 String,
@@ -203,7 +203,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
)").GetValueSync().IsSuccess());
auto result = session.ExecuteDataQuery(R"(
- REPLACE INTO [/Root/Temp] (Key, Value1) VALUES
+ REPLACE INTO [/Root/Temp] (Key, Value1) VALUES
(1u, "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"),
(3u, "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890");
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
@@ -215,10 +215,10 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
DECLARE $Key AS Uint32;
- $value1 = (SELECT Value1 FROM [/Root/Temp] WHERE Key = $Key);
+ $value1 = (SELECT Value1 FROM [/Root/Temp] WHERE Key = $Key);
$tuple = (SELECT $Key AS Key, $value1 AS Value1, $value1 AS Value2);
- UPSERT INTO [/Root/Temp]
+ UPSERT INTO [/Root/Temp]
SELECT * FROM $tuple;
)");
@@ -235,7 +235,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
}));
result = session.ExecuteDataQuery(R"(
- SELECT Value2 FROM [/Root/Temp] ORDER BY Value2;
+ SELECT Value2 FROM [/Root/Temp] ORDER BY Value2;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([[#];[#]])", FormatResultSetYson(result.GetResultSet(0)));
@@ -245,7 +245,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
result = session.ExecuteDataQuery(R"(
- SELECT Value2 FROM [/Root/Temp] ORDER BY Value2;
+ SELECT Value2 FROM [/Root/Temp] ORDER BY Value2;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
CompareYson(R"([
@@ -271,11 +271,11 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
DECLARE $key AS Uint64;
- $data = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key);
+ $data = (SELECT Data FROM [/Root/EightShard] WHERE Key = $key);
$newData = COALESCE($data, 0u) + 1;
$tuple = (SELECT $key AS Key, $newData AS Data);
- UPSERT INTO [/Root/EightShard]
+ UPSERT INTO [/Root/EightShard]
SELECT * FROM $tuple;
)", TTxControl::BeginTx().CommitTx(), params).ExtractValueSync();
@@ -290,7 +290,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
auto session = db.CreateSession().GetValueSync().GetSession();
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
- CREATE TABLE [/Root/TxCheck] (
+ CREATE TABLE [/Root/TxCheck] (
Id Uint32,
PRIMARY KEY (Id)
);
@@ -562,7 +562,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
In_Amount : Uint64?
>>;
- REPLACE INTO `/Root/Test`
+ REPLACE INTO `/Root/Test`
SELECT
In_Group AS Group,
In_Name AS Name,
@@ -583,7 +583,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
UNIT_ASSERT(session.ExecuteSchemeQuery(R"(
--!syntax_v1
- CREATE TABLE `/Root/Temp` (
+ CREATE TABLE `/Root/Temp` (
Key Uint8,
PRIMARY KEY (Key)
);
@@ -592,14 +592,14 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
auto result = session.ExecuteDataQuery(R"(
--!syntax_v1
PRAGMA Kikimr.UseNewEngine = 'false';
- UPSERT INTO `/Root/Temp` (Key) VALUES (127);
+ UPSERT INTO `/Root/Temp` (Key) VALUES (127);
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
result = session.ExecuteDataQuery(R"(
--!syntax_v1
PRAGMA Kikimr.UseNewEngine = 'false';
- UPSERT INTO `/Root/Temp` (Key) VALUES (128);
+ UPSERT INTO `/Root/Temp` (Key) VALUES (128);
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -607,7 +607,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
result = session.ExecuteDataQuery(R"(
--!syntax_v1
PRAGMA Kikimr.UseNewEngine = 'false';
- DELETE FROM `/Root/Temp` ON (Key) VALUES (140);
+ DELETE FROM `/Root/Temp` ON (Key) VALUES (140);
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -615,7 +615,7 @@ Y_UNIT_TEST_SUITE(KqpWrite) {
result = session.ExecuteDataQuery(R"(
--!syntax_v1
PRAGMA Kikimr.UseNewEngine = 'false';
- SELECT * FROM `/Root/Temp`;
+ SELECT * FROM `/Root/Temp`;
)", TTxControl::BeginTx().CommitTx()).ExtractValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/kqp_yql_ut.cpp b/ydb/core/kqp/ut/kqp_yql_ut.cpp
index 203bbcb8ef3..99fee295a61 100644
--- a/ydb/core/kqp/ut/kqp_yql_ut.cpp
+++ b/ydb/core/kqp/ut/kqp_yql_ut.cpp
@@ -17,7 +17,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
PRAGMA RefSelect;
- SELECT * FROM `/Root/Test`;
+ SELECT * FROM `/Root/Test`;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -32,7 +32,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- SELECT * FROM CONCAT(`/Root/Test`, `/Root/Test`)
+ SELECT * FROM CONCAT(`/Root/Test`, `/Root/Test`)
WHERE Group = 1;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -48,7 +48,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- SELECT * FROM RANGE(`Root`, `/Root/Test`, `/Root/Test`)
+ SELECT * FROM RANGE(`Root`, `/Root/Test`, `/Root/Test`)
WHERE Group = 1;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -64,11 +64,11 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- SELECT * FROM `/Root/NewTable`;
+ SELECT * FROM `/Root/NewTable`;
COMMIT;
- CREATE TABLE `/Root/NewTable` (
+ CREATE TABLE `/Root/NewTable` (
Id Uint32,
Value String,
PRIMARY KEY(Id)
@@ -107,13 +107,13 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- SELECT * FROM `/Root/Test`;
+ SELECT * FROM `/Root/Test`;
COMMIT;
- DROP TABLE `/Root/Test`;
+ DROP TABLE `/Root/Test`;
- CREATE TABLE `/Root/Test` (
+ CREATE TABLE `/Root/Test` (
Id Uint32,
Value String,
PRIMARY KEY (Id)
@@ -121,7 +121,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
COMMIT;
- SELECT * FROM `/Root/Test`;
+ SELECT * FROM `/Root/Test`;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -136,8 +136,8 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- SELECT * FROM `/Root/Test`;
- DROP TABLE `/Root/Test`;
+ SELECT * FROM `/Root/Test`;
+ DROP TABLE `/Root/Test`;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::GENERIC_ERROR);
@@ -173,7 +173,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- UPDATE `/Root/Test`
+ UPDATE `/Root/Test`
SET Group = Group + 1
WHERE Name != "Paul";
)").GetValueSync();
@@ -190,7 +190,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- UPDATE `/Root/Test`
+ UPDATE `/Root/Test`
SET Amount = Name;
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -206,7 +206,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- INSERT INTO `/Root/Test` (Group, Name, Amount) VALUES
+ INSERT INTO `/Root/Test` (Group, Name, Amount) VALUES
(1u, "Anna", 10000);
)").GetValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -221,7 +221,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- INSERT INTO `/Root/Test` (Group, Name, Amount) VALUES
+ INSERT INTO `/Root/Test` (Group, Name, Amount) VALUES
(100u, "NewName1", 10),
(110u, "NewName2", 20),
(100u, "NewName1", 30);
@@ -238,7 +238,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- INSERT OR IGNORE INTO `/Root/Test` (Group, Name, Amount) VALUES
+ INSERT OR IGNORE INTO `/Root/Test` (Group, Name, Amount) VALUES
(1u, "Anna", 10000),
(100u, "NewName1", 10);
)").GetValueSync();
@@ -256,8 +256,8 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = session.ExecuteDataQuery(Q_(R"(
--!syntax_v1
- DELETE FROM `/Root/Test` WHERE Group = 1;
- UPDATE `/Root/Test` SET Comment = "Updated" WHERE Group = 2;
+ DELETE FROM `/Root/Test` WHERE Group = 1;
+ UPDATE `/Root/Test` SET Comment = "Updated" WHERE Group = 2;
)"), TTxControl::BeginTx().CommitTx()).ExtractValueSync();
result.GetIssues().PrintTo(Cerr);
@@ -273,7 +273,7 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
PRAGMA kikimr.UnwrapReadTableValues = "true";
- SELECT LENGTH(Name) == 4 AND Amount > 1000 FROM `/Root/Test`;
+ SELECT LENGTH(Name) == 4 AND Amount > 1000 FROM `/Root/Test`;
)").GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
@@ -288,19 +288,19 @@ Y_UNIT_TEST_SUITE(KqpYql) {
auto result = client.ExecuteYqlScript(R"(
--!syntax_v1
- CREATE TABLE `/Root/NewTable` (
+ CREATE TABLE `/Root/NewTable` (
Id Uint32,
Value String,
PRIMARY KEY(Id)
);
COMMIT;
- REPLACE INTO `/Root/NewTable` (Id, Value) VALUES
+ REPLACE INTO `/Root/NewTable` (Id, Value) VALUES
(1, "One"),
(2, "Two");
COMMIT;
- SELECT * FROM `/Root/NewTable`;
+ SELECT * FROM `/Root/NewTable`;
)").GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
diff --git a/ydb/core/kqp/ut/ya.make b/ydb/core/kqp/ut/ya.make
index cce9c76c64c..609ec3237f8 100644
--- a/ydb/core/kqp/ut/ya.make
+++ b/ydb/core/kqp/ut/ya.make
@@ -37,7 +37,7 @@ SRCS(
kqp_newengine_flowcontrol_ut.cpp
kqp_newengine_ut.cpp
kqp_not_null_columns_ut.cpp
- kqp_olap_ut.cpp
+ kqp_olap_ut.cpp
kqp_params_ut.cpp
kqp_pragma_ut.cpp
kqp_query_ut.cpp
diff --git a/ydb/core/mind/hive/monitoring.cpp b/ydb/core/mind/hive/monitoring.cpp
index 843de0a606b..7e9a83b354e 100644
--- a/ydb/core/mind/hive/monitoring.cpp
+++ b/ydb/core/mind/hive/monitoring.cpp
@@ -2948,8 +2948,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::HIVE_MON_REQUEST;
- }
-
+ }
+
TUpdateResourcesActor(const TActorId& source, const TActorId& hive, const NKikimrHive::TTabletMetrics& metrics)
: Source(source)
, Hive(hive)
@@ -2991,8 +2991,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::HIVE_MON_REQUEST;
- }
-
+ }
+
TCreateTabletActor(const TActorId& source, ui64 owner, ui64 ownerIdx, TTabletTypes::EType type, ui32 channelsProfile, ui32 followers, THive* hive)
: Source(source)
, Event(new TEvHive::TEvCreateTablet())
@@ -3039,75 +3039,75 @@ public:
};
class TDeleteTabletActor : public TActorBootstrapped<TDeleteTabletActor> {
-private:
- ui64 FAKE_TXID = -1;
-
-public:
+private:
+ ui64 FAKE_TXID = -1;
+
+public:
TActorId Source;
- TAutoPtr<TEvHive::TEvDeleteTablet> Event;
+ TAutoPtr<TEvHive::TEvDeleteTablet> Event;
THive* Hive;
-
+
TDeleteTabletActor(const TActorId& source, ui64 owner, ui64 ownerIdx, THive* hive)
- : Source(source)
- , Event(new TEvHive::TEvDeleteTablet())
- , Hive(hive)
- {
+ : Source(source)
+ , Event(new TEvHive::TEvDeleteTablet())
+ , Hive(hive)
+ {
Event->Record.SetShardOwnerId(owner);
Event->Record.AddShardLocalIdx(ownerIdx);
Event->Record.SetTxId_Deprecated(FAKE_TXID);
- }
-
+ }
+
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::HIVE_MON_REQUEST;
- }
-
+ }
+
TDeleteTabletActor(const TActorId& source, ui64 tabletId, THive* hive)
- : Source(source)
- , Event(new TEvHive::TEvDeleteTablet())
- , Hive(hive)
- {
- ui64 owner = 0;
- ui64 ownerIdx = -1;
- for (auto it = hive->OwnerToTablet.begin(); it != hive->OwnerToTablet.end(); ++it) {
- if (it->second == tabletId) {
- owner = it->first.first;
- ownerIdx = it->first.second;
- break;
- }
- }
-
+ : Source(source)
+ , Event(new TEvHive::TEvDeleteTablet())
+ , Hive(hive)
+ {
+ ui64 owner = 0;
+ ui64 ownerIdx = -1;
+ for (auto it = hive->OwnerToTablet.begin(); it != hive->OwnerToTablet.end(); ++it) {
+ if (it->second == tabletId) {
+ owner = it->first.first;
+ ownerIdx = it->first.second;
+ break;
+ }
+ }
+
Event->Record.SetShardOwnerId(owner);
Event->Record.AddShardLocalIdx(ownerIdx);
Event->Record.SetTxId_Deprecated(FAKE_TXID);
- }
-
-private:
- void HandleTimeout(const TActorContext& ctx) {
- ctx.Send(Source, new NMon::TEvRemoteJsonInfoRes("{\"Error\": \"Timeout\"}"));
- Die(ctx);
- }
-
- void Handle(TEvHive::TEvDeleteTabletReply::TPtr& ptr, const TActorContext& ctx) {
- TStringStream stream;
- stream << ptr->Get()->Record.AsJSON();
- ctx.Send(Source, new NMon::TEvRemoteJsonInfoRes(stream.Str()));
- Die(ctx);
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHive::TEvDeleteTabletReply, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
- }
- }
-
-public:
- void Bootstrap(const TActorContext& ctx) {
+ }
+
+private:
+ void HandleTimeout(const TActorContext& ctx) {
+ ctx.Send(Source, new NMon::TEvRemoteJsonInfoRes("{\"Error\": \"Timeout\"}"));
+ Die(ctx);
+ }
+
+ void Handle(TEvHive::TEvDeleteTabletReply::TPtr& ptr, const TActorContext& ctx) {
+ TStringStream stream;
+ stream << ptr->Get()->Record.AsJSON();
+ ctx.Send(Source, new NMon::TEvRemoteJsonInfoRes(stream.Str()));
+ Die(ctx);
+ }
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvHive::TEvDeleteTabletReply, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+ }
+ }
+
+public:
+ void Bootstrap(const TActorContext& ctx) {
ctx.Send(Hive->SelfId(), Event.Release());
- Become(&TThis::StateWork, ctx, TDuration::Seconds(30), new TEvents::TEvWakeup());
- }
-};
-
+ Become(&TThis::StateWork, ctx, TDuration::Seconds(30), new TEvents::TEvWakeup());
+ }
+};
+
class TTxMonEvent_Groups : public TTransactionBase<THive> {
public:
const TActorId Source;
@@ -3396,19 +3396,19 @@ void THive::CreateEvMonitoring(NMon::TEvRemoteHttpInfo::TPtr& ev, const TActorCo
ctx.RegisterWithSameMailbox(new TCreateTabletActor(ev->Sender, owner, ownerIdx, type, channelsProfile, followers, this));
return;
}
- if (page == "DeleteTablet") {
- if (cgi.Has("owner") && cgi.Has("owner_idx")) {
- ui64 owner = FromStringWithDefault<ui64>(cgi.Get("owner"), 0);
- ui64 ownerIdx = FromStringWithDefault<ui64>(cgi.Get("owner_idx"), 0);
+ if (page == "DeleteTablet") {
+ if (cgi.Has("owner") && cgi.Has("owner_idx")) {
+ ui64 owner = FromStringWithDefault<ui64>(cgi.Get("owner"), 0);
+ ui64 ownerIdx = FromStringWithDefault<ui64>(cgi.Get("owner_idx"), 0);
ctx.RegisterWithSameMailbox(new TDeleteTabletActor(ev->Sender, owner, ownerIdx, this));
- } else if (cgi.Has("tablet")) {
- TTabletId tabletId = FromStringWithDefault<TTabletId>(cgi.Get("tablet"), 0);
+ } else if (cgi.Has("tablet")) {
+ TTabletId tabletId = FromStringWithDefault<TTabletId>(cgi.Get("tablet"), 0);
ctx.RegisterWithSameMailbox(new TDeleteTabletActor(ev->Sender, tabletId, this));
- } else {
- ctx.Send(ev->Sender, new NMon::TEvRemoteJsonInfoRes("{\"Error\": \"tablet or (owner, owner_idx) params must be specified\"}"));
- }
- return;
- }
+ } else {
+ ctx.Send(ev->Sender, new NMon::TEvRemoteJsonInfoRes("{\"Error\": \"tablet or (owner, owner_idx) params must be specified\"}"));
+ }
+ return;
+ }
if (page == "Resources") {
return Execute(new TTxMonEvent_Resources(ev->Sender, ev, this), ctx);
}
diff --git a/ydb/core/mind/lease_holder.cpp b/ydb/core/mind/lease_holder.cpp
index 4591414daca..5d47d04c71f 100644
--- a/ydb/core/mind/lease_holder.cpp
+++ b/ydb/core/mind/lease_holder.cpp
@@ -35,8 +35,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::NODE_BROKER_LEASE_HOLDER;
- }
-
+ }
+
TLeaseHolder(TInstant expire)
: LastPingEpoch(0)
, Expire(expire)
diff --git a/ydb/core/mon/mon.cpp b/ydb/core/mon/mon.cpp
index 81c123e68c2..d4722266f34 100644
--- a/ydb/core/mon/mon.cpp
+++ b/ydb/core/mon/mon.cpp
@@ -37,18 +37,18 @@ namespace NActors {
: TActor(&TMonRequest::StateFunc)
, TargetActorId(targetActorId)
, Request(request)
- , Result(result)
+ , Result(result)
, AllowedSIDs(sids)
, Authorizer(authorizer)
{
}
- ~TMonRequest() {
- if (!Result.HasValue()) {
- Result.SetValue(nullptr);
- }
- }
-
+ ~TMonRequest() {
+ if (!Result.HasValue()) {
+ Result.SetValue(nullptr);
+ }
+ }
+
STFUNC(StateFunc) {
switch (ev->GetTypeRewrite()) {
HFunc(TEvents::TEvBootstrap, HandleBootstrap);
@@ -76,21 +76,21 @@ namespace NActors {
}
void HandlePoisonPill(TEvents::TEvPoisonPill::TPtr &, const TActorContext &ctx) {
- Die(ctx);
+ Die(ctx);
}
- void HandleWakeup(TEvents::TEvWakeup::TPtr &, const TActorContext &ctx) {
- Result.SetValue(nullptr);
- Die(ctx);
+ void HandleWakeup(TEvents::TEvWakeup::TPtr &, const TActorContext &ctx) {
+ Result.SetValue(nullptr);
+ Die(ctx);
}
void HandleUndelivered(TEvents::TEvUndelivered::TPtr &, const TActorContext &ctx) {
ReplyActorUnavailableAndDie(ctx);
}
- void HandleInfoRes(NMon::IEvHttpInfoRes::TPtr &ev, const NActors::TActorContext &ctx) {
+ void HandleInfoRes(NMon::IEvHttpInfoRes::TPtr &ev, const NActors::TActorContext &ctx) {
Result.SetValue(THolder<NMon::IEvHttpInfoRes>(ev->Release().Release()));
- Die(ctx);
+ Die(ctx);
}
void Handle(NKikimr::TEvTicketParser::TEvAuthorizeTicketResult::TPtr &ev, const TActorContext &ctx) {
@@ -347,9 +347,9 @@ namespace NActors {
auto future = promise.GetFuture();
ActorSystem->Register(new TMonRequest(TargetActorId, request, promise, AllowedSIDs, Authorizer));
-
+
THttpResponsePtr result = future.ExtractValue(TDuration::Max());
-
+
if (result) {
Output(request, *result);
} else {
@@ -391,7 +391,7 @@ namespace NActors {
}
TMon::~TMon() {
- Stop();
+ Stop();
}
void TMon::Start() {
diff --git a/ydb/core/mon_alloc/monitor.cpp b/ydb/core/mon_alloc/monitor.cpp
index ef6b079a5a4..8afc7852c97 100644
--- a/ydb/core/mon_alloc/monitor.cpp
+++ b/ydb/core/mon_alloc/monitor.cpp
@@ -109,14 +109,14 @@ namespace NKikimr {
// update counters
for (const auto& [tagName, tagStats] : stats) {
- const auto& total = tagStats.Total;
- TPerTagCounters& perTag = PerTag[tagName];
-
- if (total.Count == 0 && total.Size == 0 && !perTag.Total.Count) {
- // Skip the tag that didn't have any allocations so far
- continue;
- }
-
+ const auto& total = tagStats.Total;
+ TPerTagCounters& perTag = PerTag[tagName];
+
+ if (total.Count == 0 && total.Size == 0 && !perTag.Total.Count) {
+ // Skip the tag that didn't have any allocations so far
+ continue;
+ }
+
auto tagCounters = CounterGroup->GetSubgroup("tag", tagName);
if (!perTag.Total.Count) {
perTag.Total.Init(tagCounters->GetSubgroup("bucket", "total"));
@@ -127,14 +127,14 @@ namespace NKikimr {
for (int sizeIdx = 0; sizeIdx < numSizes; ++sizeIdx) {
const auto sizeName = ToString(sizeIdx);
- const auto& bucket = tagStats.Buckets[sizeIdx];
+ const auto& bucket = tagStats.Buckets[sizeIdx];
TMemCounters& bySize = perTag.BySize[sizeName];
-
- if (bucket.Count == 0 && bucket.Size == 0 && !bySize.Count) {
- // Skip the bucket that didn't have any allocations so far
- continue;
- }
-
+
+ if (bucket.Count == 0 && bucket.Size == 0 && !bySize.Count) {
+ // Skip the bucket that didn't have any allocations so far
+ continue;
+ }
+
if (!bySize.Count) {
bySize.Init(tagCounters->GetSubgroup("bucket", sizeName));
}
diff --git a/ydb/core/persqueue/events/internal.h b/ydb/core/persqueue/events/internal.h
index 56ed87c76cb..bc892988b47 100644
--- a/ydb/core/persqueue/events/internal.h
+++ b/ydb/core/persqueue/events/internal.h
@@ -439,8 +439,8 @@ struct TEvPQ {
struct TEvChangeConfig : public TEventLocal<TEvChangeConfig, EvChangeConfig> {
TEvChangeConfig(const TString& topicName, const NKikimrPQ::TPQTabletConfig& config)
- : TopicName(topicName)
- , Config(config)
+ : TopicName(topicName)
+ , Config(config)
{}
TString TopicName;
diff --git a/ydb/core/persqueue/partition.cpp b/ydb/core/persqueue/partition.cpp
index a238199b53a..cc8e2419cf2 100644
--- a/ydb/core/persqueue/partition.cpp
+++ b/ydb/core/persqueue/partition.cpp
@@ -20,7 +20,7 @@
#include <util/folder/path.h>
#include <util/string/escape.h>
#include <util/system/byteorder.h>
-
+
#define VERIFY_RESULT_BLOB(blob, pos) \
Y_VERIFY(!blob.Data.empty(), "Empty data. SourceId: %s, SeqNo: %" PRIu64, blob.SourceId.data(), blob.SeqNo); \
Y_VERIFY(blob.SeqNo <= (ui64)Max<i64>(), "SeqNo is too big: %" PRIu64, blob.SeqNo);
@@ -456,10 +456,10 @@ TPartition::TPartition(ui64 tabletId, ui32 partition, const TActorId& tablet, co
const TString& topicName, const TString& topicPath, const bool localDC, TString dcId,
const NKikimrPQ::TPQTabletConfig& config, const TTabletCountersBase& counters,
const TActorContext &ctx, bool newPartition)
- : TabletID(tabletId)
- , Partition(partition)
+ : TabletID(tabletId)
+ , Partition(partition)
, Config(config)
- , TopicName(topicName)
+ , TopicName(topicName)
, TopicPath(topicPath)
, LocalDC(localDC)
, DCId(std::move(dcId))
@@ -557,9 +557,9 @@ void TPartition::HandleMonitoring(TEvPQ::TEvMonRequest::TPtr& ev, const TActorCo
out << "MaxCurrently writing: " << MaxWriteResponsesSize; res.push_back(out.Str()); out.Clear();
out << "DataKeysBody size: " << DataKeysBody.size(); res.push_back(out.Str()); out.Clear();
for (ui32 i = 0; i < DataKeysHead.size(); ++i) {
- out << "DataKeysHead[" << i << "] size: " << DataKeysHead[i].KeysCount() << " sum: " << DataKeysHead[i].Sum()
+ out << "DataKeysHead[" << i << "] size: " << DataKeysHead[i].KeysCount() << " sum: " << DataKeysHead[i].Sum()
<< " border: " << DataKeysHead[i].Border() << " recs: " << DataKeysHead[i].RecsCount() << " intCount: " << DataKeysHead[i].InternalPartsCount();
- res.push_back(out.Str()); out.Clear();
+ res.push_back(out.Str()); out.Clear();
}
for (auto& avg : AvgWriteBytes) {
out << "AvgWriteSize per " << avg.GetDuration().ToString() << " is " << avg.GetValue() << " bytes";
@@ -1225,29 +1225,29 @@ void TPartition::Handle(NReadSpeedLimiterEvents::TEvCounters::TPtr& ev, const TA
}
}
-void TPartition::Handle(TEvents::TEvPoisonPill::TPtr&, const TActorContext& ctx) {
- // Reply to all outstanding requests in order to destroy corresponding actors
-
+void TPartition::Handle(TEvents::TEvPoisonPill::TPtr&, const TActorContext& ctx) {
+ // Reply to all outstanding requests in order to destroy corresponding actors
+
TStringBuilder ss;
ss << "Tablet is restarting, topic '" << TopicName << "'";
for (const auto& ev : WaitToChangeOwner) {
ReplyError(ctx, ev->Cookie, NPersQueue::NErrorCode::INITIALIZING, ss);
- }
-
+ }
+
for (const auto& w : Requests) {
ReplyError(ctx, w.GetCookie(), NPersQueue::NErrorCode::INITIALIZING, ss);
- }
-
+ }
+
for (const auto& wr : Responses) {
ReplyError(ctx, wr.GetCookie(), NPersQueue::NErrorCode::INITIALIZING, TStringBuilder() << ss << " (WriteResponses)");
- }
-
- for (const auto& ri : ReadInfo) {
+ }
+
+ for (const auto& ri : ReadInfo) {
ReplyError(ctx, ri.second.Destination, NPersQueue::NErrorCode::INITIALIZING,
TStringBuilder() << ss << " (ReadInfo) cookie " << ri.first);
- }
-
+ }
+
if (Mirrorer) {
Send(Mirrorer->Actor, new TEvents::TEvPoisonPill());
}
@@ -1564,7 +1564,7 @@ void TPartition::HandleDataRead(const NKikimrClient::TResponse& response, const
LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "tablet " << TabletID << " HandleOnInit topic '" << TopicName << "' partition " << Partition
<< " ReadResult " << i << " status NKikimrProto::ERROR result message: \"" << read.GetMessage()
<< " \" errorReason: \"" << response.GetErrorReason() << "\"");
- ctx.Send(Tablet, new TEvents::TEvPoisonPill());
+ ctx.Send(Tablet, new TEvents::TEvPoisonPill());
return;
default:
Cerr << "ERROR " << read.GetStatus() << " message: \"" << read.GetMessage() << "\"\n";
@@ -1784,7 +1784,7 @@ void TPartition::InitUserInfoForImportantClients(const TActorContext& ctx) {
void TPartition::Handle(TEvPQ::TEvChangeConfig::TPtr& ev, const TActorContext& ctx) {
Config = ev->Get()->Config;
- TopicName = ev->Get()->TopicName;
+ TopicName = ev->Get()->TopicName;
Y_VERIFY(Config.GetPartitionConfig().GetTotalPartitions() > 0);
diff --git a/ydb/core/persqueue/partition.h b/ydb/core/persqueue/partition.h
index 0f1f9f61507..2eec26f2516 100644
--- a/ydb/core/persqueue/partition.h
+++ b/ydb/core/persqueue/partition.h
@@ -133,7 +133,7 @@ private:
void Handle(TEvPQ::TEvReadTimeout::TPtr& ev, const TActorContext& ctx);
void HandleWakeup(const TActorContext& ctx);
- void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPQ::TEvRead::TPtr& ev, const TActorContext& ctx);
void Handle(NReadSpeedLimiterEvents::TEvResponse::TPtr& ev, const TActorContext& ctx);
@@ -254,13 +254,13 @@ private:
{
NPersQueue::TCounterTimeKeeper keeper(Counters.Cumulative()[COUNTER_PQ_TABLET_CPU_USAGE]);
- LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateInit", ev));
+ LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateInit", ev));
TRACE_EVENT(NKikimrServices::PERSQUEUE);
switch (ev->GetTypeRewrite()) {
CFunc(TEvents::TSystem::Wakeup, HandleWakeup);
HFuncTraced(TEvKeyValue::TEvResponse, HandleOnInit); //result of reads
- HFuncTraced(TEvents::TEvPoisonPill, Handle);
+ HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvPQ::TEvMonRequest, HandleMonitoring);
HFuncTraced(TEvPQ::TEvChangeConfig, Handle);
HFuncTraced(TEvPQ::TEvPartitionOffsets, HandleOnInit);
@@ -271,7 +271,7 @@ private:
HFuncTraced(NReadSpeedLimiterEvents::TEvCounters, Handle);
HFuncTraced(TEvPQ::TEvGetPartitionClientInfo, Handle);
default:
- LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateInit", ev));
+ LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateInit", ev));
break;
};
}
@@ -280,7 +280,7 @@ private:
{
NPersQueue::TCounterTimeKeeper keeper(Counters.Cumulative()[COUNTER_PQ_TABLET_CPU_USAGE]);
- LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateIdle", ev));
+ LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateIdle", ev));
TRACE_EVENT(NKikimrServices::PERSQUEUE);
switch (ev->GetTypeRewrite()) {
@@ -291,7 +291,7 @@ private:
HFuncTraced(TEvPQ::TEvRead, Handle);
HFuncTraced(NReadSpeedLimiterEvents::TEvResponse, Handle);
HFuncTraced(TEvPQ::TEvReadTimeout, Handle);
- HFuncTraced(TEvents::TEvPoisonPill, Handle);
+ HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvPQ::TEvMonRequest, HandleMonitoring);
HFuncTraced(TEvPQ::TEvGetMaxSeqNoRequest, Handle);
HFuncTraced(TEvPQ::TEvChangeConfig, Handle);
@@ -318,7 +318,7 @@ private:
HFuncTraced(TEvPQ::TEvSplitMessageGroup, HandleOnIdle);
default:
- LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateIdle", ev));
+ LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateIdle", ev));
break;
};
}
@@ -327,7 +327,7 @@ private:
{
NPersQueue::TCounterTimeKeeper keeper(Counters.Cumulative()[COUNTER_PQ_TABLET_CPU_USAGE]);
- LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateWrite", ev));
+ LOG_TRACE_S(ctx, NKikimrServices::PERSQUEUE, EventStr("StateWrite", ev));
TRACE_EVENT(NKikimrServices::PERSQUEUE);
switch (ev->GetTypeRewrite()) {
@@ -339,7 +339,7 @@ private:
HFuncTraced(TEvPQ::TEvRead, Handle);
HFuncTraced(NReadSpeedLimiterEvents::TEvResponse, Handle);
HFuncTraced(TEvPQ::TEvReadTimeout, Handle);
- HFuncTraced(TEvents::TEvPoisonPill, Handle);
+ HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvPQ::TEvMonRequest, HandleMonitoring);
HFuncTraced(TEvPQ::TEvGetMaxSeqNoRequest, Handle);
HFuncTraced(TEvPQ::TEvGetClientOffset, Handle);
@@ -366,7 +366,7 @@ private:
HFuncTraced(TEvPQ::TEvSplitMessageGroup, HandleOnWrite);
default:
- LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateWrite", ev));
+ LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Unexpected " << EventStr("StateWrite", ev));
break;
};
}
@@ -412,7 +412,7 @@ private:
};
- ui64 TabletID;
+ ui64 TabletID;
ui32 Partition;
NKikimrPQ::TPQTabletConfig Config;
TString TopicName;
diff --git a/ydb/core/persqueue/pq_impl.cpp b/ydb/core/persqueue/pq_impl.cpp
index bf92e6245e1..9937dbe561c 100644
--- a/ydb/core/persqueue/pq_impl.cpp
+++ b/ydb/core/persqueue/pq_impl.cpp
@@ -17,7 +17,7 @@
#include <ydb/core/tablet/tablet_counters_aggregator.h>
#include <library/cpp/monlib/service/pages/templates.h>
-#include <util/string/escape.h>
+#include <util/string/escape.h>
namespace NKikimr {
namespace NPQ {
@@ -425,8 +425,8 @@ public:
, Cache(cache)
, TotalRequests(partitions.size() + 1)
, TotalResponses(0)
- , TopicName(topicName)
- , TabletID(tabletId)
+ , TopicName(topicName)
+ , TabletID(tabletId)
, Inflight(inflight)
{
for (auto& p: Partitions) {
@@ -529,7 +529,7 @@ private:
ui32 TotalRequests;
ui32 TotalResponses;
TString TopicName;
- ui64 TabletID;
+ ui64 TabletID;
ui32 Inflight;
};
@@ -537,12 +537,12 @@ private:
/******************************************************* TPersQueue *********************************************************/
void TPersQueue::ReplyError(const TActorContext& ctx, const ui64 responseCookie, NPersQueue::NErrorCode::EErrorCode errorCode, const TString& error)
-{
+{
ReplyPersQueueError(
ctx.SelfID, ctx, TabletID(), TopicName, Nothing(), *Counters, NKikimrServices::PERSQUEUE,
responseCookie, errorCode, error
);
-}
+}
void TPersQueue::FillMeteringParams(const TActorContext& ctx)
{
@@ -665,7 +665,7 @@ void TPersQueue::HandleConfigWriteResponse(const NKikimrClient::TResponse& resp,
else
NewConfigShouldBeApplied = true; //when config will be inited with old value new config will be applied
}
-
+
void TPersQueue::HandleConfigReadResponse(const NKikimrClient::TResponse& resp, const TActorContext& ctx)
{
bool ok = (resp.GetStatus() == NMsgBusProxy::MSTATUS_OK) && (resp.ReadResultSize() == 2) && (resp.HasSetExecutorFastLogPolicyResult()) &&
@@ -676,7 +676,7 @@ void TPersQueue::HandleConfigReadResponse(const NKikimrClient::TResponse& resp,
ctx.Send(ctx.SelfID, new TEvents::TEvPoisonPill());
return;
}
-
+
ReadConfig(resp.GetReadResult(0), ctx);
ReadState(resp.GetReadResult(1), ctx);
}
@@ -719,11 +719,11 @@ void TPersQueue::ReadConfig(const NKikimrClient::TKeyValueResponse::TReadResult&
Y_VERIFY(TopicName.size(), "Need topic name here");
CacheActor = ctx.Register(new TPQCacheProxy(ctx.SelfID, TopicName, cacheSize));
} else if (read.GetStatus() == NKikimrProto::NODATA) {
- LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID() << " no config, start with empty partitions and default config");
+ LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID() << " no config, start with empty partitions and default config");
} else {
Y_FAIL("Unexpected config read status: %d", read.GetStatus());
}
-
+
for (const auto& partition : Config.GetPartitions()) { // no partitions will be created with empty config
const auto partitionId = partition.GetPartitionId();
Partitions.emplace(partitionId, TPartitionInfo(
@@ -734,7 +734,7 @@ void TPersQueue::ReadConfig(const NKikimrClient::TKeyValueResponse::TReadResult&
));
}
ConfigInited = true;
-
+
auto now = ctx.Now();
ShardsMetricsLastFlush = now;
RequestsMetricsLastFlush = now;
@@ -827,9 +827,9 @@ void TPersQueue::Handle(TEvKeyValue::TEvResponse::TPtr& ev, const TActorContext&
HandleStateWriteResponse(resp, ctx);
break;
default:
- LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID()
- << " Unexpected KV response: " << ev->Get()->ToString() << " " << ctx.SelfID);
- ctx.Send(ctx.SelfID, new TEvents::TEvPoisonPill());
+ LOG_ERROR_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID()
+ << " Unexpected KV response: " << ev->Get()->ToString() << " " << ctx.SelfID);
+ ctx.Send(ctx.SelfID, new TEvents::TEvPoisonPill());
}
}
@@ -1038,9 +1038,9 @@ void TPersQueue::ProcessUpdateConfigRequest(TAutoPtr<TEvPersQueue::TEvUpdateConf
return;
}
if (curConfigVersion == newConfigVersion) { //nothing to change, will be answered on cfg write from prev step
- LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID()
+ LOG_INFO_S(ctx, NKikimrServices::PERSQUEUE, "Tablet " << TabletID()
<< " Config update version " << Config.GetVersion() << " is already in progress actor " << sender
- << " txId " << record.GetTxId() << " config:\n" << cfg.DebugString());
+ << " txId " << record.GetTxId() << " config:\n" << cfg.DebugString());
ChangeConfigNotification.insert(TChangeNotification(sender, record.GetTxId()));
return;
}
@@ -1141,7 +1141,7 @@ void TPersQueue::ProcessUpdateConfigRequest(TAutoPtr<TEvPersQueue::TEvUpdateConf
Y_VERIFY(res);
TAutoPtr<TEvKeyValue::TEvRequest> request(new TEvKeyValue::TEvRequest);
- request->Record.SetCookie(WRITE_CONFIG_COOKIE);
+ request->Record.SetCookie(WRITE_CONFIG_COOKIE);
auto write = request->Record.AddCmdWrite();
write->SetKey(KeyConfig());
@@ -1952,9 +1952,9 @@ void TPersQueue::HandleDie(const TActorContext& ctx)
FlushMetrics(true, ctx);
for (const auto& p : Partitions) {
- ctx.Send(p.second.Actor, new TEvents::TEvPoisonPill());
+ ctx.Send(p.second.Actor, new TEvents::TEvPoisonPill());
}
- ctx.Send(CacheActor, new TEvents::TEvPoisonPill());
+ ctx.Send(CacheActor, new TEvents::TEvPoisonPill());
for (const auto& p : ResponseProxy) {
@@ -2015,7 +2015,7 @@ void TPersQueue::Handle(TEvInterconnect::TEvNodeInfo::TPtr& ev, const TActorCont
ResourceMetrics = Executor()->GetResourceMetrics();
THolder<TEvKeyValue::TEvRequest> request(new TEvKeyValue::TEvRequest);
- request->Record.SetCookie(READ_CONFIG_COOKIE);
+ request->Record.SetCookie(READ_CONFIG_COOKIE);
request->Record.AddCmdRead()->SetKey(KeyConfig());
request->Record.AddCmdRead()->SetKey(KeyState());
request->Record.MutableCmdSetExecutorFastLogPolicy()
diff --git a/ydb/core/persqueue/pq_ut.cpp b/ydb/core/persqueue/pq_ut.cpp
index a013ff869d5..13a61229105 100644
--- a/ydb/core/persqueue/pq_ut.cpp
+++ b/ydb/core/persqueue/pq_ut.cpp
@@ -1776,7 +1776,7 @@ Y_UNIT_TEST(TestReadSubscription) {
tc.Prepare(dispatchName, setup, activeZone);
activeZone = false;
tc.Runtime->SetScheduledLimit(600);
- tc.Runtime->SetScheduledEventFilter(&tc.ImmediateLogFlushAndRequestTimeoutFilter);
+ tc.Runtime->SetScheduledEventFilter(&tc.ImmediateLogFlushAndRequestTimeoutFilter);
TVector<std::pair<ui64, TString>> data;
@@ -1875,7 +1875,7 @@ Y_UNIT_TEST(TestPQCacheSizeManagement) {
tc.Runtime->SetScheduledLimit(200);
- activeZone = false;
+ activeZone = false;
PQTabletPrepare(20000000, 100 * 1024 * 1024, 0, {{"aaa", true}}, tc); //important client - never delete
TVector<std::pair<ui64, TString>> data;
diff --git a/ydb/core/persqueue/read.h b/ydb/core/persqueue/read.h
index 0b6aeb73298..d0bc79c6929 100644
--- a/ydb/core/persqueue/read.h
+++ b/ydb/core/persqueue/read.h
@@ -95,7 +95,7 @@ namespace NPQ {
Cache.Touch(ctx);
}
- void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx)
+ void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx)
{
Y_VERIFY(ev->Sender == Tablet);
Die(ctx);
@@ -367,7 +367,7 @@ namespace NPQ {
STFUNC(StateFunc) {
switch (ev->GetTypeRewrite()) {
HFunc(TEvPQ::TEvBlobRequest, Handle); // read requests
- HFunc(TEvents::TEvPoisonPill, Handle);
+ HFunc(TEvents::TEvPoisonPill, Handle);
HFunc(TEvPQ::TEvMonRequest, HandleMonitoring);
HFunc(TEvKeyValue::TEvRequest, Handle); // write requests
HFunc(TEvKeyValue::TEvResponse, Handle); // read & write responses
diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto
index e293c80a2f2..d64169d4fc0 100644
--- a/ydb/core/protos/config.proto
+++ b/ydb/core/protos/config.proto
@@ -174,12 +174,12 @@ message TDomainsConfig {
}
message TDomain {
- message TTxLimits {
- optional uint64 PerRequestDataSizeLimit = 1;
- optional uint64 PerShardReadSizeLimit = 2;
- optional uint64 PerShardIncomingReadSetSizeLimit = 3;
- }
-
+ message TTxLimits {
+ optional uint64 PerRequestDataSizeLimit = 1;
+ optional uint64 PerShardReadSizeLimit = 2;
+ optional uint64 PerShardIncomingReadSetSizeLimit = 3;
+ }
+
optional uint32 DomainId = 1;
optional fixed64 SchemeRoot = 2;
repeated uint64 Coordinator = 3;
@@ -205,11 +205,11 @@ message TDomainsConfig {
message TExecLevel {
}
- message TNamedCompactionPolicy {
- optional string Name = 1;
+ message TNamedCompactionPolicy {
+ optional string Name = 1;
optional NKikimrSchemeOp.TCompactionPolicy Policy = 2;
- }
-
+ }
+
message TSecurityConfig {
optional bool EnforceUserTokenRequirement = 1 [default = false];
repeated string MonitoringAllowedSIDs = 2;
@@ -223,7 +223,7 @@ message TDomainsConfig {
repeated TStateStorage StateStorage = 2;
repeated TExecLevel ExecLevel = 3;
repeated THiveConfig HiveConfig = 4;
- repeated TNamedCompactionPolicy NamedCompactionPolicy = 5;
+ repeated TNamedCompactionPolicy NamedCompactionPolicy = 5;
optional TSecurityConfig SecurityConfig = 6;
optional bool ForbidImplicitStoragePools = 7 [default = true];
}
@@ -281,7 +281,7 @@ message TLogConfig {
optional uint32 DefaultSamplingRate = 5 [default = 0];
optional string Format = 6 [default = "full"]; // "full" | "short" | "json"
optional string ClusterName = 7;
- optional bool AllowDropEntries = 8 [default = true];
+ optional bool AllowDropEntries = 8 [default = true];
optional bool UseLocalTimestamps = 9 [default = false];
optional string BackendFileName = 10;
optional string SysLogService = 11;
@@ -313,7 +313,7 @@ message TBootstrap {
SCHEMESHARD = 30;
DATASHARD = 31;
- FLAT_SCHEMESHARD = 32;
+ FLAT_SCHEMESHARD = 32;
KEYVALUEFLAT = 33;
JOBRUNNER_POOL_MANAGER = 40;
@@ -342,7 +342,7 @@ message TBootstrap {
optional uint64 ProxySchemeCacheNodes = 2;
optional uint64 ProxySchemeCacheDistNodes = 3;
- optional NKikimrTablet.TCompactionBroker CompactionBroker = 4;
+ optional NKikimrTablet.TCompactionBroker CompactionBroker = 4;
optional NKikimrNodeLimits.TNodeLimitsConfig NodeLimits = 5;
optional NKikimrResourceBroker.TResourceBrokerConfig ResourceBroker = 6;
optional NKikimrSharedCache.TSharedCacheConfig SharedCacheConfig = 7;
@@ -563,7 +563,7 @@ message TGRpcConfig {
optional uint32 Port = 3;
optional uint32 WorkerThreads = 4 [default = 2];
optional uint64 GRpcMemoryQuotaBytes = 5 [default = 1073741824]; // 1 GB default; 0 == unlimited
- optional uint64 MaxMessageSize = 6; // default = DEFAULT_GRPC_MESSAGE_SIZE_LIMIT
+ optional uint64 MaxMessageSize = 6; // default = DEFAULT_GRPC_MESSAGE_SIZE_LIMIT
optional uint32 MaxInFlight = 7; // 0 == unlimited [default]
optional NKikimrStream.TStreamingConfig StreamingConfig = 8;
// Ssl part
@@ -1164,7 +1164,7 @@ message TImmediateControlsConfig {
optional uint64 PerRequestDataSizeLimit = 1 [(ControlOptions) = {
Description: "Maximum read data size per transaction",
MinValue: 0,
- MaxValue: 256000000000000,
+ MaxValue: 256000000000000,
DefaultValue: 53687091200 }];
optional uint64 PerShardReadSizeLimit = 2 [(ControlOptions) = {
Description: "Maximum read data size per transaction per shard",
diff --git a/ydb/core/protos/counters.proto b/ydb/core/protos/counters.proto
index a3cb34b982d..05a8c93d905 100644
--- a/ydb/core/protos/counters.proto
+++ b/ydb/core/protos/counters.proto
@@ -79,7 +79,7 @@ enum ETxTypeCumulativeCounters {
COUNTER_TT_BOOKKEEPING_CPUTIME = 8 [(CounterOpts) = {Name: "BookkeepingCPUTime"}];
COUNTER_TT_COMMITED_CPUTIME = 9 [(CounterOpts) = {Name: "CommitedCPUTime"}];
COUNTER_TT_TOUCHED_BLOCKS = 10 [(CounterOpts) = {Name: "TouchedBlocks"}];
- COUNTER_TT_LOADED_BLOCKS = 11 [(CounterOpts) = {Name: "LoadedBlocks"}];
+ COUNTER_TT_LOADED_BLOCKS = 11 [(CounterOpts) = {Name: "LoadedBlocks"}];
COUNTER_TT_TERMINATED = 12 [(CounterOpts) = {Name: "Terminated"}];
COUNTER_TT_REDO_WRITTEN_BYTES = 13 [(CounterOpts) = {Name: "RedoWrittenBytes"}];
COUNTER_TT_ANNEX_WRITTEN_BYTES = 14 [(CounterOpts) = {Name: "AnnexWrittenBytes"}];
diff --git a/ydb/core/protos/counters_columnshard.proto b/ydb/core/protos/counters_columnshard.proto
index 8f7a13f49a8..ada7786f486 100644
--- a/ydb/core/protos/counters_columnshard.proto
+++ b/ydb/core/protos/counters_columnshard.proto
@@ -56,18 +56,18 @@ enum ECumulativeCounters {
COUNTER_PREPARE_SUCCESS = 7 [(CounterOpts) = {Name: "PrepareSuccess"}];
COUNTER_PLAN_STEP_IGNORED = 8 [(CounterOpts) = {Name: "PlanStepIgnored"}];
COUNTER_PLAN_STEP_ACCEPTED = 9 [(CounterOpts) = {Name: "PlanStepAccepted"}];
- COUNTER_SCANNED_ROWS = 10 [(CounterOpts) = {Name: "ScannedRows"}];
- COUNTER_SCANNED_BYTES = 11 [(CounterOpts) = {Name: "ScannedBytes"}];
- COUNTER_UPSERT_BLOBS_WRITTEN = 12 [(CounterOpts) = {Name: "UpsertBlobsWritten"}];
- COUNTER_UPSERT_BYTES_WRITTEN = 13 [(CounterOpts) = {Name: "UpsertBytesWritten"}];
- COUNTER_INDEXING_BLOBS_WRITTEN = 14 [(CounterOpts) = {Name: "IndexingBlobsWritten"}];
- COUNTER_INDEXING_BYTES_WRITTEN = 15 [(CounterOpts) = {Name: "IndexingBytesWritten"}];
- COUNTER_COMPACTION_BLOBS_WRITTEN = 16 [(CounterOpts) = {Name: "CompactionBlobsWritten"}];
- COUNTER_COMPACTION_BYTES_WRITTEN = 17 [(CounterOpts) = {Name: "CompactionBytesWritten"}];
- COUNTER_BLOB_MANAGER_GC_REQUESTS = 18 [(CounterOpts) = {Name: "BlobManager/GcRequests"}];
- COUNTER_BLOB_MANAGER_KEEP_BLOBS = 19 [(CounterOpts) = {Name: "BlobManager/KeepBlobs"}];
- COUNTER_BLOB_MANAGER_DONT_KEEP_BLOBS = 20 [(CounterOpts) = {Name: "BlobManager/DontKeepBlobs"}];
- COUNTER_BLOB_MANAGER_SKIPPED_BLOBS = 21 [(CounterOpts) = {Name: "BlobManager/SkippedBlobs"}];
+ COUNTER_SCANNED_ROWS = 10 [(CounterOpts) = {Name: "ScannedRows"}];
+ COUNTER_SCANNED_BYTES = 11 [(CounterOpts) = {Name: "ScannedBytes"}];
+ COUNTER_UPSERT_BLOBS_WRITTEN = 12 [(CounterOpts) = {Name: "UpsertBlobsWritten"}];
+ COUNTER_UPSERT_BYTES_WRITTEN = 13 [(CounterOpts) = {Name: "UpsertBytesWritten"}];
+ COUNTER_INDEXING_BLOBS_WRITTEN = 14 [(CounterOpts) = {Name: "IndexingBlobsWritten"}];
+ COUNTER_INDEXING_BYTES_WRITTEN = 15 [(CounterOpts) = {Name: "IndexingBytesWritten"}];
+ COUNTER_COMPACTION_BLOBS_WRITTEN = 16 [(CounterOpts) = {Name: "CompactionBlobsWritten"}];
+ COUNTER_COMPACTION_BYTES_WRITTEN = 17 [(CounterOpts) = {Name: "CompactionBytesWritten"}];
+ COUNTER_BLOB_MANAGER_GC_REQUESTS = 18 [(CounterOpts) = {Name: "BlobManager/GcRequests"}];
+ COUNTER_BLOB_MANAGER_KEEP_BLOBS = 19 [(CounterOpts) = {Name: "BlobManager/KeepBlobs"}];
+ COUNTER_BLOB_MANAGER_DONT_KEEP_BLOBS = 20 [(CounterOpts) = {Name: "BlobManager/DontKeepBlobs"}];
+ COUNTER_BLOB_MANAGER_SKIPPED_BLOBS = 21 [(CounterOpts) = {Name: "BlobManager/SkippedBlobs"}];
COUNTER_RAW_BYTES_UPSERTED = 22 [(CounterOpts) = {Name: "RawBytesUpserted"}];
COUNTER_BLOBS_COMMITTED = 23 [(CounterOpts) = {Name: "BlobsCommitted"}];
COUNTER_BYTES_COMMITTED = 24 [(CounterOpts) = {Name: "BytesCommitted"}];
@@ -96,20 +96,20 @@ enum ECumulativeCounters {
COUNTER_TTL_SUCCESS = 47 [(CounterOpts) = {Name: "TtlSuccess"}];
COUNTER_TTL_FAIL = 48 [(CounterOpts) = {Name: "TtlFail"}];
COUNTER_OUT_OF_SPACE = 49 [(CounterOpts) = {Name: "OutOfSpace"}];
- COUNTER_SCAN_RESTARTED = 50 [(CounterOpts) = {Name: "ScanRestarted"}];
+ COUNTER_SCAN_RESTARTED = 50 [(CounterOpts) = {Name: "ScanRestarted"}];
COUNTER_READ_INDEX_GRANULES = 51 [(CounterOpts) = {Name: "ReadIndexGranules"}];
COUNTER_READ_INDEX_PORTIONS = 52 [(CounterOpts) = {Name: "ReadIndexPortions"}];
COUNTER_READ_INDEX_BLOBS = 53 [(CounterOpts) = {Name: "ReadIndexBlobs"}];
COUNTER_READ_INDEX_ROWS = 54 [(CounterOpts) = {Name: "ReadIndexRows"}];
COUNTER_READ_INDEX_BYTES = 55 [(CounterOpts) = {Name: "ReadIndexBytes"}];
COUNTER_WRITE_DUPLICATE = 56 [(CounterOpts) = {Name: "WriteDuplicate"}];
- COUNTER_SMALL_BLOB_WRITE_COUNT = 57 [(CounterOpts) = {Name: "SmallBlobWriteCount"}];
- COUNTER_SMALL_BLOB_WRITE_BYTES = 58 [(CounterOpts) = {Name: "SmallBlobWriteBytes"}];
- COUNTER_SMALL_BLOB_READ_SUCCESS = 59 [(CounterOpts) = {Name: "SmallBlobReadSuccess"}];
- COUNTER_SMALL_BLOB_READ_ERROR = 60 [(CounterOpts) = {Name: "SmallBlobReadError"}];
- COUNTER_SMALL_BLOB_READ_BYTES = 61 [(CounterOpts) = {Name: "SmallBlobReadBytes"}];
- COUNTER_SMALL_BLOB_DELETE_COUNT = 62 [(CounterOpts) = {Name: "SmallBlobDeleteCount"}];
- COUNTER_SMALL_BLOB_DELETE_BYTES = 63 [(CounterOpts) = {Name: "SmallBlobDeleteBytes"}];
+ COUNTER_SMALL_BLOB_WRITE_COUNT = 57 [(CounterOpts) = {Name: "SmallBlobWriteCount"}];
+ COUNTER_SMALL_BLOB_WRITE_BYTES = 58 [(CounterOpts) = {Name: "SmallBlobWriteBytes"}];
+ COUNTER_SMALL_BLOB_READ_SUCCESS = 59 [(CounterOpts) = {Name: "SmallBlobReadSuccess"}];
+ COUNTER_SMALL_BLOB_READ_ERROR = 60 [(CounterOpts) = {Name: "SmallBlobReadError"}];
+ COUNTER_SMALL_BLOB_READ_BYTES = 61 [(CounterOpts) = {Name: "SmallBlobReadBytes"}];
+ COUNTER_SMALL_BLOB_DELETE_COUNT = 62 [(CounterOpts) = {Name: "SmallBlobDeleteCount"}];
+ COUNTER_SMALL_BLOB_DELETE_BYTES = 63 [(CounterOpts) = {Name: "SmallBlobDeleteBytes"}];
}
enum EPercentileCounters {
@@ -147,6 +147,6 @@ enum ETxTypes {
TXTYPE_NOTIFY_TX_COMPLETION = 8 [(TxTypeOpts) = {Name: "TxNotifyTxCompletion"}];
TXTYPE_PROPOSE_CANCEL = 9 [(TxTypeOpts) = {Name: "TxProposeCancel"}];
TXTYPE_PROGRESS = 10 [(TxTypeOpts) = {Name: "TxProgress"}];
- TXTYPE_START_SCAN = 11 [(TxTypeOpts) = {Name: "TxStartScan"}];
- TXTYPE_READ_BLOB_RANGES = 12 [(TxTypeOpts) = {Name: "TxReadBlobRanges"}];
+ TXTYPE_START_SCAN = 11 [(TxTypeOpts) = {Name: "TxStartScan"}];
+ TXTYPE_READ_BLOB_RANGES = 12 [(TxTypeOpts) = {Name: "TxReadBlobRanges"}];
}
diff --git a/ydb/core/protos/counters_datashard.proto b/ydb/core/protos/counters_datashard.proto
index 531a00ec144..956a1369378 100644
--- a/ydb/core/protos/counters_datashard.proto
+++ b/ydb/core/protos/counters_datashard.proto
@@ -46,16 +46,16 @@ enum ECumulativeCounters {
COUNTER_ACK_SENT_DELAYED = 20 [(CounterOpts) = {Name: "AckSentDelayed"}];
COUNTER_CANCEL_TX_NOTFOUND = 21 [(CounterOpts) = {Name: "CancelTxNotFound"}];
COUNTER_MINIKQL_PROGRAM_SIZE = 22 [(CounterOpts) = {Name: "MiniKQLProgramSize"}];
- COUNTER_ENGINE_HOST_SELECT_ROW = 23 [(CounterOpts) = {Name: "EngineHostRowReads"}];
- COUNTER_ENGINE_HOST_SELECT_RANGE = 24 [(CounterOpts) = {Name: "EngineHostRangeReads"}];
- COUNTER_ENGINE_HOST_UPDATE_ROW = 25 [(CounterOpts) = {Name: "EngineHostRowUpdates"}];
- COUNTER_ENGINE_HOST_ERASE_ROW = 26 [(CounterOpts) = {Name: "EngineHostRowErases"}];
- COUNTER_PREPARE_CANCELLED = 27 [(CounterOpts) = {Name: "PrepareCancelled"}];
- COUNTER_READSET_SENT_COUNT = 28 [(CounterOpts) = {Name: "ReadsetSentCount"}];
- COUNTER_READSET_SENT_SIZE = 29 [(CounterOpts) = {Name: "ReadsetSentSize"}];
- COUNTER_READSET_RECEIVED_COUNT = 30 [(CounterOpts) = {Name: "ReadsetReceivedCount"}];
- COUNTER_READSET_RECEIVED_SIZE = 31 [(CounterOpts) = {Name: "ReadsetReceivedSize"}];
- COUNTER_TX_RESULT_SIZE = 32 [(CounterOpts) = {Name: "TxResultSize"}];
+ COUNTER_ENGINE_HOST_SELECT_ROW = 23 [(CounterOpts) = {Name: "EngineHostRowReads"}];
+ COUNTER_ENGINE_HOST_SELECT_RANGE = 24 [(CounterOpts) = {Name: "EngineHostRangeReads"}];
+ COUNTER_ENGINE_HOST_UPDATE_ROW = 25 [(CounterOpts) = {Name: "EngineHostRowUpdates"}];
+ COUNTER_ENGINE_HOST_ERASE_ROW = 26 [(CounterOpts) = {Name: "EngineHostRowErases"}];
+ COUNTER_PREPARE_CANCELLED = 27 [(CounterOpts) = {Name: "PrepareCancelled"}];
+ COUNTER_READSET_SENT_COUNT = 28 [(CounterOpts) = {Name: "ReadsetSentCount"}];
+ COUNTER_READSET_SENT_SIZE = 29 [(CounterOpts) = {Name: "ReadsetSentSize"}];
+ COUNTER_READSET_RECEIVED_COUNT = 30 [(CounterOpts) = {Name: "ReadsetReceivedCount"}];
+ COUNTER_READSET_RECEIVED_SIZE = 31 [(CounterOpts) = {Name: "ReadsetReceivedSize"}];
+ COUNTER_TX_RESULT_SIZE = 32 [(CounterOpts) = {Name: "TxResultSize"}];
COUNTER_LOCKS_REJECTED = 33 [(CounterOpts) = {Name: "LocksRejected"}];
COUNTER_LOCKS_ACQUIRED = 34 [(CounterOpts) = {Name: "LocksAcquired"}];
COUNTER_LOCKS_EVICTED = 35 [(CounterOpts) = {Name: "LocksEvicted"}];
@@ -73,26 +73,26 @@ enum ECumulativeCounters {
COUNTER_TX_WAIT_READ_SETS = 47 [(CounterOpts) = {Name: "TxWaitReadSets"}];
COUNTER_TX_WAIT_RESOURCE = 48 [(CounterOpts) = {Name: "TxWaitResource"}];
COUNTER_LOCKS_WHOLE_SHARD = 49 [(CounterOpts) = {Name: "LocksWholeShard"}];
- COUNTER_ENGINE_HOST_SELECT_ROW_BYTES = 50 [(CounterOpts) = {Name: "EngineHostRowReadBytes"}];
- COUNTER_ENGINE_HOST_SELECT_RANGE_ROWS = 51 [(CounterOpts) = {Name: "EngineHostRangeReadRows"}];
- COUNTER_ENGINE_HOST_SELECT_RANGE_BYTES = 52 [(CounterOpts) = {Name: "EngineHostRangeReadBytes"}];
- COUNTER_ENGINE_HOST_UPDATE_ROW_BYTES = 53 [(CounterOpts) = {Name: "EngineHostRowUpdateBytes"}];
- COUNTER_ENGINE_HOST_ERASE_ROW_BYTES = 54 [(CounterOpts) = {Name: "EngineHostRowEraseBytes"}];
- COUNTER_PLANNED_TX_COMPLETE = 55 [(CounterOpts) = {Name: "PlannedTxComplete"}];
- COUNTER_PREPARE_OUT_OF_SPACE = 56 [(CounterOpts) = {Name: "PrepareOutOfSpace"}];
- COUNTER_IMMEDIATE_TX_CANCELLED = 57 [(CounterOpts) = {Name: "ImmediateTxCancelled"}];
- COUNTER_PLANNED_TX_CANCELLED = 58 [(CounterOpts) = {Name: "PlannedTxCancelled"}];
- COUNTER_UPLOAD_ROWS = 59 [(CounterOpts) = {Name: "UploadRows"}];
- COUNTER_UPLOAD_ROWS_BYTES = 60 [(CounterOpts) = {Name: "UploadRowsBytes"}];
- COUNTER_ENGINE_HOST_SELECT_RANGE_ROW_SKIPS = 61 [(CounterOpts) = {Name: "EngineHostRangeReadDeletedRowSkips"}];
- COUNTER_READ_COLUMNS_ROWS = 62 [(CounterOpts) = {Name: "ReadColumnsRows"}];
- COUNTER_READ_COLUMNS_BYTES = 63 [(CounterOpts) = {Name: "ReadColumnsBytes"}];
- COUNTER_SCANNED_ROWS = 64 [(CounterOpts) = {Name: "ScannedRows"}];
- COUNTER_SCANNED_BYTES = 65 [(CounterOpts) = {Name: "ScannedBytes"}];
+ COUNTER_ENGINE_HOST_SELECT_ROW_BYTES = 50 [(CounterOpts) = {Name: "EngineHostRowReadBytes"}];
+ COUNTER_ENGINE_HOST_SELECT_RANGE_ROWS = 51 [(CounterOpts) = {Name: "EngineHostRangeReadRows"}];
+ COUNTER_ENGINE_HOST_SELECT_RANGE_BYTES = 52 [(CounterOpts) = {Name: "EngineHostRangeReadBytes"}];
+ COUNTER_ENGINE_HOST_UPDATE_ROW_BYTES = 53 [(CounterOpts) = {Name: "EngineHostRowUpdateBytes"}];
+ COUNTER_ENGINE_HOST_ERASE_ROW_BYTES = 54 [(CounterOpts) = {Name: "EngineHostRowEraseBytes"}];
+ COUNTER_PLANNED_TX_COMPLETE = 55 [(CounterOpts) = {Name: "PlannedTxComplete"}];
+ COUNTER_PREPARE_OUT_OF_SPACE = 56 [(CounterOpts) = {Name: "PrepareOutOfSpace"}];
+ COUNTER_IMMEDIATE_TX_CANCELLED = 57 [(CounterOpts) = {Name: "ImmediateTxCancelled"}];
+ COUNTER_PLANNED_TX_CANCELLED = 58 [(CounterOpts) = {Name: "PlannedTxCancelled"}];
+ COUNTER_UPLOAD_ROWS = 59 [(CounterOpts) = {Name: "UploadRows"}];
+ COUNTER_UPLOAD_ROWS_BYTES = 60 [(CounterOpts) = {Name: "UploadRowsBytes"}];
+ COUNTER_ENGINE_HOST_SELECT_RANGE_ROW_SKIPS = 61 [(CounterOpts) = {Name: "EngineHostRangeReadDeletedRowSkips"}];
+ COUNTER_READ_COLUMNS_ROWS = 62 [(CounterOpts) = {Name: "ReadColumnsRows"}];
+ COUNTER_READ_COLUMNS_BYTES = 63 [(CounterOpts) = {Name: "ReadColumnsBytes"}];
+ COUNTER_SCANNED_ROWS = 64 [(CounterOpts) = {Name: "ScannedRows"}];
+ COUNTER_SCANNED_BYTES = 65 [(CounterOpts) = {Name: "ScannedBytes"}];
COUNTER_PROPOSE_QUEUE_EV = 66 [(CounterOpts) = {Name: "TxProposeQueueEvents"}];
- COUNTER_BULK_UPSERT_SUCCESS = 67 [(CounterOpts) = {Name: "BulkUpsertSuccess"}];
- COUNTER_BULK_UPSERT_ERROR = 68 [(CounterOpts) = {Name: "BulkUpsertError"}];
- COUNTER_BULK_UPSERT_OVERLOADED = 69 [(CounterOpts) = {Name: "BulkUpsertOverloaded"}];
+ COUNTER_BULK_UPSERT_SUCCESS = 67 [(CounterOpts) = {Name: "BulkUpsertSuccess"}];
+ COUNTER_BULK_UPSERT_ERROR = 68 [(CounterOpts) = {Name: "BulkUpsertError"}];
+ COUNTER_BULK_UPSERT_OVERLOADED = 69 [(CounterOpts) = {Name: "BulkUpsertOverloaded"}];
COUNTER_ERASE_ROWS_SUCCESS = 70 [(CounterOpts) = {Name: "EraseRowsSuccess"}];
COUNTER_ERASE_ROWS_ERROR = 71 [(CounterOpts) = {Name: "EraseRowsError"}];
COUNTER_ERASE_ROWS_OVERLOADED = 72 [(CounterOpts) = {Name: "EraseRowsOverloaded"}];
@@ -191,28 +191,28 @@ enum EPercentileCounters {
Ranges { Value: 67108864 Name: "134217728" }
Ranges { Value: 134217728 Name: "inf" }
}];
-
- COUNTER_SELECT_ROWS_PER_REQUEST = 11 [(CounterOpts) = {Name: "SelectRowsPerRequest",
- Ranges: { Value: 0 Name: "0"},
- Ranges: { Value: 1 Name: "1"},
- Ranges: { Value: 2 Name: "10"},
- Ranges: { Value: 11 Name: "100"}
- Ranges: { Value: 101 Name: "1000"}
- Ranges: { Value: 1001 Name: "10000"}
- Ranges: { Value: 10001 Name: "100000"}
- Ranges: { Value: 100001 Name: "inf"}
- }];
-
- COUNTER_RANGE_READ_ROWS_PER_REQUEST = 12 [(CounterOpts) = {Name: "RangeReadRowsPerRequest",
- Ranges: { Value: 0 Name: "0"},
- Ranges: { Value: 1 Name: "1"},
- Ranges: { Value: 2 Name: "10"},
- Ranges: { Value: 11 Name: "100"}
- Ranges: { Value: 101 Name: "1000"}
- Ranges: { Value: 1001 Name: "10000"}
- Ranges: { Value: 10001 Name: "100000"}
- Ranges: { Value: 100001 Name: "inf"}
- }];
+
+ COUNTER_SELECT_ROWS_PER_REQUEST = 11 [(CounterOpts) = {Name: "SelectRowsPerRequest",
+ Ranges: { Value: 0 Name: "0"},
+ Ranges: { Value: 1 Name: "1"},
+ Ranges: { Value: 2 Name: "10"},
+ Ranges: { Value: 11 Name: "100"}
+ Ranges: { Value: 101 Name: "1000"}
+ Ranges: { Value: 1001 Name: "10000"}
+ Ranges: { Value: 10001 Name: "100000"}
+ Ranges: { Value: 100001 Name: "inf"}
+ }];
+
+ COUNTER_RANGE_READ_ROWS_PER_REQUEST = 12 [(CounterOpts) = {Name: "RangeReadRowsPerRequest",
+ Ranges: { Value: 0 Name: "0"},
+ Ranges: { Value: 1 Name: "1"},
+ Ranges: { Value: 2 Name: "10"},
+ Ranges: { Value: 11 Name: "100"}
+ Ranges: { Value: 101 Name: "1000"}
+ Ranges: { Value: 1001 Name: "10000"}
+ Ranges: { Value: 10001 Name: "100000"}
+ Ranges: { Value: 100001 Name: "inf"}
+ }];
COUNTER_PLAN_QUEUE_LATENCY_MS = 13 [(CounterOpts) = {Name: "PlanQueueLatencyMs",
Ranges: { Value: 0 Name: "0"},
@@ -354,37 +354,37 @@ enum ETxTypes {
TXTYPE_PROGRESS_START = 5 [(TxTypeOpts) = {Name: "TxProgressStart"}];
TXTYPE_PROGRESS_WAIT = 6 [(TxTypeOpts) = {Name: "TxProgressWait"}];
TXTYPE_PROGRESS_COMPLETE = 7 [(TxTypeOpts) = {Name: "TxProgressComplete"}];
- TXTYPE_GET_STARD_STATE = 8 [(TxTypeOpts) = {Name: "TxGetShardState"}];
- TXTYPE_PROGRESS_RESEND_RS = 9 [(TxTypeOpts) = {Name: "TxProgressResendRS"}];
- TXTYPE_CANCEL_TX_PROPOSAL = 10 [(TxTypeOpts) = {Name: "TxCancelTxProposal"}];
- TXTYPE_CANCEL_BACKUP = 11 [(TxTypeOpts) = {Name: "TxCancelBackup"}];
- TXTYPE_MONITORING = 12 [(TxTypeOpts) = {Name: "TxMonitoring"}];
- TXTYPE_SCHEMA_CHANGED = 13 [(TxTypeOpts) = {Name: "TxSchemaChanged"}];
- TXTYPE_INIT_SPLIT_MERGE_DESTINATION = 14 [(TxTypeOpts) = {Name: "TxInitSplitMergeDestination"}];
- TXTYPE_SPLIT_TRANSFER_SNAPSHOT = 15 [(TxTypeOpts) = {Name: "TxSplitTransferSnapshot"}];
- TXTYPE_GET_TABLE_STATS = 16 [(TxTypeOpts) = {Name: "TxGetTableStats"}];
- TXTYPE_INITIATE_BORROWED_PARTS_RETURN = 17 [(TxTypeOpts) = {Name: "TxInitiateBorrowedPartsReturn"}];
- TXTYPE_RETURN_BORROWED_PART = 18 [(TxTypeOpts) = {Name: "TxReturnBorrowedPart"}];
- TXTYPE_RETURN_BORROWED_PART_ACK = 19 [(TxTypeOpts) = {Name: "TxReturnBorrowedPartAck"}];
- TXTYPE_GO_OFFLINE = 20 [(TxTypeOpts) = {Name: "TxGoOffline"}];
- TXTYPE_SPLIT = 21 [(TxTypeOpts) = {Name: "TxSplit"}];
- TXTYPE_START_SPLIT = 22 [(TxTypeOpts) = {Name: "TxStartSplit"}];
- TXTYPE_SPLIT_SNASHOT_COMPLETE = 23 [(TxTypeOpts) = {Name: "TxSplitSnapshotComplete"}];
- TXTYPE_SPLIT_TRANSFER_SNAPSHOT_ACK = 24 [(TxTypeOpts) = {Name: "TxTransferSnapshotAck"}];
- TXTYPE_SPLIT_PARTITIONING_CHANGED = 25 [(TxTypeOpts) = {Name: "TxSplitPartitioningChanged"}];
- TXTYPE_INIT_SCHEMA = 26 [(TxTypeOpts) = {Name: "TxInitSchema"}];
- TXTYPE_COPY_TABLE_SNAPSHOT_COMPLETE = 27 [(TxTypeOpts) = {Name: "TxCopyTableSnapshotComplete"}];
- TXTYPE_FULL_SCAN_COMPLETE = 28 [(TxTypeOpts) = {Name: "TxFullScanComplete"}];
+ TXTYPE_GET_STARD_STATE = 8 [(TxTypeOpts) = {Name: "TxGetShardState"}];
+ TXTYPE_PROGRESS_RESEND_RS = 9 [(TxTypeOpts) = {Name: "TxProgressResendRS"}];
+ TXTYPE_CANCEL_TX_PROPOSAL = 10 [(TxTypeOpts) = {Name: "TxCancelTxProposal"}];
+ TXTYPE_CANCEL_BACKUP = 11 [(TxTypeOpts) = {Name: "TxCancelBackup"}];
+ TXTYPE_MONITORING = 12 [(TxTypeOpts) = {Name: "TxMonitoring"}];
+ TXTYPE_SCHEMA_CHANGED = 13 [(TxTypeOpts) = {Name: "TxSchemaChanged"}];
+ TXTYPE_INIT_SPLIT_MERGE_DESTINATION = 14 [(TxTypeOpts) = {Name: "TxInitSplitMergeDestination"}];
+ TXTYPE_SPLIT_TRANSFER_SNAPSHOT = 15 [(TxTypeOpts) = {Name: "TxSplitTransferSnapshot"}];
+ TXTYPE_GET_TABLE_STATS = 16 [(TxTypeOpts) = {Name: "TxGetTableStats"}];
+ TXTYPE_INITIATE_BORROWED_PARTS_RETURN = 17 [(TxTypeOpts) = {Name: "TxInitiateBorrowedPartsReturn"}];
+ TXTYPE_RETURN_BORROWED_PART = 18 [(TxTypeOpts) = {Name: "TxReturnBorrowedPart"}];
+ TXTYPE_RETURN_BORROWED_PART_ACK = 19 [(TxTypeOpts) = {Name: "TxReturnBorrowedPartAck"}];
+ TXTYPE_GO_OFFLINE = 20 [(TxTypeOpts) = {Name: "TxGoOffline"}];
+ TXTYPE_SPLIT = 21 [(TxTypeOpts) = {Name: "TxSplit"}];
+ TXTYPE_START_SPLIT = 22 [(TxTypeOpts) = {Name: "TxStartSplit"}];
+ TXTYPE_SPLIT_SNASHOT_COMPLETE = 23 [(TxTypeOpts) = {Name: "TxSplitSnapshotComplete"}];
+ TXTYPE_SPLIT_TRANSFER_SNAPSHOT_ACK = 24 [(TxTypeOpts) = {Name: "TxTransferSnapshotAck"}];
+ TXTYPE_SPLIT_PARTITIONING_CHANGED = 25 [(TxTypeOpts) = {Name: "TxSplitPartitioningChanged"}];
+ TXTYPE_INIT_SCHEMA = 26 [(TxTypeOpts) = {Name: "TxInitSchema"}];
+ TXTYPE_COPY_TABLE_SNAPSHOT_COMPLETE = 27 [(TxTypeOpts) = {Name: "TxCopyTableSnapshotComplete"}];
+ TXTYPE_FULL_SCAN_COMPLETE = 28 [(TxTypeOpts) = {Name: "TxFullScanComplete"}];
TXTYPE_STREAM_RESPONSE_QUOTA = 29 [(TxTypeOpts) = {Name: "TxStreamResponseQuota"}];
TXTYPE_UNDELIVERED = 30 [(TxTypeOpts) = {Name: "TxUndelivered"}];
TXTYPE_DISCONNECTED = 31 [(TxTypeOpts) = {Name: "TxDisconnected"}];
- TXTYPE_S3_LISTING = 32 [(TxTypeOpts) = {Name: "TxS3Listing"}];
- TXTYPE_INITIATE_STATS_UPDATE = 33 [(TxTypeOpts) = {Name: "TTxInitiateStatsUpdate"}];
+ TXTYPE_S3_LISTING = 32 [(TxTypeOpts) = {Name: "TxS3Listing"}];
+ TXTYPE_INITIATE_STATS_UPDATE = 33 [(TxTypeOpts) = {Name: "TTxInitiateStatsUpdate"}];
TXTYPE_CHECK_IN_READ_SETS = 34 [(TxTypeOpts) = {Name: "TTxCheckInReadSets"}];
TXTYPE_REMOVE_OLD_IN_READ_SETS = 35 [(TxTypeOpts) = {Name: "TTxRemoveOldInReadSets"}];
- TXTYPE_UPLOAD_ROWS = 36 [(TxTypeOpts) = {Name: "TTxUploadRows"}];
+ TXTYPE_UPLOAD_ROWS = 36 [(TxTypeOpts) = {Name: "TTxUploadRows"}];
TXTYPE_STORE_TABLE_PATH = 37 [(TxTypeOpts) = {Name: "TTxStoreTablePath"}];
- TXTYPE_READ_COLUMNS = 38 [(TxTypeOpts) = {Name: "TTxReadColumns"}];
+ TXTYPE_READ_COLUMNS = 38 [(TxTypeOpts) = {Name: "TTxReadColumns"}];
TXTYPE_PROPOSE = 39 [(TxTypeOpts) = {Name: "TxPropose"}];
TXTYPE_STORE_SCAN_STATE = 40 [(TxTypeOpts) = {Name: "TxStoreScanState"}];
TXTYPE_INIT_SCHEMA_DEFAULTS = 41 [(TxTypeOpts) = {Name: "TxInitSchemaDefaults"}];
@@ -395,7 +395,7 @@ enum ETxTypes {
TXTYPE_GET_S3_UPLOAD_ID = 46 [(TxTypeOpts) = {Name: "TxGetS3UploadId"}];
TXTYPE_STORE_S3_UPLOAD_ID = 47 [(TxTypeOpts) = {Name: "TxStoreS3UploadId"}];
TXTYPE_ERASE_ROWS = 48 [(TxTypeOpts) = {Name: "TxEraseRows"}];
- TXTYPE_RELEASE_SNAPSHOT_REFERENCE = 49 [(TxTypeOpts) = {Name: "TxReleaseSnaphotReference"}];
+ TXTYPE_RELEASE_SNAPSHOT_REFERENCE = 49 [(TxTypeOpts) = {Name: "TxReleaseSnaphotReference"}];
TXTYPE_STOP_GUARD = 50 [(TxTypeOpts) = {Name: "TxStopGuard"}];
TXTYPE_GET_S3_DOWNLOAD_INFO = 51 [(TxTypeOpts) = {Name: "TxGetS3DownloadInfo"}];
TXTYPE_STORE_S3_DOWNLOAD_INFO = 52 [(TxTypeOpts) = {Name: "TxStoreS3DownloadInfo"}];
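Throughout these counters files, each enum value carries its exported metric name via the (CounterOpts) or (TxTypeOpts) custom option, and percentile counters additionally declare their histogram buckets inline; in the Ranges entries above, Value appears to be a bucket's lower bound and Name the label under which it is reported. A hypothetical extra entry in such an enum, mirroring the style of the existing ones, might look like this (the counter id 99 and the bucket layout are purely illustrative):

    // Hypothetical percentile counter declaration, mirroring the pattern above
    COUNTER_EXAMPLE_LATENCY_MS = 99 [(CounterOpts) = {Name: "ExampleLatencyMs",
        Ranges: { Value: 0    Name: "0"},
        Ranges: { Value: 1    Name: "10"},
        Ranges: { Value: 11   Name: "100"},
        Ranges: { Value: 101  Name: "inf"}
    }];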
diff --git a/ydb/core/protos/counters_schemeshard.proto b/ydb/core/protos/counters_schemeshard.proto
index d842f37548c..a839915c98f 100644
--- a/ydb/core/protos/counters_schemeshard.proto
+++ b/ydb/core/protos/counters_schemeshard.proto
@@ -9,13 +9,13 @@ option (TabletTypeName) = "SchemeShard"; // Used as prefix for all counters
enum ESimpleCounters {
COUNTER_SIMPLE_IGNORE = 0;
- COUNTER_RESPONSE_TIME_USEC = 1 [(CounterOpts) = {Name: "ResponseTimeMicrosec"}];
- COUNTER_DIR_COUNT = 2 [(CounterOpts) = {Name: "Directories"}];
- COUNTER_TABLE_COUNT = 3 [(CounterOpts) = {Name: "Tables"}];
- COUNTER_PQ_GROUP_COUNT = 4 [(CounterOpts) = {Name: "PqGroups"}];
- COUNTER_TABLE_SHARD_ACTIVE_COUNT = 5 [(CounterOpts) = {Name: "TableShardsActive"}];
- COUNTER_TABLE_SHARD_INACTIVE_COUNT = 6 [(CounterOpts) = {Name: "TableShardsInactive"}];
- COUNTER_PQ_SHARD_COUNT = 7 [(CounterOpts) = {Name: "PqGroupShards"}];
+ COUNTER_RESPONSE_TIME_USEC = 1 [(CounterOpts) = {Name: "ResponseTimeMicrosec"}];
+ COUNTER_DIR_COUNT = 2 [(CounterOpts) = {Name: "Directories"}];
+ COUNTER_TABLE_COUNT = 3 [(CounterOpts) = {Name: "Tables"}];
+ COUNTER_PQ_GROUP_COUNT = 4 [(CounterOpts) = {Name: "PqGroups"}];
+ COUNTER_TABLE_SHARD_ACTIVE_COUNT = 5 [(CounterOpts) = {Name: "TableShardsActive"}];
+ COUNTER_TABLE_SHARD_INACTIVE_COUNT = 6 [(CounterOpts) = {Name: "TableShardsInactive"}];
+ COUNTER_PQ_SHARD_COUNT = 7 [(CounterOpts) = {Name: "PqGroupShards"}];
COUNTER_PQ_RB_SHARD_COUNT = 8 [(CounterOpts) = {Name: "PqReadBalancerShards"}];
COUNTER_SUB_DOMAIN_COUNT = 9 [(CounterOpts) = {Name: "SubDomains"}];
COUNTER_SUB_DOMAIN_COORDINATOR_COUNT = 10 [(CounterOpts) = {Name: "SubDomainsCoordinators"}];
@@ -25,28 +25,28 @@ enum ESimpleCounters {
COUNTER_BLOCKSTORE_VOLUME_COUNT = 14 [(CounterOpts) = {Name: "BlockStoreVolumes"}];
COUNTER_BLOCKSTORE_VOLUME_SHARD_COUNT = 15 [(CounterOpts) = {Name: "BlockStoreVolumeShards"}];
COUNTER_BLOCKSTORE_PARTITION_SHARD_COUNT = 16 [(CounterOpts) = {Name: "BlockStorePartitionShards"}];
-
- COUNTER_IN_FLIGHT_OPS_TxInvalid = 17 [(CounterOpts) = {Name: "InFlightOps/Invalid"}];
- COUNTER_IN_FLIGHT_OPS_TxMkDir = 18 [(CounterOpts) = {Name: "InFlightOps/MkDir"}];
- COUNTER_IN_FLIGHT_OPS_TxCreateTable = 19 [(CounterOpts) = {Name: "InFlightOps/CreateTable"}];
- COUNTER_IN_FLIGHT_OPS_TxCreatePQGroup = 20 [(CounterOpts) = {Name: "InFlightOps/CreatePQGroup"}];
- COUNTER_IN_FLIGHT_OPS_TxAlterPQGroup = 21 [(CounterOpts) = {Name: "InFlightOps/AlterPQGroup"}];
- COUNTER_IN_FLIGHT_OPS_TxAlterTable = 22 [(CounterOpts) = {Name: "InFlightOps/AlterTable"}];
- COUNTER_IN_FLIGHT_OPS_TxDropTable = 23 [(CounterOpts) = {Name: "InFlightOps/DropTable"}];
- COUNTER_IN_FLIGHT_OPS_TxDropPQGroup = 24 [(CounterOpts) = {Name: "InFlightOps/DropPQGroup"}];
- COUNTER_IN_FLIGHT_OPS_TxModifyACL = 25 [(CounterOpts) = {Name: "InFlightOps/ModifyACL"}];
- COUNTER_IN_FLIGHT_OPS_TxRmDir = 26 [(CounterOpts) = {Name: "InFlightOps/RmDir"}];
- COUNTER_IN_FLIGHT_OPS_TxCopyTable = 27 [(CounterOpts) = {Name: "InFlightOps/CopyTable"}];
- COUNTER_IN_FLIGHT_OPS_TxSplitTablePartition = 28 [(CounterOpts) = {Name: "InFlightOps/SplitTablePartition"}];
- COUNTER_IN_FLIGHT_OPS_TxBackup = 29 [(CounterOpts) = {Name: "InFlightOps/Backup"}];
- COUNTER_IN_FLIGHT_OPS_TxCreateSubDomain = 30 [(CounterOpts) = {Name: "InFlightOps/CreateSubDomain"}];
- COUNTER_IN_FLIGHT_OPS_TxDropSubDomain = 31 [(CounterOpts) = {Name: "InFlightOps/DropSubDomain"}];
- COUNTER_IN_FLIGHT_OPS_TxCreateRtmrVolume = 32 [(CounterOpts) = {Name: "InFlightOps/CreateRtmrVolume"}];
- COUNTER_IN_FLIGHT_OPS_TxCreateBlockStoreVolume = 33 [(CounterOpts) = {Name: "InFlightOps/CreateBlockStoreVolume"}];
- COUNTER_IN_FLIGHT_OPS_TxAlterBlockStoreVolume = 34 [(CounterOpts) = {Name: "InFlightOps/AlterBlockStoreVolume"}];
- COUNTER_IN_FLIGHT_OPS_TxAssignBlockStoreVolume = 35 [(CounterOpts) = {Name: "InFlightOps/AssignBlockStoreVolume"}];
- COUNTER_IN_FLIGHT_OPS_TxDropBlockStoreVolume = 36 [(CounterOpts) = {Name: "InFlightOps/DropBlockStoreVolume"}];
- COUNTER_IN_FLIGHT_OPS_UNKNOWN = 37 [(CounterOpts) = {Name: "InFlightOps/UNKNOWN"}];
+
+ COUNTER_IN_FLIGHT_OPS_TxInvalid = 17 [(CounterOpts) = {Name: "InFlightOps/Invalid"}];
+ COUNTER_IN_FLIGHT_OPS_TxMkDir = 18 [(CounterOpts) = {Name: "InFlightOps/MkDir"}];
+ COUNTER_IN_FLIGHT_OPS_TxCreateTable = 19 [(CounterOpts) = {Name: "InFlightOps/CreateTable"}];
+ COUNTER_IN_FLIGHT_OPS_TxCreatePQGroup = 20 [(CounterOpts) = {Name: "InFlightOps/CreatePQGroup"}];
+ COUNTER_IN_FLIGHT_OPS_TxAlterPQGroup = 21 [(CounterOpts) = {Name: "InFlightOps/AlterPQGroup"}];
+ COUNTER_IN_FLIGHT_OPS_TxAlterTable = 22 [(CounterOpts) = {Name: "InFlightOps/AlterTable"}];
+ COUNTER_IN_FLIGHT_OPS_TxDropTable = 23 [(CounterOpts) = {Name: "InFlightOps/DropTable"}];
+ COUNTER_IN_FLIGHT_OPS_TxDropPQGroup = 24 [(CounterOpts) = {Name: "InFlightOps/DropPQGroup"}];
+ COUNTER_IN_FLIGHT_OPS_TxModifyACL = 25 [(CounterOpts) = {Name: "InFlightOps/ModifyACL"}];
+ COUNTER_IN_FLIGHT_OPS_TxRmDir = 26 [(CounterOpts) = {Name: "InFlightOps/RmDir"}];
+ COUNTER_IN_FLIGHT_OPS_TxCopyTable = 27 [(CounterOpts) = {Name: "InFlightOps/CopyTable"}];
+ COUNTER_IN_FLIGHT_OPS_TxSplitTablePartition = 28 [(CounterOpts) = {Name: "InFlightOps/SplitTablePartition"}];
+ COUNTER_IN_FLIGHT_OPS_TxBackup = 29 [(CounterOpts) = {Name: "InFlightOps/Backup"}];
+ COUNTER_IN_FLIGHT_OPS_TxCreateSubDomain = 30 [(CounterOpts) = {Name: "InFlightOps/CreateSubDomain"}];
+ COUNTER_IN_FLIGHT_OPS_TxDropSubDomain = 31 [(CounterOpts) = {Name: "InFlightOps/DropSubDomain"}];
+ COUNTER_IN_FLIGHT_OPS_TxCreateRtmrVolume = 32 [(CounterOpts) = {Name: "InFlightOps/CreateRtmrVolume"}];
+ COUNTER_IN_FLIGHT_OPS_TxCreateBlockStoreVolume = 33 [(CounterOpts) = {Name: "InFlightOps/CreateBlockStoreVolume"}];
+ COUNTER_IN_FLIGHT_OPS_TxAlterBlockStoreVolume = 34 [(CounterOpts) = {Name: "InFlightOps/AlterBlockStoreVolume"}];
+ COUNTER_IN_FLIGHT_OPS_TxAssignBlockStoreVolume = 35 [(CounterOpts) = {Name: "InFlightOps/AssignBlockStoreVolume"}];
+ COUNTER_IN_FLIGHT_OPS_TxDropBlockStoreVolume = 36 [(CounterOpts) = {Name: "InFlightOps/DropBlockStoreVolume"}];
+ COUNTER_IN_FLIGHT_OPS_UNKNOWN = 37 [(CounterOpts) = {Name: "InFlightOps/UNKNOWN"}];
COUNTER_KESUS_COUNT = 38 [(CounterOpts) = {Name: "Kesus"}];
COUNTER_KESUS_SHARD_COUNT = 39 [(CounterOpts) = {Name: "KesusShards"}];
@@ -152,28 +152,28 @@ enum ESimpleCounters {
enum ECumulativeCounters {
COUNTER_CUMULATIVE_IGNORE = 0;
-
- COUNTER_FINISHED_OPS_TxInvalid = 1 [(CounterOpts) = {Name: "FinishedOps/Invalid"}];
- COUNTER_FINISHED_OPS_TxMkDir = 2 [(CounterOpts) = {Name: "FinishedOps/MkDir"}];
- COUNTER_FINISHED_OPS_TxCreateTable = 3 [(CounterOpts) = {Name: "FinishedOps/CreateTable"}];
- COUNTER_FINISHED_OPS_TxCreatePQGroup = 4 [(CounterOpts) = {Name: "FinishedOps/CreatePQGroup"}];
- COUNTER_FINISHED_OPS_TxAlterPQGroup = 5 [(CounterOpts) = {Name: "FinishedOps/AlterPQGroup"}];
- COUNTER_FINISHED_OPS_TxAlterTable = 6 [(CounterOpts) = {Name: "FinishedOps/AlterTable"}];
- COUNTER_FINISHED_OPS_TxDropTable = 7 [(CounterOpts) = {Name: "FinishedOps/DropTable"}];
- COUNTER_FINISHED_OPS_TxDropPQGroup = 8 [(CounterOpts) = {Name: "FinishedOps/DropPQGroup"}];
- COUNTER_FINISHED_OPS_TxModifyACL = 9 [(CounterOpts) = {Name: "FinishedOps/ModifyACL"}];
- COUNTER_FINISHED_OPS_TxRmDir = 10 [(CounterOpts) = {Name: "FinishedOps/RmDir"}];
- COUNTER_FINISHED_OPS_TxCopyTable = 11 [(CounterOpts) = {Name: "FinishedOps/CopyTable"}];
- COUNTER_FINISHED_OPS_TxSplitTablePartition = 12 [(CounterOpts) = {Name: "FinishedOps/SplitTablePartition"}];
- COUNTER_FINISHED_OPS_TxBackup = 13 [(CounterOpts) = {Name: "FinishedOps/Backup"}];
- COUNTER_FINISHED_OPS_TxCreateSubDomain = 14 [(CounterOpts) = {Name: "FinishedOps/CreateSubDomain"}];
- COUNTER_FINISHED_OPS_TxDropSubDomain = 15 [(CounterOpts) = {Name: "FinishedOps/DropSubDomain"}];
- COUNTER_FINISHED_OPS_TxCreateRtmrVolume = 16 [(CounterOpts) = {Name: "FinishedOps/CreateRtmrVolume"}];
- COUNTER_FINISHED_OPS_TxCreateBlockStoreVolume = 17 [(CounterOpts) = {Name: "FinishedOps/CreateBlockStoreVolume"}];
- COUNTER_FINISHED_OPS_TxAlterBlockStoreVolume = 18 [(CounterOpts) = {Name: "FinishedOps/AlterBlockStoreVolume"}];
- COUNTER_FINISHED_OPS_TxAssignBlockStoreVolume = 19 [(CounterOpts) = {Name: "FinishedOps/AssignBlockStoreVolume"}];
- COUNTER_FINISHED_OPS_TxDropBlockStoreVolume = 20 [(CounterOpts) = {Name: "FinishedOps/DropBlockStoreVolume"}];
- COUNTER_FINISHED_OPS_UNKNOWN = 21 [(CounterOpts) = {Name: "FinishedOps/UNKNOWN"}];
+
+ COUNTER_FINISHED_OPS_TxInvalid = 1 [(CounterOpts) = {Name: "FinishedOps/Invalid"}];
+ COUNTER_FINISHED_OPS_TxMkDir = 2 [(CounterOpts) = {Name: "FinishedOps/MkDir"}];
+ COUNTER_FINISHED_OPS_TxCreateTable = 3 [(CounterOpts) = {Name: "FinishedOps/CreateTable"}];
+ COUNTER_FINISHED_OPS_TxCreatePQGroup = 4 [(CounterOpts) = {Name: "FinishedOps/CreatePQGroup"}];
+ COUNTER_FINISHED_OPS_TxAlterPQGroup = 5 [(CounterOpts) = {Name: "FinishedOps/AlterPQGroup"}];
+ COUNTER_FINISHED_OPS_TxAlterTable = 6 [(CounterOpts) = {Name: "FinishedOps/AlterTable"}];
+ COUNTER_FINISHED_OPS_TxDropTable = 7 [(CounterOpts) = {Name: "FinishedOps/DropTable"}];
+ COUNTER_FINISHED_OPS_TxDropPQGroup = 8 [(CounterOpts) = {Name: "FinishedOps/DropPQGroup"}];
+ COUNTER_FINISHED_OPS_TxModifyACL = 9 [(CounterOpts) = {Name: "FinishedOps/ModifyACL"}];
+ COUNTER_FINISHED_OPS_TxRmDir = 10 [(CounterOpts) = {Name: "FinishedOps/RmDir"}];
+ COUNTER_FINISHED_OPS_TxCopyTable = 11 [(CounterOpts) = {Name: "FinishedOps/CopyTable"}];
+ COUNTER_FINISHED_OPS_TxSplitTablePartition = 12 [(CounterOpts) = {Name: "FinishedOps/SplitTablePartition"}];
+ COUNTER_FINISHED_OPS_TxBackup = 13 [(CounterOpts) = {Name: "FinishedOps/Backup"}];
+ COUNTER_FINISHED_OPS_TxCreateSubDomain = 14 [(CounterOpts) = {Name: "FinishedOps/CreateSubDomain"}];
+ COUNTER_FINISHED_OPS_TxDropSubDomain = 15 [(CounterOpts) = {Name: "FinishedOps/DropSubDomain"}];
+ COUNTER_FINISHED_OPS_TxCreateRtmrVolume = 16 [(CounterOpts) = {Name: "FinishedOps/CreateRtmrVolume"}];
+ COUNTER_FINISHED_OPS_TxCreateBlockStoreVolume = 17 [(CounterOpts) = {Name: "FinishedOps/CreateBlockStoreVolume"}];
+ COUNTER_FINISHED_OPS_TxAlterBlockStoreVolume = 18 [(CounterOpts) = {Name: "FinishedOps/AlterBlockStoreVolume"}];
+ COUNTER_FINISHED_OPS_TxAssignBlockStoreVolume = 19 [(CounterOpts) = {Name: "FinishedOps/AssignBlockStoreVolume"}];
+ COUNTER_FINISHED_OPS_TxDropBlockStoreVolume = 20 [(CounterOpts) = {Name: "FinishedOps/DropBlockStoreVolume"}];
+ COUNTER_FINISHED_OPS_UNKNOWN = 21 [(CounterOpts) = {Name: "FinishedOps/UNKNOWN"}];
COUNTER_FINISHED_OPS_TxCreateKesus = 22 [(CounterOpts) = {Name: "FinishedOps/CreateKesus"}];
COUNTER_FINISHED_OPS_TxDropKesus = 23 [(CounterOpts) = {Name: "FinishedOps/DropKesus"}];
diff --git a/ydb/core/protos/flat_scheme_op.proto b/ydb/core/protos/flat_scheme_op.proto
index ac190339992..33e598c1c25 100644
--- a/ydb/core/protos/flat_scheme_op.proto
+++ b/ydb/core/protos/flat_scheme_op.proto
@@ -15,12 +15,12 @@ import "ydb/library/mkql_proto/protos/minikql.proto";
package NKikimrSchemeOp;
-option java_package = "ru.yandex.kikimr.proto";
-
-message TMkDir {
- optional string Name = 1;
-}
-
+option java_package = "ru.yandex.kikimr.proto";
+
+message TMkDir {
+ optional string Name = 1;
+}
+
enum EDropWaitPolicy {
EDropFailOnChanges = 0;
EDropAbortChanges = 1; //depricated
@@ -53,8 +53,8 @@ enum EColumnStorage {
ColumnStorage1Ext2 = 3;
ColumnStorage2Ext1 = 4;
ColumnStorage2Ext2 = 5;
- ColumnStorage1Med2Ext2 = 6;
- ColumnStorage2Med2Ext2 = 7;
+ ColumnStorage1Med2Ext2 = 6;
+ ColumnStorage2Med2Ext2 = 7;
ColumnStorageTest_1_2_1k = 999;
}
@@ -70,19 +70,19 @@ enum EMvccState {
MvccDisabled = 2;
}
-message TColumnDescription {
- optional string Name = 1;
- optional string Type = 2;
- optional uint32 TypeId = 3;
- optional uint32 Id = 4;
+message TColumnDescription {
+ optional string Name = 1;
+ optional string Type = 2;
+ optional uint32 TypeId = 3;
+ optional uint32 Id = 4;
optional uint32 Family = 5; // On default place column to default(0) family
optional string FamilyName = 6; // set Family by name (0 - "default")
oneof DefaultValue {
string DefaultFromSequence = 7; // Path to sequence for default values
}
optional bool NotNull = 8;
-}
-
+}
+
message TStorageSettings {
optional string PreferredPoolKind = 1;
optional bool AllowOtherKinds = 2 [default = true];
@@ -115,7 +115,7 @@ enum ECompactionStrategy {
CompactionStrategySharded = 2;
}
-message TCompactionPolicy {
+message TCompactionPolicy {
message TBackgroundPolicy {
// How much (in %) of forced compaction criteria should be met to submit background task.
optional uint32 Threshold = 1 [default = 101]; // no background compaction by default
@@ -130,14 +130,14 @@ message TCompactionPolicy {
optional string ResourceBrokerTask = 5;
}
- message TGenerationPolicy {
- optional uint32 GenerationId = 1;
- optional uint64 SizeToCompact = 2;
- optional uint32 CountToCompact = 3;
- optional uint32 ForceCountToCompact = 4; // OR one of force limits happend
- optional uint64 ForceSizeToCompact = 5;
+ message TGenerationPolicy {
+ optional uint32 GenerationId = 1;
+ optional uint64 SizeToCompact = 2;
+ optional uint32 CountToCompact = 3;
+ optional uint32 ForceCountToCompact = 4; // OR one of force limits happend
+ optional uint64 ForceSizeToCompact = 5;
optional uint32 CompactionBrokerQueue = 6; // DEPRECATED
- optional bool KeepInCache = 7;
+ optional bool KeepInCache = 7;
optional TBackgroundPolicy BackgroundCompactionPolicy = 8;
optional string ResourceBrokerTask = 9;
optional uint32 ExtraCompactionPercent = 10;
@@ -145,8 +145,8 @@ message TCompactionPolicy {
optional uint32 ExtraCompactionExpPercent = 12;
optional uint64 ExtraCompactionExpMaxSize = 13;
optional uint64 UpliftPartSize = 14;
- }
-
+ }
+
message TShardPolicy {
// Adjacent shards smaller than this will be merged
optional uint64 MinShardSize = 1 [default = 33554432];
@@ -191,14 +191,14 @@ message TCompactionPolicy {
optional uint64 MinSliceSizeToReuse = 14 [default = 524288];
}
- optional uint64 InMemSizeToSnapshot = 1;
- optional uint32 InMemStepsToSnapshot = 2; // snapshot inmem state when size AND steps from last snapshot passed
- optional uint32 InMemForceStepsToSnapshot = 3; // OR steps passed
- optional uint64 InMemForceSizeToSnapshot = 4; // OR size reached
+ optional uint64 InMemSizeToSnapshot = 1;
+ optional uint32 InMemStepsToSnapshot = 2; // snapshot inmem state when size AND steps from last snapshot passed
+ optional uint32 InMemForceStepsToSnapshot = 3; // OR steps passed
+ optional uint64 InMemForceSizeToSnapshot = 4; // OR size reached
optional uint32 InMemCompactionBrokerQueue = 5 [default = 0]; // DEPRECATED
optional uint64 ReadAheadHiThreshold = 6 [default = 67108864];
optional uint64 ReadAheadLoThreshold = 7 [default = 16777216];
- optional uint32 MinDataPageSize = 8 [default = 7168]; // 7KB is smallest optimal for 512 byte sectors
+ optional uint32 MinDataPageSize = 8 [default = 7168]; // 7KB is smallest optimal for 512 byte sectors
optional uint32 SnapBrokerQueue = 9 [default = 0]; // DEPRECATED
optional uint32 BackupBrokerQueue = 11 [default = 1]; // DEPRECATED
optional uint32 DefaultTaskPriority = 12 [default = 5];
@@ -212,33 +212,33 @@ message TCompactionPolicy {
optional ECompactionStrategy CompactionStrategy = 20 [default = CompactionStrategyUnset];
optional TShardPolicy ShardPolicy = 21;
optional bool KeepEraseMarkers = 22;
-
- repeated TGenerationPolicy Generation = 10;
-}
-
-message TFastSplitSettings {
- optional uint64 SizeThreshold = 1;
- optional uint64 RowCountThreshold = 2;
- optional uint32 CpuPercentageThreshold = 3;
-}
-
-message TSplitByLoadSettings {
- optional bool Enabled = 1;
- optional uint32 CpuPercentageThreshold = 2;
- // TODO: optional uint32 KeySampleSize = 3;
- // TODO: optional uint32 KeySampleCollectTimeSec = 4;
-}
-
-message TPartitioningPolicy {
- optional uint64 SizeToSplit = 1; // Partition gets split when this threshold is exceeded
-
- optional uint32 MinPartitionsCount = 2;
- optional uint32 MaxPartitionsCount = 3;
-
- optional TFastSplitSettings FastSplitSettings = 4;
- optional TSplitByLoadSettings SplitByLoadSettings = 5;
-}
-
+
+ repeated TGenerationPolicy Generation = 10;
+}
+
+message TFastSplitSettings {
+ optional uint64 SizeThreshold = 1;
+ optional uint64 RowCountThreshold = 2;
+ optional uint32 CpuPercentageThreshold = 3;
+}
+
+message TSplitByLoadSettings {
+ optional bool Enabled = 1;
+ optional uint32 CpuPercentageThreshold = 2;
+ // TODO: optional uint32 KeySampleSize = 3;
+ // TODO: optional uint32 KeySampleCollectTimeSec = 4;
+}
+
+message TPartitioningPolicy {
+ optional uint64 SizeToSplit = 1; // Partition gets split when this threshold is exceeded
+
+ optional uint32 MinPartitionsCount = 2;
+ optional uint32 MaxPartitionsCount = 3;
+
+ optional TFastSplitSettings FastSplitSettings = 4;
+ optional TSplitByLoadSettings SplitByLoadSettings = 5;
+}
+
message TPipelineConfig {
optional uint32 NumActiveTx = 1 [default = 8];
optional uint32 DataTxCacheSize = 2;
@@ -247,23 +247,23 @@ message TPipelineConfig {
optional bool EnableSoftUpdates = 5;
}
-message TPartitionConfig {
+message TPartitionConfig {
optional string NamedCompactionPolicy = 1; // One of the predefined policies
optional TCompactionPolicy CompactionPolicy = 2; // Customized policy
optional uint64 FollowerCount = 3;
optional uint64 ExecutorCacheSize = 4; // Cache size for the whole tablet including all user and system tables
optional bool AllowFollowerPromotion = 5 [default = true]; // if true followers can upgrade to leader, if false followers only handle reads
- optional uint64 TxReadSizeLimit = 6; // Maximum size in bytes that is allowed to be read by a single Tx
+ optional uint64 TxReadSizeLimit = 6; // Maximum size in bytes that is allowed to be read by a single Tx
//optional bool CrossDataCenterFollowers = 7; // deprecated -> CrossDataCenterFollowerCount
optional uint32 CrossDataCenterFollowerCount = 8; // deprecated -> FollowerGroups
- optional uint32 ChannelProfileId = 9; // for configuring erasure and disk categories
- optional TPartitioningPolicy PartitioningPolicy = 10;
+ optional uint32 ChannelProfileId = 9; // for configuring erasure and disk categories
+ optional TPartitioningPolicy PartitioningPolicy = 10;
optional TPipelineConfig PipelineConfig = 11;
- repeated TFamilyDescription ColumnFamilies = 12;
+ repeated TFamilyDescription ColumnFamilies = 12;
optional string ResourceProfile = 13;
optional bool DisableStatisticsCalculation = 14; // KIKIMR-3861 hotfix
optional bool EnableFilterByKey = 15 [default = false]; // Build and use per-part bloom filter for fast key non-existence check
- optional bool ExecutorFastLogPolicy = 16 [default = true]; // Commit log faster at the expense of bandwidth for cross-DC
+ optional bool ExecutorFastLogPolicy = 16 [default = true]; // Commit log faster at the expense of bandwidth for cross-DC
repeated NKikimrStorageSettings.TStorageRoom StorageRooms = 17;
optional bool EnableEraseCache = 18 [default = true]; // Use erase cache for faster iteration over erased rows
optional uint32 EraseCacheMinRows = 19; // Minimum number of erased rows worth caching (default 16)
@@ -273,13 +273,13 @@ message TPartitionConfig {
repeated NKikimrHive.TFollowerGroup FollowerGroups = 23;
reserved 24; // EMvccState MvccState = 24; no longer used
optional uint64 KeepSnapshotTimeout = 25; // milliseconds
-}
-
-message TSplitBoundary {
- optional NKikimrMiniKQL.TValue KeyPrefix = 1; // A tuple representing full key or key prefix
- optional bytes SerializedKeyPrefix = 2; // Or same as above but already serialized
-}
-
+}
+
+message TSplitBoundary {
+ optional NKikimrMiniKQL.TValue KeyPrefix = 1; // A tuple representing full key or key prefix
+ optional bytes SerializedKeyPrefix = 2; // Or same as above but already serialized
+}
+
message TShardIdx {
optional uint64 OwnerId = 1;
optional uint64 LocalId = 2;
@@ -319,27 +319,27 @@ message TTTLSettings {
}
}
-message TTableDescription {
- optional string Name = 1;
+message TTableDescription {
+ optional string Name = 1;
optional uint64 Id_Deprecated = 2; // LocalPathId, deprecated
- repeated TColumnDescription Columns = 3;
- repeated string KeyColumnNames = 4;
- repeated uint32 KeyColumnIds = 5;
- optional uint32 UniformPartitionsCount = 6; // Describes uniform partitioning on first key column into
- // N ranges. The first key column must be of integer type
-
- optional TPartitionConfig PartitionConfig = 7;
+ repeated TColumnDescription Columns = 3;
+ repeated string KeyColumnNames = 4;
+ repeated uint32 KeyColumnIds = 5;
+ optional uint32 UniformPartitionsCount = 6; // Describes uniform partitioning on first key column into
+ // N ranges. The first key column must be of integer type
+
+ optional TPartitionConfig PartitionConfig = 7;
repeated TColumnDescription DropColumns = 8;
optional string Path = 9;
-
+
// It shouldn't be there
- optional bytes PartitionRangeBegin = 20;
- optional bytes PartitionRangeEnd = 21;
- optional bool PartitionRangeBeginIsInclusive = 22;
- optional bool PartitionRangeEndIsInclusive = 23;
-
- optional string CopyFromTable = 30;
- repeated TSplitBoundary SplitBoundary = 31; // Boundaries for non-uniform split
+ optional bytes PartitionRangeBegin = 20;
+ optional bytes PartitionRangeEnd = 21;
+ optional bool PartitionRangeBeginIsInclusive = 22;
+ optional bool PartitionRangeEndIsInclusive = 23;
+
+ optional string CopyFromTable = 30;
+ repeated TSplitBoundary SplitBoundary = 31; // Boundaries for non-uniform split
repeated TIndexDescription TableIndexes = 32;
@@ -354,8 +354,8 @@ message TTableDescription {
repeated TCdcStreamDescription CdcStreams = 38;
repeated TSequenceDescription Sequences = 39;
-}
-
+}
+
message TCompressionOptions {
optional EColumnCodec CompressionCodec = 2; // LZ4 (in arrow LZ4_FRAME variant) if not set
optional int32 CompressionLevel = 3; // Use default compression level if not set (0 != not set)
@@ -469,7 +469,7 @@ message TColumnDataLifeCycle {
uint32 ExpireAfterSeconds = 2;
uint64 ExpireAfterBytes = 4;
}
- optional TTTLSettings.EUnit ColumnUnit = 3;
+ optional TTTLSettings.EUnit ColumnUnit = 3;
}
message TStorageTier {
@@ -1014,16 +1014,16 @@ message TModifyACL {
optional string NewOwner = 3;
}
-message TSplitMergeTablePartitions {
- optional uint64 TxId = 1;
- optional string TablePath = 2;
+message TSplitMergeTablePartitions {
+ optional uint64 TxId = 1;
+ optional string TablePath = 2;
optional uint64 TableLocalId = 3;
- repeated uint64 SourceTabletId = 4;
- repeated TSplitBoundary SplitBoundary = 5; // Points of split (there will be N+1 parts)
- optional uint64 SchemeshardId = 6; // Only needed if TableId is used instead of path
+ repeated uint64 SourceTabletId = 4;
+ repeated TSplitBoundary SplitBoundary = 5; // Points of split (there will be N+1 parts)
+ optional uint64 SchemeshardId = 6; // Only needed if TableId is used instead of path
optional uint64 TableOwnerId = 7;
-}
-
+}
+
message TUserAttribute {
optional string Key = 1;
optional string Value = 2;
@@ -1066,9 +1066,9 @@ message TReplicationDescription {
optional uint64 ControllerId = 5; // replication controller's tablet id
}
-enum EOperationType {
- ESchemeOpMkDir = 1;
- ESchemeOpCreateTable = 2;
+enum EOperationType {
+ ESchemeOpMkDir = 1;
+ ESchemeOpCreateTable = 2;
ESchemeOpCreatePersQueueGroup = 3;
ESchemeOpDropTable = 4;
ESchemeOpDropPersQueueGroup = 5;
@@ -1076,7 +1076,7 @@ enum EOperationType {
ESchemeOpAlterPersQueueGroup = 7;
ESchemeOpModifyACL = 8;
ESchemeOpRmDir = 9;
- ESchemeOpSplitMergeTablePartitions = 10;
+ ESchemeOpSplitMergeTablePartitions = 10;
ESchemeOpBackup = 11;
ESchemeOpCreateSubDomain = 12;
ESchemeOpDropSubDomain = 13;
@@ -1172,8 +1172,8 @@ enum EOperationType {
ESchemeOpCreateReplication = 76;
ESchemeOpAlterReplication = 77;
ESchemeOpDropReplication = 78;
-}
-
+}
+
message TApplyIf {
optional uint64 PathId = 1;
optional uint64 PathVersion = 2;
@@ -1217,22 +1217,22 @@ message TDropIndex {
optional string IndexName = 2;
}
-// Request for scheme modification
-// Has only one of the operations
-message TModifyScheme {
- optional string WorkingDir = 1;
- optional EOperationType OperationType = 2;
+// Request for scheme modification
+// Has only one of the operations
+message TModifyScheme {
+ optional string WorkingDir = 1;
+ optional EOperationType OperationType = 2;
optional bool Internal = 36 [default = false]; // internal operations are not generated directly by the user
optional bool FailOnExist = 50 [default = false]; // as a replacement for TEvModifySchemeTransaction.FailOnExist
- optional TMkDir MkDir = 3;
- optional TTableDescription CreateTable = 4;
+ optional TMkDir MkDir = 3;
+ optional TTableDescription CreateTable = 4;
optional TPersQueueGroupDescription CreatePersQueueGroup = 5;
optional TPersQueueGroupDescription AlterPersQueueGroup = 6;
optional TDrop Drop = 7;
optional TModifyACL ModifyACL = 8;
optional TTableDescription AlterTable = 9;
- optional TSplitMergeTablePartitions SplitMergeTablePartitions = 10;
+ optional TSplitMergeTablePartitions SplitMergeTablePartitions = 10;
optional TBackupTask Backup = 11;
optional NKikimrSubDomains.TSubDomainSettings SubDomain = 12;
optional TRtmrVolumeDescription CreateRtmrVolume = 13;
@@ -1273,8 +1273,8 @@ message TModifyScheme {
optional TMove MoveTableIndex = 49;
optional TSequenceDescription Sequence = 51;
optional TReplicationDescription Replication = 52;
-}
-
+}
+
// "Script", used by client to parse text files with multiple DDL commands
message TModifyScript {
repeated TModifyScheme ModifyScheme = 1;
@@ -1284,34 +1284,34 @@ message TDescribeOptions {
optional bool ReturnPartitioningInfo = 1 [default = true];
optional bool ReturnPartitionConfig = 2 [default = true];
optional bool BackupInfo = 3 [default = false];
- optional bool ReturnPartitionStats = 4 [default = false];
+ optional bool ReturnPartitionStats = 4 [default = false];
optional bool ReturnChildren = 5 [default = true];
- optional bool ReturnBoundaries = 6 [default = false];
+ optional bool ReturnBoundaries = 6 [default = false];
optional bool ShowPrivateTable = 7 [default = false];
optional bool ReturnChannelsBinding = 8 [default = false];
}
-// Request to read scheme for a specific path
-// Path can be specified in two ways:
-// 1. full path
-// 2. (schemeshard id, path id) pair that is unique within the whole system
-message TDescribePath {
- optional string Path = 1;
- optional uint64 PathId = 2;
- optional uint64 SchemeshardId = 3;
+// Request to read scheme for a specific path
+// Path can be specified in two ways:
+// 1. full path
+// 2. (schemeshard id, path id) pair that is unique within the whole system
+message TDescribePath {
+ optional string Path = 1;
+ optional uint64 PathId = 2;
+ optional uint64 SchemeshardId = 3;
optional bool ReturnPartitioningInfo = 4 [default = true]; // deprecated
optional bool ReturnPartitionConfig = 5 [default = true]; // deprecated
optional bool BackupInfo = 6 [default = false]; // deprecated
optional TDescribeOptions Options = 7;
-}
-
+}
+
// Must be sync with Ydb::EntryType
-enum EPathType {
+enum EPathType {
EPathTypeInvalid = 0;
- EPathTypeDir = 1;
- EPathTypeTable = 2;
+ EPathTypeDir = 1;
+ EPathTypeTable = 2;
EPathTypePersQueueGroup = 3;
EPathTypeSubDomain = 4;
EPathTypeRtmrVolume = 5;
@@ -1326,8 +1326,8 @@ enum EPathType {
EPathTypeCdcStream = 14;
EPathTypeSequence = 15;
EPathTypeReplication = 16;
-}
-
+}
+
enum EPathSubType {
EPathSubTypeEmpty = 0;
EPathSubTypeSyncIndexImplTable = 1;
@@ -1341,7 +1341,7 @@ enum EPathState {
EPathStateCreate = 3;
EPathStateAlter = 4;
EPathStateDrop = 5;
- EPathStateCopying = 6;
+ EPathStateCopying = 6;
EPathStateBackup = 7;
EPathStateUpgrade = 8;
EPathStateMigrated = 9;
@@ -1378,15 +1378,15 @@ message TPathVersion {
optional uint64 ReplicationVersion = 25;
}
-// Describes single path
-message TDirEntry {
- optional string Name = 1;
- optional uint64 PathId = 2;
+// Describes single path
+message TDirEntry {
+ optional string Name = 1;
+ optional uint64 PathId = 2;
optional uint64 SchemeshardId = 3; // PathOwnerId
- optional EPathType PathType = 4;
- optional bool CreateFinished = 5;
- optional uint64 CreateTxId = 6;
- optional uint64 CreateStep = 7;
+ optional EPathType PathType = 4;
+ optional bool CreateFinished = 5;
+ optional uint64 CreateTxId = 6;
+ optional uint64 CreateStep = 7;
optional uint64 ParentPathId = 8; // parentPathOwnerId ?
optional EPathState PathState = 9;
optional string Owner = 10;
@@ -1397,16 +1397,16 @@ message TDirEntry {
optional TPathVersion Version = 15;
optional uint64 BalancerTabletID = 999; //temporary optimization for old PQ read/write protocol. Must be removed later
-}
-
-// Describes single partition (range or point) of a table
-message TTablePartition {
- optional bytes EndOfRangeKeyPrefix = 1; // Serialize/deserialize using TSerializedCellVec
- optional bool IsPoint = 2;
- optional bool IsInclusive = 3;
- optional uint64 DatashardId = 4;
-}
-
+}
+
+// Describes single partition (range or point) of a table
+message TTablePartition {
+ optional bytes EndOfRangeKeyPrefix = 1; // Serialize/deserialize using TSerializedCellVec
+ optional bool IsPoint = 2;
+ optional bool IsInclusive = 3;
+ optional uint64 DatashardId = 4;
+}
+
message TShardError {
optional uint64 ShardId = 1;
optional string Explain = 2;
@@ -1433,26 +1433,26 @@ message TLastBackupResult {
optional uint64 TxId = 7;
};
-// Result for TDescribePath request
-message TPathDescription {
- optional TDirEntry Self = 1; // info about the path itself
- repeated TDirEntry Children = 2; // for directory
- optional TTableDescription Table = 3; // for table
- repeated TTablePartition TablePartitions = 4; // for table
+// Result for TDescribePath request
+message TPathDescription {
+ optional TDirEntry Self = 1; // info about the path itself
+ repeated TDirEntry Children = 2; // for directory
+ optional TTableDescription Table = 3; // for table
+ repeated TTablePartition TablePartitions = 4; // for table
optional TPersQueueGroupDescription PersQueueGroup = 5; // for pq group
optional TBackupProgress BackupProgress = 6;
repeated TLastBackupResult LastBackupResult = 7;
- optional NKikimrTableStats.TTableStats TableStats = 8;
+ optional NKikimrTableStats.TTableStats TableStats = 8;
optional NKikimrTabletBase.TMetrics TabletMetrics = 9;
optional NKikimrSubDomains.TDomainDescription DomainDescription = 10;
optional TRtmrVolumeDescription RtmrVolumeDescription = 11; // for rtmr volume
optional TBlockStoreVolumeDescription BlockStoreVolumeDescription = 12;
optional TKesusDescription Kesus = 13;
optional TSolomonVolumeDescription SolomonDescription = 14;
- repeated NKikimrTableStats.TTableStats TablePartitionStats = 15;
+ repeated NKikimrTableStats.TTableStats TablePartitionStats = 15;
repeated TUserAttribute UserAttributes = 16;
optional TIndexDescription TableIndex = 17;
- repeated NKikimrTabletBase.TMetrics TablePartitionMetrics = 18;
+ repeated NKikimrTabletBase.TMetrics TablePartitionMetrics = 18;
repeated uint64 AbandonedTenantsSchemeShards = 19;
optional TFileStoreDescription FileStoreDescription = 20;
optional TColumnStoreDescription ColumnStoreDescription = 21;
@@ -1460,12 +1460,12 @@ message TPathDescription {
optional TCdcStreamDescription CdcStreamDescription = 23;
optional TSequenceDescription SequenceDescription = 24;
optional TReplicationDescription ReplicationDescription = 25;
-}
-
-// For persisting AlterTable Tx description in Schemeshard internal DB
-message TAlterExtraData {
- optional TPartitionConfig PartitionConfig = 1;
-}
+}
+
+// For persisting AlterTable Tx description in Schemeshard internal DB
+message TAlterExtraData {
+ optional TPartitionConfig PartitionConfig = 1;
+}
message TResourceProfile {
// Here is how tablet resource profile with specified NAME and TYPE is searched:
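As a concrete illustration of the scheme-operation messages changed above, a request that creates a directory can be written as a TModifyScheme instance in protobuf text format. Only fields visible in this diff (WorkingDir, OperationType, MkDir.Name) are used; the working directory and directory name are hypothetical.

    # Hypothetical TModifyScheme (protobuf text format, illustrative path and name)
    WorkingDir: "/Root/db1"
    OperationType: ESchemeOpMkDir
    MkDir {
        Name: "backups"
    }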
diff --git a/ydb/core/protos/flat_tx_scheme.proto b/ydb/core/protos/flat_tx_scheme.proto
index 0f466938dd3..dc06523e5cc 100644
--- a/ydb/core/protos/flat_tx_scheme.proto
+++ b/ydb/core/protos/flat_tx_scheme.proto
@@ -4,31 +4,31 @@ import "ydb/core/protos/subdomains.proto";
import "ydb/core/protos/bind_channel_storage_pool.proto";
import "ydb/core/protos/flat_scheme_op.proto";
import "ydb/public/api/protos/ydb_cms.proto";
-
+
package NKikimrScheme;
-option java_package = "ru.yandex.kikimr.proto";
-
-message TSchemeConfig {
- optional uint64 PipeClientCachePoolLimit = 1;
-}
-
-message TConfig {
- optional TSchemeConfig Current = 1;
- //optional NKikimrTxDataShard.TConfig DefaultDataShardConfig = 2;
- optional TSchemeConfig DefaultSchemeShardConfig = 3;
-}
-
-enum EStatus {
- StatusSuccess = 0;
- StatusAccepted = 1;
- StatusPathDoesNotExist = 2;
- StatusPathIsNotDirectory = 3;
- StatusAlreadyExists = 4;
- StatusSchemeError = 5;
- StatusNameConflict = 6;
+option java_package = "ru.yandex.kikimr.proto";
+
+message TSchemeConfig {
+ optional uint64 PipeClientCachePoolLimit = 1;
+}
+
+message TConfig {
+ optional TSchemeConfig Current = 1;
+ //optional NKikimrTxDataShard.TConfig DefaultDataShardConfig = 2;
+ optional TSchemeConfig DefaultSchemeShardConfig = 3;
+}
+
+enum EStatus {
+ StatusSuccess = 0;
+ StatusAccepted = 1;
+ StatusPathDoesNotExist = 2;
+ StatusPathIsNotDirectory = 3;
+ StatusAlreadyExists = 4;
+ StatusSchemeError = 5;
+ StatusNameConflict = 6;
StatusInvalidParameter = 7;
StatusMultipleModifications = 8;
- StatusReadOnly = 9;
+ StatusReadOnly = 9;
StatusTxIdNotExists = 10;
StatusTxIsNotCancellable = 11;
StatusAccessDenied = 12;
@@ -42,31 +42,31 @@ enum EStatus {
// when adding a new status and keeping parse compatibility with the old version
// rename existing reserved status to desired one, and add new reserved status to
// the end of reserved statuses
-}
-
-message TEvModifySchemeTransaction {
+}
+
+message TEvModifySchemeTransaction {
repeated NKikimrSchemeOp.TModifyScheme Transaction = 1;
- optional uint64 TxId = 2;
- optional uint64 TabletId = 3;
+ optional uint64 TxId = 2;
+ optional uint64 TabletId = 3;
optional string Owner = 5;
optional bool FailOnExist = 6; // depricated, TModifyScheme.FailOnExist is recomended
- optional string UserToken = 7; // serialized NACLib::TUserToken
-}
-
-message TEvModifySchemeTransactionResult {
+ optional string UserToken = 7; // serialized NACLib::TUserToken
+}
+
+message TEvModifySchemeTransactionResult {
optional EStatus Status = 1;
optional string Reason = 2;
- optional uint64 TxId = 3;
- optional uint64 SchemeshardId = 4;
+ optional uint64 TxId = 3;
+ optional uint64 SchemeshardId = 4;
optional uint64 PathId = 5;
optional uint64 PathCreateTxId = 6;
optional uint64 PathDropTxId = 7;
-}
-
-message TEvDescribeSchemeResult {
+}
+
+message TEvDescribeSchemeResult {
optional EStatus Status = 1;
optional string Reason = 2;
- optional string Path = 3;
+ optional string Path = 3;
optional NKikimrSchemeOp.TPathDescription PathDescription = 4;
optional fixed64 PathOwner = 5;
optional fixed64 PathId = 6;
@@ -75,8 +75,8 @@ message TEvDescribeSchemeResult {
optional fixed64 LastExistedPrefixPathId = 8;
optional NKikimrSchemeOp.TPathDescription LastExistedPrefixDescription = 9;
optional fixed64 PathOwnerId = 10;
-}
-
+}
+
message TEvCancelTx {
optional uint64 TargetTxId = 1;
optional string Path = 2;
@@ -91,17 +91,17 @@ message TEvCancelTxResult {
optional uint64 TxId = 4;
}
-message TEvUpdateConfig {
+message TEvUpdateConfig {
optional NActorsProto.TActorId Source = 1;
- optional TConfig Config = 2;
-}
-
-message TEvUpdateConfigResult {
- optional uint64 Origin = 1;
- optional NKikimrProto.EReplyStatus Status = 2;
+ optional TConfig Config = 2;
+}
+
+message TEvUpdateConfigResult {
+ optional uint64 Origin = 1;
+ optional NKikimrProto.EReplyStatus Status = 2;
optional string Reason = 3;
-}
-
+}
+
message TEvLogin {
optional string User = 1;
optional string Password = 2;
@@ -112,18 +112,18 @@ message TEvLoginResult {
optional string Token = 2;
}
-// Sending actor registers itself to be notified when tx completes
-message TEvNotifyTxCompletion {
- optional uint64 TxId = 1;
-}
-
-message TEvNotifyTxCompletionRegistered {
- optional uint64 TxId = 1;
-}
-
-message TEvNotifyTxCompletionResult {
- optional uint64 TxId = 1;
-}
+// Sending actor registers itself to be notified when tx completes
+message TEvNotifyTxCompletion {
+ optional uint64 TxId = 1;
+}
+
+message TEvNotifyTxCompletionRegistered {
+ optional uint64 TxId = 1;
+}
+
+message TEvNotifyTxCompletionResult {
+ optional uint64 TxId = 1;
+}
message TSchemeLimits {
optional uint64 MaxDepth = 1;
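A TModifyScheme like the one sketched in the previous file section is carried to the schemeshard inside the TEvModifySchemeTransaction event shown above; a minimal text-format sketch follows, where the transaction and tablet identifiers are illustrative placeholders rather than real values.

    # Hypothetical TEvModifySchemeTransaction (protobuf text format, illustrative ids)
    Transaction {
        WorkingDir: "/Root/db1"
        OperationType: ESchemeOpMkDir
        MkDir { Name: "backups" }
    }
    TxId: 100500                  # illustrative
    TabletId: 72057594046678944   # illustrative schemeshard tablet id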
diff --git a/ydb/core/protos/grpc.proto b/ydb/core/protos/grpc.proto
index 678c44b388e..797cc2de829 100644
--- a/ydb/core/protos/grpc.proto
+++ b/ydb/core/protos/grpc.proto
@@ -76,11 +76,11 @@ service TGRpcServer {
rpc SqsRequest(TSqsRequest) returns (TSqsResponse);
/////////////////////////////////////////////////////////////////////////////////////////////////
- // S3 LISTING INTERFACE
- /////////////////////////////////////////////////////////////////////////////////////////////////
- rpc S3Listing(TS3ListingRequest) returns (TS3ListingResponse);
-
- /////////////////////////////////////////////////////////////////////////////////////////////////
+ // S3 LISTING INTERFACE
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+ rpc S3Listing(TS3ListingRequest) returns (TS3ListingResponse);
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
// CONSOLE INTERFACE
/////////////////////////////////////////////////////////////////////////////////////////////////
rpc ConsoleRequest(TConsoleRequest) returns (TConsoleResponse);
diff --git a/ydb/core/protos/kqp.proto b/ydb/core/protos/kqp.proto
index c444546d037..9b46b720e11 100644
--- a/ydb/core/protos/kqp.proto
+++ b/ydb/core/protos/kqp.proto
@@ -155,7 +155,7 @@ message TEvQueryRequest {
message TMkqlProfile {
optional string Query = 1;
- optional NKikimrQueryStats.TTxStats TxStats = 2;
+ optional NKikimrQueryStats.TTxStats TxStats = 2;
};
message TTransformProfile {
diff --git a/ydb/core/protos/minikql_engine.proto b/ydb/core/protos/minikql_engine.proto
index 2d2677d892a..ca3ea7212b5 100644
--- a/ydb/core/protos/minikql_engine.proto
+++ b/ydb/core/protos/minikql_engine.proto
@@ -14,6 +14,6 @@ enum EResult {
SnapshotNotReady = 9;
TooManyRS = 10;
ResultTooBig = 11;
- Cancelled = 12;
+ Cancelled = 12;
};
diff --git a/ydb/core/protos/msgbus.proto b/ydb/core/protos/msgbus.proto
index 4633c7a6bf5..df7cda59800 100644
--- a/ydb/core/protos/msgbus.proto
+++ b/ydb/core/protos/msgbus.proto
@@ -163,7 +163,7 @@ message TResponse {
optional bytes DataShardErrors = 105;
repeated fixed64 ComplainingDataShards = 106;
repeated bytes UnresolvedKeys = 107; // text for each key
- optional NKikimrQueryStats.TTxStats TxStats = 111;
+ optional NKikimrQueryStats.TTxStats TxStats = 111;
optional NKikimrTxUserProxy.TMiniKQLCompileResults MiniKQLCompileResults = 108;
optional bool HadFollowerReads = 109;
@@ -227,12 +227,12 @@ message TResponse {
optional NKikimrTabletBase.TTabletTypes.EType TabletType = 2;
}
repeated TTabletInfo TabletInfo = 1001;
-
- // TTabletLocalSchemeTx
- optional NTabletFlatScheme.TSchemeChanges LocalDbScheme = 1010;
-
- // TTabletCountersRequest
- optional NKikimrTabletBase.TTabletCounters TabletCounters = 1020;
+
+ // TTabletLocalSchemeTx
+ optional NTabletFlatScheme.TSchemeChanges LocalDbScheme = 1010;
+
+ // TTabletCountersRequest
+ optional NKikimrTabletBase.TTabletCounters TabletCounters = 1020;
// TBlobStorageConfigRequest
optional NKikimrBlobStorage.TConfigResponse BlobStorageConfigResponse = 1030;
@@ -252,14 +252,14 @@ message TFakeConfigDummy {
optional uint64 DataShardID = 1;
}
-message TTabletCountersRequest {
- optional uint64 TabletID = 1;
+message TTabletCountersRequest {
+ optional uint64 TabletID = 1;
optional bool ConnectToFollower = 2;
-
- optional bool WithRetry = 10;
- optional uint64 Timeout = 11;
-}
-
+
+ optional bool WithRetry = 10;
+ optional uint64 Timeout = 11;
+}
+
message TLocalMKQL {
message TProgram {
optional bytes Bin = 1;
@@ -281,17 +281,17 @@ message TLocalMKQL {
optional uint64 Timeout = 11;
}
-message TLocalSchemeTx {
- optional uint64 TabletID = 1;
+message TLocalSchemeTx {
+ optional uint64 TabletID = 1;
optional bool ConnectToFollower = 2;
- optional NTabletFlatScheme.TSchemeChanges SchemeChanges = 3;
- optional bool DryRun = 4;
+ optional NTabletFlatScheme.TSchemeChanges SchemeChanges = 3;
+ optional bool DryRun = 4;
optional string SecurityToken = 5;
-
- optional bool WithRetry = 10;
- optional uint64 Timeout = 11;
-}
-
+
+ optional bool WithRetry = 10;
+ optional uint64 Timeout = 11;
+}
+
message TSchemeNavigate {
optional bytes Path = 1;
optional uint32 ReadMaterializedFamily = 2;
@@ -652,27 +652,27 @@ message TSqsResponse {
optional string ResourceId = 32;
optional bool IsFifo = 33;
}
-
-message TS3ListingRequest {
- optional string SecurityToken = 1;
-
- optional string TableName = 2;
- optional NKikimrMiniKQL.TParams KeyPrefix = 3; // A tuple representing all key columns that precede path column
- optional string PathColumnPrefix = 4;
- optional string PathColumnDelimiter = 5;
- optional NKikimrMiniKQL.TParams StartAfterKeySuffix = 9; // A tuple representing key columns that succeed path column
- optional uint32 MaxKeys = 7;
- repeated string ColumnsToReturn = 8;
- optional uint32 Timeout = 10; // millisec
-}
-
-message TS3ListingResponse {
- optional uint32 Status = 1;
- optional string Description = 2;
- optional NKikimrMiniKQL.TResult Result = 3; // Every Contents row starts with key suffix with KeySuffixSize columns
- optional uint32 KeySuffixSize = 4; // Number of key columns starting from path and up to the end
- optional uint32 ErrorCode = 5; // Extended error code from NTXProxy::TResultStatus::EStatus enum
-}
+
+message TS3ListingRequest {
+ optional string SecurityToken = 1;
+
+ optional string TableName = 2;
+ optional NKikimrMiniKQL.TParams KeyPrefix = 3; // A tuple representing all key columns that precede path column
+ optional string PathColumnPrefix = 4;
+ optional string PathColumnDelimiter = 5;
+ optional NKikimrMiniKQL.TParams StartAfterKeySuffix = 9; // A tuple representing key columns that succeed path column
+ optional uint32 MaxKeys = 7;
+ repeated string ColumnsToReturn = 8;
+ optional uint32 Timeout = 10; // millisec
+}
+
+message TS3ListingResponse {
+ optional uint32 Status = 1;
+ optional string Description = 2;
+ optional NKikimrMiniKQL.TResult Result = 3; // Every Contents row starts with key suffix with KeySuffixSize columns
+ optional uint32 KeySuffixSize = 4; // Number of key columns starting from path and up to the end
+ optional uint32 ErrorCode = 5; // Extended error code from NTXProxy::TResultStatus::EStatus enum
+}
message TInterconnectDebug {
optional string Name = 1;
diff --git a/ydb/core/protos/msgbus_pq.proto b/ydb/core/protos/msgbus_pq.proto
index 011be74fb71..849b292383a 100644
--- a/ydb/core/protos/msgbus_pq.proto
+++ b/ydb/core/protos/msgbus_pq.proto
@@ -154,7 +154,7 @@ message TPersQueueMetaRequest {
}
message TCmdDeleteTopic {
- optional string Topic = 1; //mandatory
+ optional string Topic = 1; //mandatory
}
message TCmdGetTopicMetadata {
diff --git a/ydb/core/protos/query_stats.proto b/ydb/core/protos/query_stats.proto
index 29c9bc6a390..453b3034013 100644
--- a/ydb/core/protos/query_stats.proto
+++ b/ydb/core/protos/query_stats.proto
@@ -1,51 +1,51 @@
-package NKikimrQueryStats;
-option java_package = "ru.yandex.kikimr.proto";
-
-message TReadOpStats {
- optional uint64 Count = 1;
- optional uint64 Rows = 2;
- optional uint64 Bytes = 3;
- // TODO: optional uint64 PagesFromCache = 4;
- // TODO: optional uint64 PagesFromDisk = 5;
-}
-
-message TWriteOpStats {
- optional uint64 Count = 1;
- optional uint64 Rows = 2;
- optional uint64 Bytes = 3;
-}
-
-message TTableInfo {
- optional uint64 SchemeshardId = 1;
- optional uint64 PathId = 2;
- optional string Name = 3;
-}
-
-message TTableAccessStats {
- optional TTableInfo TableInfo = 1;
-
- optional TReadOpStats SelectRow = 2;
- optional TReadOpStats SelectRange = 3;
- optional TWriteOpStats UpdateRow = 4;
- optional TWriteOpStats EraseRow = 5;
+package NKikimrQueryStats;
+option java_package = "ru.yandex.kikimr.proto";
+
+message TReadOpStats {
+ optional uint64 Count = 1;
+ optional uint64 Rows = 2;
+ optional uint64 Bytes = 3;
+ // TODO: optional uint64 PagesFromCache = 4;
+ // TODO: optional uint64 PagesFromDisk = 5;
+}
+
+message TWriteOpStats {
+ optional uint64 Count = 1;
+ optional uint64 Rows = 2;
+ optional uint64 Bytes = 3;
+}
+
+message TTableInfo {
+ optional uint64 SchemeshardId = 1;
+ optional uint64 PathId = 2;
+ optional string Name = 3;
+}
+
+message TTableAccessStats {
+ optional TTableInfo TableInfo = 1;
+
+ optional TReadOpStats SelectRow = 2;
+ optional TReadOpStats SelectRange = 3;
+ optional TWriteOpStats UpdateRow = 4;
+ optional TWriteOpStats EraseRow = 5;
optional uint64 ShardCount = 6;
-}
-
-message TPerShardStats {
- optional uint64 ShardId = 1;
- optional uint64 CpuTimeUsec = 2;
+}
+
+message TPerShardStats {
+ optional uint64 ShardId = 1;
+ optional uint64 CpuTimeUsec = 2;
optional uint64 OutgoingReadSetsCount = 3;
optional uint64 ProgramSize = 4;
optional uint64 ReplySize = 5;
-}
-
-message TTxStats {
- repeated TTableAccessStats TableAccessStats = 1;
- optional uint64 DurationUs = 2;
+}
+
+message TTxStats {
+ repeated TTableAccessStats TableAccessStats = 1;
+ optional uint64 DurationUs = 2;
optional uint64 ComputeCpuTimeUsec = 3;
- repeated TPerShardStats PerShardStats = 4;
-
- // TODO:
- // optional uint64 CommitLatencyUsec = 3;
- // optional uint64 LogBytesWritten = 4;
-}
+ repeated TPerShardStats PerShardStats = 4;
+
+ // TODO:
+ // optional uint64 CommitLatencyUsec = 3;
+ // optional uint64 LogBytesWritten = 4;
+}
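
For reference, a minimal sketch of consuming the TTxStats message defined above through the generated C++ API; the include path and the lower-cased accessor names follow standard protoc conventions and are assumptions, not something shown in this diff:

#include <ydb/core/protos/query_stats.pb.h> // assumed protoc-generated header path

#include <cstdint>
#include <iostream>

// Sums the CPU time reported by every shard that participated in a transaction.
uint64_t TotalShardCpuUsec(const NKikimrQueryStats::TTxStats& stats) {
    uint64_t total = 0;
    for (const auto& shard : stats.pershardstats()) {
        total += shard.cputimeusec();
    }
    return total;
}

int main() {
    NKikimrQueryStats::TTxStats stats;
    stats.set_durationus(2300);
    auto* shard = stats.add_pershardstats();
    shard->set_shardid(72075186224037888ull);
    shard->set_cputimeusec(1500);

    std::cout << "total shard cpu: " << TotalShardCpuUsec(stats) << " usec\n";
    return 0;
}
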
diff --git a/ydb/core/protos/scheme_log.proto b/ydb/core/protos/scheme_log.proto
index 51cbeaea0ab..fcf4ba52b94 100644
--- a/ydb/core/protos/scheme_log.proto
+++ b/ydb/core/protos/scheme_log.proto
@@ -1,5 +1,5 @@
import "ydb/core/protos/flat_scheme_op.proto";
-
+
package NTabletFlatScheme;
option java_package = "ru.yandex.kikimr.proto";
@@ -13,7 +13,7 @@ message TAlterRecord {
AddColumnToFamily = 6;
AddFamily = 7;
UpdateExecutorInfo = 8;
- SetCompactionPolicy = 9;
+ SetCompactionPolicy = 9;
SetRoom = 10;
SetFamily = 11;
SetRedo = 12;
diff --git a/ydb/core/protos/services.proto b/ydb/core/protos/services.proto
index 4adb0a44642..c17c8a7dc37 100644
--- a/ydb/core/protos/services.proto
+++ b/ydb/core/protos/services.proto
@@ -62,14 +62,14 @@ enum EServiceKikimr {
// SCHEMESHARD section
SCHEMESHARD_DESCRIBE = 295;
- FLAT_TX_SCHEMESHARD = 296;
+ FLAT_TX_SCHEMESHARD = 296;
SHEME_SHARD = 700; // stillborn tag
// OLAP section
TX_OLAPSHARD = 331;
TX_COLUMNSHARD = 332;
- BLOB_CACHE = 333;
- TX_COLUMNSHARD_SCAN = 334;
+ BLOB_CACHE = 333;
+ TX_COLUMNSHARD_SCAN = 334;
// BLOBSTORAGE again
BS_HANDOFF = 298;
@@ -161,7 +161,7 @@ enum EServiceKikimr {
// PERSQUEUE section
PERSQUEUE = 440;
- PQ_METACACHE = 441;
+ PQ_METACACHE = 441;
PQ_READ_PROXY = 442;
PQ_WRITE_PROXY = 443;
PQ_MIRRORER = 446;
@@ -304,7 +304,7 @@ enum EServiceKikimr {
// Replication
REPLICATION_CONTROLLER = 1200;
};
-
+
message TActivity {
// Must be consistent with IActor::EActorActivity in ActorLib's part.
@@ -790,10 +790,10 @@ message TActivity {
IMPORT_S3_DOWNLOADER_ACTOR = 475;
MEMORY_TRACKER = 476;
TX_COLUMNSHARD_ACTOR = 477;
- TX_COLUMNSHARD_WRITE_ACTOR = 496;
- TX_COLUMNSHARD_READ_ACTOR = 497;
- TX_COLUMNSHARD_INDEXING_ACTOR = 498;
- TX_COLUMNSHARD_COMPACTION_ACTOR = 499;
+ TX_COLUMNSHARD_WRITE_ACTOR = 496;
+ TX_COLUMNSHARD_READ_ACTOR = 497;
+ TX_COLUMNSHARD_INDEXING_ACTOR = 498;
+ TX_COLUMNSHARD_COMPACTION_ACTOR = 499;
SCHEME_BOARD_MONITORING_ACTOR = 478;
SCHEME_BOARD_INFO_REQUESTER_ACTOR = 479;
METERING_WRITER_ACTOR = 480;
@@ -808,7 +808,7 @@ message TActivity {
KQP_LITERAL_EXECUTER_ACTOR = 489;
FILESTORE_SERVICE_PROXY = 490;
TX_DATASHARD_SOURCE_OFFSETS_SERVER = 491;
- KQP_OLAP_SCAN = 492;
+ KQP_OLAP_SCAN = 492;
KQP_DATA_EXECUTER_ACTOR = 493;
KQP_NODE_SERVICE = 494;
TX_DATASHARD_SOURCE_OFFSETS_CLIENT = 500;
@@ -822,7 +822,7 @@ message TActivity {
YQL_RUN_ACTOR = 517;
YQL_GET_HISTORY_REQUEST_ACTOR = 518;
YQL_GET_RESULT_DATA_ACTOR = 519;
- BLOB_CACHE_ACTOR = 520;
+ BLOB_CACHE_ACTOR = 520;
MONITORING_SERVICE = 521;
MONITORING_REQUEST = 522;
ACTOR_SERVICE_CACHE = 523;
@@ -868,4 +868,4 @@ message TActivity {
REPLICATION_CONTROLLER_DST_CREATOR = 562;
BLOCKSTORE_STATS_SERVICE = 563;
};
-};
+};
diff --git a/ydb/core/protos/table_stats.proto b/ydb/core/protos/table_stats.proto
index 9028707883f..a555e0b659d 100644
--- a/ydb/core/protos/table_stats.proto
+++ b/ydb/core/protos/table_stats.proto
@@ -1,46 +1,46 @@
-package NKikimrTableStats;
+package NKikimrTableStats;
option java_package = "ru.yandex.kikimr.proto";
-message THistogramBucket {
- optional bytes Key = 1;
- optional uint64 Value = 2;
-}
-
-message THistogram {
- repeated THistogramBucket Buckets = 1;
-}
-
-message TTableStats {
- optional uint64 DataSize = 1;
- optional uint64 RowCount = 2;
- optional uint64 IndexSize = 3;
- optional uint64 InMemSize = 4;
-
- optional uint64 LastAccessTime = 5; // unix time in millisec
- optional uint64 LastUpdateTime = 6; // unix time in millisec
-
- optional THistogram RowCountHistogram = 7;
- optional THistogram DataSizeHistogram = 8;
-
- optional uint64 ImmediateTxCompleted = 9;
- optional uint64 PlannedTxCompleted = 10;
- optional uint64 TxRejectedByOverload = 11;
- optional uint64 TxRejectedBySpace = 12;
- optional uint64 TxCompleteLagMsec = 13;
- optional uint64 InFlightTxCount = 14;
-
- optional uint64 RowUpdates = 20;
- optional uint64 RowDeletes = 21;
- optional uint64 RowReads = 22;
- optional uint64 RangeReads = 23;
- optional uint64 RangeReadRows = 25;
-
- optional uint64 PartCount = 24;
-
- optional THistogram KeyAccessSample = 26;
+message THistogramBucket {
+ optional bytes Key = 1;
+ optional uint64 Value = 2;
+}
+
+message THistogram {
+ repeated THistogramBucket Buckets = 1;
+}
+
+message TTableStats {
+ optional uint64 DataSize = 1;
+ optional uint64 RowCount = 2;
+ optional uint64 IndexSize = 3;
+ optional uint64 InMemSize = 4;
+
+ optional uint64 LastAccessTime = 5; // unix time in millisec
+ optional uint64 LastUpdateTime = 6; // unix time in millisec
+
+ optional THistogram RowCountHistogram = 7;
+ optional THistogram DataSizeHistogram = 8;
+
+ optional uint64 ImmediateTxCompleted = 9;
+ optional uint64 PlannedTxCompleted = 10;
+ optional uint64 TxRejectedByOverload = 11;
+ optional uint64 TxRejectedBySpace = 12;
+ optional uint64 TxCompleteLagMsec = 13;
+ optional uint64 InFlightTxCount = 14;
+
+ optional uint64 RowUpdates = 20;
+ optional uint64 RowDeletes = 21;
+ optional uint64 RowReads = 22;
+ optional uint64 RangeReads = 23;
+ optional uint64 RangeReadRows = 25;
+
+ optional uint64 PartCount = 24;
+
+ optional THistogram KeyAccessSample = 26;
optional uint64 SearchHeight = 27;
// seconds since epoch
optional uint64 LastFullCompactionTs = 28;
-}
+}
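
Similarly, a small sketch of filling the TTableStats / THistogram messages above via the generated C++ accessors (again assuming the usual protoc header path and naming):

#include <ydb/core/protos/table_stats.pb.h> // assumed protoc-generated header path

#include <iostream>

int main() {
    NKikimrTableStats::TTableStats stats;
    stats.set_rowcount(1000000);
    stats.set_datasize(256ull << 20); // bytes
    stats.set_partcount(12);

    // THistogram is a flat list of (Key, Value) buckets.
    auto* bucket = stats.mutable_datasizehistogram()->add_buckets();
    bucket->set_key("user_42"); // Key is a bytes field, so any raw key prefix fits
    bucket->set_value(64ull << 20);

    if (stats.rowcount() > 0) {
        std::cout << "avg row size: " << stats.datasize() / stats.rowcount() << " bytes\n";
    }
    return 0;
}
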
diff --git a/ydb/core/protos/tablet_database.proto b/ydb/core/protos/tablet_database.proto
index f4a559dd09a..a6658688a66 100644
--- a/ydb/core/protos/tablet_database.proto
+++ b/ydb/core/protos/tablet_database.proto
@@ -54,20 +54,20 @@ message TExecutorSettings {
optional TRuntimeSettings RuntimeSettings = 2;
}
-message TCompactionBroker {
+message TCompactionBroker {
message TQueueType {
- optional uint32 QueueID = 1;
- optional uint32 Quota = 2;
- }
-
+ optional uint32 QueueID = 1;
+ optional uint32 Quota = 2;
+ }
+
repeated TQueueType Queue = 1;
-}
-
-message TEvSetCompactionBrokerConfig {
- optional TCompactionBroker Config = 1;
-}
-
-message TEvSetCompactionBrokerConfigResult {
- optional NKikimrProto.EReplyStatus Status = 1;
- optional bytes Reason = 2;
-}
+}
+
+message TEvSetCompactionBrokerConfig {
+ optional TCompactionBroker Config = 1;
+}
+
+message TEvSetCompactionBrokerConfigResult {
+ optional NKikimrProto.EReplyStatus Status = 1;
+ optional bytes Reason = 2;
+}
diff --git a/ydb/core/protos/tablet_tx.proto b/ydb/core/protos/tablet_tx.proto
index 1d9051e1731..e8db9062114 100644
--- a/ydb/core/protos/tablet_tx.proto
+++ b/ydb/core/protos/tablet_tx.proto
@@ -32,28 +32,28 @@ message TEvLocalSchemeTxResponse {
optional NTabletFlatScheme.TSchemeChanges FullScheme = 4;
}
-
-message TEvLocalReadColumns {
- optional string TableName = 1;
- repeated string Columns = 2;
- optional uint64 MaxRows = 3;
- optional uint64 MaxBytes = 4;
-
- optional bytes FromKey = 5;
- optional bool FromKeyInclusive = 6;
- optional bytes ToKey = 7;
- optional bool ToKeyInclusive = 8;
-
- optional string Format = 9;
-}
-
-message TEvLocalReadColumnsResponse {
- optional uint64 TabletID = 1;
- optional uint32 Status = 2; // Ydb::StatusIds
- optional string ErrorDescription = 3;
-
- optional bytes Blocks = 4; // The data
- optional bytes LastKey = 5; // For continuation
- optional bool LastKeyInclusive = 6;
- optional bool EndOfShard = 7;
-}
+
+message TEvLocalReadColumns {
+ optional string TableName = 1;
+ repeated string Columns = 2;
+ optional uint64 MaxRows = 3;
+ optional uint64 MaxBytes = 4;
+
+ optional bytes FromKey = 5;
+ optional bool FromKeyInclusive = 6;
+ optional bytes ToKey = 7;
+ optional bool ToKeyInclusive = 8;
+
+ optional string Format = 9;
+}
+
+message TEvLocalReadColumnsResponse {
+ optional uint64 TabletID = 1;
+ optional uint32 Status = 2; // Ydb::StatusIds
+ optional string ErrorDescription = 3;
+
+ optional bytes Blocks = 4; // The data
+ optional bytes LastKey = 5; // For continuation
+ optional bool LastKeyInclusive = 6;
+ optional bool EndOfShard = 7;
+}
diff --git a/ydb/core/protos/tx_columnshard.proto b/ydb/core/protos/tx_columnshard.proto
index 84dc5469a56..d407b2cc413 100644
--- a/ydb/core/protos/tx_columnshard.proto
+++ b/ydb/core/protos/tx_columnshard.proto
@@ -260,25 +260,25 @@ message TTtlTxBody {
optional string TtlPresetName = 4;
repeated uint64 PathIds = 5;
}
-
-message TBlobRange {
- optional bytes BlobId = 1;
- optional uint64 Offset = 2;
- optional uint64 Size = 3;
-}
-
-// Read ranges of multiple small blobs directly from Tablet
-message TEvReadBlobRanges {
- repeated TBlobRange BlobRanges = 1;
-}
-
-message TEvReadBlobRangesResult {
- message TResult {
- optional TBlobRange BlobRange = 1;
- optional uint32 Status = 2; // NKikimrProto::EReplyStatus
- optional bytes Data = 3;
- };
-
- optional uint64 TabletId = 1;
- repeated TResult Results = 2;
-}
+
+message TBlobRange {
+ optional bytes BlobId = 1;
+ optional uint64 Offset = 2;
+ optional uint64 Size = 3;
+}
+
+// Read ranges of multiple small blobs directly from Tablet
+message TEvReadBlobRanges {
+ repeated TBlobRange BlobRanges = 1;
+}
+
+message TEvReadBlobRangesResult {
+ message TResult {
+ optional TBlobRange BlobRange = 1;
+ optional uint32 Status = 2; // NKikimrProto::EReplyStatus
+ optional bytes Data = 3;
+ };
+
+ optional uint64 TabletId = 1;
+ repeated TResult Results = 2;
+}
diff --git a/ydb/core/protos/tx_datashard.proto b/ydb/core/protos/tx_datashard.proto
index 14d65f6f47b..89863b25400 100644
--- a/ydb/core/protos/tx_datashard.proto
+++ b/ydb/core/protos/tx_datashard.proto
@@ -1,6 +1,6 @@
-
-option cc_enable_arenas = true;
-
+
+option cc_enable_arenas = true;
+
import "library/cpp/actors/protos/actors.proto";
import "ydb/core/protos/base.proto";
import "ydb/core/protos/kqp.proto";
@@ -21,27 +21,27 @@ import "google/protobuf/empty.proto";
package NKikimrTxDataShard;
option java_package = "ru.yandex.kikimr.proto";
-enum EDatashardState {
- Uninitialized = 0;
- WaitScheme = 1;
- Ready = 2;
- Readonly = 3;
- Offline = 4;
- PreOffline = 5; // Offline but waits for loaned snapshots to be returned and for SchemaChangedResult to be received
+enum EDatashardState {
+ Uninitialized = 0;
+ WaitScheme = 1;
+ Ready = 2;
+ Readonly = 3;
+ Offline = 4;
+ PreOffline = 5; // Offline but waits for loaned snapshots to be returned and for SchemaChangedResult to be received
 Frozen = 6; // Read only transactions are allowed. Scheme modification is forbidden
-
- // Split/Merge Src states
- SplitSrcWaitForNoTxInFlight = 101; // Temporary state: split src waits for all Tx to finish and then starts splitting
- SplitSrcMakeSnapshot = 102;
- SplitSrcSendingSnapshot = 103;
- SplitSrcWaitForPartitioningChanged = 104;
-
- // Split/Merge Dst states
- SplitDstReceivingSnapshot = 201;
-
- Unknown = 0xFFFF;
-}
-
+
+ // Split/Merge Src states
+ SplitSrcWaitForNoTxInFlight = 101; // Temporary state: split src waits for all Tx to finish and then starts splitting
+ SplitSrcMakeSnapshot = 102;
+ SplitSrcSendingSnapshot = 103;
+ SplitSrcWaitForPartitioningChanged = 104;
+
+ // Split/Merge Dst states
+ SplitDstReceivingSnapshot = 201;
+
+ Unknown = 0xFFFF;
+}
+
message TEvGetShardState {
optional NActorsProto.TActorId Source = 1;
}
@@ -62,7 +62,7 @@ message TShardOpResult {
message TEvSchemaChanged {
optional NActorsProto.TActorId Source = 1;
optional uint64 Origin = 2;
- optional uint32 State = 3; // EDatashardState
+ optional uint32 State = 3; // EDatashardState
optional uint64 TxId = 4;
optional uint64 Step = 5;
optional uint32 Generation = 6;
@@ -72,19 +72,19 @@ message TEvSchemaChanged {
message TEvSchemaChangedResult {
optional uint64 TxId = 1;
- // DEPRECATED: optional bool Retry = 2;
+ // DEPRECATED: optional bool Retry = 2;
}
-message TEvStateChanged {
+message TEvStateChanged {
optional NActorsProto.TActorId Source = 1;
- optional uint64 TabletId = 2;
- optional uint32 State = 3; // EDatashardState
-}
-
-message TEvStateChangedResult {
- optional uint64 TabletId = 1;
- optional uint32 State = 2; // EDatashardState
-}
+ optional uint64 TabletId = 2;
+ optional uint32 State = 3; // EDatashardState
+}
+
+message TEvStateChangedResult {
+ optional uint64 TabletId = 1;
+ optional uint32 State = 2; // EDatashardState
+}
message TRWTransaction {
}
@@ -162,7 +162,7 @@ message TKqpTransaction {
optional uint64 SchemaVersion = 3;
// reserved 4
optional string SysViewInfo = 5;
- optional uint32 TableKind = 6; // NKikimr::NKqp::ETableKind
+ optional uint32 TableKind = 6; // NKikimr::NKqp::ETableKind
}
// Data-query task meta
@@ -241,8 +241,8 @@ message TKqpReadset {
}
message TDataTransaction {
- optional TRWTransaction RWTransaction = 1; // DEPRECATED
- optional TROTransaction ROTransaction = 2; // DEPRECATED
+ optional TRWTransaction RWTransaction = 1; // DEPRECATED
+ optional TROTransaction ROTransaction = 2; // DEPRECATED
optional bytes MiniKQL = 3;
optional bool Immediate = 4;
optional fixed64 LockTxId = 5;
@@ -290,27 +290,27 @@ message TSnapshotTransferReadSet {
optional uint64 MvccLowWatermarkTxId = 9;
}
-message TSnapshotTransferInfo {
- optional uint64 Shard = 1;
-};
-
-message TReceiveSnapshot {
+message TSnapshotTransferInfo {
+ optional uint64 Shard = 1;
+};
+
+message TReceiveSnapshot {
optional uint64 TableId_Deprecated = 1;
- repeated TSnapshotTransferInfo ReceiveFrom = 2;
+ repeated TSnapshotTransferInfo ReceiveFrom = 2;
optional TTableId TableId = 3;
-}
-
-message TSendSnapshot {
+}
+
+message TSendSnapshot {
optional uint64 TableId_Deprecated = 1;
- repeated TSnapshotTransferInfo SendTo = 2;
+ repeated TSnapshotTransferInfo SendTo = 2;
optional TTableId TableId = 3;
-}
-
-message TSchemeOpSeqNo {
- optional uint64 Generation = 1;
- optional uint64 Round = 2;
-}
-
+}
+
+message TSchemeOpSeqNo {
+ optional uint64 Generation = 1;
+ optional uint64 Round = 2;
+}
+
message TCreatePersistentSnapshot {
optional uint64 OwnerId = 1;
optional uint64 PathId = 2;
@@ -388,15 +388,15 @@ message TMoveTable {
repeated TRemapIndexPathId ReMapIndexes = 5;
}
-message TFlatSchemeTransaction {
+message TFlatSchemeTransaction {
optional NKikimrSchemeOp.TTableDescription CreateTable = 1;
optional NKikimrSchemeOp.TTableDescription DropTable = 2;
optional NKikimrSchemeOp.TTableDescription AlterTable = 3;
- optional TReceiveSnapshot ReceiveSnapshot = 4;
- optional TSendSnapshot SendSnapshot = 5;
+ optional TReceiveSnapshot ReceiveSnapshot = 4;
+ optional TSendSnapshot SendSnapshot = 5;
optional NKikimrSchemeOp.TBackupTask Backup = 6;
-
- optional TSchemeOpSeqNo SeqNo = 7;
+
+ optional TSchemeOpSeqNo SeqNo = 7;
optional bool ReadOnly = 8;
reserved 9;
@@ -416,8 +416,8 @@ message TFlatSchemeTransaction {
optional TCreateCdcStreamNotice CreateCdcStreamNotice = 18;
optional TAlterCdcStreamNotice AlterCdcStreamNotice = 19;
optional TDropCdcStreamNotice DropCdcStreamNotice = 20;
-}
-
+}
+
message TDistributedEraseTransaction {
message TDependent {
optional uint64 ShardId = 1;
@@ -502,8 +502,8 @@ message TError {
DUPLICATED_SNAPSHOT_POLICY = 11;
MISSING_SNAPSHOT_POLICY = 12;
PROGRAM_ERROR = 13;
- OUT_OF_SPACE = 14;
- READ_SIZE_EXECEEDED = 15;
+ OUT_OF_SPACE = 14;
+ READ_SIZE_EXECEEDED = 15;
SHARD_IS_BLOCKED = 16;
UNKNOWN = 17;
REPLY_SIZE_EXECEEDED = 18;
@@ -548,11 +548,11 @@ message TEvProposeTransactionResult {
BAD_REQUEST = 12;
};
- message TReadSetInfo {
- optional uint64 ShardId = 1;
- optional uint64 Size = 2;
- };
-
+ message TReadSetInfo {
+ optional uint64 ShardId = 1;
+ optional uint64 Size = 2;
+ };
+
optional ETransactionKind TxKind = 1;
optional uint64 Origin = 2;
optional EStatus Status = 3;
@@ -568,15 +568,15 @@ message TEvProposeTransactionResult {
optional uint64 ExecLatency = 13;
optional uint64 ProposeLatency = 14;
repeated TLock TxLocks = 15;
- optional uint64 ReadSize = 16;
- optional uint64 ReplySize = 17;
- repeated TReadSetInfo OutgoingReadSetInfo = 18;
+ optional uint64 ReadSize = 16;
+ optional uint64 ReplySize = 17;
+ repeated TReadSetInfo OutgoingReadSetInfo = 18;
optional TTabletInfo TabletInfo = 19;
// For read table tx result holds offsets in TxResult to cut response
repeated uint32 RowOffsets = 20;
repeated fixed64 DomainCoordinators = 21;
optional uint32 ApiVersion = 22; // Version of TxResult response data
- optional NKikimrQueryStats.TTxStats TxStats = 23;
+ optional NKikimrQueryStats.TTxStats TxStats = 23;
optional uint64 DataSeqNo = 24; // Response data seqno (1, 2, ...)
optional bytes DataLastKey = 25; // Response data last key (for retries)
reserved 26; // optional NKqpProto.TKqpStatsRun KqpRunStats = 26;
@@ -605,58 +605,58 @@ message TAddSnapshotPolicyTransaction {
optional uint64 DeltaStep = 3;
}
-message TEvReturnBorrowedPart {
- optional uint64 FromTabletId = 1;
- repeated NKikimrProto.TLogoBlobID PartMetadata = 2;
-}
-
-message TEvReturnBorrowedPartAck {
- repeated NKikimrProto.TLogoBlobID PartMetadata = 1;
-}
-
-message TKeyRangeToTablet {
- optional bytes KeyRangeBegin = 1;
- optional bytes KeyRangeEnd = 2;
- optional uint64 TabletID = 3; // datashard tabletId
- optional uint64 ShardIdx = 4; // Internal idx of a datashard in schemeshard
-}
-
-message TSplitMergeDescription {
- repeated TKeyRangeToTablet SourceRanges = 1;
- repeated TKeyRangeToTablet DestinationRanges = 2;
-}
-
-// Schemeshard -> Dst datashard
-message TEvInitSplitMergeDestination {
- optional uint64 OperationCookie = 1;
+message TEvReturnBorrowedPart {
+ optional uint64 FromTabletId = 1;
+ repeated NKikimrProto.TLogoBlobID PartMetadata = 2;
+}
+
+message TEvReturnBorrowedPartAck {
+ repeated NKikimrProto.TLogoBlobID PartMetadata = 1;
+}
+
+message TKeyRangeToTablet {
+ optional bytes KeyRangeBegin = 1;
+ optional bytes KeyRangeEnd = 2;
+ optional uint64 TabletID = 3; // datashard tabletId
+ optional uint64 ShardIdx = 4; // Internal idx of a datashard in schemeshard
+}
+
+message TSplitMergeDescription {
+ repeated TKeyRangeToTablet SourceRanges = 1;
+ repeated TKeyRangeToTablet DestinationRanges = 2;
+}
+
+// Schemeshard -> Dst datashard
+message TEvInitSplitMergeDestination {
+ optional uint64 OperationCookie = 1;
optional uint64 SchemeshardTabletId = 2; // OwnerSchemeShardTabletId
- optional TSplitMergeDescription SplitDescription = 3;
+ optional TSplitMergeDescription SplitDescription = 3;
optional NKikimrSubDomains.TProcessingParams ProcessingParams = 4;
optional NKikimrSchemeOp.TTableDescription CreateTable = 5;
optional uint64 SubDomainPathId = 6; // LocalPathId (SchemeshardTabletId is the OwnerId)
-}
-
-message TEvInitSplitMergeDestinationAck {
- optional uint64 OperationCookie = 1;
- optional uint64 TabletId = 2;
-}
-
-// Schemeshard -> Src datashard
-message TEvSplit {
- optional uint64 OperationCookie = 1;
- optional TSplitMergeDescription SplitDescription = 2;
-}
-
-message TEvSplitAck {
- optional uint64 OperationCookie = 1;
- optional uint64 TabletId = 2;
-}
-
-message TTableSnapshot {
+}
+
+message TEvInitSplitMergeDestinationAck {
+ optional uint64 OperationCookie = 1;
+ optional uint64 TabletId = 2;
+}
+
+// Schemeshard -> Src datashard
+message TEvSplit {
+ optional uint64 OperationCookie = 1;
+ optional TSplitMergeDescription SplitDescription = 2;
+}
+
+message TEvSplitAck {
+ optional uint64 OperationCookie = 1;
+ optional uint64 TabletId = 2;
+}
+
+message TTableSnapshot {
optional uint64 TableId = 1; // tableid in term of localDB
- optional bytes SnapshotData = 2;
-}
-
+ optional bytes SnapshotData = 2;
+}
+
message TPersistentSnapshot {
optional uint64 OwnerId = 1;
optional uint64 PathId = 2;
@@ -667,12 +667,12 @@ message TPersistentSnapshot {
optional uint64 TimeoutMs = 7;
}
-// Src datashard -> Dst datashard
-message TEvSplitTransferSnapshot {
- optional uint64 OperationCookie = 1;
- optional uint64 SrcTabletId = 2;
+// Src datashard -> Dst datashard
+message TEvSplitTransferSnapshot {
+ optional uint64 OperationCookie = 1;
+ optional uint64 SrcTabletId = 2;
optional NKikimrSchemeOp.TTableDescription UserTableScheme = 3;
- repeated TTableSnapshot TableSnapshot = 4;
+ repeated TTableSnapshot TableSnapshot = 4;
optional uint64 MinWriteVersionStep = 5;
optional uint64 MinWriteVersionTxId = 6;
repeated TPersistentSnapshot PersistentSnapshots = 7;
@@ -689,148 +689,148 @@ message TEvSplitTransferSnapshot {
// Number of bytes that are in the ReplicationSourceOffsets table
optional uint64 ReplicationSourceOffsetsBytes = 16;
-}
-
-message TEvSplitTransferSnapshotAck {
- optional uint64 TabletId = 1;
- optional uint64 OperationCookie = 2;
-}
-
-// Schemeshard -> Src datashard
-message TEvSplitPartitioningChanged {
+}
+
+message TEvSplitTransferSnapshotAck {
+ optional uint64 TabletId = 1;
+ optional uint64 OperationCookie = 2;
+}
+
+// Schemeshard -> Src datashard
+message TEvSplitPartitioningChanged {
optional NActorsProto.TActorId Sender = 2;
- optional uint64 OperationCookie = 1;
-}
-
-message TEvSplitPartitioningChangedAck {
- optional uint64 OperationCookie = 1;
- optional uint64 TabletId = 2;
-}
+ optional uint64 OperationCookie = 1;
+}
+
+message TEvSplitPartitioningChangedAck {
+ optional uint64 OperationCookie = 1;
+ optional uint64 TabletId = 2;
+}
message TEvCancelBackup {
optional uint64 BackupTxId = 1;
optional uint64 TableId = 2;
}
-
+
message TEvCancelRestore {
optional uint64 RestoreTxId = 1;
optional uint64 TableId = 2;
}
-message TEvGetTableStats {
- optional uint64 TableId = 1;
- optional uint64 DataSizeResolution = 2;
- optional uint64 RowCountResolution = 3;
- optional bool CollectKeySample = 4;
-}
-
-message TEvGetTableStatsResult {
- optional uint64 DatashardId = 1;
+message TEvGetTableStats {
+ optional uint64 TableId = 1;
+ optional uint64 DataSizeResolution = 2;
+ optional uint64 RowCountResolution = 3;
+ optional bool CollectKeySample = 4;
+}
+
+message TEvGetTableStatsResult {
+ optional uint64 DatashardId = 1;
optional uint64 TableLocalId = 2;
- optional NKikimrTableStats.TTableStats TableStats = 3;
+ optional NKikimrTableStats.TTableStats TableStats = 3;
optional NKikimrTabletBase.TMetrics TabletMetrics = 4;
- optional uint32 ShardState = 5;
- repeated uint64 UserTablePartOwners = 6;
- repeated uint64 SysTablesPartOwners = 7;
- optional bool FullStatsReady = 8;
+ optional uint32 ShardState = 5;
+ repeated uint64 UserTablePartOwners = 6;
+ repeated uint64 SysTablesPartOwners = 7;
+ optional bool FullStatsReady = 8;
optional uint64 TableOwnerId = 9;
-}
-
-message TEvPeriodicTableStats {
- optional uint64 DatashardId = 1;
+}
+
+message TEvPeriodicTableStats {
+ optional uint64 DatashardId = 1;
optional uint64 TableLocalId = 2;
-
- optional uint64 Generation = 3;
- optional uint64 Round = 4;
-
- optional NKikimrTableStats.TTableStats TableStats = 5;
+
+ optional uint64 Generation = 3;
+ optional uint64 Round = 4;
+
+ optional NKikimrTableStats.TTableStats TableStats = 5;
optional NKikimrTabletBase.TMetrics TabletMetrics = 6;
-
- optional uint32 ShardState = 7;
- repeated uint64 UserTablePartOwners = 8;
- repeated uint64 SysTablesPartOwners = 9;
+
+ optional uint32 ShardState = 7;
+ repeated uint64 UserTablePartOwners = 8;
+ repeated uint64 SysTablesPartOwners = 9;
optional uint32 NodeId = 10;
optional uint64 StartTime = 11; // milliseconds since epoch
optional uint64 TableOwnerId = 12;
-}
-
-message TEvS3ListingRequest {
- optional uint64 TableId = 1;
- optional bytes SerializedKeyPrefix = 2;
-
- optional string PathColumnPrefix = 3;
- optional string PathColumnDelimiter = 4;
-
- optional bytes SerializedStartAfterKeySuffix = 5;
-
- repeated uint32 ColumnsToReturn = 6;
- optional uint32 MaxKeys = 7;
-
- optional string LastCommonPrefix = 8;
-}
-
-message TEvS3ListingResponse {
- optional uint64 TabletID = 1;
- optional uint32 Status = 2;
- optional string ErrorDescription = 3;
- repeated bytes CommonPrefixesRows = 4; // TSerializedCellVec
- repeated bytes ContentsRows = 5; // TSerializedCellVec
- optional bool MoreRows = 6;
-}
-
-message TSerializedRowColumnsScheme {
- repeated uint32 KeyColumnIds = 1;
- repeated uint32 ValueColumnIds = 2;
-}
-
-// In TSerializedCellVec format
-// Order of cells in KeyColumns and NonKeyColumns is described by TSerializedRowColumnsScheme
-message TSerializedRowToLoad {
- optional bytes KeyColumns = 1;
- optional bytes ValueColumns = 2;
-}
-
-message TEvUploadRowsRequest {
- optional uint64 TableId = 1;
- optional TSerializedRowColumnsScheme RowScheme = 2;
- repeated TSerializedRowToLoad Rows = 3;
- optional uint64 CancelDeadlineMs = 4; // Wallclock timestamp (not duration)
+}
+
+message TEvS3ListingRequest {
+ optional uint64 TableId = 1;
+ optional bytes SerializedKeyPrefix = 2;
+
+ optional string PathColumnPrefix = 3;
+ optional string PathColumnDelimiter = 4;
+
+ optional bytes SerializedStartAfterKeySuffix = 5;
+
+ repeated uint32 ColumnsToReturn = 6;
+ optional uint32 MaxKeys = 7;
+
+ optional string LastCommonPrefix = 8;
+}
+
+message TEvS3ListingResponse {
+ optional uint64 TabletID = 1;
+ optional uint32 Status = 2;
+ optional string ErrorDescription = 3;
+ repeated bytes CommonPrefixesRows = 4; // TSerializedCellVec
+ repeated bytes ContentsRows = 5; // TSerializedCellVec
+ optional bool MoreRows = 6;
+}
+
+message TSerializedRowColumnsScheme {
+ repeated uint32 KeyColumnIds = 1;
+ repeated uint32 ValueColumnIds = 2;
+}
+
+// In TSerializedCellVec format
+// Order of cells in KeyColumns and NonKeyColumns is described by TSerializedRowColumnsScheme
+message TSerializedRowToLoad {
+ optional bytes KeyColumns = 1;
+ optional bytes ValueColumns = 2;
+}
+
+message TEvUploadRowsRequest {
+ optional uint64 TableId = 1;
+ optional TSerializedRowColumnsScheme RowScheme = 2;
+ repeated TSerializedRowToLoad Rows = 3;
+ optional uint64 CancelDeadlineMs = 4; // Wallclock timestamp (not duration)
optional bool WriteToTableShadow = 5;
-}
-
-message TEvUploadRowsResponse {
- optional uint64 TabletID = 1;
- optional uint32 Status = 2;
- optional string ErrorDescription = 3;
-}
-
-message TEvReadColumnsRequest {
- optional uint64 TableId = 1;
- repeated string Columns = 2;
- optional uint64 MaxRows = 3;
- optional uint64 MaxBytes = 4;
-
- optional bytes FromKey = 5;
- optional bool FromKeyInclusive = 6;
+}
+
+message TEvUploadRowsResponse {
+ optional uint64 TabletID = 1;
+ optional uint32 Status = 2;
+ optional string ErrorDescription = 3;
+}
+
+message TEvReadColumnsRequest {
+ optional uint64 TableId = 1;
+ repeated string Columns = 2;
+ optional uint64 MaxRows = 3;
+ optional uint64 MaxBytes = 4;
+
+ optional bytes FromKey = 5;
+ optional bool FromKeyInclusive = 6;
optional uint64 SnapshotStep = 7;
optional uint64 SnapshotTxId = 8;
optional string Format = 9;
-}
-
-message TEvReadColumnsResponse {
- optional uint64 TabletID = 1;
- optional uint32 Status = 2;
- optional string ErrorDescription = 3;
-
- optional bytes Blocks = 4; // The data
- optional bytes LastKey = 5; // For continuation
- optional bool LastKeyInclusive = 6;
+}
+
+message TEvReadColumnsResponse {
+ optional uint64 TabletID = 1;
+ optional uint32 Status = 2;
+ optional string ErrorDescription = 3;
+
+ optional bytes Blocks = 4; // The data
+ optional bytes LastKey = 5; // For continuation
+ optional bool LastKeyInclusive = 6;
optional bool EndOfShard = 7;
-}
+}
message TStatus {
optional Ydb.StatusIds.StatusCode Code = 1;
@@ -857,7 +857,7 @@ message TEvGetInfoResponse {
optional uint64 PathId = 4;
optional NKikimrSchemeOp.TTableDescription Description = 5;
optional TStats Stats = 6;
- optional NKikimrTabletBase.TMetrics Metrics = 7;
+ optional NKikimrTabletBase.TMetrics Metrics = 7;
optional uint64 SchemaVersion = 8;
}
@@ -1144,12 +1144,12 @@ message TEvGetDataHistogramResponse {
repeated string KeyNames = 2;
optional THistogram SizeHistogram = 3;
optional THistogram CountHistogram = 4;
- optional THistogram KeyAccessSample = 5;
+ optional THistogram KeyAccessSample = 5;
}
optional TStatus Status = 1;
repeated TTableHistograms TableHistograms = 2;
-}
+}
message TEvRefreshVolatileSnapshotRequest {
optional uint64 OwnerId = 1;
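
Several events in this file (TEvSchemaChanged, TEvStateChanged, TEvStateChangedResult) carry the shard state as a plain uint32 with an "// EDatashardState" comment rather than as the enum itself. A minimal sketch of mapping the raw value back, assuming the standard protoc-generated enum helpers and header path:

#include <ydb/core/protos/tx_datashard.pb.h> // assumed protoc-generated header path

#include <cstdint>
#include <iostream>

// Turns the raw State field of TEvSchemaChanged/TEvStateChanged into a readable name.
void ReportShardState(uint64_t tabletId, uint32_t rawState) {
    if (!NKikimrTxDataShard::EDatashardState_IsValid(static_cast<int>(rawState))) {
        std::cout << "tablet " << tabletId << ": unknown state " << rawState << "\n";
        return;
    }
    auto state = static_cast<NKikimrTxDataShard::EDatashardState>(rawState);
    std::cout << "tablet " << tabletId << ": "
              << NKikimrTxDataShard::EDatashardState_Name(state) << "\n";
}

int main() {
    ReportShardState(72075186224037890ull, NKikimrTxDataShard::Ready);
    ReportShardState(72075186224037890ull, 9999); // not a defined EDatashardState value
    return 0;
}
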
diff --git a/ydb/core/protos/tx_proxy.proto b/ydb/core/protos/tx_proxy.proto
index 54ee8b79db0..dedd2f9cbb5 100644
--- a/ydb/core/protos/tx_proxy.proto
+++ b/ydb/core/protos/tx_proxy.proto
@@ -154,7 +154,7 @@ message TMiniKQLTransaction {
optional bool FlatMKQL = 52 [default = true];
optional bool LlvmRuntime = 53;
optional uint64 PerShardKeysSizeLimitBytes = 54;
- optional bool CollectStats = 55;
+ optional bool CollectStats = 55;
optional TLimits Limits = 56;
optional uint64 SnapshotStep = 9;
optional uint64 SnapshotTxId = 10;
@@ -171,7 +171,7 @@ message TTransaction {
optional TMiniKQLTransaction MiniKQLTransaction = 4;
optional uint32 Flags = 6; // See ydb/core/tx/tx_datashard.h NKikimr::NTxDataShard::TTxFlags
- optional string UserRequestId = 8;
+ optional string UserRequestId = 8;
optional NKikimrSchemeOp.TModifyScheme ModifyScheme = 10;
repeated NKikimrSchemeOp.TModifyScheme TransactionalModification = 11;
@@ -227,7 +227,7 @@ message TEvProposeTransactionStatus {
optional TMiniKQLCompileResults MiniKQLCompileResults = 17;
optional NKikimrMiniKQL.TResult ExecutionEngineEvaluatedResponse = 18;
- optional NKikimrQueryStats.TTxStats TxStats = 19;
+ optional NKikimrQueryStats.TTxStats TxStats = 19;
optional TTxProxyTimings Timings = 20;
optional bool HadFollowerReads = 21;
diff --git a/ydb/core/protos/ya.make b/ydb/core/protos/ya.make
index 9b099116fb5..70bb65514cd 100644
--- a/ydb/core/protos/ya.make
+++ b/ydb/core/protos/ya.make
@@ -57,8 +57,8 @@ SRCS(
database_basic_sausage_metainfo.proto
drivemodel.proto
export.proto
- flat_tx_scheme.proto
- flat_scheme_op.proto
+ flat_tx_scheme.proto
+ flat_scheme_op.proto
health.proto
hive.proto
http_config.proto
@@ -82,10 +82,10 @@ SRCS(
node_broker.proto
node_limits.proto
profiler.proto
- query_stats.proto
+ query_stats.proto
replication.proto
resource_broker.proto
- scheme_log.proto
+ scheme_log.proto
scheme_type_metadata.proto
scheme_type_operation.proto
serverless_proxy_config.proto
@@ -97,7 +97,7 @@ SRCS(
statestorage.proto
stream.proto
subdomains.proto
- table_stats.proto
+ table_stats.proto
tablet.proto
tablet_counters_aggregator.proto
tablet_counters.proto
diff --git a/ydb/core/scheme/scheme_tablecell.cpp b/ydb/core/scheme/scheme_tablecell.cpp
index 89fbf31f85f..c9ffaccc8e9 100644
--- a/ydb/core/scheme/scheme_tablecell.cpp
+++ b/ydb/core/scheme/scheme_tablecell.cpp
@@ -1,10 +1,10 @@
#include <ydb/core/scheme/scheme_tablecell.h>
#include <ydb/core/scheme/scheme_type_registry.h>
-
-#include <util/string/escape.h>
-
-namespace NKikimr {
-
+
+#include <util/string/escape.h>
+
+namespace NKikimr {
+
void TOwnedCellVec::TData::operator delete(void* mem) {
::free(mem);
}
@@ -62,12 +62,12 @@ TOwnedCellVec::TInit TOwnedCellVec::Allocate(TOwnedCellVec::TCellVec cells) {
TString DbgPrintCell(const TCell& r, NScheme::TTypeId typeId, const NScheme::TTypeRegistry &reg) {
NScheme::ITypeSP t = reg.GetType(typeId);
-
- if (!t.IsKnownType())
- return Sprintf("Unknown typeId 0x%x", (ui32)typeId);
-
+
+ if (!t.IsKnownType())
+ return Sprintf("Unknown typeId 0x%x", (ui32)typeId);
+
TString res = t->GetName();
- res += " : ";
+ res += " : ";
DbgPrintValue(res, r, typeId);
@@ -110,18 +110,18 @@ void DbgPrintValue(TString &res, const TCell &r, ui32 type) {
res += EscapeC(r.Data(), r.Size());
}
}
-}
-
+}
+
TString DbgPrintTuple(const TDbTupleRef& row, const NScheme::TTypeRegistry& typeRegistry) {
TString res = "(";
- for (ui32 i = 0; i < row.ColumnCount; ++i) {
- res += DbgPrintCell(row.Columns[i], row.Types[i], typeRegistry);
- if (i < row.ColumnCount-1)
- res += ", ";
- }
- res += ")";
- return res;
-}
-
-} // namespace NKikimr
-
+ for (ui32 i = 0; i < row.ColumnCount; ++i) {
+ res += DbgPrintCell(row.Columns[i], row.Types[i], typeRegistry);
+ if (i < row.ColumnCount-1)
+ res += ", ";
+ }
+ res += ")";
+ return res;
+}
+
+} // namespace NKikimr
+
diff --git a/ydb/core/scheme/scheme_tablecell.h b/ydb/core/scheme/scheme_tablecell.h
index b2f36ab838d..bccfdca97db 100644
--- a/ydb/core/scheme/scheme_tablecell.h
+++ b/ydb/core/scheme/scheme_tablecell.h
@@ -1,6 +1,6 @@
-#pragma once
+#pragma once
-#include "defs.h"
+#include "defs.h"
#include "scheme_type_id.h"
#include "scheme_type_order.h"
#include "scheme_types_defs.h"
@@ -9,55 +9,55 @@
#include <util/system/unaligned_mem.h>
#include <type_traits>
-
-namespace NKikimr {
-
-#pragma pack(push,4)
-// Represents one element in a tuple
-// Doesn't own the memory buffer that stores the actual value
-// Small values (<= 8 bytes) are stored inline
-struct TCell {
+
+namespace NKikimr {
+
+#pragma pack(push,4)
+// Represents one element in a tuple
+// Doesn't own the memory buffer that stores the actual value
+// Small values (<= 8 bytes) are stored inline
+struct TCell {
template<typename T>
using TStdLayout = std::enable_if_t<std::is_standard_layout<T>::value, T>;
-private:
- ui32 DataSize_ : 30;
- ui32 IsInline_ : 1;
- ui32 IsNull_ : 1;
- union {
- i64 IntVal;
- const char* Ptr;
- double DoubleVal;
- float FloatVal;
- char Bytes[8];
- };
-
-public:
- TCell()
- : TCell(nullptr, 0)
- {}
-
+private:
+ ui32 DataSize_ : 30;
+ ui32 IsInline_ : 1;
+ ui32 IsNull_ : 1;
+ union {
+ i64 IntVal;
+ const char* Ptr;
+ double DoubleVal;
+ float FloatVal;
+ char Bytes[8];
+ };
+
+public:
+ TCell()
+ : TCell(nullptr, 0)
+ {}
+
TCell(TArrayRef<const char> ref)
: TCell(ref.begin(), ui32(ref.size()))
{
- Y_VERIFY(ref.size() < Max<ui32>(), " Too large blob size for TCell");
+ Y_VERIFY(ref.size() < Max<ui32>(), " Too large blob size for TCell");
}
- TCell(const char* ptr, ui32 sz)
- : DataSize_(sz)
- , IsInline_(0)
- , IsNull_(ptr == nullptr)
- , Ptr(ptr)
- {
+ TCell(const char* ptr, ui32 sz)
+ : DataSize_(sz)
+ , IsInline_(0)
+ , IsNull_(ptr == nullptr)
+ , Ptr(ptr)
+ {
Y_VERIFY_DEBUG(ptr || sz == 0);
- if (CanInline(sz)) {
- IsInline_ = 1;
- IntVal = 0;
+ if (CanInline(sz)) {
+ IsInline_ = 1;
+ IntVal = 0;
if (ptr)
memcpy(&IntVal, ptr, sz);
- }
- }
-
+ }
+ }
+
explicit TCell(const TRawTypeValue* v)
: TCell((const char*)v->Data(), v->Size())
{}
@@ -67,10 +67,10 @@ public:
return !IsNull();
}
- bool IsInline() const { return IsInline_; }
- bool IsNull() const { return IsNull_; }
- ui32 Size() const { return DataSize_; }
-
+ bool IsInline() const { return IsInline_; }
+ bool IsNull() const { return IsNull_; }
+ ui32 Size() const { return DataSize_; }
+
TArrayRef<const char> AsRef() const noexcept
{
return { Data(), Size() };
@@ -97,66 +97,66 @@ public:
return TCell{ ptr, sizeof(val) };
}
-#if 1
- // Optimization to store small values (<= 8 bytes) inplace
+#if 1
+ // Optimization to store small values (<= 8 bytes) inplace
static constexpr bool CanInline(ui32 sz) { return sz <= 8; }
static constexpr size_t MaxInlineSize() { return 8; }
const char* InlineData() const { Y_VERIFY_DEBUG(IsInline_); return IsNull_ ? nullptr : (char*)&IntVal; }
const char* Data() const { return IsNull_ ? nullptr : (IsInline_ ? (char*)&IntVal : Ptr); }
-#else
- // Non-inlinable version for perf comparisons
- static bool CanInline(ui32) { return false; }
+#else
+ // Non-inlinable version for perf comparisons
+ static bool CanInline(ui32) { return false; }
const char* InlineData() const { Y_VERIFY_DEBUG(!IsInline_); return Ptr; }
const char* Data() const { Y_VERIFY_DEBUG(!IsInline_); return Ptr; }
-#endif
-};
-
-#pragma pack(pop)
-
-static_assert(sizeof(TCell) == 12, "TCell must be 12 bytes");
+#endif
+};
+
+#pragma pack(pop)
+
+static_assert(sizeof(TCell) == 12, "TCell must be 12 bytes");
using TCellsRef = TConstArrayRef<const TCell>;
-
-
+
+
// NULL is considered equal to another NULL and less than non-NULL
-// ATTENTION!!! return value is int!! (NOT just -1,0,1)
+// ATTENTION!!! return value is int!! (NOT just -1,0,1)
inline int CompareTypedCells(const TCell& a, const TCell& b, NScheme::TTypeIdOrder type) {
using TPair = std::pair<ui64, ui64>;
- if (a.IsNull())
- return b.IsNull() ? 0 : -1;
- if (b.IsNull())
- return 1;
-
+ if (a.IsNull())
+ return b.IsNull() ? 0 : -1;
+ if (b.IsNull())
+ return 1;
+
switch (type.GetTypeId()) {
-
-#define SIMPLE_TYPE_SWITCH(typeEnum, castType) \
+
+#define SIMPLE_TYPE_SWITCH(typeEnum, castType) \
case NKikimr::NScheme::NTypeIds::typeEnum: \
- { \
+ { \
Y_VERIFY_DEBUG(a.IsInline()); \
Y_VERIFY_DEBUG(b.IsInline()); \
castType va = ReadUnaligned<castType>((const castType*)a.InlineData()); \
castType vb = ReadUnaligned<castType>((const castType*)b.InlineData()); \
return va == vb ? 0 : ((va < vb) != type.IsDescending() ? -1 : 1); \
- }
-
- SIMPLE_TYPE_SWITCH(Int8, i8);
- SIMPLE_TYPE_SWITCH(Int16, i16);
- SIMPLE_TYPE_SWITCH(Uint16, ui16);
- SIMPLE_TYPE_SWITCH(Int32, i32);
- SIMPLE_TYPE_SWITCH(Uint32, ui32);
- SIMPLE_TYPE_SWITCH(Int64, i64);
- SIMPLE_TYPE_SWITCH(Uint64, ui64);
- SIMPLE_TYPE_SWITCH(Byte, ui8);
- SIMPLE_TYPE_SWITCH(Bool, ui8);
- SIMPLE_TYPE_SWITCH(Double, double);
- SIMPLE_TYPE_SWITCH(Float, float);
+ }
+
+ SIMPLE_TYPE_SWITCH(Int8, i8);
+ SIMPLE_TYPE_SWITCH(Int16, i16);
+ SIMPLE_TYPE_SWITCH(Uint16, ui16);
+ SIMPLE_TYPE_SWITCH(Int32, i32);
+ SIMPLE_TYPE_SWITCH(Uint32, ui32);
+ SIMPLE_TYPE_SWITCH(Int64, i64);
+ SIMPLE_TYPE_SWITCH(Uint64, ui64);
+ SIMPLE_TYPE_SWITCH(Byte, ui8);
+ SIMPLE_TYPE_SWITCH(Bool, ui8);
+ SIMPLE_TYPE_SWITCH(Double, double);
+ SIMPLE_TYPE_SWITCH(Float, float);
SIMPLE_TYPE_SWITCH(PairUi64Ui64, TPair);
SIMPLE_TYPE_SWITCH(Date, ui16);
SIMPLE_TYPE_SWITCH(Datetime, ui32);
SIMPLE_TYPE_SWITCH(Timestamp, ui64);
SIMPLE_TYPE_SWITCH(Interval, i64);
-
-#undef SIMPLE_TYPE_SWITCH
-
+
+#undef SIMPLE_TYPE_SWITCH
+
case NKikimr::NScheme::NTypeIds::String:
case NKikimr::NScheme::NTypeIds::String4k:
case NKikimr::NScheme::NTypeIds::String2m:
@@ -166,17 +166,17 @@ inline int CompareTypedCells(const TCell& a, const TCell& b, NScheme::TTypeIdOrd
// XXX: using memcmp is meaningless for both JsonDocument and Json
case NKikimr::NScheme::NTypeIds::JsonDocument:
case NKikimr::NScheme::NTypeIds::DyNumber:
- {
- const char* pa = (const char*)a.Data();
- const char* pb = (const char*)b.Data();
- size_t sza = a.Size();
- size_t szb = b.Size();
- int cmp = memcmp(pa, pb, sza < szb ? sza : szb);
- if (cmp != 0)
+ {
+ const char* pa = (const char*)a.Data();
+ const char* pb = (const char*)b.Data();
+ size_t sza = a.Size();
+ size_t szb = b.Size();
+ int cmp = memcmp(pa, pb, sza < szb ? sza : szb);
+ if (cmp != 0)
return type.IsDescending() ? (cmp > 0 ? -1 : +1) : cmp; // N.B. cannot multiply, may overflow
return sza == szb ? 0 : ((sza < szb) != type.IsDescending() ? -1 : 1);
- }
-
+ }
+
case NKikimr::NScheme::NTypeIds::Decimal:
{
Y_VERIFY_DEBUG(a.Size() == sizeof(std::pair<ui64, i64>));
@@ -188,26 +188,26 @@ inline int CompareTypedCells(const TCell& a, const TCell& b, NScheme::TTypeIdOrd
return (va.second < vb.second) != type.IsDescending() ? -1 : 1;
}
- default:
+ default:
Y_VERIFY_DEBUG(false, "Unknown type");
- };
-
- return 0;
-}
-
-// ATTENTION!!! return value is int!! (NOT just -1,0,1)
+ };
+
+ return 0;
+}
+
+// ATTENTION!!! return value is int!! (NOT just -1,0,1)
template<class TTypeClass>
inline int CompareTypedCellVectors(const TCell* a, const TCell* b, const TTypeClass* type, const ui32 cnt) {
- for (ui32 i = 0; i < cnt; ++i) {
- int cmpRes = CompareTypedCells(a[i], b[i], type[i]);
- if (cmpRes != 0)
- return cmpRes;
- }
- return 0;
-}
-
+ for (ui32 i = 0; i < cnt; ++i) {
+ int cmpRes = CompareTypedCells(a[i], b[i], type[i]);
+ if (cmpRes != 0)
+ return cmpRes;
+ }
+ return 0;
+}
+
/// @warning Do not use this func to compare key with a range border. Partial key means it ends with Nulls here.
-// ATTENTION!!! return value is int!! (NOT just -1,0,1)
+// ATTENTION!!! return value is int!! (NOT just -1,0,1)
template<class TTypeClass>
inline int CompareTypedCellVectors(const TCell* a, const TCell* b, const TTypeClass* type, const ui32 cnt_a, const ui32 cnt_b) {
Y_VERIFY_DEBUG(cnt_b <= cnt_a);
@@ -223,7 +223,7 @@ inline int CompareTypedCellVectors(const TCell* a, const TCell* b, const TTypeCl
}
return 0;
}
-
+
// TODO: use NYql ops when TCell and TUnboxedValuePod had merged
inline ui64 GetValueHash(NScheme::TTypeId type, const TCell& cell) {
if (cell.IsNull())
@@ -280,24 +280,24 @@ inline ui64 GetValueHash(NScheme::TTypeId type, const TCell& cell) {
return 0;
}
-// Only references a vector of cells and corresponding types
-// Doesn't own the memory
-struct TDbTupleRef {
- const NKikimr::NScheme::TTypeId* Types;
- const TCell* Columns;
- ui32 ColumnCount;
-
+// Only references a vector of cells and corresponding types
+// Doesn't own the memory
+struct TDbTupleRef {
+ const NKikimr::NScheme::TTypeId* Types;
+ const TCell* Columns;
+ ui32 ColumnCount;
+
TArrayRef<const TCell> Cells() const {
return { Columns, ColumnCount };
}
- TDbTupleRef(const NScheme::TTypeId* types = nullptr, const TCell* storage = nullptr, ui32 colCnt = 0)
- : Types(types)
- , Columns(storage)
- , ColumnCount(colCnt)
- {}
-};
-
+ TDbTupleRef(const NScheme::TTypeId* types = nullptr, const TCell* storage = nullptr, ui32 colCnt = 0)
+ : Types(types)
+ , Columns(storage)
+ , ColumnCount(colCnt)
+ {}
+};
+
// An array of cells that owns its data and may be safely copied/moved
class TOwnedCellVec
: public TConstArrayRef<TCell>
@@ -390,22 +390,22 @@ private:
size_t DataSize_;
};
-// Used to store/load a vector of TCell in bytes array
-// When loading from a buffer the cells will point to the buffer contents
-class TSerializedCellVec {
-public:
+// Used to store/load a vector of TCell in bytes array
+// When loading from a buffer the cells will point to the buffer contents
+class TSerializedCellVec {
+public:
explicit TSerializedCellVec(TString buf)
- {
- Parse(buf);
- }
-
- TSerializedCellVec() {}
+ {
+ Parse(buf);
+ }
+
+ TSerializedCellVec() {}
TSerializedCellVec(const TSerializedCellVec &other)
- : Buf(other.Buf)
- , Cells(other.Cells)
+ : Buf(other.Buf)
+ , Cells(other.Cells)
{
- Y_VERIFY(Buf.data() == other.Buf.data(), "Buffer must be shared");
+ Y_VERIFY(Buf.data() == other.Buf.data(), "Buffer must be shared");
}
TSerializedCellVec(TSerializedCellVec &&other)
@@ -415,66 +415,66 @@ public:
TSerializedCellVec &operator=(const TSerializedCellVec &other)
{
- if (this == &other)
- return *this;
-
- TSerializedCellVec tmp(other);
- *this = std::move(tmp);
+ if (this == &other)
+ return *this;
+
+ TSerializedCellVec tmp(other);
+ *this = std::move(tmp);
return *this;
}
TSerializedCellVec &operator=(TSerializedCellVec &&other)
{
- if (this == &other)
- return *this;
-
- const char* otherPtr = other.Buf.data();
+ if (this == &other)
+ return *this;
+
+ const char* otherPtr = other.Buf.data();
Buf = std::move(other.Buf);
- Y_VERIFY(Buf.data() == otherPtr, "Buffer address must not change");
- Cells = std::move(other.Cells);
+ Y_VERIFY(Buf.data() == otherPtr, "Buffer address must not change");
+ Cells = std::move(other.Cells);
return *this;
}
- static bool TryParse(const TString& data, TSerializedCellVec& vec) {
- bool ok = DoTryParse(data, vec);
- if (!ok) {
- vec.Cells.clear();
- vec.Buf.clear();
- }
- return ok;
- }
-
+ static bool TryParse(const TString& data, TSerializedCellVec& vec) {
+ bool ok = DoTryParse(data, vec);
+ if (!ok) {
+ vec.Cells.clear();
+ vec.Buf.clear();
+ }
+ return ok;
+ }
+
void Parse(const TString &buf) {
- Y_VERIFY(TryParse(buf, *this));
- }
-
+ Y_VERIFY(TryParse(buf, *this));
+ }
+
TConstArrayRef<TCell> GetCells() const {
- return Cells;
- }
-
+ return Cells;
+ }
+
static TString Serialize(const TConstArrayRef<TCell>& cells) {
if (cells.empty())
return TString();
-
- size_t sz = sizeof(ui16);
- for (auto& c : cells) {
- sz += sizeof(TValue) + c.Size();
- }
-
+
+ size_t sz = sizeof(ui16);
+ for (auto& c : cells) {
+ sz += sizeof(TValue) + c.Size();
+ }
+
TString res;
- res.reserve(sz);
+ res.reserve(sz);
ui16 cnt = cells.size();
- res.append((const char*)&cnt, sizeof(ui16));
- for (auto& c : cells) {
+ res.append((const char*)&cnt, sizeof(ui16));
+ for (auto& c : cells) {
TValue header;
header.Size = c.Size();
header.IsNull = c.IsNull();
- res.append((const char*)&header, sizeof(header));
- res.append(c.Data(), c.Size());
- }
- return res;
- }
-
+ res.append((const char*)&header, sizeof(header));
+ res.append(c.Data(), c.Size());
+ }
+ return res;
+ }
+
const TString &GetBuffer() const { return Buf; }
TString ReleaseBuffer() {
@@ -482,49 +482,49 @@ public:
return std::move(Buf);
}
-private:
-
-#pragma pack(push,4)
- struct TValue {
- ui32 Size : 31;
- ui32 IsNull : 1;
- };
-#pragma pack(pop)
-
- static bool DoTryParse(const TString& data, TSerializedCellVec& vec) {
- vec.Cells.clear();
- if (data.empty())
- return true;
-
- if (data.size() < sizeof(ui16))
- return false;
-
- ui16 count = ReadUnaligned<ui16>(data.data());
- vec.Cells.resize(count);
- const char* buf = data.data() + sizeof(count);
- const char* bufEnd = data.data() + data.size();
- for (ui32 ki = 0; ki < count; ++ki) {
- if (bufEnd - buf < (long)sizeof(TValue))
- return false;
-
- const TValue v = ReadUnaligned<TValue>((const TValue*)buf);
- if (bufEnd - buf < (long)sizeof(TValue) + v.Size)
- return false;
- vec.Cells[ki] = v.IsNull ? TCell() : TCell((const char*)((const TValue*)buf + 1), v.Size);
- buf += sizeof(TValue) + v.Size;
- }
-
- vec.Buf = data;
- return true;
- }
-
-private:
+private:
+
+#pragma pack(push,4)
+ struct TValue {
+ ui32 Size : 31;
+ ui32 IsNull : 1;
+ };
+#pragma pack(pop)
+
+ static bool DoTryParse(const TString& data, TSerializedCellVec& vec) {
+ vec.Cells.clear();
+ if (data.empty())
+ return true;
+
+ if (data.size() < sizeof(ui16))
+ return false;
+
+ ui16 count = ReadUnaligned<ui16>(data.data());
+ vec.Cells.resize(count);
+ const char* buf = data.data() + sizeof(count);
+ const char* bufEnd = data.data() + data.size();
+ for (ui32 ki = 0; ki < count; ++ki) {
+ if (bufEnd - buf < (long)sizeof(TValue))
+ return false;
+
+ const TValue v = ReadUnaligned<TValue>((const TValue*)buf);
+ if (bufEnd - buf < (long)sizeof(TValue) + v.Size)
+ return false;
+ vec.Cells[ki] = v.IsNull ? TCell() : TCell((const char*)((const TValue*)buf + 1), v.Size);
+ buf += sizeof(TValue) + v.Size;
+ }
+
+ vec.Buf = data;
+ return true;
+ }
+
+private:
TString Buf;
TVector<TCell> Cells;
-};
-
+};
+
void DbgPrintValue(TString&, const TCell&, ui32 type);
TString DbgPrintCell(const TCell& r, NScheme::TTypeId typeId, const NScheme::TTypeRegistry& typeRegistry);
TString DbgPrintTuple(const TDbTupleRef& row, const NScheme::TTypeRegistry& typeRegistry);
-
+
}
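
The header above exposes the whole public surface used below; here is a minimal usage sketch built only from what this diff shows (the implicit TTypeId -> TTypeIdOrder conversion is inferred from how the unit test below calls CompareTypedCellVectors):

#include <ydb/core/scheme/scheme_tablecell.h>

using namespace NKikimr;

int main() {
    ui64 a = 7, b = 42;

    // Values of 8 bytes or less are stored inline in the 12-byte TCell (see CanInline()).
    TCell ca(reinterpret_cast<const char*>(&a), sizeof(a));
    TCell cb(reinterpret_cast<const char*>(&b), sizeof(b));
    Y_VERIFY(ca.IsInline() && !ca.IsNull());

    // NULL equals NULL and sorts before any non-NULL value; the result is an int, not just -1/0/1.
    Y_VERIFY(CompareTypedCells(TCell(), ca, NScheme::NTypeIds::Uint64) < 0);
    Y_VERIFY(CompareTypedCells(ca, cb, NScheme::NTypeIds::Uint64) < 0);

    // Round-trip a row through the serialized representation.
    TVector<TCell> row{ca, TCell(), cb};
    TSerializedCellVec vec(TSerializedCellVec::Serialize(row));
    Y_VERIFY(vec.GetCells().size() == row.size());
    Y_VERIFY(vec.GetCells()[1].IsNull());

    // TryParse rejects malformed buffers instead of aborting.
    TSerializedCellVec broken;
    Y_VERIFY(!TSerializedCellVec::TryParse("\1", broken));
    return 0;
}
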
diff --git a/ydb/core/scheme/scheme_tablecell_ut.cpp b/ydb/core/scheme/scheme_tablecell_ut.cpp
index c3ef930be70..5bd4a27ab95 100644
--- a/ydb/core/scheme/scheme_tablecell_ut.cpp
+++ b/ydb/core/scheme/scheme_tablecell_ut.cpp
@@ -1,14 +1,14 @@
#include <ydb/core/scheme/scheme_tablecell.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
-#include <util/generic/vector.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(Scheme) {
-
- using namespace NKikimr;
-
+#include <util/generic/vector.h>
+
+using namespace NActors;
+
+Y_UNIT_TEST_SUITE(Scheme) {
+
+ using namespace NKikimr;
+
Y_UNIT_TEST(EmptyOwnedCellVec) {
TOwnedCellVec empty;
UNIT_ASSERT_VALUES_EQUAL(empty.size(), 0u);
@@ -58,155 +58,155 @@ Y_UNIT_TEST_SUITE(Scheme) {
UNIT_ASSERT_VALUES_EQUAL(moved[1].AsBuf(), TStringBuf(bigStrVal, sizeof(bigStrVal)));
}
- Y_UNIT_TEST(TSerializedCellVec) {
- ui64 intVal = 42;
- char smallStrVal[] = "str1";
- char bigStrVal[] =
- "> You have requested to link your commit to an existing review request 849684\n"
- "> This review request is not ready yet to be merged, see its merge requirements below";
- float floatVal = 0.42;
- double doubleVal = -0.0025;
-
- TVector<TCell> cells;
- TVector<NScheme::TTypeId> types;
-
- cells.push_back(TCell((const char*)&doubleVal, sizeof(doubleVal)));
- types.push_back(NScheme::NTypeIds::Double);
- cells.push_back(TCell(smallStrVal, sizeof(smallStrVal)));
- types.push_back(NScheme::NTypeIds::String);
- cells.push_back(TCell());
- types.push_back(NScheme::NTypeIds::Utf8);
- cells.push_back(TCell(smallStrVal, sizeof(smallStrVal)));
- types.push_back(NScheme::NTypeIds::Utf8);
- cells.push_back(TCell((const char*)&floatVal, sizeof(floatVal)));
- types.push_back(NScheme::NTypeIds::Float);
- cells.push_back(TCell());
- types.push_back(NScheme::NTypeIds::Decimal);
- cells.push_back(TCell((const char*)&intVal, sizeof(ui64)));
- types.push_back(NScheme::NTypeIds::Uint64);
- cells.push_back(TCell());
- types.push_back(NScheme::NTypeIds::Decimal);
- cells.push_back(TCell());
- types.push_back(NScheme::NTypeIds::Uint8);
- cells.push_back(TCell(bigStrVal, sizeof(bigStrVal)));
- types.push_back(NScheme::NTypeIds::Utf8);
- cells.push_back(TCell());
- types.push_back(NScheme::NTypeIds::Double);
- cells.push_back(TCell((const char*)&intVal, sizeof(i32)));
- types.push_back(NScheme::NTypeIds::Int32);
-
- TSerializedCellVec vec(TSerializedCellVec::Serialize(cells));
-
- UNIT_ASSERT_VALUES_EQUAL(cells.size(), 12);
- UNIT_ASSERT_VALUES_EQUAL(types.size(), cells.size());
- UNIT_ASSERT_VALUES_EQUAL(vec.GetCells().size(), cells.size());
-
- UNIT_ASSERT_DOUBLES_EQUAL(cells[0].AsValue<double>(), doubleVal, 0.00001);
- UNIT_ASSERT_VALUES_EQUAL(cells[1].AsBuf().data(), smallStrVal);
- UNIT_ASSERT_VALUES_EQUAL(cells[3].AsBuf().data(), smallStrVal);
- UNIT_ASSERT_DOUBLES_EQUAL(cells[4].AsValue<float>(), floatVal, 0.00001);
- UNIT_ASSERT_VALUES_EQUAL(cells[6].AsValue<ui64>(), intVal);
- UNIT_ASSERT_VALUES_EQUAL(cells[9].AsBuf().data(), bigStrVal);
- UNIT_ASSERT_VALUES_EQUAL(cells[11].AsValue<i32>(), intVal);
-
- UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec.GetCells().data(), cells.data(),
- types.data(),
- vec.GetCells().size(), cells.size()),
- 0);
-
- TSerializedCellVec vecCopy(vec);
-
- UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vecCopy.GetCells().data(), cells.data(),
- types.data(),
- vecCopy.GetCells().size(), cells.size()),
- 0);
-
-
- TSerializedCellVec vec2(std::move(vecCopy));
-
- UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec2.GetCells().data(), cells.data(),
- types.data(),
- vec2.GetCells().size(), cells.size()),
- 0);
-
- TSerializedCellVec vec3;
- UNIT_ASSERT(vec3.GetCells().empty());
- UNIT_ASSERT(vec3.GetBuffer().empty());
-
- TString buf = vec.GetBuffer();
- UNIT_ASSERT(buf.size() > cells.size()*2);
- vec3.Parse(buf);
-
- UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec3.GetCells().data(), cells.data(),
- types.data(),
- vec3.GetCells().size(), cells.size()),
- 0);
-
- const int ITERATIONS = 1000;//10000000;
-
- {
- TInstant start = TInstant::Now();
- for (int i = 0; i < ITERATIONS; ++i) {
- TSerializedCellVec::Serialize(cells);
- }
- TInstant finish = TInstant::Now();
- Cerr << "Serialize: " << finish - start << Endl;
- }
-
- {
- TString buf = vec.GetBuffer();
- TInstant start = TInstant::Now();
- for (int i = 0; i < ITERATIONS; ++i) {
- vec3.Parse(buf);
- }
- TInstant finish = TInstant::Now();
- Cerr << "Parse: " << finish - start << Endl;
- }
-
- {
- TInstant start = TInstant::Now();
- for (int i = 0; i < ITERATIONS; ++i) {
- vec3 = vec;
- }
- TInstant finish = TInstant::Now();
- Cerr << "Copy: " << finish - start << Endl;
- }
-
- {
- size_t unused = 0;
- TInstant start = TInstant::Now();
- for (int i = 0; i < ITERATIONS; ++i) {
- TSerializedCellVec vec3(std::move(vec));
- unused += vec3.GetCells().size();
- if (unused % 10000 != 0) {
- vec = std::move(vec3);
- } else {
- vec = vec2;
- }
- }
- TInstant finish = TInstant::Now();
- UNIT_ASSERT_VALUES_EQUAL(unused, ITERATIONS*cells.size());
- Cerr << "Move: " << finish - start << Endl;
- }
- }
-
- Y_UNIT_TEST(CellVecTryParse) {
- TSerializedCellVec vec;
- UNIT_ASSERT(!TSerializedCellVec::TryParse("\1", vec));
- UNIT_ASSERT(!TSerializedCellVec::TryParse("\1\1", vec));
-
- const TString buf = TSerializedCellVec::Serialize({TCell(), TCell()});
- UNIT_ASSERT_VALUES_EQUAL(buf.size(), 2 + 2*4);
-
- {
- for (size_t i = 0; i < buf.size(); ++i) {
- TString hacked = buf;
- hacked[1] = hacked[i] + 1;
- UNIT_ASSERT(!TSerializedCellVec::TryParse(hacked, vec));
- }
- }
- }
-
+ Y_UNIT_TEST(TSerializedCellVec) {
+ ui64 intVal = 42;
+ char smallStrVal[] = "str1";
+ char bigStrVal[] =
+ "> You have requested to link your commit to an existing review request 849684\n"
+ "> This review request is not ready yet to be merged, see its merge requirements below";
+ float floatVal = 0.42;
+ double doubleVal = -0.0025;
+
+ TVector<TCell> cells;
+ TVector<NScheme::TTypeId> types;
+
+ cells.push_back(TCell((const char*)&doubleVal, sizeof(doubleVal)));
+ types.push_back(NScheme::NTypeIds::Double);
+ cells.push_back(TCell(smallStrVal, sizeof(smallStrVal)));
+ types.push_back(NScheme::NTypeIds::String);
+ cells.push_back(TCell());
+ types.push_back(NScheme::NTypeIds::Utf8);
+ cells.push_back(TCell(smallStrVal, sizeof(smallStrVal)));
+ types.push_back(NScheme::NTypeIds::Utf8);
+ cells.push_back(TCell((const char*)&floatVal, sizeof(floatVal)));
+ types.push_back(NScheme::NTypeIds::Float);
+ cells.push_back(TCell());
+ types.push_back(NScheme::NTypeIds::Decimal);
+ cells.push_back(TCell((const char*)&intVal, sizeof(ui64)));
+ types.push_back(NScheme::NTypeIds::Uint64);
+ cells.push_back(TCell());
+ types.push_back(NScheme::NTypeIds::Decimal);
+ cells.push_back(TCell());
+ types.push_back(NScheme::NTypeIds::Uint8);
+ cells.push_back(TCell(bigStrVal, sizeof(bigStrVal)));
+ types.push_back(NScheme::NTypeIds::Utf8);
+ cells.push_back(TCell());
+ types.push_back(NScheme::NTypeIds::Double);
+ cells.push_back(TCell((const char*)&intVal, sizeof(i32)));
+ types.push_back(NScheme::NTypeIds::Int32);
+
+ TSerializedCellVec vec(TSerializedCellVec::Serialize(cells));
+
+ UNIT_ASSERT_VALUES_EQUAL(cells.size(), 12);
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), cells.size());
+ UNIT_ASSERT_VALUES_EQUAL(vec.GetCells().size(), cells.size());
+
+ UNIT_ASSERT_DOUBLES_EQUAL(cells[0].AsValue<double>(), doubleVal, 0.00001);
+ UNIT_ASSERT_VALUES_EQUAL(cells[1].AsBuf().data(), smallStrVal);
+ UNIT_ASSERT_VALUES_EQUAL(cells[3].AsBuf().data(), smallStrVal);
+ UNIT_ASSERT_DOUBLES_EQUAL(cells[4].AsValue<float>(), floatVal, 0.00001);
+ UNIT_ASSERT_VALUES_EQUAL(cells[6].AsValue<ui64>(), intVal);
+ UNIT_ASSERT_VALUES_EQUAL(cells[9].AsBuf().data(), bigStrVal);
+ UNIT_ASSERT_VALUES_EQUAL(cells[11].AsValue<i32>(), intVal);
+
+ UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec.GetCells().data(), cells.data(),
+ types.data(),
+ vec.GetCells().size(), cells.size()),
+ 0);
+
+ TSerializedCellVec vecCopy(vec);
+
+ UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vecCopy.GetCells().data(), cells.data(),
+ types.data(),
+ vecCopy.GetCells().size(), cells.size()),
+ 0);
+
+
+ TSerializedCellVec vec2(std::move(vecCopy));
+
+ UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec2.GetCells().data(), cells.data(),
+ types.data(),
+ vec2.GetCells().size(), cells.size()),
+ 0);
+
+ TSerializedCellVec vec3;
+ UNIT_ASSERT(vec3.GetCells().empty());
+ UNIT_ASSERT(vec3.GetBuffer().empty());
+
+ TString buf = vec.GetBuffer();
+ UNIT_ASSERT(buf.size() > cells.size()*2);
+ vec3.Parse(buf);
+
+ UNIT_ASSERT_VALUES_EQUAL(CompareTypedCellVectors(vec3.GetCells().data(), cells.data(),
+ types.data(),
+ vec3.GetCells().size(), cells.size()),
+ 0);
+
+ const int ITERATIONS = 1000;//10000000;
+
+ {
+ TInstant start = TInstant::Now();
+ for (int i = 0; i < ITERATIONS; ++i) {
+ TSerializedCellVec::Serialize(cells);
+ }
+ TInstant finish = TInstant::Now();
+ Cerr << "Serialize: " << finish - start << Endl;
+ }
+
+ {
+ TString buf = vec.GetBuffer();
+ TInstant start = TInstant::Now();
+ for (int i = 0; i < ITERATIONS; ++i) {
+ vec3.Parse(buf);
+ }
+ TInstant finish = TInstant::Now();
+ Cerr << "Parse: " << finish - start << Endl;
+ }
+
+ {
+ TInstant start = TInstant::Now();
+ for (int i = 0; i < ITERATIONS; ++i) {
+ vec3 = vec;
+ }
+ TInstant finish = TInstant::Now();
+ Cerr << "Copy: " << finish - start << Endl;
+ }
+
+ {
+ size_t unused = 0;
+ TInstant start = TInstant::Now();
+ for (int i = 0; i < ITERATIONS; ++i) {
+ TSerializedCellVec vec3(std::move(vec));
+ unused += vec3.GetCells().size();
+ if (unused % 10000 != 0) {
+ vec = std::move(vec3);
+ } else {
+ vec = vec2;
+ }
+ }
+ TInstant finish = TInstant::Now();
+ UNIT_ASSERT_VALUES_EQUAL(unused, ITERATIONS*cells.size());
+ Cerr << "Move: " << finish - start << Endl;
+ }
+ }
+
+ Y_UNIT_TEST(CellVecTryParse) {
+ TSerializedCellVec vec;
+ UNIT_ASSERT(!TSerializedCellVec::TryParse("\1", vec));
+ UNIT_ASSERT(!TSerializedCellVec::TryParse("\1\1", vec));
+
+ const TString buf = TSerializedCellVec::Serialize({TCell(), TCell()});
+ UNIT_ASSERT_VALUES_EQUAL(buf.size(), 2 + 2*4);
+
+ {
+ for (size_t i = 0; i < buf.size(); ++i) {
+ TString hacked = buf;
+ hacked[1] = hacked[i] + 1;
+ UNIT_ASSERT(!TSerializedCellVec::TryParse(hacked, vec));
+ }
+ }
+ }
+
/**
* CompareOrder test for cell1 < cell2 < cell3 given a type id
*/
@@ -353,4 +353,4 @@ Y_UNIT_TEST_SUITE(Scheme) {
}
}
}
-}
+}
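
The restored TSerializedCellVec test above exercises a full serialize/parse/copy/move cycle. The sketch below is a minimal round trip using only the calls visible in that test (Serialize, TryParse, GetBuffer, GetCells); the include path is an assumption, not something this diff shows.

    // Minimal round-trip sketch; header location is assumed.
    #include <ydb/core/scheme/scheme_tablecell.h>

    using namespace NKikimr;

    void RoundTripSketch() {
        ui64 intVal = 42;
        TVector<TCell> cells;
        cells.push_back(TCell((const char*)&intVal, sizeof(intVal))); // inline value
        cells.push_back(TCell());                                     // null cell

        // Serialize() packs the cells into a single TString buffer.
        TSerializedCellVec vec(TSerializedCellVec::Serialize(cells));

        // TryParse() restores the cells and returns false on a corrupted buffer,
        // which is what the CellVecTryParse test above relies on.
        TSerializedCellVec restored;
        if (TSerializedCellVec::TryParse(vec.GetBuffer(), restored)) {
            Y_VERIFY(restored.GetCells().size() == cells.size());
        }
    }
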
diff --git a/ydb/core/scheme/scheme_tabledefs.cpp b/ydb/core/scheme/scheme_tabledefs.cpp
index 897cb0ae21a..8c91e03d0e4 100644
--- a/ydb/core/scheme/scheme_tabledefs.cpp
+++ b/ydb/core/scheme/scheme_tabledefs.cpp
@@ -2,11 +2,11 @@
namespace NKikimr {
-bool TTableRange::IsEmptyRange(TConstArrayRef<const NScheme::TTypeId> cellTypeIds) const {
+bool TTableRange::IsEmptyRange(TConstArrayRef<const NScheme::TTypeId> cellTypeIds) const {
if (Point)
return false;
- const int compares = CompareBorders<true, false>(To, From, InclusiveTo, InclusiveFrom, cellTypeIds);
+ const int compares = CompareBorders<true, false>(To, From, InclusiveTo, InclusiveFrom, cellTypeIds);
return (compares < 0);
}
diff --git a/ydb/core/scheme/scheme_tabledefs.h b/ydb/core/scheme/scheme_tabledefs.h
index 11a509cb24b..190233fdeee 100644
--- a/ydb/core/scheme/scheme_tabledefs.h
+++ b/ydb/core/scheme/scheme_tabledefs.h
@@ -174,7 +174,7 @@ public:
}
}
- bool IsEmptyRange(TConstArrayRef<const NScheme::TTypeId> cellTypeIds) const;
+ bool IsEmptyRange(TConstArrayRef<const NScheme::TTypeId> cellTypeIds) const;
};
class TSerializedTableRange {
@@ -308,7 +308,7 @@ int CompareBorders(TConstArrayRef<TCell> first, TConstArrayRef<TCell> second, bo
return -1;
} else if (second[idx].IsNull()) {
return 1;
- } else if (const int compares = CompareTypedCells(first[idx], second[idx], cellTypes[idx])) {
+ } else if (const int compares = CompareTypedCells(first[idx], second[idx], cellTypes[idx])) {
return compares;
}
}
@@ -636,16 +636,16 @@ public:
EStatus Status;
};
- struct TRangeLimits {
- ui64 ItemsLimit;
- ui64 BytesLimit;
-
- TRangeLimits(ui64 itemsLimit = 0, ui64 bytesLimit = 0)
- : ItemsLimit(itemsLimit)
- , BytesLimit(bytesLimit)
- {}
- };
-
+ struct TRangeLimits {
+ ui64 ItemsLimit;
+ ui64 BytesLimit;
+
+ TRangeLimits(ui64 itemsLimit = 0, ui64 bytesLimit = 0)
+ : ItemsLimit(itemsLimit)
+ , BytesLimit(bytesLimit)
+ {}
+ };
+
struct TPartitionRangeInfo {
TSerializedCellVec EndKeyPrefix;
bool IsInclusive = false;
@@ -665,7 +665,7 @@ public:
// in
const TTableId TableId;
const TOwnedTableRange Range;
- const TRangeLimits RangeLimits;
+ const TRangeLimits RangeLimits;
const ERowOperation RowOperation;
const TVector<NScheme::TTypeId> KeyColumnTypes; // For SelectRange the key may be incomplete
const TVector<TColumnOp> Columns;
@@ -686,7 +686,7 @@ public:
ui64 itemsLimit = 0, ui64 bytesLimit = 0, bool reverse = false)
: TableId(tableId)
, Range(range.From, range.InclusiveFrom, range.To, range.InclusiveTo, range.Point)
- , RangeLimits(itemsLimit, bytesLimit)
+ , RangeLimits(itemsLimit, bytesLimit)
, RowOperation(rowOperation)
, KeyColumnTypes(keyColumnTypes.begin(), keyColumnTypes.end())
, Columns(columns.begin(), columns.end())
diff --git a/ydb/core/scheme/ut/ya.make b/ydb/core/scheme/ut/ya.make
index 6ffb7007fa6..803c635be38 100644
--- a/ydb/core/scheme/ut/ya.make
+++ b/ydb/core/scheme/ut/ya.make
@@ -1,18 +1,18 @@
UNITTEST_FOR(ydb/core/scheme)
-
-OWNER(g:kikimr)
-
-FORK_SUBTESTS()
-
-SIZE(SMALL)
-
-PEERDIR(
+
+OWNER(g:kikimr)
+
+FORK_SUBTESTS()
+
+SIZE(SMALL)
+
+PEERDIR(
ydb/core/scheme
-)
-
-SRCS(
+)
+
+SRCS(
scheme_borders_ut.cpp
- scheme_tablecell_ut.cpp
-)
-
-END()
+ scheme_tablecell_ut.cpp
+)
+
+END()
diff --git a/ydb/core/scheme_types/scheme_raw_type_value.h b/ydb/core/scheme_types/scheme_raw_type_value.h
index 2ed02a5c389..1cb0946b6d7 100644
--- a/ydb/core/scheme_types/scheme_raw_type_value.h
+++ b/ydb/core/scheme_types/scheme_raw_type_value.h
@@ -66,7 +66,7 @@ private:
} // namespace NKikimr
-inline IOutputStream& operator << (IOutputStream& out, const NKikimr::TRawTypeValue& v) {
- out << v.ToString();
- return out;
-}
+inline IOutputStream& operator << (IOutputStream& out, const NKikimr::TRawTypeValue& v) {
+ out << v.ToString();
+ return out;
+}
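
The operator<< restored above only forwards to TRawTypeValue::ToString(), so a wrapped value can be streamed directly. A small hedged sketch, assuming the three-argument (data, size, type) constructor that the TTypeValue wrapper in flat_cxx_database.h further down in this diff also uses:

    // Hedged usage sketch for the restored stream operator.
    ui64 value = 42;
    NKikimr::TRawTypeValue raw(&value, sizeof(value), NKikimr::NScheme::NTypeIds::Uint64);
    Cerr << raw << Endl; // prints raw.ToString() through operator<<
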
diff --git a/ydb/core/scheme_types/scheme_type_registry.h b/ydb/core/scheme_types/scheme_type_registry.h
index 4724f5bf210..a82af192675 100644
--- a/ydb/core/scheme_types/scheme_type_registry.h
+++ b/ydb/core/scheme_types/scheme_type_registry.h
@@ -55,7 +55,7 @@ public:
return "Null";
}
auto type = GetType(typeId);
- return type.IsKnownType() ? ::TString(type->GetName()) : (TStringBuilder() << "Unknown(" << typeId << ")");
+ return type.IsKnownType() ? ::TString(type->GetName()) : (TStringBuilder() << "Unknown(" << typeId << ")");
}
ITypeSP GetKnownType(TTypeId typeId) const {
diff --git a/ydb/core/sys_view/common/schema.cpp b/ydb/core/sys_view/common/schema.cpp
index 032cb99f955..1917ea35a49 100644
--- a/ydb/core/sys_view/common/schema.cpp
+++ b/ydb/core/sys_view/common/schema.cpp
@@ -38,9 +38,9 @@ public:
} else if (MaybeSystemViewPath(path)) {
auto maybeSystemViewName = path.back();
if (!DomainSystemViews.contains(maybeSystemViewName) &&
- !SubDomainSystemViews.contains(maybeSystemViewName) &&
- !OlapStoreSystemViews.contains(maybeSystemViewName) &&
- !OlapTableSystemViews.contains(maybeSystemViewName))
+ !SubDomainSystemViews.contains(maybeSystemViewName) &&
+ !OlapStoreSystemViews.contains(maybeSystemViewName) &&
+ !OlapTableSystemViews.contains(maybeSystemViewName))
{
return false;
}
@@ -61,12 +61,12 @@ public:
case ETarget::SubDomain:
view = SubDomainSystemViews.FindPtr(viewName);
break;
- case ETarget::OlapStore:
- view = OlapStoreSystemViews.FindPtr(viewName);
- break;
- case ETarget::OlapTable:
- view = OlapTableSystemViews.FindPtr(viewName);
- break;
+ case ETarget::OlapStore:
+ view = OlapStoreSystemViews.FindPtr(viewName);
+ break;
+ case ETarget::OlapTable:
+ view = OlapTableSystemViews.FindPtr(viewName);
+ break;
}
return view ? TMaybe<TSchema>(*view) : Nothing();
}
@@ -86,18 +86,18 @@ public:
result.push_back(name);
}
break;
- case ETarget::OlapStore:
- result.reserve(OlapStoreSystemViews.size());
- for (const auto& [name, _] : OlapStoreSystemViews) {
- result.push_back(name);
- }
- break;
- case ETarget::OlapTable:
- result.reserve(OlapTableSystemViews.size());
- for (const auto& [name, _] : OlapTableSystemViews) {
- result.push_back(name);
- }
- break;
+ case ETarget::OlapStore:
+ result.reserve(OlapStoreSystemViews.size());
+ for (const auto& [name, _] : OlapStoreSystemViews) {
+ result.push_back(name);
+ }
+ break;
+ case ETarget::OlapTable:
+ result.reserve(OlapTableSystemViews.size());
+ for (const auto& [name, _] : OlapTableSystemViews) {
+ result.push_back(name);
+ }
+ break;
}
return result;
}
@@ -179,16 +179,16 @@ private:
TSchemaFiller<Table>::Fill(DomainSystemViews[name]);
}
- template <typename Table>
- void RegisterOlapStoreSystemView(const TStringBuf& name) {
- TSchemaFiller<Table>::Fill(OlapStoreSystemViews[name]);
- }
-
- template <typename Table>
- void RegisterOlapTableSystemView(const TStringBuf& name) {
- TSchemaFiller<Table>::Fill(OlapTableSystemViews[name]);
- }
-
+ template <typename Table>
+ void RegisterOlapStoreSystemView(const TStringBuf& name) {
+ TSchemaFiller<Table>::Fill(OlapStoreSystemViews[name]);
+ }
+
+ template <typename Table>
+ void RegisterOlapTableSystemView(const TStringBuf& name) {
+ TSchemaFiller<Table>::Fill(OlapTableSystemViews[name]);
+ }
+
void RegisterSystemViews() {
RegisterSystemView<Schema::PartitionStats>(PartitionStatsName);
@@ -213,16 +213,16 @@ private:
RegisterDomainSystemView<Schema::Tablets>(TabletsName);
RegisterSystemView<Schema::QueryMetrics>(QueryMetricsName);
-
- RegisterOlapStoreSystemView<Schema::PrimaryIndexStats>(StorePrimaryIndexStatsName);
- RegisterOlapTableSystemView<Schema::PrimaryIndexStats>(TablePrimaryIndexStatsName);
+
+ RegisterOlapStoreSystemView<Schema::PrimaryIndexStats>(StorePrimaryIndexStatsName);
+ RegisterOlapTableSystemView<Schema::PrimaryIndexStats>(TablePrimaryIndexStatsName);
}
private:
THashMap<TString, TSchema> DomainSystemViews;
THashMap<TString, TSchema> SubDomainSystemViews;
- THashMap<TString, TSchema> OlapStoreSystemViews;
- THashMap<TString, TSchema> OlapTableSystemViews;
+ THashMap<TString, TSchema> OlapStoreSystemViews;
+ THashMap<TString, TSchema> OlapTableSystemViews;
};
ISystemViewResolver* CreateSystemViewResolver() {
diff --git a/ydb/core/sys_view/common/schema.h b/ydb/core/sys_view/common/schema.h
index 99eed150c8d..f7a4f191e13 100644
--- a/ydb/core/sys_view/common/schema.h
+++ b/ydb/core/sys_view/common/schema.h
@@ -30,9 +30,9 @@ constexpr TStringBuf TabletsName = "hive_tablets";
constexpr TStringBuf QueryMetricsName = "query_metrics_one_minute";
-constexpr TStringBuf StorePrimaryIndexStatsName = "store_primary_index_stats";
-constexpr TStringBuf TablePrimaryIndexStatsName = "primary_index_stats";
-
+constexpr TStringBuf StorePrimaryIndexStatsName = "store_primary_index_stats";
+constexpr TStringBuf TablePrimaryIndexStatsName = "primary_index_stats";
+
struct Schema : NIceDb::Schema {
struct PartitionStats : Table<1> {
struct OwnerId : Column<1, NScheme::NTypeIds::Uint64> {};
@@ -379,31 +379,31 @@ struct Schema : NIceDb::Schema {
SumDeleteRows, MinDeleteRows, MaxDeleteRows,
SumRequestUnits, MinRequestUnits, MaxRequestUnits>;
};
-
- struct PrimaryIndexStats : Table<10> {
- struct PathId : Column<1, NScheme::NTypeIds::Uint64> {};
- struct Kind : Column<2, NScheme::NTypeIds::Uint32> {};
- struct TabletId : Column<3, NScheme::NTypeIds::Uint64> {};
- struct Rows : Column<4, NScheme::NTypeIds::Uint64> {};
- struct Bytes : Column<5, NScheme::NTypeIds::Uint64> {};
- struct RawBytes : Column<6, NScheme::NTypeIds::Uint64> {};
- struct Portions : Column<7, NScheme::NTypeIds::Uint64> {};
- struct Blobs : Column<8, NScheme::NTypeIds::Uint64> {};
-
- using TKey = TableKey<
- PathId,
- Kind,
- TabletId>;
- using TColumns = TableColumns<
- PathId,
- Kind,
- TabletId,
- Rows,
- Bytes,
- RawBytes,
- Portions,
- Blobs>;
- };
+
+ struct PrimaryIndexStats : Table<10> {
+ struct PathId : Column<1, NScheme::NTypeIds::Uint64> {};
+ struct Kind : Column<2, NScheme::NTypeIds::Uint32> {};
+ struct TabletId : Column<3, NScheme::NTypeIds::Uint64> {};
+ struct Rows : Column<4, NScheme::NTypeIds::Uint64> {};
+ struct Bytes : Column<5, NScheme::NTypeIds::Uint64> {};
+ struct RawBytes : Column<6, NScheme::NTypeIds::Uint64> {};
+ struct Portions : Column<7, NScheme::NTypeIds::Uint64> {};
+ struct Blobs : Column<8, NScheme::NTypeIds::Uint64> {};
+
+ using TKey = TableKey<
+ PathId,
+ Kind,
+ TabletId>;
+ using TColumns = TableColumns<
+ PathId,
+ Kind,
+ TabletId,
+ Rows,
+ Bytes,
+ RawBytes,
+ Portions,
+ Blobs>;
+ };
struct StorageStats : Table<11> {
struct PDiskFilter : Column<2, NScheme::NTypeIds::Utf8> {};
@@ -429,9 +429,9 @@ public:
enum class ETarget : ui8 {
Domain,
- SubDomain,
- OlapStore,
- OlapTable
+ SubDomain,
+ OlapStore,
+ OlapTable
};
struct TSystemViewPath {
diff --git a/ydb/core/tablet/bootstrapper.h b/ydb/core/tablet/bootstrapper.h
index 0c41f6baca9..2e246ea2b07 100644
--- a/ydb/core/tablet/bootstrapper.h
+++ b/ydb/core/tablet/bootstrapper.h
@@ -23,7 +23,7 @@ struct TEvBootstrapper {
DEFINE_SIMPLE_NONLOCAL_EVENT(TEvActivate, "TEvBootstrapper::Activate");
};
- struct TEvStandBy : public TEventBase<TEvStandBy, EvStandBy> {
+ struct TEvStandBy : public TEventBase<TEvStandBy, EvStandBy> {
DEFINE_SIMPLE_NONLOCAL_EVENT(TEvStandBy, "TEvBootstrapper::StandBy");
};
diff --git a/ydb/core/tablet/tablet_counters_aggregator.h b/ydb/core/tablet/tablet_counters_aggregator.h
index b2cc773f4be..db350d41cbf 100644
--- a/ydb/core/tablet/tablet_counters_aggregator.h
+++ b/ydb/core/tablet/tablet_counters_aggregator.h
@@ -39,9 +39,9 @@ struct TEvTabletCounters {
static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_TABLET_COUNTERS_AGGREGATOR), "expect EvEnd < EventSpaceEnd(TKikimrEvents::ES_TABLET_COUNTERS)");
- // Used just as an atomic counter
- struct TInFlightCookie : TThrRefBase {};
-
+ // Used just as an atomic counter
+ struct TInFlightCookie : TThrRefBase {};
+
struct TEvTabletAddCounters : public TEventLocal<TEvTabletAddCounters, EvTabletAddCounters> {
//
const ui64 TabletID;
@@ -49,7 +49,7 @@ struct TEvTabletCounters {
const TPathId TenantPathId;
TAutoPtr<TTabletCountersBase> ExecutorCounters;
TAutoPtr<TTabletCountersBase> AppCounters;
- TIntrusivePtr<TInFlightCookie> InFlightCounter; // Used to detect when previous event has been consumed by the aggregator
+ TIntrusivePtr<TInFlightCookie> InFlightCounter; // Used to detect when previous event has been consumed by the aggregator
TEvTabletAddCounters(TIntrusivePtr<TInFlightCookie> inFlightCounter, ui64 tabletID, TTabletTypes::EType tabletType, TPathId tenantPathId,
TAutoPtr<TTabletCountersBase> executorCounters, TAutoPtr<TTabletCountersBase> appCounters)
@@ -58,7 +58,7 @@ struct TEvTabletCounters {
, TenantPathId(tenantPathId)
, ExecutorCounters(executorCounters)
, AppCounters(appCounters)
- , InFlightCounter(inFlightCounter)
+ , InFlightCounter(inFlightCounter)
{}
};
diff --git a/ydb/core/tablet/tablet_counters_protobuf.h b/ydb/core/tablet/tablet_counters_protobuf.h
index a6ed37e5cca..11d3a334def 100644
--- a/ydb/core/tablet/tablet_counters_protobuf.h
+++ b/ydb/core/tablet/tablet_counters_protobuf.h
@@ -186,9 +186,9 @@ public:
return Ranges[idx];
} else {
if (idx < TxOffset) {
- if (!AppGlobalRanges.empty())
+ if (!AppGlobalRanges.empty())
return AppGlobalRanges;
- } else if (!TxGlobalRanges.empty()) {
+ } else if (!TxGlobalRanges.empty()) {
return TxGlobalRanges;
}
}
diff --git a/ydb/core/tablet/tablet_exception.h b/ydb/core/tablet/tablet_exception.h
index 82385cb8bb2..b2d7f655bf0 100644
--- a/ydb/core/tablet/tablet_exception.h
+++ b/ydb/core/tablet/tablet_exception.h
@@ -8,15 +8,15 @@ namespace NKikimr {
////////////////////////////////////////////
/// The TTabletException class
////////////////////////////////////////////
-#define TABLET_EXCEPTION(TExcName) \
-class TExcName : public yexception {};
+#define TABLET_EXCEPTION(TExcName) \
+class TExcName : public yexception {};
-TABLET_EXCEPTION(TNotReadyTabletException)
-TABLET_EXCEPTION(TNotExistTabletException)
-TABLET_EXCEPTION(TSchemeErrorTabletException)
+TABLET_EXCEPTION(TNotReadyTabletException)
+TABLET_EXCEPTION(TNotExistTabletException)
+TABLET_EXCEPTION(TSchemeErrorTabletException)
TABLET_EXCEPTION(TTooLongTxException)
-#undef TABLET_EXCEPTION
+#undef TABLET_EXCEPTION
} // end of the NKikimr namespace
diff --git a/ydb/core/tablet/tablet_metrics_ut.cpp b/ydb/core/tablet/tablet_metrics_ut.cpp
index d9cc6c0b47a..57cab15a5d6 100644
--- a/ydb/core/tablet/tablet_metrics_ut.cpp
+++ b/ydb/core/tablet/tablet_metrics_ut.cpp
@@ -175,7 +175,7 @@ Y_UNIT_TEST_SUITE(TFlatMetrics) {
UNIT_ASSERT(!value.IsValueObsolete(time));
auto avg = value.GetValue();
UNIT_ASSERT_C(avg == 60, avg);
- value.Set(avg, time);
+ value.Set(avg, time);
auto avg2 = value.GetValue();
UNIT_ASSERT_C(avg2 == avg, avg2);
time += TDuration::Minutes(1);
diff --git a/ydb/core/tablet/tablet_monitoring_proxy.cpp b/ydb/core/tablet/tablet_monitoring_proxy.cpp
index 22bb6ed6da4..a61a43b7e5c 100644
--- a/ydb/core/tablet/tablet_monitoring_proxy.cpp
+++ b/ydb/core/tablet/tablet_monitoring_proxy.cpp
@@ -235,9 +235,9 @@ TTabletMonitoringProxyActor::Handle(NMon::TEvHttpInfo::TPtr &ev, const TActorCon
TStringStream str;
-
- const NKikimr::TDomainsInfo* domainsInfo = AppData(ctx)->DomainsInfo.Get();
- auto& domains = domainsInfo->Domains;
+
+ const NKikimr::TDomainsInfo* domainsInfo = AppData(ctx)->DomainsInfo.Get();
+ auto& domains = domainsInfo->Domains;
HTML(str) {
for (auto di: domains) {
ui32 domainId = di.first;
@@ -252,7 +252,7 @@ TTabletMonitoringProxyActor::Handle(NMon::TEvHttpInfo::TPtr &ev, const TActorCon
TABLEH_CLASS("sorter-false") {}
TABLEH_CLASS("sorter-false") {}
}
- }
+ }
TABLEBODY() {
if (const ui64 schemeRootTabletId = di.second->SchemeRoot) {
TABLER() {
diff --git a/ydb/core/tablet/tablet_pipe_client_cache.cpp b/ydb/core/tablet/tablet_pipe_client_cache.cpp
index 7edbcf8eef3..47b81e45681 100644
--- a/ydb/core/tablet/tablet_pipe_client_cache.cpp
+++ b/ydb/core/tablet/tablet_pipe_client_cache.cpp
@@ -147,16 +147,16 @@ namespace NTabletPipe {
PoolContainer->Erase(tabletId);
}
- void ForceClose(const TActorContext& ctx, ui64 tabletId) override {
- TClientCacheEntry* currentClient;
- if (!Container->Find(tabletId, currentClient))
- return;
-
+ void ForceClose(const TActorContext& ctx, ui64 tabletId) override {
+ TClientCacheEntry* currentClient;
+ if (!Container->Find(tabletId, currentClient))
+ return;
+
TActorId client = currentClient->Client;
- CloseClient(ctx, client);
- Container->Erase(tabletId);
- }
-
+ CloseClient(ctx, client);
+ Container->Erase(tabletId);
+ }
+
void Shutdown(const TActorContext& ctx, ui64 tabletId) override {
TClientCacheEntry* currentClient;
if (!Container->Find(tabletId, currentClient))
diff --git a/ydb/core/tablet/tablet_pipe_client_cache.h b/ydb/core/tablet/tablet_pipe_client_cache.h
index 939204008ec..a6548c20e95 100644
--- a/ydb/core/tablet/tablet_pipe_client_cache.h
+++ b/ydb/core/tablet/tablet_pipe_client_cache.h
@@ -29,7 +29,7 @@ namespace NTabletPipe {
virtual bool OnConnect(TEvTabletPipe::TEvClientConnected::TPtr& ev) = 0;
virtual void OnDisconnect(TEvTabletPipe::TEvClientDestroyed::TPtr& ev) = 0;
virtual void Close(const TActorContext& ctx, ui64 tabletId) = 0;
- virtual void ForceClose(const TActorContext& ctx, ui64 tabletId) = 0;
+ virtual void ForceClose(const TActorContext& ctx, ui64 tabletId) = 0;
virtual void Shutdown(const TActorContext& ctx, ui64 tabletId) = 0;
virtual void PopWhileOverflow() = 0;
};
diff --git a/ydb/core/tablet/tablet_pipe_server.cpp b/ydb/core/tablet/tablet_pipe_server.cpp
index 79523e6b69c..b7c669e72cc 100644
--- a/ydb/core/tablet/tablet_pipe_server.cpp
+++ b/ydb/core/tablet/tablet_pipe_server.cpp
@@ -3,7 +3,7 @@
#include <library/cpp/actors/core/hfunc.h>
#include <library/cpp/actors/core/interconnect.h>
#include <library/cpp/actors/core/log.h>
-#include <util/generic/hash_set.h>
+#include <util/generic/hash_set.h>
namespace NKikimr {
@@ -39,7 +39,7 @@ namespace NTabletPipe {
HFunc(TEvTabletPipe::TEvShutdown, Handle);
HFunc(TEvents::TEvPoisonPill, Handle);
HFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
- HFunc(TEvents::TEvUndelivered, Handle);
+ HFunc(TEvents::TEvUndelivered, Handle);
}
}
@@ -168,8 +168,8 @@ namespace NTabletPipe {
void Handle(TEvTabletPipe::TEvPeerClosed::TPtr& ev, const TActorContext& ctx) {
Y_VERIFY(ev->Get()->Record.GetTabletId() == TabletId);
- LOG_DEBUG_S(ctx, NKikimrServices::PIPE_SERVER, "[" << TabletId << "]"
- << " Got PeerClosed from# " << ev->Sender);
+ LOG_DEBUG_S(ctx, NKikimrServices::PIPE_SERVER, "[" << TabletId << "]"
+ << " Got PeerClosed from# " << ev->Sender);
Reset(ctx);
}
@@ -256,9 +256,9 @@ namespace NTabletPipe {
class TConnectAcceptor: public IConnectAcceptor {
public:
- explicit TConnectAcceptor(ui64 tabletId)
+ explicit TConnectAcceptor(ui64 tabletId)
: TabletId(tabletId)
- , Active(false)
+ , Active(false)
, Stopped(false)
{
}
@@ -322,7 +322,7 @@ namespace NTabletPipe {
}
ActivatePending.clear();
- Active = true;
+ Active = true;
}
void Erase(TEvTabletPipe::TEvServerDestroyed::TPtr &ev) override {
@@ -331,10 +331,10 @@ namespace NTabletPipe {
ActivatePending.erase(serverId);
}
- bool IsActive() const override {
- return Active;
- }
-
+ bool IsActive() const override {
+ return Active;
+ }
+
bool IsStopped() const override {
return Stopped;
}
@@ -344,7 +344,7 @@ namespace NTabletPipe {
const ui64 TabletId;
THashSet<TActorId> ServerIds;
THashSet<TActorId> ActivatePending;
- bool Active;
+ bool Active;
bool Stopped;
};
diff --git a/ydb/core/tablet/tablet_pipe_ut.cpp b/ydb/core/tablet/tablet_pipe_ut.cpp
index 3fa38caf05a..2a528070fb6 100644
--- a/ydb/core/tablet/tablet_pipe_ut.cpp
+++ b/ydb/core/tablet/tablet_pipe_ut.cpp
@@ -184,28 +184,28 @@ namespace NKikimr {
NTabletPipe::TClientConfig Config;
};
- struct TEvPrivate {
- enum EEv {
- EvGetServerPipeInfo = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvServerPipeInfo,
-
- EvEnd
- };
-
- struct TEvGetServerPipeInfo : public TEventLocal<TEvGetServerPipeInfo, EvGetServerPipeInfo> {
- };
-
- struct TEvServerPipeInfo : public TEventLocal<TEvServerPipeInfo, EvServerPipeInfo> {
- TEvServerPipeInfo(ui32 opened, ui32 closed)
- : ServerPipesOpened(opened)
- , ServerPipesClosed(closed)
- {}
-
- ui32 ServerPipesOpened;
- ui32 ServerPipesClosed;
- };
- };
-
+ struct TEvPrivate {
+ enum EEv {
+ EvGetServerPipeInfo = EventSpaceBegin(TEvents::ES_PRIVATE),
+ EvServerPipeInfo,
+
+ EvEnd
+ };
+
+ struct TEvGetServerPipeInfo : public TEventLocal<TEvGetServerPipeInfo, EvGetServerPipeInfo> {
+ };
+
+ struct TEvServerPipeInfo : public TEventLocal<TEvServerPipeInfo, EvServerPipeInfo> {
+ TEvServerPipeInfo(ui32 opened, ui32 closed)
+ : ServerPipesOpened(opened)
+ , ServerPipesClosed(closed)
+ {}
+
+ ui32 ServerPipesOpened;
+ ui32 ServerPipesClosed;
+ };
+ };
+
class TConsumerTablet : public TActor<TConsumerTablet>, public NTabletFlatExecutor::TTabletExecutedFlat {
public:
TConsumerTablet(const TActorId &tablet, TTabletStorageInfo *info)
@@ -213,8 +213,8 @@ namespace NKikimr {
, TTabletExecutedFlat(info, tablet, nullptr)
, PipeConnectAcceptor(NTabletPipe::CreateConnectAcceptor(TabletID()))
, RejectAll(false)
- , ServerPipesOpened(0)
- , ServerPipesClosed(0)
+ , ServerPipesOpened(0)
+ , ServerPipesClosed(0)
{
}
@@ -239,7 +239,7 @@ namespace NKikimr {
HFunc(TEvents::TEvPing, Handle);
HFunc(TEvents::TEvPoisonPill, Handle);
HFunc(TEvConsumerTablet::TEvReject, Handle);
- HFunc(TEvPrivate::TEvGetServerPipeInfo, Handle);
+ HFunc(TEvPrivate::TEvGetServerPipeInfo, Handle);
default:
HandleDefaultEvents(ev, ctx);
}
@@ -279,14 +279,14 @@ namespace NKikimr {
Y_UNUSED(ev);
Y_UNUSED(ctx);
Cout << "Server pipe is opened\n";
- ++ServerPipesOpened;
+ ++ServerPipesOpened;
}
void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
Y_UNUSED(ctx);
Cout << "Pipe reset on server\n";
- ++ServerPipesClosed;
+ ++ServerPipesClosed;
}
void Handle(TEvTabletPipe::TEvServerDestroyed::TPtr &ev, const TActorContext &ctx) {
@@ -302,11 +302,11 @@ namespace NKikimr {
RejectAll = true;
}
- void Handle(TEvPrivate::TEvGetServerPipeInfo::TPtr &ev, const TActorContext &ctx) {
- Cout << "Server pipes opened: " << ServerPipesOpened << ", closed: " << ServerPipesClosed << "\n";
- ctx.Send(ev->Sender, new TEvPrivate::TEvServerPipeInfo(ServerPipesOpened, ServerPipesClosed));
- }
-
+ void Handle(TEvPrivate::TEvGetServerPipeInfo::TPtr &ev, const TActorContext &ctx) {
+ Cout << "Server pipes opened: " << ServerPipesOpened << ", closed: " << ServerPipesClosed << "\n";
+ ctx.Send(ev->Sender, new TEvPrivate::TEvServerPipeInfo(ServerPipesOpened, ServerPipesClosed));
+ }
+
void OnDetach(const TActorContext &ctx) override {
Cout << "Consumer dead\n";
PipeConnectAcceptor->Detach(SelfId());
@@ -329,8 +329,8 @@ namespace NKikimr {
THolder<NTabletPipe::IConnectAcceptor> PipeConnectAcceptor;
bool RejectAll;
TActorId LastServerId;
- ui32 ServerPipesOpened;
- ui32 ServerPipesClosed;
+ ui32 ServerPipesOpened;
+ ui32 ServerPipesClosed;
};
class TConsumerTabletWithoutAcceptor : public TActor<TConsumerTabletWithoutAcceptor>, public NTabletFlatExecutor::TTabletExecutedFlat {
@@ -470,47 +470,47 @@ Y_UNIT_TEST_SUITE(TTabletPipeTest) {
Y_UNIT_TEST(TestKillClientBeforServerIdKnown) {
TTestBasicRuntime runtime;
SetupTabletServices(runtime);
- runtime.SetLogPriority(NKikimrServices::PIPE_SERVER, NActors::NLog::PRI_DEBUG);
- runtime.SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
-
+ runtime.SetLogPriority(NKikimrServices::PIPE_SERVER, NActors::NLog::PRI_DEBUG);
+ runtime.SetLogPriority(NKikimrServices::PIPE_CLIENT, NActors::NLog::PRI_DEBUG);
+
TActorId sender = runtime.AllocateEdgeActor();
CreateTestBootstrapper(runtime, CreateTestTabletInfo(TTestTxConfig::TxTablet1, TTabletTypes::TX_DUMMY), [](const TActorId & tablet, TTabletStorageInfo* info) {
- return new TConsumerTablet(tablet, info);
- });
-
- {
- TDispatchOptions options;
- options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot, 1));
- runtime.DispatchEvents(options);
- }
-
- NTabletPipe::TClientConfig config;
- config.ConnectToUserTablet = false;
-
- i32 i = 3;
- while (i --> 0) {
+ return new TConsumerTablet(tablet, info);
+ });
+
+ {
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot, 1));
+ runtime.DispatchEvents(options);
+ }
+
+ NTabletPipe::TClientConfig config;
+ config.ConnectToUserTablet = false;
+
+ i32 i = 3;
+ while (i --> 0) {
auto client = NTabletPipe::CreateClient(sender, TTestTxConfig::TxTablet1, config);
TActorId clientId = runtime.Register(client);
-
- // We want to close the client right after it has sent EvConnect to the target tablet but before
- // the client received the EvConnectResult
+
+ // We want to close the client right after it has sent EvConnect to the target tablet but before
+ // the client received the EvConnectResult
runtime.SetObserverFunc([clientId, sender](TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
- if (event->Type == TEvTabletPipe::EvConnect) {
- runtime.Send(new IEventHandle(clientId, sender, new TEvents::TEvPoisonPill()), 0);
- }
- return TTestActorRuntime::EEventAction::PROCESS;
- });
- runtime.DispatchEvents();
- }
-
- {
+ if (event->Type == TEvTabletPipe::EvConnect) {
+ runtime.Send(new IEventHandle(clientId, sender, new TEvents::TEvPoisonPill()), 0);
+ }
+ return TTestActorRuntime::EEventAction::PROCESS;
+ });
+ runtime.DispatchEvents();
+ }
+
+ {
ForwardToTablet(runtime, TTestTxConfig::TxTablet1, sender, new TEvPrivate::TEvGetServerPipeInfo());
- TAutoPtr<IEventHandle> handle;
- const TEvPrivate::TEvServerPipeInfo* ev = runtime.GrabEdgeEvent<TEvPrivate::TEvServerPipeInfo>(handle);
- UNIT_ASSERT_VALUES_EQUAL(ev->ServerPipesOpened, ev->ServerPipesClosed);
- }
- }
-
+ TAutoPtr<IEventHandle> handle;
+ const TEvPrivate::TEvServerPipeInfo* ev = runtime.GrabEdgeEvent<TEvPrivate::TEvServerPipeInfo>(handle);
+ UNIT_ASSERT_VALUES_EQUAL(ev->ServerPipesOpened, ev->ServerPipesClosed);
+ }
+ }
+
Y_UNIT_TEST(TestSendWithoutWaitOpenToWrongTablet) {
TTestBasicRuntime runtime;
SetupTabletServices(runtime);
diff --git a/ydb/core/tablet/tablet_responsiveness_pinger.h b/ydb/core/tablet/tablet_responsiveness_pinger.h
index 57aba0ee3b7..93954ea0b4c 100644
--- a/ydb/core/tablet/tablet_responsiveness_pinger.h
+++ b/ydb/core/tablet/tablet_responsiveness_pinger.h
@@ -19,8 +19,8 @@ class TTabletResponsivenessPinger : public TActorBootstrapped<TTabletResponsiven
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TABLET_RESPONSIVENESS_PINGER;
- }
-
+ }
+
TTabletResponsivenessPinger(TTabletSimpleCounter &counter, TDuration pingInterval);
void Bootstrap(const TActorContext &ctx);
diff --git a/ydb/core/tablet/tablet_sys.h b/ydb/core/tablet/tablet_sys.h
index 62caf1b1d86..661d75a4e0a 100644
--- a/ydb/core/tablet/tablet_sys.h
+++ b/ydb/core/tablet/tablet_sys.h
@@ -214,8 +214,8 @@ class TTablet : public TActor<TTablet> {
bool NeedCleanupOnLockedPath;
ui32 GcCounter;
THolder<NTabletPipe::IConnectAcceptor> PipeConnectAcceptor;
- TInstant BoostrapTime;
- TInstant ActivateTime;
+ TInstant BoostrapTime;
+ TInstant ActivateTime;
bool Leader;
ui32 FollowerId;
ui32 DiscoveredLastBlocked;
diff --git a/ydb/core/tablet_flat/flat_comp_shard.cpp b/ydb/core/tablet_flat/flat_comp_shard.cpp
index 76e1844ca31..a3a58f923e2 100644
--- a/ydb/core/tablet_flat/flat_comp_shard.cpp
+++ b/ydb/core/tablet_flat/flat_comp_shard.cpp
@@ -1780,7 +1780,7 @@ namespace NCompShard {
auto cmpFullyInside = [&nulls](const TItem& a, const TSerializedCellVec& boundary) noexcept -> bool {
auto left = a.Slice.LastKey.GetCells();
if (Y_UNLIKELY(!left)) {
- return false; // +inf
+ return false; // +inf
}
if (int cmp = ComparePartKeys(left, boundary.GetCells(), nulls)) {
return cmp < 0;
@@ -1792,7 +1792,7 @@ namespace NCompShard {
auto cmpFullyOutside = [&nulls](const TItem& a, const TSerializedCellVec& boundary) noexcept -> bool {
auto right = a.Slice.FirstKey.GetCells();
if (Y_UNLIKELY(!right)) {
- return false; // -inf
+ return false; // -inf
}
return ComparePartKeys(boundary.GetCells(), right, nulls) <= 0;
};
@@ -1825,7 +1825,7 @@ namespace NCompShard {
auto cmpSplitFullyInside = [&nulls](const TSplitItem& a, const TSerializedCellVec& boundary) noexcept -> bool {
auto left = a.Slice.LastKey.GetCells();
if (Y_UNLIKELY(!left)) {
- return false; // +inf
+ return false; // +inf
}
if (int cmp = ComparePartKeys(left, boundary.GetCells(), nulls)) {
return cmp < 0;
diff --git a/ydb/core/tablet_flat/flat_cxx_database.h b/ydb/core/tablet_flat/flat_cxx_database.h
index f6e1efd42ae..27a260bcb27 100644
--- a/ydb/core/tablet_flat/flat_cxx_database.h
+++ b/ydb/core/tablet_flat/flat_cxx_database.h
@@ -57,7 +57,7 @@ public:
: TRawTypeValue(&value, sizeof(value), type)
{}
- template <typename ElementType>
+ template <typename ElementType>
TTypeValue(const TVector<ElementType> &value, NScheme::TTypeId type = NScheme::NTypeIds::String)
: TRawTypeValue(value.empty() ? (const ElementType*)0xDEADBEEFDEADBEEF : value.data(), value.size() * sizeof(ElementType), type)
{}
@@ -187,7 +187,7 @@ public:
return *reinterpret_cast<const std::pair<ui64, i64>*>(Data());
}
- template <typename ElementType>
+ template <typename ElementType>
operator TVector<ElementType>() const {
static_assert(std::is_pod<ElementType>::value, "ElementType should be a POD type");
Y_VERIFY(Type() == NScheme::NTypeIds::String || Type() == NScheme::NTypeIds::String4k || Type() == NScheme::NTypeIds::String2m);
@@ -198,7 +198,7 @@ public:
return TVector<ElementType>(begin, end);
}
- template <typename ElementType>
+ template <typename ElementType>
void ExtractArray(THashSet<ElementType> &container) const {
static_assert(std::is_pod<ElementType>::value, "ElementType should be a POD type");
Y_VERIFY(Type() == NScheme::NTypeIds::String || Type() == NScheme::NTypeIds::String4k || Type() == NScheme::NTypeIds::String2m);
@@ -233,8 +233,8 @@ template <> struct NSchemeTypeMapper<NScheme::NTypeIds::Timestamp> { typedef ui6
template <> struct NSchemeTypeMapper<NScheme::NTypeIds::Interval> { typedef i64 Type; };
/// only for compatibility with old code
-template <NScheme::TTypeId ValType>
-class TConvertTypeValue : public TRawTypeValue {
+template <NScheme::TTypeId ValType>
+class TConvertTypeValue : public TRawTypeValue {
public:
TConvertTypeValue(const TRawTypeValue& value)
: TRawTypeValue(value.Data(), value.Size(), value.IsEmpty() ? 0 : ValType)
@@ -470,15 +470,15 @@ template <typename ColumnType, typename VectorType>
struct TConvertValue<ColumnType, TVector<VectorType>, TRawTypeValue> {
TVector<VectorType> Value;
- TConvertValue(const TRawTypeValue& value) {
+ TConvertValue(const TRawTypeValue& value) {
Y_VERIFY(value.Type() == NScheme::NTypeIds::String);
Y_VERIFY(value.Size() % sizeof(VectorType) == 0);
- const size_t count = value.Size() / sizeof(VectorType);
- Value.reserve(count);
- for (TUnalignedMemoryIterator<VectorType> it(value.Data(), value.Size()); !it.AtEnd(); it.Next()) {
- Value.emplace_back(it.Cur());
- }
- Y_VERIFY(Value.size() == count);
+ const size_t count = value.Size() / sizeof(VectorType);
+ Value.reserve(count);
+ for (TUnalignedMemoryIterator<VectorType> it(value.Data(), value.Size()); !it.AtEnd(); it.Next()) {
+ Value.emplace_back(it.Cur());
+ }
+ Y_VERIFY(Value.size() == count);
}
operator const TVector<VectorType>&() const {
@@ -641,7 +641,7 @@ struct Schema {
using Precharge = AutoPrecharge;
template <TColumnId _ColumnId, NScheme::TTypeId _ColumnType, bool _IsNotNull = false>
- struct Column {
+ struct Column {
constexpr static TColumnId ColumnId = _ColumnId;
constexpr static NScheme::TTypeId ColumnType = _ColumnType;
constexpr static bool IsNotNull = _IsNotNull;
@@ -655,8 +655,8 @@ struct Schema {
template <typename...>
struct TableColumns;
- template <typename T>
- struct TableColumns<T> {
+ template <typename T>
+ struct TableColumns<T> {
using Type = typename T::Type;
using TupleType = std::tuple<typename T::Type>;
using RealTupleType = std::tuple<typename NSchemeTypeMapper<T::ColumnType>::Type>;
@@ -683,8 +683,8 @@ struct Schema {
}
};
- template <typename T, typename... Ts>
- struct TableColumns<T, Ts...> : TableColumns<Ts...> {
+ template <typename T, typename... Ts>
+ struct TableColumns<T, Ts...> : TableColumns<Ts...> {
using Type = std::tuple<typename T::Type, typename Ts::Type...>;
using TupleType = std::tuple<typename T::Type, typename Ts::Type...>;
using RealTupleType = std::tuple<typename NSchemeTypeMapper<T::ColumnType>::Type, typename NSchemeTypeMapper<Ts::ColumnType>::Type...>;
@@ -733,7 +733,7 @@ struct Schema {
template <typename, typename, typename, typename> class RangeKeyOperations;
template <typename IteratorType, typename TableType, typename... KeyColumns>
- class TableSelector {
+ class TableSelector {
protected:
using KeyColumnsType = std::tuple<KeyColumns...>;
using KeyValuesType = std::tuple<typename KeyColumns::Type...>;
@@ -766,12 +766,12 @@ struct Schema {
return KeyOperations<TableType, KeyValuesType>(*Database, keyValues);
}
- template <typename... Keys>
+ template <typename... Keys>
auto Range(Keys... keyValues) {
return KeyPrefixOperations<IteratorType, TableType, typename first_n_of<sizeof...(Keys), KeyValuesType>::type>(*Database, keyValues...);
}
- template <typename... Keys>
+ template <typename... Keys>
auto Prefix(Keys... keyValues) {
return KeyPrefixOperations<IteratorType, TableType, typename first_n_of<sizeof...(Keys), KeyValuesType>::type>(*Database, keyValues...);
}
@@ -820,18 +820,18 @@ struct Schema {
template <typename... KeyColumnsTypes>
struct TableKey : TableColumns<KeyColumnsTypes...> {
private:
- template <typename... Ts>
- struct TableKeyMaterializer;
+ template <typename... Ts>
+ struct TableKeyMaterializer;
- template <typename T>
- struct TableKeyMaterializer<T> {
+ template <typename T>
+ struct TableKeyMaterializer<T> {
static void Materialize(TToughDb& database) {
database.Alter().AddColumnToKey(TableId, T::ColumnId);
}
};
- template <typename T, typename... Ts>
- struct TableKeyMaterializer<T, Ts...> : TableKeyMaterializer<Ts...> {
+ template <typename T, typename... Ts>
+ struct TableKeyMaterializer<T, Ts...> : TableKeyMaterializer<Ts...> {
static void Materialize(TToughDb& database) {
TableKeyMaterializer<T>::Materialize(database);
TableKeyMaterializer<Ts...>::Materialize(database);
@@ -1045,8 +1045,8 @@ struct Schema {
return nullptr;
}
return THolder<IteratorType>(database.IterateRangeGeneric<IteratorType>(TableId, NTable::TKeyRange{ }, columns).Release());
- }
-
+ }
+
static bool Precharge(TToughDb& database, NTable::TTagsRef columns) {
return Precharger<AutoPrecharge>::Precharge(database, TableId, {}, {}, columns, IteratorType::Direction);
}
@@ -1257,7 +1257,7 @@ struct Schema {
class EqualKeyIterator
: public KeyIterator<NTable::TTableIt, EqualKeyIterator<TableType, KeyValuesType>>
{
- public:
+ public:
using KeyColumnsType = typename TableType::TKey::KeyColumnsType;
using Iterator = KeyIterator<NTable::TTableIt, EqualKeyIterator<TableType, KeyValuesType>>;
@@ -1296,25 +1296,25 @@ struct Schema {
template <typename TableType, typename KeyIterator, typename... ColumnTypes>
class Rowset {
public:
- template <typename... Cs>
- static Columns<Cs...> GetColumns(Columns<Cs...>) { return Columns<Cs...>(); }
- template <typename... Cs>
- static Columns<Cs...> GetColumns(Columns<TableColumns<Cs...>>) { return Columns<Cs...>(); }
- using ColumnsType = decltype(GetColumns(Columns<ColumnTypes...>()));
-
+ template <typename... Cs>
+ static Columns<Cs...> GetColumns(Columns<Cs...>) { return Columns<Cs...>(); }
+ template <typename... Cs>
+ static Columns<Cs...> GetColumns(Columns<TableColumns<Cs...>>) { return Columns<Cs...>(); }
+ using ColumnsType = decltype(GetColumns(Columns<ColumnTypes...>()));
+
template <typename... Args>
Rowset(TToughDb& database, Args&&... args)
: Iterator(database, std::forward<Args>(args)...)
{}
-
- bool IsReady() const {
+
+ bool IsReady() const {
return Iterator.IsReady();
- }
-
- bool IsValid() const {
+ }
+
+ bool IsValid() const {
return Iterator.IsValid();
- }
-
+ }
+
bool IsOk() const {
return Iterator.IsReady() && Iterator.IsValid();
}
@@ -1327,34 +1327,34 @@ struct Schema {
return Iterator.Next();
}
- template <typename... ColumnType>
+ template <typename... ColumnType>
auto GetValue() const {
Y_VERIFY_DEBUG(IsReady(), "Rowset is not ready");
Y_VERIFY_DEBUG(IsValid(), "Rowset is not valid");
- typename Columns<ColumnType...>::Type value(GetColumnValue<ColumnType>()...);
- return value;
- }
-
- template <typename ColumnType>
+ typename Columns<ColumnType...>::Type value(GetColumnValue<ColumnType>()...);
+ return value;
+ }
+
+ template <typename ColumnType>
auto GetValueOrDefault(typename ColumnType::Type defaultValue = GetDefaultValue<ColumnType>(SFINAE::special())) const {
Y_VERIFY_DEBUG(IsReady(), "Rowset is not ready");
Y_VERIFY_DEBUG(IsValid(), "Rowset is not valid");
- typename ColumnType::Type value(HaveValue<ColumnType>() ? GetColumnValue<ColumnType>() : defaultValue);
- return value;
- }
-
+ typename ColumnType::Type value(HaveValue<ColumnType>() ? GetColumnValue<ColumnType>() : defaultValue);
+ return value;
+ }
+
auto GetKey() const {
return ColumnsValueTuple<typename TableType::TKey::KeyColumnsType>::Get(*this);
- }
-
- template <typename ColumnType>
- bool HaveValue() const {
+ }
+
+ template <typename ColumnType>
+ bool HaveValue() const {
size_t index = GetIndex<ColumnType>();
TDbTupleRef tuple = Iterator.GetValues();
auto& cell = tuple.Columns[index];
- return !cell.IsNull();
- }
-
+ return !cell.IsNull();
+ }
+
TString DbgPrint(const NScheme::TTypeRegistry& typeRegistry) {
Y_VERIFY_DEBUG(IsReady(), "Rowset is not ready");
Y_VERIFY_DEBUG(IsValid(), "Rowset is not valid");
@@ -1366,7 +1366,7 @@ struct Schema {
return ColumnType::Default;
}
- template <typename ColumnType>
+ template <typename ColumnType>
static typename ColumnType::Type GetNullValue(SFINAE::general) {
return typename ColumnType::Type();
}
@@ -1392,7 +1392,7 @@ struct Schema {
}
template <typename ColumnType>
- typename ColumnType::Type GetColumnValue() const {
+ typename ColumnType::Type GetColumnValue() const {
size_t index = GetIndex<ColumnType>();
TDbTupleRef tuple = Iterator.GetValues();
auto& cell = tuple.Columns[index];
@@ -1400,17 +1400,17 @@ struct Schema {
if (cell.IsNull())
return GetNullValue<ColumnType>(SFINAE::special());
return TConvert<ColumnType, typename ColumnType::Type>::Convert(TRawTypeValue(cell.Data(), cell.Size(), type));
- }
-
+ }
+
KeyIterator Iterator;
- };
+ };
};
-
+
template <typename IteratorType, typename TableType>
class AnyKeyOperations: public Operations {
public:
using KeyColumnsType = typename TableType::TKey::KeyColumnsType;
-
+
template <typename KeyIterator, typename... Columns>
using Rowset = typename Operations::template Rowset<KeyIterator, Columns...>;
using Iterator = typename Operations::template AnyKeyIterator<IteratorType, TableType>;
@@ -1515,7 +1515,7 @@ struct Schema {
template <typename IteratorType, typename TableType, typename... KeyValuesTypes>
class GreaterOrEqualKeyOperations<IteratorType, TableType, std::tuple<KeyValuesTypes...>>: public Operations {
- public:
+ public:
using KeyColumnsType = typename TableType::TKey::KeyColumnsType;
using KeyValuesType = typename first_n_of<sizeof...(KeyValuesTypes), typename TableType::TKey::RealKeyValuesType>::type;
@@ -1555,11 +1555,11 @@ struct Schema {
return RangeKeyOperations<IteratorType, TableType, MinKeyValuesType, MaxKeyValuesType>(*Database, KeyValues, keyValues...);
}
- template <typename... ColumnTypes>
+ template <typename... ColumnTypes>
auto Select() {
return Rowset<TableType, Iterator, ColumnTypes...>(*Database, KeyValues, Columns<ColumnTypes...>::GetColumnIds());
- }
-
+ }
+
auto Select() {
return Rowset<TableType, Iterator, typename TableType::TColumns>(*Database, KeyValues, Columns<typename TableType::TColumns>::GetColumnIds());
}
@@ -1782,7 +1782,7 @@ struct Schema {
return Update(TNull<ColumnTypes>()...);
}
- template <typename... UpdateTypes>
+ template <typename... UpdateTypes>
KeyOperations& Update(const UpdateTypes&... updates) {
std::array<TUpdateOp, sizeof...(UpdateTypes)> update_ops = {{updates...}};
Database->Update(TableId, NTable::ERowOp::Upsert, TTupleToRawTypeValue<KeyValuesType, KeyColumnsType>(KeyValues), update_ops);
@@ -1845,8 +1845,8 @@ struct Schema {
}
};
- template <typename Type, typename... Types>
- struct SchemaTables: SchemaTables<Types...> {
+ template <typename Type, typename... Types>
+ struct SchemaTables: SchemaTables<Types...> {
static bool Precharge(TToughDb& database) {
return SchemaTables<Type>::Precharge(database) & SchemaTables<Types...>::Precharge(database);
}
@@ -1866,8 +1866,8 @@ struct Schema {
}
};
- template <typename Type>
- struct SchemaTables<Type> {
+ template <typename Type>
+ struct SchemaTables<Type> {
static TString GetTableName(const TString& typeName) {
return typeName.substr(typeName.rfind(':') + 1);
}
@@ -1979,87 +1979,87 @@ public:
}
}
- void NoMoreReadsForTx() {
- return Database.NoMoreReadsForTx();
- }
-
+ void NoMoreReadsForTx() {
+ return Database.NoMoreReadsForTx();
+ }
+
protected:
TToughDb& Database;
};
-namespace NHelpers {
-
-// Fills NTable::TScheme::TTableSchema from static NIceDb::Schema
-template <class TTable>
-struct TStaticSchemaFiller {
- template <typename...>
- struct TFiller;
-
- template <typename Column>
- struct TFiller<Column> {
- static void Fill(NTable::TScheme::TTableSchema& schema) {
- schema.Columns[Column::ColumnId] = NTable::TColumn(
- TTable::template TableColumns<Column>::GetColumnName(),
- Column::ColumnId,
- Column::ColumnType);
- }
- };
-
- template <typename Column, typename... Columns>
- struct TFiller<Column, Columns...> {
- static void Fill(NTable::TScheme::TTableSchema& schema) {
- TFiller<Column>::Fill(schema);
- TFiller<Columns...>::Fill(schema);
- }
- };
-
- template <typename... Columns>
- using TColumnsType = typename TTable::template TableColumns<Columns...>;
-
- template <typename... Columns>
- static void FillColumns(NTable::TScheme::TTableSchema& schema, TColumnsType<Columns...>) {
- TFiller<Columns...>::Fill(schema);
- }
-
- template <typename...>
- struct TKeyFiller;
-
- template <typename Key>
- struct TKeyFiller<Key> {
- static void Fill(NTable::TScheme::TTableSchema& schema, i32 index) {
- schema.KeyColumns.push_back(Key::ColumnId);
- auto& column = schema.Columns[Key::ColumnId];
- column.KeyOrder = index;
- }
- };
-
- template <typename Key, typename... Keys>
- struct TKeyFiller<Key, Keys...> {
- static void Fill(NTable::TScheme::TTableSchema& schema, i32 index) {
- TKeyFiller<Key>::Fill(schema, index);
- TKeyFiller<Keys...>::Fill(schema, index + 1);
- }
- };
-
- template <typename... Keys>
- using TKeysType = typename TTable::template TableKey<Keys...>;
-
- template <typename... Keys>
- static void FillKeys(NTable::TScheme::TTableSchema& schema, TKeysType<Keys...>) {
- TKeyFiller<Keys...>::Fill(schema, 0);
- }
-
- static void Fill(NTable::TScheme::TTableSchema& schema) {
- FillColumns(schema, typename TTable::TColumns());
- FillKeys(schema, typename TTable::TKey());
-
- for (const auto& c : schema.Columns) {
- schema.ColumnNames[c.second.Name] = c.second.Id;
- }
- }
-};
-
-} // namespace NHelpers
-
+namespace NHelpers {
+
+// Fills NTable::TScheme::TTableSchema from static NIceDb::Schema
+template <class TTable>
+struct TStaticSchemaFiller {
+ template <typename...>
+ struct TFiller;
+
+ template <typename Column>
+ struct TFiller<Column> {
+ static void Fill(NTable::TScheme::TTableSchema& schema) {
+ schema.Columns[Column::ColumnId] = NTable::TColumn(
+ TTable::template TableColumns<Column>::GetColumnName(),
+ Column::ColumnId,
+ Column::ColumnType);
+ }
+ };
+
+ template <typename Column, typename... Columns>
+ struct TFiller<Column, Columns...> {
+ static void Fill(NTable::TScheme::TTableSchema& schema) {
+ TFiller<Column>::Fill(schema);
+ TFiller<Columns...>::Fill(schema);
+ }
+ };
+
+ template <typename... Columns>
+ using TColumnsType = typename TTable::template TableColumns<Columns...>;
+
+ template <typename... Columns>
+ static void FillColumns(NTable::TScheme::TTableSchema& schema, TColumnsType<Columns...>) {
+ TFiller<Columns...>::Fill(schema);
+ }
+
+ template <typename...>
+ struct TKeyFiller;
+
+ template <typename Key>
+ struct TKeyFiller<Key> {
+ static void Fill(NTable::TScheme::TTableSchema& schema, i32 index) {
+ schema.KeyColumns.push_back(Key::ColumnId);
+ auto& column = schema.Columns[Key::ColumnId];
+ column.KeyOrder = index;
+ }
+ };
+
+ template <typename Key, typename... Keys>
+ struct TKeyFiller<Key, Keys...> {
+ static void Fill(NTable::TScheme::TTableSchema& schema, i32 index) {
+ TKeyFiller<Key>::Fill(schema, index);
+ TKeyFiller<Keys...>::Fill(schema, index + 1);
+ }
+ };
+
+ template <typename... Keys>
+ using TKeysType = typename TTable::template TableKey<Keys...>;
+
+ template <typename... Keys>
+ static void FillKeys(NTable::TScheme::TTableSchema& schema, TKeysType<Keys...>) {
+ TKeyFiller<Keys...>::Fill(schema, 0);
+ }
+
+ static void Fill(NTable::TScheme::TTableSchema& schema) {
+ FillColumns(schema, typename TTable::TColumns());
+ FillKeys(schema, typename TTable::TKey());
+
+ for (const auto& c : schema.Columns) {
+ schema.ColumnNames[c.second.Name] = c.second.Id;
+ }
+ }
+};
+
+} // namespace NHelpers
+
}
}
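
The NHelpers::TStaticSchemaFiller restored above converts a compile-time NIceDb::Schema table description into a runtime NTable::TScheme::TTableSchema. A minimal sketch mirroring the SchemaFillerTest added in flat_cxx_database_ut.cpp below; Schema::TestTable is the table type that test file defines:

    NTable::TScheme::TTableSchema schema;
    NIceDb::NHelpers::TStaticSchemaFiller<Schema::TestTable>::Fill(schema);
    // Fill() populates schema.Columns, schema.KeyColumns and schema.ColumnNames
    // from the static column and key declarations of Schema::TestTable.
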
diff --git a/ydb/core/tablet_flat/flat_cxx_database_ut.cpp b/ydb/core/tablet_flat/flat_cxx_database_ut.cpp
index 96d6ce8ef47..0423d82e624 100644
--- a/ydb/core/tablet_flat/flat_cxx_database_ut.cpp
+++ b/ydb/core/tablet_flat/flat_cxx_database_ut.cpp
@@ -165,11 +165,11 @@ Y_UNIT_TEST_SUITE(TFlatCxxDatabaseTest) {
DB.Commit(stamp, true);
}
- // SelectRow
+ // SelectRow
{
TDummyEnv env;
DB.Begin(++stamp, env);
- for (ui64 i = 0; i < 1000; ++i) {
+ for (ui64 i = 0; i < 1000; ++i) {
auto row = db.Table<Schema::TestTable>().Key(i).Select<
Schema::TestTable::Value,
Schema::TestTable::Name,
@@ -188,20 +188,20 @@ Y_UNIT_TEST_SUITE(TFlatCxxDatabaseTest) {
bool boolValue = row.GetValue<Schema::TestTable::BoolValue>();
ESomeEnum enumValue = row.GetValue<Schema::TestTable::EnumValue>();
TInstant instantValue = row.GetValue<Schema::TestTable::InstantValue>();
- UNIT_ASSERT_EQUAL(value, i);
- UNIT_ASSERT_EQUAL(ToString(value), name);
- UNIT_ASSERT_EQUAL(boolValue, (i % 2 == 0));
+ UNIT_ASSERT_EQUAL(value, i);
+ UNIT_ASSERT_EQUAL(ToString(value), name);
+ UNIT_ASSERT_EQUAL(boolValue, (i % 2 == 0));
UNIT_ASSERT_EQUAL(enumValue, ESomeEnum::SomeValue1);
UNIT_ASSERT_EQUAL(instantValue, timestamp);
UNIT_ASSERT_EQUAL(row.GetValue<Schema::TestTable::EmptyValue>(), 13);
UNIT_ASSERT_EQUAL(row.GetValueOrDefault<Schema::TestTable::EmptyValue>(), 13);
UNIT_ASSERT_EQUAL(row.GetValueOrDefault<Schema::TestTable::EmptyValue>(i), i);
- }
+ }
DB.Commit(stamp, true);
- }
-
+ }
+
// All
- {
+ {
TDummyEnv env;
DB.Begin(++stamp, env);
UNIT_ASSERT(db.Table<Schema::TestTable>().Precharge());
@@ -302,7 +302,7 @@ Y_UNIT_TEST_SUITE(TFlatCxxDatabaseTest) {
DB.Commit(stamp, true);
}
- // GreaterOrEqual
+ // GreaterOrEqual
{
TDummyEnv env;
DB.Begin(++stamp, env);
@@ -605,30 +605,30 @@ Y_UNIT_TEST_SUITE(TFlatCxxDatabaseTest) {
UNIT_ASSERT(scheme.Tables.find(1)->second.ColumnNames.find("testtest")->second == 1);
UNIT_ASSERT(scheme.Tables.find(1)->second.ColumnNames.find("test") == scheme.Tables.find(1)->second.ColumnNames.end());
}
-
- Y_UNIT_TEST(SchemaFillerTest) {
- NTable::TScheme::TTableSchema schema;
- NIceDb::NHelpers::TStaticSchemaFiller<Schema::TestTable>::Fill(schema);
-
- UNIT_ASSERT_VALUES_EQUAL(schema.Columns.size(), 8);
- UNIT_ASSERT_VALUES_EQUAL(schema.ColumnNames.size(), 8);
- UNIT_ASSERT_VALUES_EQUAL(schema.KeyColumns.size(), 1);
- const TVector<std::pair<TString, NScheme::TTypeId>> columns = {
- {"ID", NScheme::NTypeIds::Uint64},
- {"Value", NScheme::NTypeIds::Uint64},
- {"Name", NScheme::NTypeIds::Utf8},
- {"BoolValue", NScheme::NTypeIds::Bool},
- {"EmptyValue", NScheme::NTypeIds::Uint64},
- {"ProtoValue", NScheme::NTypeIds::String},
- {"EnumValue", NScheme::NTypeIds::Uint64},
- {"InstantValue", NScheme::NTypeIds::Uint64}
- };
- for (const auto& col : columns) {
- ui32 id = schema.ColumnNames.at(col.first);
- UNIT_ASSERT_VALUES_EQUAL(schema.Columns.at(id).Name, col.first);
- UNIT_ASSERT_VALUES_EQUAL(schema.Columns.at(id).PType, col.second);
- }
- }
+
+ Y_UNIT_TEST(SchemaFillerTest) {
+ NTable::TScheme::TTableSchema schema;
+ NIceDb::NHelpers::TStaticSchemaFiller<Schema::TestTable>::Fill(schema);
+
+ UNIT_ASSERT_VALUES_EQUAL(schema.Columns.size(), 8);
+ UNIT_ASSERT_VALUES_EQUAL(schema.ColumnNames.size(), 8);
+ UNIT_ASSERT_VALUES_EQUAL(schema.KeyColumns.size(), 1);
+ const TVector<std::pair<TString, NScheme::TTypeId>> columns = {
+ {"ID", NScheme::NTypeIds::Uint64},
+ {"Value", NScheme::NTypeIds::Uint64},
+ {"Name", NScheme::NTypeIds::Utf8},
+ {"BoolValue", NScheme::NTypeIds::Bool},
+ {"EmptyValue", NScheme::NTypeIds::Uint64},
+ {"ProtoValue", NScheme::NTypeIds::String},
+ {"EnumValue", NScheme::NTypeIds::Uint64},
+ {"InstantValue", NScheme::NTypeIds::Uint64}
+ };
+ for (const auto& col : columns) {
+ ui32 id = schema.ColumnNames.at(col.first);
+ UNIT_ASSERT_VALUES_EQUAL(schema.Columns.at(id).Name, col.first);
+ UNIT_ASSERT_VALUES_EQUAL(schema.Columns.at(id).PType, col.second);
+ }
+ }
}
}
diff --git a/ydb/core/tablet_flat/flat_database.cpp b/ydb/core/tablet_flat/flat_database.cpp
index bcaa79bb333..c4591195155 100644
--- a/ydb/core/tablet_flat/flat_database.cpp
+++ b/ydb/core/tablet_flat/flat_database.cpp
@@ -23,18 +23,18 @@ namespace NTable {
TDatabase::TDatabase(TDatabaseImpl *databaseImpl) noexcept
: DatabaseImpl(databaseImpl ? databaseImpl : new TDatabaseImpl(0, new TScheme, nullptr))
- , NoMoreReadsFlag(true)
+ , NoMoreReadsFlag(true)
{
-}
-
+}
+
TDatabase::~TDatabase() { }
const TScheme& TDatabase::GetScheme() const noexcept
{
return *DatabaseImpl->Scheme;
-}
-
+}
+
TIntrusiveConstPtr<TRowScheme> TDatabase::GetRowScheme(ui32 table) const noexcept
{
return Require(table)->GetScheme();
@@ -70,8 +70,8 @@ TAutoPtr<TTableIt> TDatabase::Iterate(ui32 table, TRawVals key, TTagsRef tags, E
IteratedTables.insert(table);
return Require(table)->Iterate(key, tags, Env, seekBy(key, mode), TRowVersion::Max());
-}
-
+}
+
TAutoPtr<TTableIt> TDatabase::IterateExact(ui32 table, TRawVals key, TTagsRef tags, TRowVersion snapshot) const noexcept
{
Y_VERIFY(!NoMoreReadsFlag, "Trying to read after reads prohibited, table %u", table);
@@ -176,29 +176,29 @@ EReady TDatabase::Select(ui32 table, TRawVals key, TTagsRef tags, TRowState &row
void TDatabase::CalculateReadSize(TSizeEnv& env, ui32 table, TRawVals minKey, TRawVals maxKey,
TTagsRef tags, ui64 flg, ui64 items, ui64 bytes,
EDirection direction, TRowVersion snapshot)
-{
+{
Y_VERIFY(!NoMoreReadsFlag, "Trying to do precharge after reads prohibited, table %u", table);
Require(table)->Precharge(minKey, maxKey, tags, &env, flg, items, bytes, direction, snapshot);
-}
-
+}
+
bool TDatabase::Precharge(ui32 table, TRawVals minKey, TRawVals maxKey,
TTagsRef tags, ui64 flg, ui64 items, ui64 bytes,
EDirection direction, TRowVersion snapshot)
-{
+{
Y_VERIFY(!NoMoreReadsFlag, "Trying to do precharge after reads prohibited, table %u", table);
auto res = Require(table)->Precharge(minKey, maxKey, tags, Env, flg, items, bytes, direction, snapshot);
Change->Stats.ChargeSieved += res.Sieved;
Change->Stats.ChargeWeeded += res.Weeded;
return res.Ready == EReady::Data;
-}
-
+}
+
void TDatabase::Update(ui32 table, ERowOp rop, TRawVals key, TArrayRef<const TUpdateOp> ops, TRowVersion rowVersion)
{
Y_VERIFY_DEBUG(rowVersion != TRowVersion::Max(), "Updates cannot have v{max} as row version");
Redo->EvUpdate(table, rop, key, ops, rowVersion);
-}
-
+}
+
void TDatabase::UpdateTx(ui32 table, ERowOp rop, TRawVals key, TArrayRef<const TUpdateOp> ops, ui64 txId)
{
Redo->EvUpdateTx(table, rop, key, ops, txId);
@@ -231,10 +231,10 @@ const TRowVersionRanges& TDatabase::GetRemovedRowVersions(ui32 table) const
return empty;
}
-void TDatabase::NoMoreReadsForTx() {
- NoMoreReadsFlag = true;
-}
-
+void TDatabase::NoMoreReadsForTx() {
+ NoMoreReadsFlag = true;
+}
+
void TDatabase::Begin(TTxStamp stamp, IPages& env)
{
Y_VERIFY(!Redo, "Transaction already in progress");
@@ -243,7 +243,7 @@ void TDatabase::Begin(TTxStamp stamp, IPages& env)
Redo = new NRedo::TWriter{ Annex.Get(), DatabaseImpl->AnnexByteLimit() };
Change = MakeHolder<TChange>(Stamp = stamp, DatabaseImpl->Serial() + 1);
Env = &env;
- NoMoreReadsFlag = false;
+ NoMoreReadsFlag = false;
}
TPartView TDatabase::GetPartView(ui32 tableId, const TLogoBlobID &bundle) const {
@@ -252,8 +252,8 @@ TPartView TDatabase::GetPartView(ui32 tableId, const TLogoBlobID &bundle) const
TVector<TPartView> TDatabase::GetTableParts(ui32 tableId) const {
return Require(tableId)->GetAllParts();
-}
-
+}
+
TVector<TIntrusiveConstPtr<TColdPart>> TDatabase::GetTableColdParts(ui32 tableId) const {
return Require(tableId)->GetColdParts();
}
@@ -278,22 +278,22 @@ ui64 TDatabase::GetTableMemSize(ui32 tableId, TEpoch epoch) const {
return Require(tableId)->GetMemSize(epoch);
}
-ui64 TDatabase::GetTableMemRowCount(ui32 tableId) const {
- return Require(tableId)->GetMemRowCount();
-}
-
-ui64 TDatabase::GetTableIndexSize(ui32 tableId) const {
+ui64 TDatabase::GetTableMemRowCount(ui32 tableId) const {
+ return Require(tableId)->GetMemRowCount();
+}
+
+ui64 TDatabase::GetTableIndexSize(ui32 tableId) const {
return Require(tableId)->Stat().Parts.IndexBytes;
-}
-
+}
+
ui64 TDatabase::GetTableSearchHeight(ui32 tableId) const {
return Require(tableId)->GetSearchHeight();
}
-ui64 TDatabase::EstimateRowSize(ui32 tableId) const {
- return Require(tableId)->EstimateRowSize();
-}
-
+ui64 TDatabase::EstimateRowSize(ui32 tableId) const {
+ return Require(tableId)->EstimateRowSize();
+}
+
const TDbStats& TDatabase::Counters() const noexcept
{
return DatabaseImpl->Stats;
@@ -308,8 +308,8 @@ TDatabase::TChg TDatabase::Head(ui32 table) const noexcept
return { wrap.Serial, wrap->Head() };
}
-}
-
+}
+
TString TDatabase::SnapshotToLog(ui32 table, TTxStamp stamp)
{
auto scn = DatabaseImpl->Serial() + 1;
@@ -321,8 +321,8 @@ TString TDatabase::SnapshotToLog(ui32 table, TTxStamp stamp)
NRedo::TWriter{ }
.EvBegin(ui32(ECompatibility::Head), ui32(ECompatibility::Edge), scn, stamp)
.EvFlush(table, stamp, epoch).Dump();
-}
-
+}
+
ui32 TDatabase::TxSnapTable(ui32 table)
{
Require(table);
@@ -333,8 +333,8 @@ ui32 TDatabase::TxSnapTable(ui32 table)
TAutoPtr<TSubset> TDatabase::Subset(ui32 table, TArrayRef<const TLogoBlobID> bundle, TEpoch before) const
{
return Require(table)->Subset(bundle, before);
-}
-
+}
+
TAutoPtr<TSubset> TDatabase::Subset(ui32 table, TEpoch before, TRawVals from, TRawVals to) const
{
auto subset = Require(table)->Subset(before);
@@ -364,7 +364,7 @@ TBundleSlicesMap TDatabase::LookupSlices(ui32 table, TArrayRef<const TLogoBlobID
{
return Require(table)->LookupSlices(bundles);
}
-
+
void TDatabase::ReplaceSlices(ui32 table, TBundleSlicesMap slices)
{
return DatabaseImpl->ReplaceSlices(table, std::move(slices));
@@ -383,8 +383,8 @@ void TDatabase::ReplaceTxStatus(ui32 table, TArrayRef<const TIntrusiveConstPtr<T
void TDatabase::Merge(ui32 table, TPartView partView)
{
return DatabaseImpl->Merge(table, std::move(partView));
-}
-
+}
+
void TDatabase::Merge(ui32 table, TIntrusiveConstPtr<TColdPart> part)
{
return DatabaseImpl->Merge(table, std::move(part));
@@ -401,25 +401,25 @@ TAlter& TDatabase::Alter()
Y_VERIFY(!*Redo, "Scheme change must be done before any data updates");
return *(Alter_ ? Alter_ : (Alter_ = new TAlter()));
-}
-
+}
+
void TDatabase::DebugDumpTable(ui32 table, IOutputStream& str, const NScheme::TTypeRegistry& typeRegistry) const {
str << "Table " << table << Endl;
if (auto &wrap = DatabaseImpl->Get(table, false))
wrap->DebugDump(str, Env, typeRegistry);
- else
- str << "unknown" << Endl;
-}
-
+ else
+ str << "unknown" << Endl;
+}
+
void TDatabase::DebugDump(IOutputStream& str, const NScheme::TTypeRegistry& typeRegistry) const {
for (const auto& it: DatabaseImpl->Scheme->Tables) {
if (DatabaseImpl->Get(it.first, false)) {
str << "======= " << it.second.Name << " ======\n";
DebugDumpTable(it.first, str, typeRegistry);
- }
- }
-}
-
+ }
+ }
+}
+
TKeyRangeCache* TDatabase::DebugGetTableErasedKeysCache(ui32 table) const {
if (auto &wrap = DatabaseImpl->Get(table, false)) {
return wrap->GetErasedKeysCache();
@@ -500,11 +500,11 @@ TDatabase::TProd TDatabase::Commit(TTxStamp stamp, bool commit, TCookieAllocator
if (Alter_) {
auto delta = Alter_->Flush();
-
+
if (DatabaseImpl->Apply(*delta, &prefix))
Y_PROTOBUF_SUPPRESS_NODISCARD delta->SerializeToString(&Change->Scheme);
- }
-
+ }
+
for (auto &one: Change->Snapshots) {
one.Epoch = Require(one.Table)->Snapshot();
prefix.EvFlush(one.Table, Stamp, one.Epoch);
@@ -545,8 +545,8 @@ TDatabase::TProd TDatabase::Commit(TTxStamp stamp, bool commit, TCookieAllocator
} else if (Change->Deleted.size() != Change->Garbage.size()) {
Y_Fail(NFmt::Do(*Change) << " has inconsistent garbage data");
}
- }
-
+ }
+
Redo = nullptr;
Annex = nullptr;
Alter_ = nullptr;
@@ -596,10 +596,10 @@ TCompactionStats TDatabase::GetCompactionStats(ui32 table) const
return Require(table)->GetCompactionStats();
}
-// NOTE: This helper should be used only to dump local DB contents in GDB
-void DebugDumpDb(const TDatabase &db) {
- NScheme::TTypeRegistry typeRegistry;
- db.DebugDump(Cout, typeRegistry);
-}
-
+// NOTE: This helper should be used only to dump local DB contents in GDB
+void DebugDumpDb(const TDatabase &db) {
+ NScheme::TTypeRegistry typeRegistry;
+ db.DebugDump(Cout, typeRegistry);
+}
+
}}
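
The NoMoreReadsFlag handling restored in this file follows a simple transaction-lifecycle guard: reads are allowed between Begin() and NoMoreReadsForTx(), and any read attempted afterwards trips a verification failure. A minimal sketch of that pattern, with a plain bool and assert standing in for the member flag and Y_VERIFY:

    #include <cassert>

    // Minimal sketch of the read-guard pattern in TDatabase (not the real class).
    class TReadGuardedDb {
    public:
        TReadGuardedDb() : NoMoreReadsFlag(true) {}   // reads are forbidden outside a transaction

        void Begin()            { NoMoreReadsFlag = false; }  // transaction starts, reads allowed
        void NoMoreReadsForTx() { NoMoreReadsFlag = true;  }  // commit path closes the read window

        void Select(unsigned table) const {
            // Mirrors Y_VERIFY(!NoMoreReadsFlag, "Trying to read after reads prohibited, ...")
            assert(!NoMoreReadsFlag && "Trying to read after reads prohibited");
            (void)table; // the real code would consult the table here
        }

    private:
        bool NoMoreReadsFlag;
    };

    // Usage: db.Begin(); db.Select(1); db.NoMoreReadsForTx(); // a later Select() would assert
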
diff --git a/ydb/core/tablet_flat/flat_database.h b/ydb/core/tablet_flat/flat_database.h
index 9581c88d968..98baa8f4383 100644
--- a/ydb/core/tablet_flat/flat_database.h
+++ b/ydb/core/tablet_flat/flat_database.h
@@ -5,7 +5,7 @@
#include "flat_dbase_scheme.h"
#include "flat_dbase_change.h"
#include "flat_dbase_misc.h"
-#include "flat_iterator.h"
+#include "flat_iterator.h"
#include "util_basics.h"
namespace NKikimr {
@@ -75,8 +75,8 @@ public:
template<class TIteratorType>
TAutoPtr<TIteratorType> IterateRangeGeneric(ui32 table, const TKeyRange& range, TTagsRef tags, TRowVersion snapshot = TRowVersion::Max()) const noexcept;
-    // NOTE: the row references data in some internal buffers that get invalidated on the next Select() or Commit() call
- EReady Select(ui32 table, TRawVals key, TTagsRef tags, TRowState& row,
+    // NOTE: the row references data in some internal buffers that get invalidated on the next Select() or Commit() call
+ EReady Select(ui32 table, TRawVals key, TTagsRef tags, TRowState& row,
ui64 readFlags = 0, TRowVersion snapshot = TRowVersion::Max()) const noexcept;
EReady Select(ui32 table, TRawVals key, TTagsRef tags, TRowState& row, TSelectStats& stats,
@@ -91,7 +91,7 @@ public:
TTagsRef tags, ui64 readFlags, ui64 itemsLimit, ui64 bytesLimit,
EDirection direction = EDirection::Forward,
TRowVersion snapshot = TRowVersion::Max());
-
+
void Update(ui32 table, ERowOp, TRawVals key, TArrayRef<const TUpdateOp>, TRowVersion rowVersion = TRowVersion::Min());
void UpdateTx(ui32 table, ERowOp, TRawVals key, TArrayRef<const TUpdateOp>, ui64 txId);
@@ -114,7 +114,7 @@ public:
*/
const TRowVersionRanges& GetRemovedRowVersions(ui32 table) const;
- void NoMoreReadsForTx();
+ void NoMoreReadsForTx();
TAlter& Alter(); /* Begin DDL ALTER script */
@@ -132,13 +132,13 @@ public:
void EnumerateTableTxStatusParts(ui32 table, const std::function<void(const TIntrusiveConstPtr<TTxStatusPart>&)>& callback) const;
void EnumerateTxStatusParts(const std::function<void(const TIntrusiveConstPtr<TTxStatusPart>&)>& callback) const;
ui64 GetTableMemSize(ui32 table, TEpoch epoch = TEpoch::Max()) const;
- ui64 GetTableMemRowCount(ui32 tableId) const;
+ ui64 GetTableMemRowCount(ui32 tableId) const;
ui64 GetTableIndexSize(ui32 table) const;
ui64 GetTableSearchHeight(ui32 table) const;
ui64 EstimateRowSize(ui32 table) const;
const TCounters& Counters() const noexcept;
TString SnapshotToLog(ui32 table, TTxStamp);
-
+
TAutoPtr<TSubset> Subset(ui32 table, TArrayRef<const TLogoBlobID> bundle, TEpoch before) const;
TAutoPtr<TSubset> Subset(ui32 table, TEpoch before, TRawVals from, TRawVals to) const;
TAutoPtr<TSubset> ScanSnapshot(ui32 table, TRowVersion snapshot = TRowVersion::Max());
@@ -154,7 +154,7 @@ public:
void DebugDumpTable(ui32 table, IOutputStream& str, const NScheme::TTypeRegistry& typeRegistry) const;
void DebugDump(IOutputStream& str, const NScheme::TTypeRegistry& typeRegistry) const;
-
+
TKeyRangeCache* DebugGetTableErasedKeysCache(ui32 table) const;
// executor interface
@@ -168,23 +168,23 @@ public:
TCompactionStats GetCompactionStats(ui32 table) const;
-private:
+private:
TTable* Require(ui32 tableId) const noexcept;
private:
const THolder<TDatabaseImpl> DatabaseImpl;
ui64 Stamp = Max<ui64>();
- bool NoMoreReadsFlag;
+ bool NoMoreReadsFlag;
IPages* Env = nullptr;
THolder<TChange> Change;
TAutoPtr<TAlter> Alter_;
TAutoPtr<TAnnex> Annex;
TAutoPtr<NRedo::TWriter> Redo;
- mutable TDeque<TPartSimpleIt> TempIterators; // Keeps the last result of Select() valid
+ mutable TDeque<TPartSimpleIt> TempIterators; // Keeps the last result of Select() valid
mutable THashSet<ui32> IteratedTables;
};
-
+
}}
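
The header also documents that the mutable TempIterators deque "keeps the last result of Select() valid". The likely mechanism, a const read method stashing the iterator whose buffers the returned row points into and dropping it on the next call, can be illustrated roughly as below; the class and iterator types are hypothetical stand-ins, not the real TPartSimpleIt machinery.

    #include <deque>
    #include <string>

    // Hypothetical iterator owning the buffer a returned row points into.
    struct TFakeIter { std::string Buffer; };

    class TSelectCache {
    public:
        // Const interface, but the cache of live iterators is mutable,
        // mirroring "mutable TDeque<TPartSimpleIt> TempIterators".
        const std::string& Select(const std::string& key) const {
            TempIterators.clear();                                // the previous result is invalidated here
            TempIterators.push_back(TFakeIter{"row-for-" + key}); // keep the new buffer alive
            return TempIterators.back().Buffer;                   // reference stays valid until the next Select()
        }

    private:
        mutable std::deque<TFakeIter> TempIterators;
    };
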
diff --git a/ydb/core/tablet_flat/flat_dbase_naked.h b/ydb/core/tablet_flat/flat_dbase_naked.h
index b6dc4ae68e8..493867a7f87 100644
--- a/ydb/core/tablet_flat/flat_dbase_naked.h
+++ b/ydb/core/tablet_flat/flat_dbase_naked.h
@@ -340,7 +340,7 @@ namespace NTable {
for (auto it : xrange(Annex.size()))
if (Annex[it].GId != *annex[it]) {
Y_FAIL("NRedo EvAnnex isn't match to assigned annex");
- }
+ }
} else {
Annex.reserve(annex.size());
diff --git a/ydb/core/tablet_flat/flat_dbase_scheme.cpp b/ydb/core/tablet_flat/flat_dbase_scheme.cpp
index 2106303d70f..fa12005009b 100644
--- a/ydb/core/tablet_flat/flat_dbase_scheme.cpp
+++ b/ydb/core/tablet_flat/flat_dbase_scheme.cpp
@@ -30,7 +30,7 @@ TAutoPtr<TSchemeChanges> TScheme::GetSnapshot() const {
delta.AddColumn(table, col.Name, it.first, col.PType, col.NotNull, col.Null);
delta.AddColumnToFamily(table, it.first, col.Family);
- }
+ }
for(ui32 columnId : itTable.second.KeyColumns)
delta.AddColumnToKey(table, columnId);
@@ -53,8 +53,8 @@ TAutoPtr<TSchemeChanges> TScheme::GetSnapshot() const {
delta.SetExecutorAllowLogBatching(Executor.AllowLogBatching);
delta.SetExecutorLogFlushPeriod(Executor.LogFlushPeriod);
delta.SetExecutorResourceProfile(Executor.ResourceProfile);
- delta.SetExecutorFastLogPolicy(Executor.LogFastTactic);
-
+ delta.SetExecutorFastLogPolicy(Executor.LogFastTactic);
+
return delta.Flush();
}
@@ -250,12 +250,12 @@ TAlter& TAlter::SetCompactionPolicy(ui32 tableId, const TCompactionPolicy& newPo
{
TAlterRecord &delta = *Log.AddDelta();
delta.SetDeltaType(TAlterRecord::SetCompactionPolicy);
- delta.SetTableId(tableId);
- newPolicy.Serialize(*delta.MutableCompactionPolicy());
+ delta.SetTableId(tableId);
+ newPolicy.Serialize(*delta.MutableCompactionPolicy());
return *this;
-}
-
+}
+
TAlter& TAlter::SetByKeyFilter(ui32 tableId, bool enabled)
{
TAlterRecord &delta = *Log.AddDelta();
diff --git a/ydb/core/tablet_flat/flat_dbase_scheme.h b/ydb/core/tablet_flat/flat_dbase_scheme.h
index bf6bb4c9ae0..1fd5ed2b480 100644
--- a/ydb/core/tablet_flat/flat_dbase_scheme.h
+++ b/ydb/core/tablet_flat/flat_dbase_scheme.h
@@ -18,7 +18,7 @@ using namespace NTabletFlatScheme;
using NKikimrSchemeOp::ECompactionStrategy;
-using TCompactionPolicy = NLocalDb::TCompactionPolicy;
+using TCompactionPolicy = NLocalDb::TCompactionPolicy;
class TScheme {
public:
@@ -74,16 +74,16 @@ public:
using TColumn = NTable::TColumn;
- struct TTableSchema {
- using TColumns = THashMap<ui32, TColumn>;
- using TColumnNames = THashMap<TString, ui32>;
-
- TColumns Columns;
- TColumnNames ColumnNames;
- TVector<ui32> KeyColumns; // key columns sorted by order
- };
-
- struct TTableInfo : public TTableSchema {
+ struct TTableSchema {
+ using TColumns = THashMap<ui32, TColumn>;
+ using TColumnNames = THashMap<TString, ui32>;
+
+ TColumns Columns;
+ TColumnNames ColumnNames;
+ TVector<ui32> KeyColumns; // key columns sorted by order
+ };
+
+ struct TTableInfo : public TTableSchema {
TTableInfo(TString name, ui32 id)
: Id(id)
, Name(std::move(name))
@@ -143,7 +143,7 @@ public:
inline TColumn* GetColumnInfo(TTableInfo* ptable, ui32 id) {
return ptable ? ptable->Columns.FindPtr(id) : nullptr;
- }
+ }
inline const TColumn* GetColumnInfo(const TTableInfo* ptable, ui32 id) const {
return ptable ? ptable->Columns.FindPtr(id) : nullptr;
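
GetColumnInfo above relies on THashMap::FindPtr, which yields a pointer to the mapped value or nullptr when the key (or the table pointer itself) is absent. With standard containers the same null-on-miss contract looks roughly like this sketch:

    #include <cstdint>
    #include <unordered_map>

    struct TColumnSketch { uint32_t Id; uint32_t Type; };

    using TColumnsSketch = std::unordered_map<uint32_t, TColumnSketch>;

    // Equivalent of "ptable ? ptable->Columns.FindPtr(id) : nullptr".
    const TColumnSketch* GetColumnInfoSketch(const TColumnsSketch* columns, uint32_t id) {
        if (!columns)
            return nullptr;                                       // unknown table
        auto it = columns->find(id);
        return it == columns->end() ? nullptr : &it->second;      // unknown column -> nullptr
    }
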
diff --git a/ydb/core/tablet_flat/flat_executor.cpp b/ydb/core/tablet_flat/flat_executor.cpp
index e3d60a1bbce..59afc462158 100644
--- a/ydb/core/tablet_flat/flat_executor.cpp
+++ b/ydb/core/tablet_flat/flat_executor.cpp
@@ -38,8 +38,8 @@
#include <library/cpp/actors/core/hfunc.h>
#include <util/generic/xrange.h>
-#include <util/generic/ymath.h>
-
+#include <util/generic/ymath.h>
+
namespace NKikimr {
namespace NTabletFlatExecutor {
@@ -72,9 +72,9 @@ TExecutor::TExecutor(
, ActivationQueue(new TActivationQueue())
, PendingQueue(new TActivationQueue())
, Emitter(new TIdEmitter)
- , CounterEventsInFlight(new TEvTabletCounters::TInFlightCookie)
+ , CounterEventsInFlight(new TEvTabletCounters::TInFlightCookie)
, Stats(new TExecutorStatsImpl())
- , LogFlushDelayOverrideUsec(-1, -1, 60*1000*1000)
+ , LogFlushDelayOverrideUsec(-1, -1, 60*1000*1000)
{}
TExecutor::~TExecutor() {
@@ -96,8 +96,8 @@ void TExecutor::Registered(TActorSystem *sys, const TActorId&)
Broker = new TBroker(this, Emitter);
Scans = new TScans(Logger.Get(), this, Emitter, Owner, OwnerActorId);
Memory = new TMemory(Logger.Get(), this, Emitter, Sprintf(" at tablet %" PRIu64, Owner->TabletID()));
- TString myTabletType = TTabletTypes::TypeToStr(Owner->TabletType());
- AppData()->Icb->RegisterSharedControl(LogFlushDelayOverrideUsec, myTabletType + "_LogFlushDelayOverrideUsec");
+ TString myTabletType = TTabletTypes::TypeToStr(Owner->TabletType());
+ AppData()->Icb->RegisterSharedControl(LogFlushDelayOverrideUsec, myTabletType + "_LogFlushDelayOverrideUsec");
// instantiate alert counters so even never reported alerts are created
GetServiceCounters(AppData()->Counters, "tablets")->GetCounter("alerts_pending_nodata", true);
@@ -114,13 +114,13 @@ void TExecutor::PassAway() {
<< NFmt::Do(*this) << " suiciding, " << NFmt::If(waste, true);
}
- if (CompactionLogic) {
+ if (CompactionLogic) {
CompactionLogic->Stop();
}
if (Broker || Scans || Memory) {
Send(NResourceBroker::MakeResourceBrokerID(), new NResourceBroker::TEvResourceBroker::TEvNotifyActorDied);
- }
+ }
Scans->Drop();
Owner = nullptr;
@@ -380,7 +380,7 @@ void TExecutor::Active(const TActorContext &ctx) {
CountersBaseline = MakeHolder<TExecutorCounters>();
Counters->RememberCurrentStateAsBaseline(*CountersBaseline);
LogicRedo->InstallCounters(Counters.Get(), nullptr);
-
+
CounterCacheFresh = new NMonitoring::TCounterForPtr;
CounterCacheStaging = new NMonitoring::TCounterForPtr;
CounterCacheMemTable = new NMonitoring::TCounterForPtr;
@@ -403,7 +403,7 @@ void TExecutor::Active(const TActorContext &ctx) {
Stats->IsFollower = false;
CompactionLogic->Start();
-
+
for (const auto &it: Database->GetScheme().Tables)
CompactionLogic->UpdateInMemStatsStep(it.first, 0, Database->GetTableMemSize(it.first));
@@ -1733,7 +1733,7 @@ void TExecutor::PostponeTransaction(TAutoPtr<TSeat> seat, TPageCollectionTxEnv &
}
pad->Seat->CPUBookkeepingTime += bookkeepingTimer.PassedReset();
- Counters->Cumulative()[TExecutorCounters::TX_POSTPONED].Increment(1);
+ Counters->Cumulative()[TExecutorCounters::TX_POSTPONED].Increment(1);
if (AppTxCounters && txType != UnknownTxType)
AppTxCounters->TxCumulative(txType, COUNTER_TT_POSTPONED).Increment(1);
@@ -1745,10 +1745,10 @@ void TExecutor::PostponeTransaction(TAutoPtr<TSeat> seat, TPageCollectionTxEnv &
Counters->Cumulative()[TExecutorCounters::TX_BYTES_READ].Increment(loadBytes);
Counters->Cumulative()[TExecutorCounters::TX_CACHE_MISSES].Increment(loadPages);
- if (AppTxCounters && txType != UnknownTxType) {
+ if (AppTxCounters && txType != UnknownTxType) {
AppTxCounters->TxCumulative(txType, COUNTER_TT_LOADED_BLOCKS).Increment(loadPages);
AppTxCounters->TxCumulative(txType, COUNTER_TT_BYTES_READ).Increment(loadBytes);
- }
+ }
Counters->Simple()[TExecutorCounters::CACHE_PINNED_SET] = PrivatePageCache->GetStats().PinnedSetSize;
Counters->Simple()[TExecutorCounters::CACHE_PINNED_LOAD] = PrivatePageCache->GetStats().PinnedLoadSize;
@@ -1773,8 +1773,8 @@ void TExecutor::CommitTransactionLog(TAutoPtr<TSeat> seat, TPageCollectionTxEnv
if (seat->Retries == 1) {
Counters->Cumulative()[TExecutorCounters::TX_CACHE_HITS].Increment(touchedBlocks);
- }
-
+ }
+
UnpinTransactionPages(*seat);
Memory->ReleaseMemory(*seat);
@@ -2234,16 +2234,16 @@ void TExecutor::CommitTransactionLog(TAutoPtr<TSeat> seat, TPageCollectionTxEnv
LogBatchFlushScheduled = true;
auto delay = Scheme().Executor.LogFlushPeriod;
- if (LogFlushDelayOverrideUsec != -1) {
- delay = TDuration::MicroSeconds(LogFlushDelayOverrideUsec);
- }
- if (delay.MicroSeconds() == 0) {
- ctx.Send(ctx.SelfID, new TEvents::TEvFlushLog());
- } else {
- Y_VERIFY_DEBUG(delay < TDuration::Minutes(1));
- delay = Min(delay, TDuration::Seconds(59));
- Schedule(delay, new TEvents::TEvFlushLog());
- }
+ if (LogFlushDelayOverrideUsec != -1) {
+ delay = TDuration::MicroSeconds(LogFlushDelayOverrideUsec);
+ }
+ if (delay.MicroSeconds() == 0) {
+ ctx.Send(ctx.SelfID, new TEvents::TEvFlushLog());
+ } else {
+ Y_VERIFY_DEBUG(delay < TDuration::Minutes(1));
+ delay = Min(delay, TDuration::Seconds(59));
+ Schedule(delay, new TEvents::TEvFlushLog());
+ }
}
if (NeedFollowerSnapshot || LogicSnap->MayFlush(false))
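
The log-flush scheduling in the hunk above picks the scheme's LogFlushPeriod, lets the LogFlushDelayOverrideUsec control override it when set (-1 means no override), flushes immediately on a zero delay, and otherwise clamps the delay below one minute. A sketch of just that decision, with plain microsecond integers in place of TDuration:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Returns the delay in microseconds before flushing the batched log,
    // or 0 when the flush should be sent immediately.
    uint64_t ChooseFlushDelayUs(uint64_t configuredPeriodUs, int64_t overrideUs) {
        uint64_t delayUs = configuredPeriodUs;
        if (overrideUs != -1)                                 // ICB control is set: override the scheme value
            delayUs = static_cast<uint64_t>(overrideUs);
        if (delayUs == 0)
            return 0;                                         // flush right away (TEvFlushLog to self)
        return std::min<uint64_t>(delayUs, 59'000'000);       // never schedule more than 59s ahead
    }

    int main() {
        assert(ChooseFlushDelayUs(500'000, -1) == 500'000);               // use the scheme period
        assert(ChooseFlushDelayUs(500'000, 0) == 0);                      // override forces an immediate flush
        assert(ChooseFlushDelayUs(500'000, 120'000'000) == 59'000'000);   // clamped below a minute
    }
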
@@ -2388,7 +2388,7 @@ void TExecutor::MakeLogSnapshot() {
BorrowLogic->SnapToLog(snap, *commit);
GcLogic->SnapToLog(snap, commit->Step);
LogicSnap->MakeSnap(snap, *commit, Logger.Get());
-
+
CommitManager->Commit(commit);
CompactionLogic->UpdateLogUsage(LogicRedo->GrabLogUsage());
@@ -2825,8 +2825,8 @@ void TExecutor::Handle(NBlockIO::TEvStat::TPtr &ev, const TActorContext &ctx) {
break;
}
}
-}
-
+}
+
void TExecutor::UtilizeSubset(const NTable::TSubset &subset,
const NTable::NFwd::TSeen &seen,
THashSet<TLogoBlobID> reusedBundles,
@@ -2919,13 +2919,13 @@ void TExecutor::Handle(NOps::TEvResult::TPtr &ev) {
void TExecutor::Handle(NOps::TEvResult *ops, TProdCompact *msg, bool cancelled) {
THPTimer partSwitchCpuTimer;
-
+
if (msg->Params->TaskId != 0) {
// We have taken over this task, mark it as finished in the broker
auto status = cancelled ? EResourceStatus::Cancelled : EResourceStatus::Finished;
Broker->FinishTask(msg->Params->TaskId, status);
}
-
+
const ui32 tableId = msg->Params->Table;
const bool abandoned = cancelled || !Scheme().GetTableInfo(tableId);
@@ -2963,15 +2963,15 @@ void TExecutor::Handle(NOps::TEvResult *ops, TProdCompact *msg, bool cancelled)
CheckYellow(std::move(msg->YellowMoveChannels), std::move(msg->YellowStopChannels), /* terminal */ true);
return Broken();
- }
-
+ }
+
ActiveTransaction = true;
const ui64 snapStamp = msg->Params->Edge.TxStamp ? msg->Params->Edge.TxStamp
: MakeGenStepPair(Generation(), msg->Step);
LogicRedo->FlushBatchedLog();
-
+
// now apply effects
NKikimrExecutorFlat::TTablePartSwitch proto;
proto.SetTableId(tableId);
@@ -2996,8 +2996,8 @@ void TExecutor::Handle(NOps::TEvResult *ops, TProdCompact *msg, bool cancelled)
sx->SetHead(ops->Subset->Head.ToProto());
} else {
Y_VERIFY(!hadFrozen, "Compacted frozen parts without correct head epoch");
- }
-
+ }
+
if (results) {
auto &gcDiscovered = commit->GcDelta.Created;
@@ -3201,8 +3201,8 @@ void TExecutor::Handle(NOps::TEvResult *ops, TProdCompact *msg, bool cancelled)
if (LogicSnap->MayFlush(false)) {
MakeLogSnapshot();
}
-}
-
+}
+
void TExecutor::UpdateUsedTabletMemory() {
UsedTabletMemory = 0;
// Estimate memory usage for internal executor structures.
@@ -3228,10 +3228,10 @@ void TExecutor::UpdateCounters(const TActorContext &ctx) {
TAutoPtr<TTabletCountersBase> executorCounters;
TAutoPtr<TTabletCountersBase> externalTabletCounters;
- if (CounterEventsInFlight.RefCount() == 1) {
+ if (CounterEventsInFlight.RefCount() == 1) {
UpdateUsedTabletMemory();
- if (Counters) {
+ if (Counters) {
const auto& dbCounters = Database->Counters();
@@ -3289,7 +3289,7 @@ void TExecutor::UpdateCounters(const TActorContext &ctx) {
Counters->Simple()[TExecutorCounters::DB_UNIQUE_OUTER_ITEMS].Set(0);
}
Counters->Simple()[TExecutorCounters::DB_UNIQUE_KEEP_BYTES].Set(BorrowLogic->GetKeepBytes());
- }
+ }
if (GcLogic) {
auto gcInfo = GcLogic->IntrospectStateSize();
@@ -3314,8 +3314,8 @@ void TExecutor::UpdateCounters(const TActorContext &ctx) {
Counters->Simple()[TExecutorCounters::USED_TABLET_TX_MEMORY].Set(memory.Static);
Counters->Simple()[TExecutorCounters::USED_DYNAMIC_TX_MEMORY].Set(memory.Dynamic);
- executorCounters = Counters->MakeDiffForAggr(*CountersBaseline);
- Counters->RememberCurrentStateAsBaseline(*CountersBaseline);
+ executorCounters = Counters->MakeDiffForAggr(*CountersBaseline);
+ Counters->RememberCurrentStateAsBaseline(*CountersBaseline);
if (ResourceMetrics && !Stats->IsFollower) {
// N.B. DB_UNIQUE_OUTER_BYTES is already part of DB_UNIQUE_DATA_BYTES, due to how BackingSize works
@@ -3332,15 +3332,15 @@ void TExecutor::UpdateCounters(const TActorContext &ctx) {
Counters->Simple()[TExecutorCounters::CONSUMED_STORAGE].Set(storageSize);
Counters->Simple()[TExecutorCounters::CONSUMED_MEMORY].Set(memorySize);
}
- }
+ }
- if (AppCounters) {
- externalTabletCounters = AppCounters->MakeDiffForAggr(*AppCountersBaseline);
- AppCounters->RememberCurrentStateAsBaseline(*AppCountersBaseline);
- }
+ if (AppCounters) {
+ externalTabletCounters = AppCounters->MakeDiffForAggr(*AppCountersBaseline);
+ AppCounters->RememberCurrentStateAsBaseline(*AppCountersBaseline);
+ }
- // tablet id + tablet type
- ui64 tabletId = Owner->TabletID();
+ // tablet id + tablet type
+ ui64 tabletId = Owner->TabletID();
auto tabletType = Owner->TabletType();
auto tenantPathId = Owner->Info()->TenantPathId;
@@ -3351,25 +3351,25 @@ void TExecutor::UpdateCounters(const TActorContext &ctx) {
if (ResourceMetrics) {
ResourceMetrics->TryUpdate(ctx);
}
- }
+ }
Schedule(TDuration::Seconds(15), new TEvPrivate::TEvUpdateCounters());
}
-float TExecutor::GetRejectProbability() const {
- // Limit number of in-flight TXs
- // TODO: make configurable
+float TExecutor::GetRejectProbability() const {
+ // Limit number of in-flight TXs
+ // TODO: make configurable
if (Stats->TxInFly > 10000)
- return 1.0;
-
+ return 1.0;
+
// Followers do not control compaction so let's always allow to read the data from follower
if (Stats->IsFollower)
- return 0.0;
-
+ return 0.0;
+
auto sigmoid = [](float x) -> float {
auto ex = exp(x);
return ex / (ex + 1.0); // N.B. better precision than 1 / (1 + exp(-x))
};
-
+
// Maps overload [0,1] to reject probability [0,1]
auto calcProbability = [&sigmoid](float x) -> float {
if (x < 0.0f) return 0.0f;
@@ -3380,13 +3380,13 @@ float TExecutor::GetRejectProbability() const {
auto scale = sigmoid(6.0f);
return 0.5f + 0.5f * (value - 0.5f) / (scale - 0.5f);
};
-
+
const float overloadFactor = CompactionLogic->GetOverloadFactor();
const float rejectProbability = calcProbability(overloadFactor);
-
+
return rejectProbability;
-}
-
+}
+
TString TExecutor::BorrowSnapshot(ui32 table, const TTableSnapshotContext &snap, TRawVals from, TRawVals to, ui64 loaner) const
{
@@ -3746,14 +3746,14 @@ void TExecutor::RenderHtmlPage(NMon::TEvRemoteHttpInfo::TPtr &ev) const {
Sort(columns);
for (auto icol : columns) {
const auto &col = tinfo.Columns.find(icol)->second;
- const bool isKey = (tinfo.KeyColumns.end() != std::find(tinfo.KeyColumns.begin(), tinfo.KeyColumns.end(), col.Id));
+ const bool isKey = (tinfo.KeyColumns.end() != std::find(tinfo.KeyColumns.begin(), tinfo.KeyColumns.end(), col.Id));
TABLER() {
TABLED() {str << col.Name;}
TABLED() {str << col.Id;}
TABLED() {str << tr.GetTypeName(col.PType);}
TABLED() {str << (isKey ? ToString(col.KeyOrder) : "");}
}
- }
+ }
}
}
}
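
GetRejectProbability() in this file maps a compaction overload factor in [0,1] to a reject probability in [0,1] through a rescaled sigmoid. The sketch below reproduces the visible formulas as free functions; the stretch factor 12.0f is an assumption (that part of the lambda falls outside the hunk), chosen so that an overload of 1 maps to sigmoid(6), consistent with the shown scale = sigmoid(6.0f).

    #include <cmath>
    #include <cstdio>

    // N.B. ex / (ex + 1) has better precision than 1 / (1 + exp(-x)) in this range.
    static float Sigmoid(float x) {
        float ex = std::exp(x);
        return ex / (ex + 1.0f);
    }

    // Maps overload in [0,1] to reject probability in [0,1].
    static float CalcRejectProbability(float x) {
        if (x < 0.0f) return 0.0f;
        if (x > 1.0f) return 1.0f;
        // Assumed stretch: x=0 -> sigmoid(-6), x=1 -> sigmoid(6); rescaled so the ends hit exactly 0 and 1.
        float value = Sigmoid((x - 0.5f) * 12.0f);
        float scale = Sigmoid(6.0f);
        return 0.5f + 0.5f * (value - 0.5f) / (scale - 0.5f);
    }

    int main() {
        for (float overload = 0.0f; overload <= 1.01f; overload += 0.25f)
            std::printf("overload=%.2f -> reject=%.3f\n", overload, CalcRejectProbability(overload));
    }
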
diff --git a/ydb/core/tablet_flat/flat_executor.h b/ydb/core/tablet_flat/flat_executor.h
index 27ac7e5bce5..df08b3a338a 100644
--- a/ydb/core/tablet_flat/flat_executor.h
+++ b/ydb/core/tablet_flat/flat_executor.h
@@ -14,7 +14,7 @@
#include "flat_exec_commit.h"
#include "flat_exec_read.h"
#include "flat_executor_misc.h"
-#include "flat_executor_compaction_logic.h"
+#include "flat_executor_compaction_logic.h"
#include "flat_executor_gclogic.h"
#include "flat_bio_events.h"
#include "flat_bio_stats.h"
@@ -320,9 +320,9 @@ class TExecutor
{
using ELnLev = NUtil::ELnLev;
- friend class TExecutorCompactionLogic;
+ friend class TExecutorCompactionLogic;
class TTxExecutorDbMon;
-
+
static constexpr ui64 PostponeTransactionMemThreshold = 250*1024*1024;
struct TEvPrivate {
@@ -404,7 +404,7 @@ class TExecutor
TLoadBlobQueue PendingBlobQueue;
// Used to control the number of in-flight events to the counter aggregator
- TIntrusivePtr<TEvTabletCounters::TInFlightCookie> CounterEventsInFlight;
+ TIntrusivePtr<TEvTabletCounters::TInFlightCookie> CounterEventsInFlight;
TTabletCountersWithTxTypes* AppTxCounters = nullptr;
@@ -438,8 +438,8 @@ class TExecutor
TActorContext OwnerCtx() const;
- TControlWrapper LogFlushDelayOverrideUsec;
-
+ TControlWrapper LogFlushDelayOverrideUsec;
+
ui64 Stamp() const noexcept;
void Registered(TActorSystem*, const TActorId&) override;
void PassAway() override;
@@ -627,9 +627,9 @@ public:
// database interface
const NTable::TScheme& Scheme() const noexcept override;
ui64 TabletId() const { return Owner->TabletID(); }
-
+
float GetRejectProbability() const override;
-
+
TActorId GetLauncher() const { return Launcher; }
};
diff --git a/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp b/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp
index 24559d1a797..bd52264b10e 100644
--- a/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp
+++ b/ydb/core/tablet_flat/flat_executor_borrowlogic.cpp
@@ -245,9 +245,9 @@ void TExecutorBorrowLogic::BorrowBundle(
loaners.end());
Sort(fullBorrow);
- // !!HACK: Allow to borrow the same bundle multiple times
- //Y_VERIFY(std::adjacent_find(fullBorrow.begin(), fullBorrow.end()) == fullBorrow.end());
- fullBorrow.erase(std::unique(fullBorrow.begin(), fullBorrow.end()), fullBorrow.end());
+ // !!HACK: Allow to borrow the same bundle multiple times
+ //Y_VERIFY(std::adjacent_find(fullBorrow.begin(), fullBorrow.end()) == fullBorrow.end());
+ fullBorrow.erase(std::unique(fullBorrow.begin(), fullBorrow.end()), fullBorrow.end());
StoreBorrowProto(bundleId, storedInfo, commit);
}
@@ -258,9 +258,9 @@ void TExecutorBorrowLogic::LoanBundle(
TLogCommit *commit)
{
auto storedInfoItPair = BorrowedInfo.insert(std::make_pair(bundleId, TBorrowedPartInfo()));
- Y_VERIFY(storedInfoItPair.second,
- "must not back-borrow parts at %" PRIu64 " part owner %" PRIu64 " existing loan from %" PRIu64 " new loan from %" PRIu64,
- SelfTabletId, bundleId.TabletID(), storedInfoItPair.first->second.LoanInfo.Lender, loaned.Lender);
+ Y_VERIFY(storedInfoItPair.second,
+ "must not back-borrow parts at %" PRIu64 " part owner %" PRIu64 " existing loan from %" PRIu64 " new loan from %" PRIu64,
+ SelfTabletId, bundleId.TabletID(), storedInfoItPair.first->second.LoanInfo.Lender, loaned.Lender);
HasFlag = true;
TBorrowedPartInfo &storedInfo = storedInfoItPair.first->second;
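
The "!!HACK" in BorrowBundle deduplicates the loaner list with the classic sort + unique + erase idiom instead of asserting that no duplicates exist. In standard C++ the same pattern looks like this (plain integers in place of tablet ids):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        // Pretend these are tablet ids of loaners, possibly with repeats.
        std::vector<uint64_t> fullBorrow = {7, 3, 7, 11, 3};

        std::sort(fullBorrow.begin(), fullBorrow.end());                     // group duplicates together
        fullBorrow.erase(std::unique(fullBorrow.begin(), fullBorrow.end()),  // shift unique values forward...
                         fullBorrow.end());                                  // ...and drop the leftover tail

        assert((fullBorrow == std::vector<uint64_t>{3, 7, 11}));
    }
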
diff --git a/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp b/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp
index ed43f6a8272..3d699e8f87b 100644
--- a/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp
+++ b/ydb/core/tablet_flat/flat_executor_compaction_logic.cpp
@@ -2,15 +2,15 @@
#include "flat_exec_broker.h"
#include "flat_dbase_scheme.h"
#include "flat_comp_create.h"
-
+
#include <ydb/core/base/appdata.h>
-
+
#include <library/cpp/monlib/service/pages/templates.h>
#include <util/generic/cast.h>
-namespace NKikimr {
-namespace NTabletFlatExecutor {
-
+namespace NKikimr {
+namespace NTabletFlatExecutor {
+
TCompactionLogicState::TSnapRequest::~TSnapRequest()
{}
@@ -37,15 +37,15 @@ void TCompactionLogic::Start() {
auto result = ReflectSchemeChanges();
Y_VERIFY(!result.StrategyChanges);
State->Snapshots.clear();
-}
-
+}
+
void TCompactionLogic::Stop() {
for (auto &kv : State->Tables) {
StopTable(kv.second);
}
State->Tables.clear();
-}
-
+}
+
TCompactionLogicState::TSnapshotState TCompactionLogic::SnapToLog(ui32 tableId) {
auto* info = State->Tables.FindPtr(tableId);
Y_VERIFY(info);
@@ -264,8 +264,8 @@ TReflectSchemeChangesResult TCompactionLogic::ReflectSchemeChanges()
}
return result;
-}
-
+}
+
THolder<NTable::ICompactionStrategy> TCompactionLogic::CreateStrategy(
ui32 tableId,
NKikimrSchemeOp::ECompactionStrategy strategy)
@@ -369,7 +369,7 @@ void TCompactionLogic::UpdateInMemStatsStep(ui32 table, ui32 steps, ui64 size) {
auto &mem = info->InMem;
mem.EstimatedSize = size;
mem.Steps += steps;
-
+
CheckInMemStats(table);
}
@@ -412,8 +412,8 @@ void TCompactionLogic::CheckInMemStats(ui32 table) {
mem.State = ECompactionState::PendingBackground;
}
}
-}
-
+}
+
void TCompactionLogic::UpdateLogUsage(TArrayRef<const NRedo::TUsage> usage)
{
for (auto &one : usage) {
@@ -521,7 +521,7 @@ TCompactionLogic::HandleCompaction(
{
const ui32 tableId = params->Table;
const auto edge = params->Edge;
-
+
TCompactionLogicState::TTableInfo *tableInfo = State->Tables.FindPtr(tableId);
Y_VERIFY(tableInfo, "Unexpected CompleteCompaction for a dropped table");
@@ -579,7 +579,7 @@ TCompactionLogic::HandleCompaction(
CheckInMemStats(tableId);
}
}
-
+
if (tableInfo->ForcedCompactionState == EForcedCompactionState::None) {
if (tableInfo->ForcedCompactionQueued) {
tableInfo->ForcedCompactionQueued = false;
@@ -706,28 +706,28 @@ ui32 TCompactionLogicState::TTableInfo::ComputeBackgroundPriority(
inMem.Steps * 100 / policy.InMemForceStepsToSnapshot);
if (!perc || perc < bckgPolicy.Threshold)
- return TCompactionLogic::BAD_PRIORITY;
+ return TCompactionLogic::BAD_PRIORITY;
return ComputeBackgroundPriority(inMem.CompactionTask, bckgPolicy, perc, now);
}
-float TCompactionLogic::GetOverloadFactor() const {
- float overloadFactor = 0;
- for (const auto& ti : State->Tables) {
+float TCompactionLogic::GetOverloadFactor() const {
+ float overloadFactor = 0;
+ for (const auto& ti : State->Tables) {
overloadFactor = Max(overloadFactor, ti.second.InMem.OverloadFactor);
overloadFactor = Max(overloadFactor, ti.second.Strategy->GetOverloadFactor());
- }
- return overloadFactor;
-}
-
-ui64 TCompactionLogic::GetBackingSize() const {
- ui64 size = 0;
- for (const auto& ti : State->Tables) {
+ }
+ return overloadFactor;
+}
+
+ui64 TCompactionLogic::GetBackingSize() const {
+ ui64 size = 0;
+ for (const auto& ti : State->Tables) {
size += ti.second.Strategy->GetBackingSize();
- }
- return size;
-}
-
+ }
+ return size;
+}
+
ui64 TCompactionLogic::GetBackingSize(ui64 ownerTabletId) const {
ui64 size = 0;
for (const auto& ti : State->Tables) {
@@ -744,7 +744,7 @@ void TCompactionLogic::OutputHtml(IOutputStream &out, const NTable::TScheme &sch
DIV_CLASS("row") { out
<< "InMem Size: " << xtable.second.InMem.EstimatedSize
<< ", Changes: " << xtable.second.InMem.Steps
- << ", Compaction state: " << xtable.second.InMem.State
+ << ", Compaction state: " << xtable.second.InMem.State
<< ", Backing size: " << xtable.second.Strategy->GetBackingSize()
<< ", Log overhead size: " << xtable.second.InMem.LogOverheadSize
<< ", count: " << xtable.second.InMem.LogOverheadCount;
@@ -767,4 +767,4 @@ void TCompactionLogic::OutputHtml(IOutputStream &out, const NTable::TScheme &sch
}
}
-}}
+}}
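
GetOverloadFactor() and GetBackingSize() restored in this file are straightforward folds over per-table state: the first takes the maximum overload across tables, the second sums backing sizes. A compact sketch with standard containers; TTableStateSketch is a stand-in for the fields consulted on TCompactionLogicState::TTableInfo:

    #include <algorithm>
    #include <cstdint>
    #include <unordered_map>

    struct TTableStateSketch {
        float InMemOverload = 0.0f;       // stand-in for InMem.OverloadFactor
        float StrategyOverload = 0.0f;    // stand-in for Strategy->GetOverloadFactor()
        uint64_t BackingSize = 0;         // stand-in for Strategy->GetBackingSize()
    };

    // Worst overload across all tables drives the executor's reject probability.
    float GetOverloadFactor(const std::unordered_map<uint32_t, TTableStateSketch>& tables) {
        float overload = 0.0f;
        for (const auto& [id, t] : tables)
            overload = std::max({overload, t.InMemOverload, t.StrategyOverload});
        return overload;
    }

    // Total bytes backing all tables, used for accounting.
    uint64_t GetBackingSize(const std::unordered_map<uint32_t, TTableStateSketch>& tables) {
        uint64_t size = 0;
        for (const auto& [id, t] : tables)
            size += t.BackingSize;
        return size;
    }
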
diff --git a/ydb/core/tablet_flat/flat_executor_compaction_logic.h b/ydb/core/tablet_flat/flat_executor_compaction_logic.h
index 87b22d476f8..b85a13a19ec 100644
--- a/ydb/core/tablet_flat/flat_executor_compaction_logic.h
+++ b/ydb/core/tablet_flat/flat_executor_compaction_logic.h
@@ -1,4 +1,4 @@
-#pragma once
+#pragma once
#include "defs.h"
#include "tablet_flat_executor.h"
#include "flat_executor_misc.h"
@@ -8,19 +8,19 @@
#include "util_fmt_line.h"
#include <ydb/core/base/localdb.h>
#include <library/cpp/time_provider/time_provider.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NTable{
class TScheme;
}
-namespace NTabletFlatExecutor {
-
+namespace NTabletFlatExecutor {
+
class TTableSnapshotContext;
-using TCompactionPolicy = NLocalDb::TCompactionPolicy;
-
+using TCompactionPolicy = NLocalDb::TCompactionPolicy;
+
enum class EForceCompaction {
Mem,
Borrowed,
@@ -103,14 +103,14 @@ struct TCompactionLogicState {
TTableInfo() = default;
~TTableInfo();
-
- ui32 ComputeBackgroundPriority(const TCompactionLogicState::TCompactionTask &task,
- const TCompactionPolicy::TBackgroundPolicy &policy,
- ui32 percentage,
- TInstant now) const;
- ui32 ComputeBackgroundPriority(const TCompactionLogicState::TInMem &inMem,
- const TCompactionPolicy &policy,
- TInstant now) const;
+
+ ui32 ComputeBackgroundPriority(const TCompactionLogicState::TCompactionTask &task,
+ const TCompactionPolicy::TBackgroundPolicy &policy,
+ ui32 percentage,
+ TInstant now) const;
+ ui32 ComputeBackgroundPriority(const TCompactionLogicState::TInMem &inMem,
+ const TCompactionPolicy &policy,
+ TInstant now) const;
};
struct TSnapshotState {
@@ -183,9 +183,9 @@ class TCompactionLogic {
const NTable::TCompactionParams* params,
TTableCompactionResult* ret);
-public:
- static constexpr ui32 BAD_PRIORITY = Max<ui32>();
-
+public:
+ static constexpr ui32 BAD_PRIORITY = Max<ui32>();
+
TCompactionLogic(
NUtil::ILogger*,
NTable::IResourceBroker*,
@@ -193,10 +193,10 @@ public:
TAutoPtr<TCompactionLogicState>,
TString taskSuffix = { });
~TCompactionLogic();
-
+
void Start();
void Stop();
-
+
TCompactionLogicState::TSnapshotState SnapToLog(ui32 tableId);
// Update priorities for background compaction tasks.
@@ -222,8 +222,8 @@ public:
void CheckInMemStats(ui32 table);
void UpdateLogUsage(TArrayRef<const NRedo::TUsage>);
void UpdateLogUsage(const NRedo::TUsage&);
- float GetOverloadFactor() const;
- ui64 GetBackingSize() const;
+ float GetOverloadFactor() const;
+ ui64 GetBackingSize() const;
ui64 GetBackingSize(ui64 ownerTabletId) const;
TTableCompactionResult CompleteCompaction(
@@ -242,6 +242,6 @@ public:
TTableCompactionChanges RemovedParts(ui32 tableId, TArrayRef<const TLogoBlobID> parts);
void OutputHtml(IOutputStream &out, const NTable::TScheme &scheme, const TCgiParameters& cgi);
-};
-
-}}
+};
+
+}}
diff --git a/ydb/core/tablet_flat/flat_executor_counters.h b/ydb/core/tablet_flat/flat_executor_counters.h
index a62035b9a9d..f100bc48a00 100644
--- a/ydb/core/tablet_flat/flat_executor_counters.h
+++ b/ydb/core/tablet_flat/flat_executor_counters.h
@@ -6,7 +6,7 @@ namespace NKikimr {
namespace NTabletFlatExecutor {
#define FLAT_EXECUTOR_SIMPLE_COUNTERS_MAP(XX) \
- XX(DB_TX_IN_FLY, "ExecutorTxInFly") \
+ XX(DB_TX_IN_FLY, "ExecutorTxInFly") \
XX(LOG_REDO_COUNT, "LogRedoItems") \
XX(LOG_REDO_MEMORY, "LogRedoMemory") \
XX(LOG_REDO_SOLIDS, "LogRedoLargeGlobIds") \
@@ -44,8 +44,8 @@ namespace NTabletFlatExecutor {
XX(GC_BLOBS_CREATED, "GcBlobsCreated") \
XX(GC_BLOBS_DELETED, "GcBlobsDeleted") \
XX(GC_BARRIERS_ACTIVE, "GcBarriersActive") \
- XX(CACHE_FRESH_SIZE, "CacheFreshSize") \
- XX(CACHE_STAGING_SIZE, "CacheStagingSize") \
+ XX(CACHE_FRESH_SIZE, "CacheFreshSize") \
+ XX(CACHE_STAGING_SIZE, "CacheStagingSize") \
XX(CACHE_WARM_SIZE, "CacheMemTableSize") \
XX(CACHE_PINNED_SET, "CachePinned") \
XX(CACHE_PINNED_LOAD, "CachePinnedLoad") \
@@ -84,7 +84,7 @@ namespace NTabletFlatExecutor {
XX(TX_QUEUED, "TxQueued") \
XX(TX_RETRIED, "TxRetried") \
XX(TX_FINISHED, "TxFinished") \
- XX(TX_POSTPONED, "TxPostponed") \
+ XX(TX_POSTPONED, "TxPostponed") \
XX(TX_MEM_REQUESTS, "TxMemoryRequests") \
XX(TX_MEM_CAPTURES, "TxMemoryCaptures") \
XX(TX_MEM_ATTACHES, "TxMemoryAttaches") \
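
The counter tables in this header use the X-macro idiom: FLAT_EXECUTOR_SIMPLE_COUNTERS_MAP(XX) lists each counter once, and callers expand it with different XX definitions so that ids and display names cannot drift apart. How the real code expands it is not shown here, so the illustration below is a self-contained toy version of the same idiom:

    #include <cstdio>

    // Single source of truth: each XX(id, name) row defines one counter.
    #define TOY_COUNTERS_MAP(XX) \
        XX(TX_IN_FLY,        "ExecutorTxInFly")  \
        XX(TX_POSTPONED,     "TxPostponed")      \
        XX(CACHE_FRESH_SIZE, "CacheFreshSize")

    // First expansion: enum ids.
    enum EToyCounter {
    #define XX(id, name) id,
        TOY_COUNTERS_MAP(XX)
    #undef XX
        TOY_COUNTER_COUNT
    };

    // Second expansion: matching display names, in the same order.
    static const char* const ToyCounterNames[] = {
    #define XX(id, name) name,
        TOY_COUNTERS_MAP(XX)
    #undef XX
    };

    int main() {
        for (int i = 0; i < TOY_COUNTER_COUNT; ++i)
            std::printf("%d -> %s\n", i, ToyCounterNames[i]);
    }
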
diff --git a/ydb/core/tablet_flat/flat_executor_database_ut.cpp b/ydb/core/tablet_flat/flat_executor_database_ut.cpp
index b983e3f2dbf..0168615c55b 100644
--- a/ydb/core/tablet_flat/flat_executor_database_ut.cpp
+++ b/ydb/core/tablet_flat/flat_executor_database_ut.cpp
@@ -5,29 +5,29 @@
#include <ydb/core/tablet_flat/test/libs/exec/fuzzy.h>
#include <library/cpp/testing/unittest/registar.h>
#include "flat_database.h"
-
+
#include <util/system/sanitizers.h>
#include <util/system/valgrind.h>
-namespace NKikimr {
-namespace NTabletFlatExecutor {
-
+namespace NKikimr {
+namespace NTabletFlatExecutor {
+
using ELookup = NTable::ELookup;
using TDbWrapper = NTable::TDbWrapper;
using ITestDb = NTable::ITestDb;
-
+
const ui64 MaxActionCount = 12000;
const ui64 MultiPageMaxActionCount = 10000;
class TFuzzyActor : public NFake::TNanny {
-public:
+public:
explicit TFuzzyActor(ui32 lives, ui64 limit)
: Respawn(lives)
, Actions(limit)
{
Spawn();
}
-
+
EDo Run() override
{
using TContext = TTransactionContext;
@@ -35,11 +35,11 @@ public:
Actions -= Min(Actions, ui32(1));
NFake::TFuncTx::TCall func;
-
+
const ui32 action = RandomNumber<ui32>(55);
const ui32 table = RandomNumber<ui32>(4);
const ui32 key = RandomNumber<ui32>(300);
-
+
if (Actions == 0) {
func = [this](ITestDb& testDb, TContext &txc) {
return Fuzzy.DropTables(testDb, txc);
@@ -69,12 +69,12 @@ public:
};
} else {
Y_FAIL("Random generator produced unexpected action value");
- }
-
+ }
+
QueueTx(func);
return !Actions ? EDo::Stop : Actions < Rebirth ? Spawn() : EDo::More;
- }
+ }
private:
EDo Spawn() noexcept
@@ -94,59 +94,59 @@ private:
ui32 Rebirth = 0; /* When to restart tablet */
NFake::TFuzzySet Fuzzy{ false };
-};
-
-
+};
+
+
class TDbTestPlayerActor : public NFake::TNanny {
-public:
+public:
explicit TDbTestPlayerActor(const TVector<NFake::TFuncTx::TCall>& actions)
: Actions(actions)
{
Y_VERIFY(actions.size(), "Have to pass at least one action");
}
-
+
EDo Run() override
{
QueueTx(std::move(Actions.at(Index)));
-
+
return ++Index < Actions.size() ? EDo::More : EDo::Stop;
- }
+ }
private:
TVector<NFake::TFuncTx::TCall> Actions;
size_t Index = 0;
-};
-
-// Mimics schema and transactions that happen inside coordinator
+};
+
+// Mimics schema and transactions that happen inside coordinator
class THeThing : public NFake::TNanny {
-private:
+private:
ui64 ActionCount = 0;
- const ui64 MaxActionCount;
+ const ui64 MaxActionCount;
bool SchemaReady = false;
-
- TIntrusivePtr<IRandomProvider> RandomProvider;
-
- using TTxId = ui64;
-
- const ui32 TxTable = 0;
- const ui32 AffectedTable = 4;
-
+
+ TIntrusivePtr<IRandomProvider> RandomProvider;
+
+ using TTxId = ui64;
+
+ const ui32 TxTable = 0;
+ const ui32 AffectedTable = 4;
+
TVector<TDeque<TTxId>> DatashardTxQueues;
THashMap<TTxId, ui32> UnconfirmedCount;
- TTxId LastTxId = 0;
- ui64 LastDatashardIdx = 0;
-
- void CreateSchema(TDbWrapper& db) {
+ TTxId LastTxId = 0;
+ ui64 LastDatashardIdx = 0;
+
+ void CreateSchema(TDbWrapper& db) {
if (std::exchange(SchemaReady, true))
- return;
-
+ return;
+
NTable::TAlter delta;
-
+
delta.AddTable("TxTable", TxTable);
delta.AddColumn(TxTable, "TxId", 1, NScheme::TUint64::TypeId, false);
delta.AddColumn(TxTable, "Plan", 2, NScheme::TUint64::TypeId, false);
delta.AddColumnToKey(TxTable, 1);
-
+
delta.AddTable("AffectedTable", AffectedTable);
delta.AddColumn(AffectedTable, "TxId", 1, NScheme::TUint64::TypeId, false);
delta.AddColumn(AffectedTable, "Datashard", 2, NScheme::TUint64::TypeId, false);
@@ -165,96 +165,96 @@ private:
}
db.Apply(*delta.Flush());
- }
-
- void AddRandomTx(TDbWrapper& db) {
- db.Apply(db.Update(TxTable).Key(LastTxId).Set("Plan", LastTxId));
- for (ui32 i = 0; i < 3; ++i) {
- // Choose random datashard to be Tx participant
- ui64 datashard = RandomNumber(DatashardTxQueues.size());
- DatashardTxQueues[datashard].push_back(LastTxId);
- db.Apply(db.Update(AffectedTable).Key(LastTxId, datashard).Set("Plan", LastTxId));
- UnconfirmedCount[LastTxId]++;
- }
- LastTxId++;
- }
-
- void CompleteDatashardTx(ui64 datashard, TDbWrapper& db) {
- if (DatashardTxQueues[datashard].empty())
- return;
-
- TTxId txId = DatashardTxQueues[datashard].front();
- DatashardTxQueues[datashard].pop_front();
- db.Apply(db.Erase(AffectedTable).Key(txId, datashard));
-
- ui32 cnt = --UnconfirmedCount[txId];
- if (cnt == 0) {
- // All participants confirmed Tx completion
- UnconfirmedCount.erase(txId);
- db.Apply(db.Erase(TxTable).Key(txId));
- }
- }
-
- // Inserts in both TxTable and AffectedTable
- void StartTransactions(TDbWrapper& db) {
+ }
+
+ void AddRandomTx(TDbWrapper& db) {
+ db.Apply(db.Update(TxTable).Key(LastTxId).Set("Plan", LastTxId));
+ for (ui32 i = 0; i < 3; ++i) {
+ // Choose random datashard to be Tx participant
+ ui64 datashard = RandomNumber(DatashardTxQueues.size());
+ DatashardTxQueues[datashard].push_back(LastTxId);
+ db.Apply(db.Update(AffectedTable).Key(LastTxId, datashard).Set("Plan", LastTxId));
+ UnconfirmedCount[LastTxId]++;
+ }
+ LastTxId++;
+ }
+
+ void CompleteDatashardTx(ui64 datashard, TDbWrapper& db) {
+ if (DatashardTxQueues[datashard].empty())
+ return;
+
+ TTxId txId = DatashardTxQueues[datashard].front();
+ DatashardTxQueues[datashard].pop_front();
+ db.Apply(db.Erase(AffectedTable).Key(txId, datashard));
+
+ ui32 cnt = --UnconfirmedCount[txId];
+ if (cnt == 0) {
+ // All participants confirmed Tx completion
+ UnconfirmedCount.erase(txId);
+ db.Apply(db.Erase(TxTable).Key(txId));
+ }
+ }
+
+ // Inserts in both TxTable and AffectedTable
+ void StartTransactions(TDbWrapper& db) {
CreateSchema(db);
- // Generate some new transactions
- ui64 newTxCount = RandomNumber(5);
- for (ui64 i = 0; i < newTxCount; ++i) {
- AddRandomTx(db);
- }
- }
-
- // Always deletes from AffectedTable and sometimes from TxTable
- void FinishTransactions(TDbWrapper& db) {
- // Finish some transactions on each datashard
- ui64 txCountInFlight = RandomNumber(1+UnconfirmedCount.size());
- do {
- if (RandomNumber(2))
- CompleteDatashardTx(LastDatashardIdx, db);
- LastDatashardIdx += 1;
- LastDatashardIdx %= DatashardTxQueues.size();
- } while (UnconfirmedCount.size() > txCountInFlight);
- }
-
- ui64 RandomNumber(ui64 limit) {
- Y_VERIFY(limit > 0, "Invalid limit specified [0,%" PRIu64 ")", limit);
+ // Generate some new transactions
+ ui64 newTxCount = RandomNumber(5);
+ for (ui64 i = 0; i < newTxCount; ++i) {
+ AddRandomTx(db);
+ }
+ }
+
+ // Always deletes from AffectedTable and sometimes from TxTable
+ void FinishTransactions(TDbWrapper& db) {
+ // Finish some transactions on each datashard
+ ui64 txCountInFlight = RandomNumber(1+UnconfirmedCount.size());
+ do {
+ if (RandomNumber(2))
+ CompleteDatashardTx(LastDatashardIdx, db);
+ LastDatashardIdx += 1;
+ LastDatashardIdx %= DatashardTxQueues.size();
+ } while (UnconfirmedCount.size() > txCountInFlight);
+ }
+
+ ui64 RandomNumber(ui64 limit) {
+ Y_VERIFY(limit > 0, "Invalid limit specified [0,%" PRIu64 ")", limit);
return RandomProvider->GenRand64() % limit;
- }
-
-public:
+ }
+
+public:
THeThing(ui64 maxActionCount, ui64 randomSeed)
: MaxActionCount(maxActionCount)
- , RandomProvider(CreateDeterministicRandomProvider(randomSeed))
- {
- DatashardTxQueues.resize(20);
- }
-
+ , RandomProvider(CreateDeterministicRandomProvider(randomSeed))
+ {
+ DatashardTxQueues.resize(20);
+ }
+
EDo Run() override
{
if (RandomNumber(1000) < 4)
return EDo::Born;
-
- ui32 action = RandomNumber(8);
- if (action < 3) {
+
+ ui32 action = RandomNumber(8);
+ if (action < 3) {
QueueTx([this](ITestDb& testDb, TTransactionContext&){ TDbWrapper db(testDb); this->StartTransactions(db); return true; });
- } else {
+ } else {
QueueTx([this](ITestDb& testDb, TTransactionContext&){ TDbWrapper db(testDb); this->FinishTransactions(db); return true; });
- }
-
+ }
+
return ++ActionCount < MaxActionCount ? EDo::More : EDo::Stop;
- }
-};
-
-
-// Generates a table with many rows and then does a SelectRange query for the whole table
-// If prefetch works properly, the SelectRange transaction is expected not to have restarts
+ }
+};
+
+
+// Generates a table with many rows and then does a SelectRange query for the whole table
+// If prefetch works properly, the SelectRange transaction is expected not to have restarts
class TFullScan : public NFake::TNanny {
public:
explicit TFullScan(ui64 rows) : Rows(rows) { }
-private:
+private:
EDo Run() override
{
if (++RowCount <= Rows) {
@@ -264,17 +264,17 @@ private:
} else if (RowCount > Rows + 1) {
Y_FAIL("Shouldn't request more task after EDo::Stop");
}
-
+
return RowCount <= Rows ? EDo::More : EDo::Stop;
}
-
+
void CreateSchema(TDbWrapper& db)
{
if (std::exchange(SchemaReady, true))
- return;
-
+ return;
+
NTable::TAlter delta;
-
+
delta.AddTable("table", Table);
delta.SetFamily(Table, AltFamily, NTable::NPage::ECache::None, NTable::NPage::ECodec::Plain);
delta.AddColumn(Table, "Id", 1, NScheme::TUint32::TypeId, false);
@@ -303,7 +303,7 @@ private:
restart on full scan.
*/
policy.Generations = {
- { 200 * 1024 * 1024, 8, 8, 300 * 1024 * 1024, comp_g0, false },
+ { 200 * 1024 * 1024, 8, 8, 300 * 1024 * 1024, comp_g0, false },
{ 400 * 1024 * 1024, 8, 8, 800 * 1024 * 1024, comp_g1, false }
};
@@ -311,55 +311,55 @@ private:
}
db.Apply(*delta.Flush());
- }
-
- void AddRandomRowTx(TDbWrapper& db) {
- CreateSchema(db);
-
- ui64 rowId = RowCount;
-
- // Add big rows with big values in order to produce many pages
+ }
+
+ void AddRandomRowTx(TDbWrapper& db) {
+ CreateSchema(db);
+
+ ui64 rowId = RowCount;
+
+ // Add big rows with big values in order to produce many pages
db.Apply(db.Update(Table).Key(rowId).Set("value", rowId).Set("large", TString(10000, 'A')));
- }
-
- bool DoFullScanTx(TDbWrapper& db) {
- try {
+ }
+
+ bool DoFullScanTx(TDbWrapper& db) {
+ try {
const std::array<ui32, 2> tags{{ 1 /* Id */, 2 /* value */ }};
-
+
db->Precharge(Table, { }, { }, tags, 0);
TAutoPtr<NTable::ITestIterator> it = db->Iterate(Table, { }, tags, ELookup::GreaterOrEqualThan);
while (it->Next(NTable::ENext::All) == NTable::EReady::Data) {
LastKey = it->GetValues().Columns[0].AsValue<ui32>();
- }
+ }
Y_VERIFY(LastKey + 1 == RowCount /* incomplete read */);
Y_VERIFY(Restarts == 1 /* exactly one precharge */);
- return true;
+ return true;
} catch (NTable::TIteratorNotReady&) {
Restarts++;
Cerr << "Full scan restart at id = " << LastKey << Endl;
- return false;
- }
- }
-
+ return false;
+ }
+ }
+
private:
const ui64 Rows = 0;
const ui32 Table = 1;
const ui32 AltFamily = 1;
-
+
ui32 Restarts = 0;
ui64 RowCount = 0;
ui64 LastKey = Max<ui64>();
bool SchemaReady = false;
-};
-
+};
+
void RunTest(IActor *test)
{
NFake::TRunner env;
-
+
env->SetLogPriority(NKikimrServices::TABLET_MAIN, NActors::NLog::PRI_CRIT);
env->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_INFO);
env->SetLogPriority(NKikimrServices::TABLET_OPS_HOST, NActors::NLog::PRI_INFO);
@@ -367,7 +367,7 @@ void RunTest(IActor *test)
env->SetLogPriority(NKikimrServices::OPS_BACKUP, NActors::NLog::PRI_INFO);
env->SetLogPriority(NKikimrServices::SAUSAGE_BIO, NActors::NLog::PRI_INFO);
env->SetLogPriority(NKikimrServices::TABLET_SAUSAGECACHE, NActors::NLog::PRI_INFO);
-
+
if (false) {
env->SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NActors::NLog::PRI_DEBUG);
env->SetLogPriority(NKikimrServices::TABLET_FLATBOOT, NActors::NLog::PRI_DEBUG);
@@ -376,42 +376,42 @@ void RunTest(IActor *test)
env.RunTest(test);
env.Finalize();
-}
-
+}
+
Y_UNIT_TEST_SUITE(TExecutorDb) {
Y_UNIT_TEST(RandomOps)
{
RunTest(new TFuzzyActor(5, MaxActionCount));
- }
-
+ }
+
Y_UNIT_TEST(FullScan)
{
RunTest(new TFullScan(MultiPageMaxActionCount));
- }
-
+ }
+
Y_UNIT_TEST(CoordinatorSimulation)
{
RunTest(new THeThing(MaxActionCount, 42));
- }
-
+ }
+
Y_UNIT_TEST(RandomCoordinatorSimulation)
{
RunTest(new THeThing(MaxActionCount, TInstant::Now().Seconds()));
- }
-
+ }
+
Y_UNIT_TEST(MultiPage)
{
NFake::TFuzzySet fuzzy(false /* no compression */);
-
+
TVector<NFake::TFuncTx::TCall> tx = {
[&fuzzy](ITestDb& testDb, TTransactionContext &txc){ return fuzzy.UpdateRowTx(testDb, txc, 2, 100, 10000000); },
[&fuzzy](ITestDb& testDb, TTransactionContext &txc){ return fuzzy.UpdateRowTx(testDb, txc, 2, 101, 10000000); },
[&fuzzy](ITestDb& testDb, TTransactionContext &txc){ return fuzzy.UpdateRowTx(testDb, txc, 2, 100, 10000000); },
[&fuzzy](ITestDb& testDb, TTransactionContext &txc){ return fuzzy.ReadTx(testDb, txc, 2); }
- };
-
+ };
+
RunTest(new TDbTestPlayerActor(tx));
- }
+ }
Y_UNIT_TEST(EncodedPage)
{
@@ -433,7 +433,7 @@ Y_UNIT_TEST_SUITE(TExecutorDb) {
RunTest(new TDbTestPlayerActor(tx));
}
-}
-
-}
-}
+}
+
+}
+}
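
THeThing above simulates a coordinator by tracking, per transaction, how many datashards still have to confirm completion and erasing the transaction once the count hits zero. That bookkeeping reduces to a decrement-and-erase on a hash map, roughly as sketched below with standard containers and hypothetical helper names:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    using TTxId = uint64_t;

    // txId -> number of datashards that still have to confirm completion.
    using TUnconfirmed = std::unordered_map<TTxId, uint32_t>;

    // Register one more participant for txId (mirrors UnconfirmedCount[LastTxId]++).
    void AddParticipant(TUnconfirmed& unconfirmed, TTxId txId) {
        ++unconfirmed[txId];
    }

    // One participant confirmed; returns true once the transaction is fully confirmed
    // and its row in TxTable could be erased.
    bool Confirm(TUnconfirmed& unconfirmed, TTxId txId) {
        uint32_t left = --unconfirmed[txId];
        if (left == 0) {
            unconfirmed.erase(txId);
            return true;
        }
        return false;
    }

    int main() {
        TUnconfirmed unconfirmed;
        AddParticipant(unconfirmed, 42);
        AddParticipant(unconfirmed, 42);
        assert(!Confirm(unconfirmed, 42));  // one participant still pending
        assert(Confirm(unconfirmed, 42));   // last confirmation erases the entry
        assert(unconfirmed.empty());
    }
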
diff --git a/ydb/core/tablet_flat/flat_executor_db_mon.cpp b/ydb/core/tablet_flat/flat_executor_db_mon.cpp
index 406be970df8..d5353747beb 100644
--- a/ydb/core/tablet_flat/flat_executor_db_mon.cpp
+++ b/ydb/core/tablet_flat/flat_executor_db_mon.cpp
@@ -4,7 +4,7 @@
#include <ydb/library/dynumber/dynumber.h>
#include <util/stream/hex.h>
-#include <util/string/escape.h>
+#include <util/string/escape.h>
#include <library/cpp/html/pcdata/pcdata.h>
namespace NKikimr {
@@ -133,84 +133,84 @@ public:
str << "</tr>";
str << "</thead>";
str << "<tbody>";
- ssize_t rowOffset = FromStringWithDefault<ssize_t>(cgi.Get("RowsOffset"), 0);
- rowOffset = Max<ssize_t>(rowOffset, 0);
- ssize_t rowLimit = FromStringWithDefault<ssize_t>(cgi.Get("MaxRows"), 1000);
- rowLimit = Max<ssize_t>(rowLimit, 1);
- ssize_t rowCount = 0;
+ ssize_t rowOffset = FromStringWithDefault<ssize_t>(cgi.Get("RowsOffset"), 0);
+ rowOffset = Max<ssize_t>(rowOffset, 0);
+ ssize_t rowLimit = FromStringWithDefault<ssize_t>(cgi.Get("MaxRows"), 1000);
+ rowLimit = Max<ssize_t>(rowLimit, 1);
+ ssize_t rowCount = 0;
while (result->Next(NTable::ENext::Data) == NTable::EReady::Data && rowCount < rowOffset + rowLimit) {
- ++rowCount;
- if (rowCount > rowOffset) {
- str << "<tr>";
+ ++rowCount;
+ if (rowCount > rowOffset) {
+ str << "<tr>";
TDbTupleRef tuple = result->GetValues();
- for (size_t i = 0; i < columns.size(); ++i) {
- const void *data = tuple.Columns[i].Data();
- ui32 size = tuple.Columns[i].Size();
- str << "<td>";
- if (data == nullptr) {
- str << "<i>&lt;null&gt;</i>";
- } else {
- switch(tuple.Types[i]) {
- case NScheme::NTypeIds::Int8:
- str << *(i8*)data;
- break;
- case NScheme::NTypeIds::Int16:
- str << *(i16*)data;
- break;
- case NScheme::NTypeIds::Uint16:
- str << *(ui16*)data;
- break;
- case NScheme::NTypeIds::Int32:
- str << *(i32*)data;
- break;
- case NScheme::NTypeIds::Uint32:
- str << *(ui32*)data;
- break;
- case NScheme::NTypeIds::Int64:
- str << *(i64*)data;
- break;
- case NScheme::NTypeIds::Uint64:
- str << *(ui64*)data;
- break;
- case NScheme::NTypeIds::Byte:
- str << (ui32)*(ui8*)data;
- break;
- case NScheme::NTypeIds::Bool:
- str << *(bool*)data;
- break;
- case NScheme::NTypeIds::Double:
- str << *(double*)data;
- break;
- case NScheme::NTypeIds::Float:
- str << *(float*)data;
- break;
- case NScheme::NTypeIds::Date:
- str << *(ui16*)data;
- break;
- case NScheme::NTypeIds::Datetime:
- str << *(ui32*)data;
- break;
- case NScheme::NTypeIds::Timestamp:
- str << *(ui64*)data;
- break;
- case NScheme::NTypeIds::Interval:
- str << *(i64*)data;
- break;
- case NScheme::NTypeIds::PairUi64Ui64:
- str << "(" << ((std::pair<ui64,ui64>*)data)->first << "," << ((std::pair<ui64,ui64>*)data)->second << ")";
- break;
+ for (size_t i = 0; i < columns.size(); ++i) {
+ const void *data = tuple.Columns[i].Data();
+ ui32 size = tuple.Columns[i].Size();
+ str << "<td>";
+ if (data == nullptr) {
+ str << "<i>&lt;null&gt;</i>";
+ } else {
+ switch(tuple.Types[i]) {
+ case NScheme::NTypeIds::Int8:
+ str << *(i8*)data;
+ break;
+ case NScheme::NTypeIds::Int16:
+ str << *(i16*)data;
+ break;
+ case NScheme::NTypeIds::Uint16:
+ str << *(ui16*)data;
+ break;
+ case NScheme::NTypeIds::Int32:
+ str << *(i32*)data;
+ break;
+ case NScheme::NTypeIds::Uint32:
+ str << *(ui32*)data;
+ break;
+ case NScheme::NTypeIds::Int64:
+ str << *(i64*)data;
+ break;
+ case NScheme::NTypeIds::Uint64:
+ str << *(ui64*)data;
+ break;
+ case NScheme::NTypeIds::Byte:
+ str << (ui32)*(ui8*)data;
+ break;
+ case NScheme::NTypeIds::Bool:
+ str << *(bool*)data;
+ break;
+ case NScheme::NTypeIds::Double:
+ str << *(double*)data;
+ break;
+ case NScheme::NTypeIds::Float:
+ str << *(float*)data;
+ break;
+ case NScheme::NTypeIds::Date:
+ str << *(ui16*)data;
+ break;
+ case NScheme::NTypeIds::Datetime:
+ str << *(ui32*)data;
+ break;
+ case NScheme::NTypeIds::Timestamp:
+ str << *(ui64*)data;
+ break;
+ case NScheme::NTypeIds::Interval:
+ str << *(i64*)data;
+ break;
+ case NScheme::NTypeIds::PairUi64Ui64:
+ str << "(" << ((std::pair<ui64,ui64>*)data)->first << "," << ((std::pair<ui64,ui64>*)data)->second << ")";
+ break;
case NScheme::NTypeIds::String:
case NScheme::NTypeIds::String4k:
case NScheme::NTypeIds::String2m:
- str << EncodeHtmlPcdata(EscapeC(TStringBuf(static_cast<const char*>(data), Min(size, (ui32)1024))));
- break;
- case NScheme::NTypeIds::ActorId:
+ str << EncodeHtmlPcdata(EscapeC(TStringBuf(static_cast<const char*>(data), Min(size, (ui32)1024))));
+ break;
+ case NScheme::NTypeIds::ActorId:
str << *(TActorId*)data;
- break;
+ break;
case NScheme::NTypeIds::Utf8:
case NScheme::NTypeIds::Json:
- str << EncodeHtmlPcdata(TStringBuf((const char*)data, size));
- break;
+ str << EncodeHtmlPcdata(TStringBuf((const char*)data, size));
+ break;
case NScheme::NTypeIds::JsonDocument: {
const auto json = NBinaryJson::SerializeToJson(TStringBuf((const char*)data, size));
str << "(JsonDocument) " << EncodeHtmlPcdata(json);
@@ -221,24 +221,24 @@ public:
str << "(DyNumber) " << number;
break;
}
- default:
- str << "<i>unknown type " << tuple.Types[i] << "</i>";
- break;
- }
+ default:
+ str << "<i>unknown type " << tuple.Types[i] << "</i>";
+ break;
+ }
}
- str << "</td>";
+ str << "</td>";
}
- str << "</tr>";
+ str << "</tr>";
}
}
str << "</tbody>";
str << "</table>";
-
+
if (result->Last() == NTable::EReady::Page)
return false;
auto fnPrintLink = [this, &str, tableId, &cgi] (ssize_t offset, ssize_t limit, TString caption) {
- str << "<a href='db?TabletID=" << Self->TabletId()
+ str << "<a href='db?TabletID=" << Self->TabletId()
<< "&TableID=" << tableId;
if (cgi.Has("Key")) {
str << "&Key=" << cgi.Get("Key");
@@ -247,25 +247,25 @@ public:
str << "&Lookup=" << cgi.Get("Lookup");
}
str << "&RowsOffset=" << offset
- << "&MaxRows=" << limit
- << "'>" << caption << "</a>";
- };
-
- // Prev rows?
- if (rowOffset > 0) {
- ssize_t off = Max<ssize_t>(0, rowOffset - rowLimit);
- ssize_t lim = Min<ssize_t>(rowOffset, rowLimit);
- fnPrintLink(off, lim, Sprintf("Prev %" PRISZT " rows", lim));
- str << "<br>";
- }
-
- // More rows?
+ << "&MaxRows=" << limit
+ << "'>" << caption << "</a>";
+ };
+
+ // Prev rows?
+ if (rowOffset > 0) {
+ ssize_t off = Max<ssize_t>(0, rowOffset - rowLimit);
+ ssize_t lim = Min<ssize_t>(rowOffset, rowLimit);
+ fnPrintLink(off, lim, Sprintf("Prev %" PRISZT " rows", lim));
+ str << "<br>";
+ }
+
+ // More rows?
if (result->Next(NTable::ENext::Data) != NTable::EReady::Gone) {
- fnPrintLink(rowCount, rowLimit, Sprintf("Next %" PRISZT " rows", rowLimit));
- str << "<br>";
- }
-
- fnPrintLink(0, 1000000000, "All");
+ fnPrintLink(rowCount, rowLimit, Sprintf("Next %" PRISZT " rows", rowLimit));
+ str << "<br>";
+ }
+
+ fnPrintLink(0, 1000000000, "All");
}
}
}
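
The Prev/Next paging links in this monitoring page are built from RowsOffset and MaxRows with clamped arithmetic: the previous page starts at max(0, offset - limit) and shows min(offset, limit) rows, while the next page simply continues after the rows just rendered. The calculation in isolation (a sketch with signed sizes, as in the original ssize_t code):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    struct TPageLink { std::ptrdiff_t Offset; std::ptrdiff_t Limit; };

    // Previous page: never go below row 0 and never show more rows than were skipped.
    TPageLink PrevLink(std::ptrdiff_t rowOffset, std::ptrdiff_t rowLimit) {
        return { std::max<std::ptrdiff_t>(0, rowOffset - rowLimit),
                 std::min<std::ptrdiff_t>(rowOffset, rowLimit) };
    }

    // Next page: continue right after the rows rendered so far (rowCount == rowOffset + shown).
    TPageLink NextLink(std::ptrdiff_t rowCount, std::ptrdiff_t rowLimit) {
        return { rowCount, rowLimit };
    }

    int main() {
        TPageLink prev = PrevLink(/*rowOffset=*/2500, /*rowLimit=*/1000);
        assert(prev.Offset == 1500 && prev.Limit == 1000);

        prev = PrevLink(/*rowOffset=*/300, /*rowLimit=*/1000);   // near the start: clamp to row 0
        assert(prev.Offset == 0 && prev.Limit == 300);

        TPageLink next = NextLink(/*rowCount=*/3500, /*rowLimit=*/1000);
        assert(next.Offset == 3500 && next.Limit == 1000);
    }
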
diff --git a/ydb/core/tablet_flat/flat_executor_ut.cpp b/ydb/core/tablet_flat/flat_executor_ut.cpp
index fb45c68da84..c9cf5289711 100644
--- a/ydb/core/tablet_flat/flat_executor_ut.cpp
+++ b/ydb/core/tablet_flat/flat_executor_ut.cpp
@@ -691,7 +691,7 @@ Y_UNIT_TEST_SUITE(TFlatTableBackgroundCompactions) {
TString Type;
ui32 MaxPriority;
};
-
+
struct TIsResourceAllocation {
TIsResourceAllocation()
{}
diff --git a/ydb/core/tablet_flat/flat_iterator.h b/ydb/core/tablet_flat/flat_iterator.h
index a155c191503..60195c17b44 100644
--- a/ydb/core/tablet_flat/flat_iterator.h
+++ b/ydb/core/tablet_flat/flat_iterator.h
@@ -1,4 +1,4 @@
-#pragma once
+#pragma once
#include "flat_iterator_ops.h"
#include "flat_mem_iter.h"
#include "flat_part_iter_multi.h"
@@ -6,16 +6,16 @@
#include "flat_row_state.h"
#include "flat_range_cache.h"
#include "util_fmt_cell.h"
-
+
#include <library/cpp/containers/stack_vector/stack_vec.h>
-
-#include <util/draft/holder_vector.h>
-#include <util/generic/vector.h>
-#include <util/generic/queue.h>
-
-namespace NKikimr {
+
+#include <util/draft/holder_vector.h>
+#include <util/generic/vector.h>
+#include <util/generic/queue.h>
+
+namespace NKikimr {
namespace NTable {
-
+
enum class ENext {
All,
Data,
@@ -234,17 +234,17 @@ class TTableItBase : TNonCopyable {
bool FutureEntryValid = false;
};
-public:
+public:
TTableItBase(
const TRowScheme* scheme, TTagsRef tags, ui64 lim = Max<ui64>(),
TRowVersion snapshot = TRowVersion::Max(),
const NTable::TTransactionMap<TRowVersion>& committedTransactions = {});
-
+
~TTableItBase();
void Push(TAutoPtr<TMemIt>);
void Push(TAutoPtr<TRunIt>);
-
+
void StopBefore(TArrayRef<const TCell> key);
void StopAfter(TArrayRef<const TCell> key);
@@ -282,7 +282,7 @@ public:
isHead = false;
}
} else if ((Ready = Apply()) != EReady::Data) {
-
+
} else if (mode == ENext::All || mode == ENext::Uncommitted || State.GetRowState() != ERowOp::Erase) {
break;
} else {
@@ -312,12 +312,12 @@ public:
TDbTupleRef GetKey() const noexcept;
TDbTupleRef GetValues() const noexcept;
-
+
const TRowState& Row() const noexcept
{
return State;
- }
-
+ }
+
bool IsUncommitted() const noexcept;
ui64 GetUncommittedTxId() const noexcept;
EReady SkipUncommitted() noexcept;
@@ -330,7 +330,7 @@ public:
TIntrusivePtr<TKeyRangeCache> ErasedKeysCache;
TIteratorStats Stats;
-private:
+private:
ui64 Limit = 0;
TRowState State;
@@ -348,41 +348,41 @@ private:
THolderVector<TRunIt> RunIters;
TOwnedCellVec StopKey;
bool StopKeyInclusive = true;
-
- struct TIteratorId {
+
+ struct TIteratorId {
EType Type;
- ui16 Index;
+ ui16 Index;
TEpoch Epoch;
- };
-
+ };
+
struct TElement {
TArrayRef<const TCell> Key;
- TIteratorId IteratorId;
- };
-
+ TIteratorId IteratorId;
+ };
+
struct TComparator {
TComparator(TArrayRef<const NScheme::TTypeIdOrder> types)
: Types(types)
- {}
-
+ {}
+
int CompareKeys(TArrayRef<const TCell> a, TArrayRef<const TCell> b) const noexcept
{
return TIteratorOps::CompareKeys(Types, a, b);
- }
-
+ }
+
bool operator() (const TElement& a, const TElement& b) const noexcept
{
if (int cmp = CompareKeys(a.Key, b.Key))
return cmp > 0;
-
+
Y_VERIFY_DEBUG(a.IteratorId.Epoch != b.IteratorId.Epoch,
"Found equal key iterators with the same epoch");
return a.IteratorId.Epoch < b.IteratorId.Epoch;
- }
-
+ }
+
const TArrayRef<const NScheme::TTypeIdOrder> Types;
- };
-
+ };
+
/**
* Adjust epoch into a modified range
*
@@ -413,7 +413,7 @@ private:
using TForwardIter = typename TIterators::iterator;
using TReverseIter = typename TIterators::reverse_iterator;
- TIterators Iterators;
+ TIterators Iterators;
TForwardIter Active;
TForwardIter Inactive;
ui64 DeltaTxId = 0;
@@ -431,8 +431,8 @@ private:
void AddNotReadyIterator(TIteratorId itId);
bool SeekInternal(TArrayRef<const TCell> key, ESeek seek) noexcept;
-};
-
+};
+
class TTableIt;
class TTableReverseIt;
@@ -459,7 +459,7 @@ inline TTableItBase<TIteratorOps>::TTableItBase(
const TRowScheme* scheme, TTagsRef tags, ui64 limit,
TRowVersion snapshot,
const NTable::TTransactionMap<TRowVersion>& committedTransactions)
- : Scheme(scheme)
+ : Scheme(scheme)
, Remap(*Scheme, tags)
, Limit(limit)
, State(Remap.Size())
@@ -468,8 +468,8 @@ inline TTableItBase<TIteratorOps>::TTableItBase(
, Comparator(Scheme->Keys->Types)
, Active(Iterators.end())
, Inactive(Iterators.end())
-{}
-
+{}
+
template<class TIteratorOps>
inline TTableItBase<TIteratorOps>::~TTableItBase()
{
@@ -501,8 +501,8 @@ inline void TTableItBase<TIteratorOps>::Push(TAutoPtr<TMemIt> it)
TDbTupleRef key = MemIters.back()->GetKey();
AddReadyIterator(key.Cells(), itId);
}
-}
-
+}
+
template<class TIteratorOps>
inline void TTableItBase<TIteratorOps>::Push(TAutoPtr<TRunIt> it)
{
@@ -559,7 +559,7 @@ inline EReady TTableItBase<TIteratorOps>::Start() noexcept
if (Active != Iterators.end()) {
return EReady::Page;
}
-
+
if (!Iterators ||
Iterators.front().IteratorId.Type == EType::Stop ||
!(Limit && Limit--))
@@ -571,8 +571,8 @@ inline EReady TTableItBase<TIteratorOps>::Start() noexcept
PopHeap(Iterators.begin(), Active--, Comparator);
while (Active != Iterators.begin()) {
if (Comparator.CompareKeys(key, Iterators.front().Key) != 0)
- break;
-
+ break;
+
if (Iterators.front().IteratorId.Type == EType::Stop) {
// This is the last row we may return
Limit = 0;
@@ -580,13 +580,13 @@ inline EReady TTableItBase<TIteratorOps>::Start() noexcept
}
PopHeap(Iterators.begin(), Active--, Comparator);
- }
+ }
Stage = EStage::Snap;
Inactive = Iterators.end();
return EReady::Data;
-}
-
+}
+
template<class TIteratorOps>
inline EReady TTableItBase<TIteratorOps>::Turn() noexcept
{
@@ -599,7 +599,7 @@ inline EReady TTableItBase<TIteratorOps>::Turn() noexcept
size_t left = Iterators.end() - Active;
while (left-- > 0) {
TIteratorId ai = Active->IteratorId;
- ui32 idx = ai.Index;
+ ui32 idx = ai.Index;
switch (ai.Type) {
case EType::Mem: {
auto& mi = *MemIters[idx];
@@ -613,7 +613,7 @@ inline EReady TTableItBase<TIteratorOps>::Turn() noexcept
Inactive = Active;
}
break;
- }
+ }
case EType::Run: {
auto& it = *RunIters[idx];
switch (TIteratorOps::MoveNext(it)) {
@@ -644,16 +644,16 @@ inline EReady TTableItBase<TIteratorOps>::Turn() noexcept
default: {
Y_FAIL("Unexpected iterator type");
}
- }
- }
-
+ }
+ }
+
if (!ready) {
return EReady::Page;
}
return Start();
-}
-
+}
+
template<class TIteratorOps>
inline bool TTableItBase<TIteratorOps>::IsUncommitted() const noexcept
{
@@ -857,7 +857,7 @@ template<class TIteratorOps>
inline EReady TTableItBase<TIteratorOps>::Apply() noexcept
{
State.Reset(Remap.Nulls());
-
+
const TDbTupleRef key = GetKey();
for (auto &pin: Remap.KeyPins())
@@ -912,20 +912,20 @@ inline EReady TTableItBase<TIteratorOps>::Apply() noexcept
}
default:
Y_FAIL("Unexpected iterator type");
- }
-
+ }
+
if (State.IsFinalized() || !committed)
- break;
- }
-
+ break;
+ }
+
if (State.Need()) {
return EReady::Page;
}
Stage = EStage::Done;
return EReady::Data;
-}
-
+}
+
template<class TIteratorOps>
inline TDbTupleRef TTableItBase<TIteratorOps>::GetKey() const noexcept
{
@@ -937,17 +937,17 @@ inline TDbTupleRef TTableItBase<TIteratorOps>::GetKey() const noexcept
return RunIters[ai.Index]->GetKey();
default:
Y_FAIL("Unexpected iterator type");
- }
-}
-
+ }
+}
+
template<class TIteratorOps>
inline TDbTupleRef TTableItBase<TIteratorOps>::GetValues() const noexcept
{
if (State.GetRowState() == ERowOp::Erase)
- return TDbTupleRef();
+ return TDbTupleRef();
return TDbTupleRef(Remap.Types().begin(), (*State).begin(), (*State).size());
-}
-
+}
+
template<class TIteratorOps>
inline bool TTableItBase<TIteratorOps>::SeekInternal(TArrayRef<const TCell> key, ESeek seek) noexcept
{
@@ -1035,7 +1035,7 @@ inline bool TTableItBase<TIteratorOps>::SeekInternal(TArrayRef<const TCell> key,
}
return ready;
-}
+}
}
}
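
The TComparator in the flat_iterator.h hunk above orders heap entries by key first and, for equal keys, by epoch, so that the freshest data layer for a given key surfaces first during the merge. Below is a minimal standalone sketch of that ordering only, using simplified types (std::string keys and plain int epochs instead of the ydb cell/epoch types) and std::priority_queue instead of the in-place heap helpers; all names here are illustrative.

#include <iostream>
#include <queue>
#include <string>
#include <vector>

struct TEntry {
    std::string Key;   // serialized row key (simplified)
    int Epoch;         // larger epoch == fresher data layer
};

// "Less" comparator for std::priority_queue: the top entry is the one with the
// smallest key; among equal keys, the one with the largest epoch.
struct TByKeyThenEpoch {
    bool operator()(const TEntry& a, const TEntry& b) const {
        if (int cmp = a.Key.compare(b.Key))
            return cmp > 0;          // larger keys sink, smallest key on top
        return a.Epoch < b.Epoch;    // equal keys: larger (fresher) epoch on top
    }
};

int main() {
    std::priority_queue<TEntry, std::vector<TEntry>, TByKeyThenEpoch> heap;
    for (const TEntry& e : {TEntry{"b", 1}, TEntry{"a", 2}, TEntry{"a", 5}, TEntry{"c", 3}})
        heap.push(e);
    while (!heap.empty()) {
        std::cout << heap.top().Key << " epoch " << heap.top().Epoch << "\n";
        heap.pop();
    }
    // Prints: a epoch 5, a epoch 2, b epoch 1, c epoch 3
    return 0;
}
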
diff --git a/ydb/core/tablet_flat/flat_mem_warm.cpp b/ydb/core/tablet_flat/flat_mem_warm.cpp
index 7f9db0be961..df1cb798e23 100644
--- a/ydb/core/tablet_flat/flat_mem_warm.cpp
+++ b/ydb/core/tablet_flat/flat_mem_warm.cpp
@@ -1,14 +1,14 @@
#include "flat_mem_warm.h"
#include "flat_mem_snapshot.h"
#include "flat_page_other.h"
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NTable {
-
+
TString PrintRow(const TDbTupleRef& row, const NScheme::TTypeRegistry& typeRegistry) {
- return DbgPrintTuple(row, typeRegistry);
-}
-
+ return DbgPrintTuple(row, typeRegistry);
+}
+
TIntrusiveConstPtr<NPage::TExtBlobs> TMemTable::MakeBlobsPage(TArrayRef<const TMemTableSnapshot> list)
{
NPage::TExtBlobsWriter writer;
@@ -42,13 +42,13 @@ void TMemTable::DebugDump(IOutputStream& str, const NScheme::TTypeRegistry& type
auto types = Scheme->Keys->BasicTypes();
for (it.SeekFirst(); it.IsValid(); it.Next()) {
TDbTupleRef key(types.data(), it.GetKey(), types.size());
-
+
TString keyStr = PrintRow(key, typeRegistry) + " -> ";
const auto *row = it.GetValue();
while (row) {
- str << keyStr
+ str << keyStr
<< "ERowOp " << int(row->Rop)
- << " {";
+ << " {";
for (ui32 i = 0; i < row->Items; ++i) {
TTag colId = row->Ops()[i].Tag;
if (Scheme->ColInfo(colId)) {
@@ -56,16 +56,16 @@ void TMemTable::DebugDump(IOutputStream& str, const NScheme::TTypeRegistry& type
auto &op = row->Ops()[i];
str << EOpToStr(ECellOp(op.Op)) << " " << op.Tag << " " << DbgPrintCell(op.Value, typeId, typeRegistry);
- } else {
- str << "unknown column " << colId;
- }
+ } else {
+ str << "unknown column " << colId;
+ }
if (i+1 < row->Items)
- str << ", ";
- }
- str << "}" << Endl;
+ str << ", ";
+ }
+ str << "}" << Endl;
row = row->Next;
- }
- }
-}
-
-}}
+ }
+ }
+}
+
+}}
diff --git a/ydb/core/tablet_flat/flat_ops_compact.h b/ydb/core/tablet_flat/flat_ops_compact.h
index fe98d42f56e..2ac81c4113a 100644
--- a/ydb/core/tablet_flat/flat_ops_compact.h
+++ b/ydb/core/tablet_flat/flat_ops_compact.h
@@ -503,7 +503,7 @@ namespace NTabletFlatExecutor {
auto flag = NKikimrBlobStorage::AsyncBlob;
auto *ev = new TEvPut(id.Logo, std::exchange(glob.Data, TString{ }), TInstant::Max(), flag,
- TEvBlobStorage::TEvPut::ETactic::TacticMaxThroughput);
+ TEvBlobStorage::TEvPut::ETactic::TacticMaxThroughput);
auto ctx = TActivationContext::ActorContextFor(SelfId());
SendToBSProxy(ctx, id.Group, ev);
diff --git a/ydb/core/tablet_flat/flat_part_store.h b/ydb/core/tablet_flat/flat_part_store.h
index ba94521b275..ee0bc2324bc 100644
--- a/ydb/core/tablet_flat/flat_part_store.h
+++ b/ydb/core/tablet_flat/flat_part_store.h
@@ -71,8 +71,8 @@ public:
ui64 DataSize() const override
{
return BackingSize() - IndexesRawSize;
- }
-
+ }
+
ui64 GetPageSize(NPage::TPageId id, NPage::TGroupId groupId) const override
{
Y_VERIFY(groupId.Index < PageCollections.size());
diff --git a/ydb/core/tablet_flat/flat_row_eggs.h b/ydb/core/tablet_flat/flat_row_eggs.h
index a58cbe93170..aaf9e7f35c1 100644
--- a/ydb/core/tablet_flat/flat_row_eggs.h
+++ b/ydb/core/tablet_flat/flat_row_eggs.h
@@ -53,10 +53,10 @@ namespace NTable {
Outer = 3, /* ~ is packed out of rows page */
};
- enum EHint : ui64 {
- NoByKey = 0x1,
- };
-
+ enum EHint : ui64 {
+ NoByKey = 0x1,
+ };
+
struct TLargeObj {
TLargeObj() = default;
diff --git a/ydb/core/tablet_flat/flat_sausagecache.cpp b/ydb/core/tablet_flat/flat_sausagecache.cpp
index 1704d16bab1..4d396e8f758 100644
--- a/ydb/core/tablet_flat/flat_sausagecache.cpp
+++ b/ydb/core/tablet_flat/flat_sausagecache.cpp
@@ -80,7 +80,7 @@ TPrivatePageCache::TPage::TWaitQueuePtr TPrivatePageCache::ForgetPageCollection(
// todo: amortize destruction cost (how?)
auto it = PageCollections.find(id);
Y_VERIFY(it != PageCollections.end(), "trying to forget unknown page collection. logic flaw?");
- TIntrusivePtr<TInfo> info = it->second;
+ TIntrusivePtr<TInfo> info = it->second;
TPage::TWaitQueuePtr ret;
for (const auto& kv : info->PageMap) {
diff --git a/ydb/core/tablet_flat/flat_sausagecache.h b/ydb/core/tablet_flat/flat_sausagecache.h
index 232392af0a7..ac040eb0171 100644
--- a/ydb/core/tablet_flat/flat_sausagecache.h
+++ b/ydb/core/tablet_flat/flat_sausagecache.h
@@ -129,7 +129,7 @@ public:
};
};
- struct TInfo : public TThrRefBase {
+ struct TInfo : public TThrRefBase {
ui32 Total() const noexcept {
return PageMap.size();
}
diff --git a/ydb/core/tablet_flat/flat_scan_actor.h b/ydb/core/tablet_flat/flat_scan_actor.h
index 076d3109cd0..5cbf2797a03 100644
--- a/ydb/core/tablet_flat/flat_scan_actor.h
+++ b/ydb/core/tablet_flat/flat_scan_actor.h
@@ -58,7 +58,7 @@ namespace NOps {
~TDriver()
{
- /* Correct actors shutdown hasn't been implemented in
+ /* Correct actors shutdown hasn't been implemented in
kikimr, thus actors may be destructed in incompleted state
and dtor cannot be used for completeness checkups.
@@ -246,17 +246,17 @@ namespace NOps {
conf.AheadLo = Args.AheadLo;
conf.AheadHi = Args.AheadHi;
-
+
if (Conf.ReadAheadLo != Max<ui64>() && Conf.ReadAheadLo <= conf.AheadLo) {
- conf.AheadLo = Conf.ReadAheadLo;
- }
-
- if (Conf.ReadAheadHi != Max<ui64>() && Conf.ReadAheadHi <= conf.AheadHi) {
- conf.AheadHi = Conf.ReadAheadHi;
- }
-
- conf.AheadLo = Min(conf.AheadLo, conf.AheadHi);
-
+ conf.AheadLo = Conf.ReadAheadLo;
+ }
+
+ if (Conf.ReadAheadHi != Max<ui64>() && Conf.ReadAheadHi <= conf.AheadHi) {
+ conf.AheadHi = Conf.ReadAheadHi;
+ }
+
+ conf.AheadLo = Min(conf.AheadLo, conf.AheadHi);
+
conf.Trace = Args.Trace;
conf.Edge = Conf.LargeEdge;
conf.Tablet = Args.Tablet;
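
The read-ahead handling in the flat_scan_actor.h hunk lets a scan lower the executor's AheadLo/AheadHi defaults (a Max<ui64>() value means "not overridden") and finally clamps AheadLo so it never exceeds AheadHi. A small standalone sketch of the same clamping follows, with simplified names and uint64_t standing in for ui64; it is an illustration of the rule, not the actual actor code.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

struct TReadAhead { uint64_t AheadLo, AheadHi; };

TReadAhead ApplyScanOverride(TReadAhead conf, uint64_t readAheadLo, uint64_t readAheadHi) {
    const uint64_t unset = std::numeric_limits<uint64_t>::max();
    if (readAheadLo != unset && readAheadLo <= conf.AheadLo)
        conf.AheadLo = readAheadLo;                  // scan may only lower the defaults
    if (readAheadHi != unset && readAheadHi <= conf.AheadHi)
        conf.AheadHi = readAheadHi;
    conf.AheadLo = std::min(conf.AheadLo, conf.AheadHi);  // keep Lo <= Hi
    return conf;
}

int main() {
    TReadAhead conf{512 << 10, 2048 << 10};
    TReadAhead r = ApplyScanOverride(conf, 64 << 10, 256 << 10);
    std::cout << r.AheadLo << " " << r.AheadHi << "\n";  // 65536 262144
    return 0;
}
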
diff --git a/ydb/core/tablet_flat/flat_scan_iface.h b/ydb/core/tablet_flat/flat_scan_iface.h
index 9f00d9ca607..6b0fa8e42d7 100644
--- a/ydb/core/tablet_flat/flat_scan_iface.h
+++ b/ydb/core/tablet_flat/flat_scan_iface.h
@@ -101,10 +101,10 @@ namespace NTable {
*/
ui32 LargeEdge = Max<ui32>();
-
- // Scan can override default read ahead settings
- ui64 ReadAheadLo = Max<ui64>();
- ui64 ReadAheadHi = Max<ui64>();
+
+ // Scan can override default read ahead settings
+ ui64 ReadAheadLo = Max<ui64>();
+ ui64 ReadAheadHi = Max<ui64>();
};
struct THello {
diff --git a/ydb/core/tablet_flat/flat_stat_part.h b/ydb/core/tablet_flat/flat_stat_part.h
index f6259f8c170..230f94f04c5 100644
--- a/ydb/core/tablet_flat/flat_stat_part.h
+++ b/ydb/core/tablet_flat/flat_stat_part.h
@@ -1,16 +1,16 @@
-#pragma once
-
+#pragma once
+
#include "flat_part_iface.h"
-#include "flat_part_laid.h"
+#include "flat_part_laid.h"
#include "flat_page_frames.h"
#include "util_basics.h"
-
+
#include <library/cpp/containers/stack_vector/stack_vec.h>
-#include <util/draft/holder_vector.h>
-
-namespace NKikimr {
-namespace NTable {
-
+#include <util/draft/holder_vector.h>
+
+namespace NKikimr {
+namespace NTable {
+
/**
* Helper for calculating small blobs size between a pair of rows
*/
@@ -92,36 +92,36 @@ private:
TSmallVec<TPartGroupSizeHelper> GroupHelpers;
};
-// Iterates over part index and calculates total row count and data size
-// NOTE: we don't know row count for the last page so we also ignore its size
-// This shouldn't be a problem for big parts with many pages
-class TPartIndexIterator {
-public:
+// Iterates over part index and calculates total row count and data size
+// NOTE: we don't know row count for the last page so we also ignore its size
+// This shouldn't be a problem for big parts with many pages
+class TPartIndexIterator {
+public:
TPartIndexIterator(TIntrusiveConstPtr<TPart> part, TIntrusiveConstPtr<TKeyNulls> keys)
: Part(std::move(part))
, KeyColumns(std::move(keys))
- {
- Pos = Part->Index->Begin();
- End = Part->Index->End();
+ {
+ Pos = Part->Index->Begin();
+ End = Part->Index->End();
AltGroups.reserve(Part->Scheme->Groups.size() - 1);
for (ui32 group : xrange(size_t(1), Part->Scheme->Groups.size())) {
AltGroups.emplace_back(Part.Get(), NPage::TGroupId(group));
}
- FillKey();
- }
-
- bool IsValid() const {
- return Pos != End;
- }
-
- void Next() {
- Y_VERIFY(IsValid());
-
+ FillKey();
+ }
+
+ bool IsValid() const {
+ return Pos != End;
+ }
+
+ void Next() {
+ Y_VERIFY(IsValid());
+
auto curPageId = Pos->GetPageId();
LastSize = CurrentSize;
LastRowId = Pos->GetRowId();
- CurrentSize += GetPageSize(curPageId);
- ++Pos;
+ CurrentSize += GetPageSize(curPageId);
+ ++Pos;
TRowId nextRowId = Pos ? Pos->GetRowId() : Max<TRowId>();
for (auto& g : AltGroups) {
while (g.Pos && g.Pos->GetRowId() < nextRowId) {
@@ -130,19 +130,19 @@ public:
++g.Pos;
}
}
- FillKey();
- }
-
- TDbTupleRef GetCurrentKey() const {
+ FillKey();
+ }
+
+ TDbTupleRef GetCurrentKey() const {
Y_VERIFY(KeyColumns->BasicTypes().size() == CurrentKey.size());
return TDbTupleRef(KeyColumns->BasicTypes().data(), CurrentKey.data(), CurrentKey.size());
- }
-
+ }
+
ui64 GetLastRowId() const {
return LastRowId;
}
- ui64 GetCurrentRowId() const {
+ ui64 GetCurrentRowId() const {
if (IsValid()) {
return Pos->GetRowId();
}
@@ -151,40 +151,40 @@ public:
return endRowId;
}
return LastRowId;
- }
-
+ }
+
ui64 GetLastDataSize() const {
return LastSize;
}
- ui64 GetCurrentDataSize() const {
- return CurrentSize;
- }
-
-private:
+ ui64 GetCurrentDataSize() const {
+ return CurrentSize;
+ }
+
+private:
ui64 GetPageSize(TPageId pageId, NPage::TGroupId groupId = { }) const {
return Part->GetPageSize(pageId, groupId);
- }
-
- void FillKey() {
- CurrentKey.clear();
-
- if (!IsValid())
- return;
-
- ui32 keyIdx = 0;
- // Add columns that are present in the part
+ }
+
+ void FillKey() {
+ CurrentKey.clear();
+
+ if (!IsValid())
+ return;
+
+ ui32 keyIdx = 0;
+ // Add columns that are present in the part
for (;keyIdx < Part->Scheme->Groups[0].KeyTypes.size(); ++keyIdx) {
CurrentKey.push_back(Pos->Cell(Part->Scheme->Groups[0].ColsKeyIdx[keyIdx]));
- }
-
- // Extend with default values if needed
- for (;keyIdx < KeyColumns->Defs.size(); ++keyIdx) {
- CurrentKey.push_back(KeyColumns->Defs[keyIdx]);
- }
- }
-
-private:
+ }
+
+ // Extend with default values if needed
+ for (;keyIdx < KeyColumns->Defs.size(); ++keyIdx) {
+ CurrentKey.push_back(KeyColumns->Defs[keyIdx]);
+ }
+ }
+
+private:
struct TGroupState {
NPage::TIndex::TIter Pos;
const NPage::TGroupId GroupId;
@@ -200,38 +200,38 @@ private:
TIntrusiveConstPtr<TKeyNulls> KeyColumns;
NPage::TIndex::TIter Pos;
NPage::TIndex::TIter End;
- TSmallVec<TCell> CurrentKey;
- ui64 CurrentSize = 0;
- ui64 LastRowId = 0;
+ TSmallVec<TCell> CurrentKey;
+ ui64 CurrentSize = 0;
+ ui64 LastRowId = 0;
ui64 LastSize = 0;
TSmallVec<TGroupState> AltGroups;
-};
-
-// This iterator skips pages that are screened. Currently the logic is simple:
-// if page start key is screened then we assume that the whole previous page is screened
-// if page start key is not screened then the whole previous page is added to stats
-class TScreenedPartIndexIterator {
-public:
+};
+
+// This iterator skips pages that are screened. Currently the logic is simple:
+// if page start key is screened then we assume that the whole previous page is screened
+// if page start key is not screened then the whole previous page is added to stats
+class TScreenedPartIndexIterator {
+public:
TScreenedPartIndexIterator(TPartView partView, TIntrusiveConstPtr<TKeyNulls> keyColumns,
TIntrusiveConstPtr<NPage::TFrames> small)
: PartIter(partView.Part, keyColumns)
, Screen(std::move(partView.Screen))
, Small(std::move(small))
- , CurrentHole(TScreen::Iter(Screen, CurrentHoleIdx, 0, 1))
- {
- }
-
- bool IsValid() const {
- return PartIter.IsValid();
- }
-
- void Next() {
- Y_VERIFY(IsValid());
-
+ , CurrentHole(TScreen::Iter(Screen, CurrentHoleIdx, 0, 1))
+ {
+ }
+
+ bool IsValid() const {
+ return PartIter.IsValid();
+ }
+
+ void Next() {
+ Y_VERIFY(IsValid());
+
PrevRowCount = CurrentRowCount;
PrevSize = CurrentSize;
PartIter.Next();
-
+
ui64 rowCount = IncludedRows(PartIter.GetLastRowId(), PartIter.GetCurrentRowId());
if (rowCount > 0) {
// We try to count rows precisely, but data is only counted per-page
@@ -241,34 +241,34 @@ public:
CurrentSize += CalcSmallBytes();
}
}
- }
-
- TDbTupleRef GetCurrentKey() const {
- return PartIter.GetCurrentKey();
- }
-
- ui64 GetRowCountDelta() const {
+ }
+
+ TDbTupleRef GetCurrentKey() const {
+ return PartIter.GetCurrentKey();
+ }
+
+ ui64 GetRowCountDelta() const {
return CurrentRowCount - PrevRowCount;
- }
-
- ui64 GetDataSizeDelta() const {
+ }
+
+ ui64 GetDataSizeDelta() const {
return CurrentSize - PrevSize;
- }
-
-private:
+ }
+
+private:
ui64 IncludedRows(TRowId beginRowId, TRowId endRowId) noexcept {
if (!Screen) {
// Include all rows
return endRowId - beginRowId;
}
-
+
TRowId rowId = beginRowId;
ui64 rowCount = 0;
while (rowId < endRowId) {
// Skip screen holes before the current rowId
while (CurrentHole.End <= rowId) {
CurrentHole = TScreen::Next(Screen, CurrentHoleIdx, 1);
- }
+ }
TRowId next;
if (rowId < CurrentHole.Begin) {
// Skip rows before the next begin
@@ -277,13 +277,13 @@ private:
// Include rows before the next end
next = Min(CurrentHole.End, endRowId);
rowCount += next - rowId;
- }
+ }
rowId = next;
}
-
+
return rowCount;
- }
-
+ }
+
ui64 CalcSmallBytes() noexcept {
ui64 bytes = 0;
@@ -305,17 +305,17 @@ private:
return bytes;
}
-private:
- TPartIndexIterator PartIter;
+private:
+ TPartIndexIterator PartIter;
TIntrusiveConstPtr<TScreen> Screen;
TIntrusiveConstPtr<NPage::TFrames> Small; /* Inverted index for small blobs */
- size_t CurrentHoleIdx = 0;
- TScreen::THole CurrentHole;
+ size_t CurrentHoleIdx = 0;
+ TScreen::THole CurrentHole;
ui64 CurrentRowCount = 0;
- ui64 PrevRowCount = 0;
+ ui64 PrevRowCount = 0;
ui64 CurrentSize = 0;
- ui64 PrevSize = 0;
+ ui64 PrevSize = 0;
ui32 PrevSmallPage = 0;
-};
-
-}}
+};
+
+}}
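
IncludedRows() in the flat_stat_part.h hunk above counts how many rows of a page range are visible through the screen, i.e. fall into one of the screen's half-open holes (the visible row ranges). The sketch below shows the same counting in isolation, assuming a simplified THole with plain uint64_t bounds and a pre-sorted hole list; the real iterator walks the holes incrementally instead of rescanning them.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct THole { uint64_t Begin, End; };  // [Begin, End) of visible rows

// Count rows of [begin, end) that overlap any visible hole.
uint64_t CountVisibleRows(uint64_t begin, uint64_t end, const std::vector<THole>& holes) {
    uint64_t count = 0;
    for (const THole& h : holes) {
        uint64_t lo = std::max(begin, h.Begin);
        uint64_t hi = std::min(end, h.End);
        if (lo < hi)
            count += hi - lo;           // overlap with this visible range
    }
    return count;
}

int main() {
    std::vector<THole> screen{{400, 600}, {850, 950}};
    // Rows 500..899: 100 visible rows from the first hole, 50 from the second.
    std::cout << CountVisibleRows(500, 900, screen) << "\n";  // prints 150
    return 0;
}
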
diff --git a/ydb/core/tablet_flat/flat_stat_table.cpp b/ydb/core/tablet_flat/flat_stat_table.cpp
index 92a54c8afa8..dfbaab9da75 100644
--- a/ydb/core/tablet_flat/flat_stat_table.cpp
+++ b/ydb/core/tablet_flat/flat_stat_table.cpp
@@ -1,66 +1,66 @@
-#include "flat_part_laid.h"
-#include "flat_stat_table.h"
-#include "flat_stat_part.h"
-#include "flat_table_subset.h"
-
-namespace NKikimr {
-namespace NTable {
-
-void BuildStats(const TSubset& subset, TStats& stats, ui64 rowCountResolution, ui64 dataSizeResolution, const IPages* env) {
+#include "flat_part_laid.h"
+#include "flat_stat_table.h"
+#include "flat_stat_part.h"
+#include "flat_table_subset.h"
+
+namespace NKikimr {
+namespace NTable {
+
+void BuildStats(const TSubset& subset, TStats& stats, ui64 rowCountResolution, ui64 dataSizeResolution, const IPages* env) {
Y_UNUSED(env);
- stats.Clear();
-
+ stats.Clear();
+
TStatsIterator stIter(subset.Scheme->Keys);
-
- // Make index iterators for all parts
- for (auto& pi : subset.Flatten) {
+
+ // Make index iterators for all parts
+ for (auto& pi : subset.Flatten) {
TAutoPtr<TScreenedPartIndexIterator> iter = new TScreenedPartIndexIterator(pi, subset.Scheme->Keys, pi->Small);
- if (iter->IsValid()) {
- stIter.Add(iter);
- }
- }
-
- ui64 prevRows = 0;
- ui64 prevSize = 0;
- for (; stIter.IsValid(); stIter.Next()) {
- stats.RowCount = stIter.GetCurrentRowCount();
- stats.DataSize = stIter.GetCurrentDataSize();
-
- const bool nextRowsBucket = (stats.RowCount >= prevRows + rowCountResolution);
- const bool nextSizeBucket = (stats.DataSize >= prevSize + dataSizeResolution);
-
- if (!nextRowsBucket && !nextSizeBucket)
- continue;
-
- TDbTupleRef currentKey = stIter.GetCurrentKey();
+ if (iter->IsValid()) {
+ stIter.Add(iter);
+ }
+ }
+
+ ui64 prevRows = 0;
+ ui64 prevSize = 0;
+ for (; stIter.IsValid(); stIter.Next()) {
+ stats.RowCount = stIter.GetCurrentRowCount();
+ stats.DataSize = stIter.GetCurrentDataSize();
+
+ const bool nextRowsBucket = (stats.RowCount >= prevRows + rowCountResolution);
+ const bool nextSizeBucket = (stats.DataSize >= prevSize + dataSizeResolution);
+
+ if (!nextRowsBucket && !nextSizeBucket)
+ continue;
+
+ TDbTupleRef currentKey = stIter.GetCurrentKey();
TString serializedKey = TSerializedCellVec::Serialize(TConstArrayRef<TCell>(currentKey.Columns, currentKey.ColumnCount));
-
- if (nextRowsBucket) {
- stats.RowCountHistogram.push_back({serializedKey, stats.RowCount});
- prevRows = stats.RowCount;
- }
-
- if (nextSizeBucket) {
- stats.DataSizeHistogram.push_back({serializedKey, stats.DataSize});
- prevSize = stats.DataSize;
- }
- }
+
+ if (nextRowsBucket) {
+ stats.RowCountHistogram.push_back({serializedKey, stats.RowCount});
+ prevRows = stats.RowCount;
+ }
+
+ if (nextSizeBucket) {
+ stats.DataSizeHistogram.push_back({serializedKey, stats.DataSize});
+ prevSize = stats.DataSize;
+ }
+ }
stats.RowCount = stIter.GetCurrentRowCount();
stats.DataSize = stIter.GetCurrentDataSize();
-}
-
-void GetPartOwners(const TSubset& subset, THashSet<ui64>& partOwners) {
- for (auto& pi : subset.Flatten) {
- partOwners.insert(pi->Label.TabletID());
- }
+}
+
+void GetPartOwners(const TSubset& subset, THashSet<ui64>& partOwners) {
+ for (auto& pi : subset.Flatten) {
+ partOwners.insert(pi->Label.TabletID());
+ }
for (auto& pi : subset.ColdParts) {
partOwners.insert(pi->Label.TabletID());
}
for (auto& pi : subset.TxStatus) {
partOwners.insert(pi->Label.TabletID());
}
-}
-
-}}
+}
+
+}}
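
BuildStats() above emits a histogram point whenever the accumulated row count or data size has grown by at least the requested resolution since the last emitted point. A minimal sketch of that bucketing rule for the row-count histogram, with simplified types (plain strings for serialized keys) and a precomputed list of (key, cumulative row count) pairs standing in for the stats iterator:

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct TBucketSketch { std::string EndKey; uint64_t Value; };

std::vector<TBucketSketch> BuildRowHistogram(
        const std::vector<std::pair<std::string, uint64_t>>& keyedCounts, // (key, cumulative rows)
        uint64_t resolution) {
    std::vector<TBucketSketch> hist;
    uint64_t prev = 0;
    for (const auto& [key, rows] : keyedCounts) {
        if (rows >= prev + resolution) {   // crossed the next resolution boundary
            hist.push_back({key, rows});
            prev = rows;
        }
    }
    return hist;
}

int main() {
    auto hist = BuildRowHistogram({{"a", 900}, {"b", 2100}, {"c", 2500}, {"d", 4200}}, 1000);
    for (const auto& b : hist)
        std::cout << b.EndKey << " -> " << b.Value << "\n";  // b -> 2100, d -> 4200
    return 0;
}
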
diff --git a/ydb/core/tablet_flat/flat_stat_table.h b/ydb/core/tablet_flat/flat_stat_table.h
index 613395ad753..8a892b8a67c 100644
--- a/ydb/core/tablet_flat/flat_stat_table.h
+++ b/ydb/core/tablet_flat/flat_stat_table.h
@@ -1,55 +1,55 @@
-#pragma once
-
-#include "flat_part_laid.h"
-#include "flat_stat_part.h"
-#include "flat_table_subset.h"
-
-#include <util/generic/queue.h>
-#include <util/generic/hash_set.h>
-
-namespace NKikimr {
-namespace NTable {
-
-// Iterates over all parts and maintains total row count and data size
-class TStatsIterator {
-public:
+#pragma once
+
+#include "flat_part_laid.h"
+#include "flat_stat_part.h"
+#include "flat_table_subset.h"
+
+#include <util/generic/queue.h>
+#include <util/generic/hash_set.h>
+
+namespace NKikimr {
+namespace NTable {
+
+// Iterates over all parts and maintains total row count and data size
+class TStatsIterator {
+public:
explicit TStatsIterator(TIntrusiveConstPtr<TKeyNulls> keyColumns)
- : KeyColumns(keyColumns)
+ : KeyColumns(keyColumns)
, Heap(TIterKeyGreater{ this })
- {}
-
+ {}
+
void Add(THolder<TScreenedPartIndexIterator> pi) {
Iterators.PushBack(std::move(pi));
- TScreenedPartIndexIterator* it = Iterators.back();
- if (it->IsValid()) {
+ TScreenedPartIndexIterator* it = Iterators.back();
+ if (it->IsValid()) {
NextRowCount += it->GetRowCountDelta();
NextDataSize += it->GetDataSizeDelta();
- Heap.push(it);
- }
- }
-
- bool IsValid() const {
+ Heap.push(it);
+ }
+ }
+
+ bool IsValid() const {
return !Heap.empty() || CurrentKeyValid;
- }
-
- void Next() {
+ }
+
+ void Next() {
ui64 lastRowCount = RowCount;
ui64 lastDataSize = DataSize;
- Y_VERIFY(IsValid());
-
+ Y_VERIFY(IsValid());
+
while (!Heap.empty()) {
RowCount = NextRowCount;
DataSize = NextDataSize;
TScreenedPartIndexIterator* it = Heap.top();
- Heap.pop();
+ Heap.pop();
TDbTupleRef key = it->GetCurrentKey();
TString serialized = TSerializedCellVec::Serialize({key.Columns, key.ColumnCount});
CurrentKey = TSerializedCellVec(serialized);
CurrentKeyValid = true;
TDbTupleRef currentKeyTuple(KeyColumns->BasicTypes().data(), CurrentKey.GetCells().data(), CurrentKey.GetCells().size());
-
- if (MoveIterator(it))
- Heap.push(it);
+
+ if (MoveIterator(it))
+ Heap.push(it);
while (!Heap.empty() && CompareKeys(currentKeyTuple, Heap.top()->GetCurrentKey()) == 0) {
it = Heap.top();
@@ -62,160 +62,160 @@ public:
if (RowCount != lastRowCount && DataSize != lastDataSize) {
return;
}
- }
+ }
RowCount = NextRowCount;
DataSize = NextDataSize;
CurrentKeyValid = false;
- }
-
- TDbTupleRef GetCurrentKey() const {
+ }
+
+ TDbTupleRef GetCurrentKey() const {
return TDbTupleRef(KeyColumns->BasicTypes().data(), CurrentKey.GetCells().data(), CurrentKey.GetCells().size());
- }
-
- ui64 GetCurrentRowCount() const {
- return RowCount;
- }
-
- ui64 GetCurrentDataSize() const {
- return DataSize;
- }
-
-private:
+ }
+
+ ui64 GetCurrentRowCount() const {
+ return RowCount;
+ }
+
+ ui64 GetCurrentDataSize() const {
+ return DataSize;
+ }
+
+private:
int CompareKeys(const TDbTupleRef& a, const TDbTupleRef& b) const noexcept {
return ComparePartKeys(a.Cells(), b.Cells(), *KeyColumns);
- }
-
- struct TIterKeyGreater {
+ }
+
+ struct TIterKeyGreater {
const TStatsIterator* Self;
- bool operator ()(const TScreenedPartIndexIterator* a, const TScreenedPartIndexIterator* b) const {
+ bool operator ()(const TScreenedPartIndexIterator* a, const TScreenedPartIndexIterator* b) const {
return Self->CompareKeys(a->GetCurrentKey(), b->GetCurrentKey()) > 0;
- }
- };
-
- bool MoveIterator(TScreenedPartIndexIterator* it) {
+ }
+ };
+
+ bool MoveIterator(TScreenedPartIndexIterator* it) {
it->Next();
- NextRowCount += it->GetRowCountDelta();
- NextDataSize += it->GetDataSizeDelta();
-
+ NextRowCount += it->GetRowCountDelta();
+ NextDataSize += it->GetDataSizeDelta();
+
return it->IsValid();
- }
-
+ }
+
TIntrusiveConstPtr<TKeyNulls> KeyColumns;
- THolderVector<TScreenedPartIndexIterator> Iterators;
+ THolderVector<TScreenedPartIndexIterator> Iterators;
TPriorityQueue<TScreenedPartIndexIterator*, TSmallVec<TScreenedPartIndexIterator*>, TIterKeyGreater> Heap;
- TSerializedCellVec CurrentKey;
- ui64 RowCount = 0;
- ui64 DataSize = 0;
- ui64 NextRowCount = 0;
- ui64 NextDataSize = 0;
+ TSerializedCellVec CurrentKey;
+ ui64 RowCount = 0;
+ ui64 DataSize = 0;
+ ui64 NextRowCount = 0;
+ ui64 NextDataSize = 0;
bool CurrentKeyValid = false;
-};
-
-struct TBucket {
- TString EndKey;
- ui64 Value;
-};
-
+};
+
+struct TBucket {
+ TString EndKey;
+ ui64 Value;
+};
+
using THistogram = TVector<TBucket>;
-
-struct TStats {
- ui64 RowCount = 0;
- ui64 DataSize = 0;
- THistogram RowCountHistogram;
- THistogram DataSizeHistogram;
-
- void Clear() {
- RowCount = 0;
- DataSize = 0;
- RowCountHistogram.clear();
- DataSizeHistogram.clear();
- }
-
- void Swap(TStats& other) {
- std::swap(RowCount, other.RowCount);
- std::swap(DataSize, other.DataSize);
- RowCountHistogram.swap(other.RowCountHistogram);
- DataSizeHistogram.swap(other.DataSizeHistogram);
- }
-};
-
-class TKeyAccessSample {
-public:
- enum class EAccessKind {
- Read = 1,
- Update = 2,
- Delete = 3
- };
-
- using TSample = TVector<std::pair<TString, EAccessKind>>;
-
-public:
- explicit TKeyAccessSample(ui64 sampleCount = 100)
- : SampleCount(sampleCount)
- , TotalCount(0)
- {}
-
- void Add(TArrayRef<const TCell> key, EAccessKind accessKind = EAccessKind::Read) {
- ui64 idx = TotalCount;
- ++TotalCount;
- if (idx >= SampleCount) {
- idx = RandomNumber<ui64>(TotalCount) ;
- }
-
- if (idx >= SampleCount) {
- return;
- }
-
- TSerializedCellVec saved(TSerializedCellVec::Serialize(key));
-
- auto it = KeyRefCount.find(saved.GetBuffer());
- if (it != KeyRefCount.end()) {
- // Add a reference for existing key
- saved = it->second.first;
- ++it->second.second;
- } else {
- KeyRefCount[saved.GetBuffer()] = std::make_pair(saved, 1);
- }
-
- if (Sample.size() < SampleCount) {
- Sample.emplace_back(std::make_pair(saved.GetBuffer(), accessKind));
- } else {
- TString old = Sample[idx].first;
- auto oit = KeyRefCount.find(old);
- Y_VERIFY(oit != KeyRefCount.end());
-
- // Delete the key if this was the last reference
- if (oit->second.second == 1) {
- KeyRefCount.erase(oit);
- } else {
- --oit->second.second;
- }
-
- Sample[idx] = std::make_pair(saved.GetBuffer(), accessKind);
- }
- }
-
- const TSample& GetSample() const {
- return Sample;
- }
-
- void Clear() {
- Sample.clear();
- TotalCount = 0;
- KeyRefCount.clear();
- }
-
-private:
- TSample Sample;
- const ui64 SampleCount;
- ui64 TotalCount;
- // Store only unique keys and their ref counts to save memory
- THashMap<TString, std::pair<TSerializedCellVec, ui64>> KeyRefCount;
-};
-
-void BuildStats(const TSubset& subset, TStats& stats, ui64 rowCountResolution, ui64 dataSizeResolution, const IPages* env);
-void GetPartOwners(const TSubset& subset, THashSet<ui64>& partOwners);
-
-}}
+
+struct TStats {
+ ui64 RowCount = 0;
+ ui64 DataSize = 0;
+ THistogram RowCountHistogram;
+ THistogram DataSizeHistogram;
+
+ void Clear() {
+ RowCount = 0;
+ DataSize = 0;
+ RowCountHistogram.clear();
+ DataSizeHistogram.clear();
+ }
+
+ void Swap(TStats& other) {
+ std::swap(RowCount, other.RowCount);
+ std::swap(DataSize, other.DataSize);
+ RowCountHistogram.swap(other.RowCountHistogram);
+ DataSizeHistogram.swap(other.DataSizeHistogram);
+ }
+};
+
+class TKeyAccessSample {
+public:
+ enum class EAccessKind {
+ Read = 1,
+ Update = 2,
+ Delete = 3
+ };
+
+ using TSample = TVector<std::pair<TString, EAccessKind>>;
+
+public:
+ explicit TKeyAccessSample(ui64 sampleCount = 100)
+ : SampleCount(sampleCount)
+ , TotalCount(0)
+ {}
+
+ void Add(TArrayRef<const TCell> key, EAccessKind accessKind = EAccessKind::Read) {
+ ui64 idx = TotalCount;
+ ++TotalCount;
+ if (idx >= SampleCount) {
+ idx = RandomNumber<ui64>(TotalCount) ;
+ }
+
+ if (idx >= SampleCount) {
+ return;
+ }
+
+ TSerializedCellVec saved(TSerializedCellVec::Serialize(key));
+
+ auto it = KeyRefCount.find(saved.GetBuffer());
+ if (it != KeyRefCount.end()) {
+ // Add a reference for existing key
+ saved = it->second.first;
+ ++it->second.second;
+ } else {
+ KeyRefCount[saved.GetBuffer()] = std::make_pair(saved, 1);
+ }
+
+ if (Sample.size() < SampleCount) {
+ Sample.emplace_back(std::make_pair(saved.GetBuffer(), accessKind));
+ } else {
+ TString old = Sample[idx].first;
+ auto oit = KeyRefCount.find(old);
+ Y_VERIFY(oit != KeyRefCount.end());
+
+ // Delete the key if this was the last reference
+ if (oit->second.second == 1) {
+ KeyRefCount.erase(oit);
+ } else {
+ --oit->second.second;
+ }
+
+ Sample[idx] = std::make_pair(saved.GetBuffer(), accessKind);
+ }
+ }
+
+ const TSample& GetSample() const {
+ return Sample;
+ }
+
+ void Clear() {
+ Sample.clear();
+ TotalCount = 0;
+ KeyRefCount.clear();
+ }
+
+private:
+ TSample Sample;
+ const ui64 SampleCount;
+ ui64 TotalCount;
+ // Store only unique keys and their ref counts to save memory
+ THashMap<TString, std::pair<TSerializedCellVec, ui64>> KeyRefCount;
+};
+
+void BuildStats(const TSubset& subset, TStats& stats, ui64 rowCountResolution, ui64 dataSizeResolution, const IPages* env);
+void GetPartOwners(const TSubset& subset, THashSet<ui64>& partOwners);
+
+}}
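
TKeyAccessSample::Add in the flat_stat_table.h hunk keeps a fixed-size reservoir sample of accessed keys: the first SampleCount keys are taken verbatim, and afterwards the N-th key replaces a random slot with probability SampleCount/N. The sketch below shows only that sampling scheme, with std::mt19937_64 in place of RandomNumber and without the per-key reference counting the real class maintains.

#include <cstdint>
#include <iostream>
#include <random>
#include <string>
#include <vector>

class TReservoir {
public:
    explicit TReservoir(size_t sampleCount) : SampleCount(sampleCount) {}

    void Add(const std::string& key) {
        uint64_t idx = TotalCount++;
        if (idx >= SampleCount)                 // reservoir already full
            idx = std::uniform_int_distribution<uint64_t>(0, TotalCount - 1)(Rng);
        if (idx >= SampleCount)
            return;                             // item not selected this time
        if (Sample.size() < SampleCount)
            Sample.push_back(key);              // still filling the reservoir
        else
            Sample[idx] = key;                  // evict a random earlier item
    }

    const std::vector<std::string>& Get() const { return Sample; }

private:
    std::vector<std::string> Sample;
    const size_t SampleCount;
    uint64_t TotalCount = 0;
    std::mt19937_64 Rng{42};
};

int main() {
    TReservoir res(3);
    for (int i = 0; i < 100; ++i)
        res.Add("key" + std::to_string(i));
    for (const auto& k : res.Get())
        std::cout << k << "\n";                 // 3 keys sampled from 100
    return 0;
}
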
diff --git a/ydb/core/tablet_flat/flat_table.cpp b/ydb/core/tablet_flat/flat_table.cpp
index 4f78d31e831..686a4042687 100644
--- a/ydb/core/tablet_flat/flat_table.cpp
+++ b/ydb/core/tablet_flat/flat_table.cpp
@@ -560,7 +560,7 @@ void TTable::AddSafe(TPartView partView)
}
TTable::TReady TTable::Precharge(TRawVals minKey_, TRawVals maxKey_, TTagsRef tags,
- IPages* env, ui64 flg,
+ IPages* env, ui64 flg,
ui64 items, ui64 bytes,
EDirection direction,
TRowVersion snapshot) const
@@ -597,12 +597,12 @@ TTable::TReady TTable::Precharge(TRawVals minKey_, TRawVals maxKey_, TTagsRef ta
} else {
++res.Weeded;
}
- }
- }
- } else {
+ }
+ }
+ } else {
const TCelled minKey(minKey_, *Scheme->Keys, false);
const TCelled maxKey(maxKey_, *Scheme->Keys, false);
-
+
for (const auto& run : GetLevels()) {
switch (direction) {
case EDirection::Forward:
@@ -755,7 +755,7 @@ TAutoPtr<TTableReverseIt> TTable::IterateReverse(TRawVals key_, TTagsRef tags, I
return dbIter;
}
-TTable::TReady TTable::Select(TRawVals key_, TTagsRef tags, IPages* env, TRowState& row,
+TTable::TReady TTable::Select(TRawVals key_, TTagsRef tags, IPages* env, TRowState& row,
ui64 flg, TRowVersion snapshot,
TDeque<TPartSimpleIt>& tempIterators) const noexcept
{
@@ -789,7 +789,7 @@ TTable::TReady TTable::Select(TRawVals key_, TTagsRef tags, IPages* env, TRowSta
it->Apply(row, CommittedTransactions);
}
result.Invisible += it->InvisibleRowSkips;
- }
+ }
}
// Frozen are sorted by epoch, apply in reverse order
@@ -868,15 +868,15 @@ void TTable::DebugDump(IOutputStream& str, IPages* env, const NScheme::TTypeRegi
str << "Frozen: [";
for (const auto& it : Frozen) {
str << it->Epoch;
- }
- str << "]" << Endl
+ }
+ str << "]" << Endl
<< "Parts: ["
- << Endl;
+ << Endl;
for (const auto& fpIt: Flatten) {
- str << " ";
+ str << " ";
NFmt::Ln(*fpIt.second);
- }
- str << "]" << Endl;
+ }
+ str << "]" << Endl;
if (ColdParts) {
str << "ColdParts: [" << Endl;
for (const auto& it : ColdParts) {
@@ -892,13 +892,13 @@ void TTable::DebugDump(IOutputStream& str, IPages* env, const NScheme::TTypeRegi
for (const auto& it : Frozen) {
str << "Frozen " << it->Epoch << " dump: " << Endl;
it->DebugDump(str, reg);
- }
-
+ }
+
TDump dump(str, env, &reg);
for (const auto &it: Flatten) dump.Part(*it.second);
-}
-
+}
+
TKeyRangeCache* TTable::GetErasedKeysCache() const
{
return ErasedKeysCache.Get();
diff --git a/ydb/core/tablet_flat/flat_table.h b/ydb/core/tablet_flat/flat_table.h
index 8cc207a344b..a1fdeb97093 100644
--- a/ydb/core/tablet_flat/flat_table.h
+++ b/ydb/core/tablet_flat/flat_table.h
@@ -3,7 +3,7 @@
#include "flat_update_op.h"
#include "flat_dbase_scheme.h"
#include "flat_mem_warm.h"
-#include "flat_iterator.h"
+#include "flat_iterator.h"
#include "flat_row_scheme.h"
#include "flat_row_versions.h"
#include "flat_part_laid.h"
@@ -18,12 +18,12 @@
#include <ydb/core/scheme/scheme_tablecell.h>
#include <library/cpp/containers/stack_vector/stack_vec.h>
-
+
#include <util/generic/deque.h>
#include <util/generic/set.h>
-#include <util/generic/hash.h>
-#include <util/generic/ptr.h>
-
+#include <util/generic/hash.h>
+#include <util/generic/ptr.h>
+
namespace NKikimr {
namespace NTable {
@@ -63,9 +63,9 @@ public:
explicit TTable(TEpoch);
~TTable();
-
- void SetScheme(const TScheme::TTableInfo& tableScheme);
-
+
+ void SetScheme(const TScheme::TTableInfo& tableScheme);
+
TIntrusiveConstPtr<TRowScheme> GetScheme() const noexcept;
TEpoch Snapshot() noexcept;
@@ -128,11 +128,11 @@ public:
TAutoPtr<TTableIt> Iterate(TRawVals key, TTagsRef tags, IPages* env, ESeek, TRowVersion snapshot) const noexcept;
TAutoPtr<TTableReverseIt> IterateReverse(TRawVals key, TTagsRef tags, IPages* env, ESeek, TRowVersion snapshot) const noexcept;
- TReady Select(TRawVals key, TTagsRef tags, IPages* env, TRowState& row,
+ TReady Select(TRawVals key, TTagsRef tags, IPages* env, TRowState& row,
ui64 flg, TRowVersion snapshot, TDeque<TPartSimpleIt>& tempIterators) const noexcept;
TReady Precharge(TRawVals minKey, TRawVals maxKey, TTagsRef tags,
- IPages* env, ui64 flg,
+ IPages* env, ui64 flg,
ui64 itemsLimit, ui64 bytesLimit,
EDirection direction, TRowVersion snapshot) const;
@@ -223,11 +223,11 @@ public:
return Stat_.FrozenWaste + (Mutable ? Mutable->GetWastedMem() : 0);
}
- ui64 GetMemRowCount() const noexcept
- {
- return Stat_.FrozenRows + (Mutable ? Mutable->GetRowCount() : 0);
- }
-
+ ui64 GetMemRowCount() const noexcept
+ {
+ return Stat_.FrozenRows + (Mutable ? Mutable->GetRowCount() : 0);
+ }
+
ui64 GetOpsCount() const noexcept
{
return Stat_.FrozenOps + (Mutable ? Mutable->GetOpsCount() : 0);
@@ -254,7 +254,7 @@ public:
}
void DebugDump(IOutputStream& str, IPages *env, const NScheme::TTypeRegistry& typeRegistry) const;
-
+
TKeyRangeCache* GetErasedKeysCache() const;
bool RemoveRowVersions(const TRowVersion& lower, const TRowVersion& upper);
@@ -267,7 +267,7 @@ public:
void FillTxStatusCache(THashMap<TLogoBlobID, TSharedData>& cache) const noexcept;
-private:
+private:
TMemTable& MemTable();
void AddSafe(TPartView partView);
@@ -303,7 +303,7 @@ private:
THashMap<ui64, TOpenTransaction> OpenTransactions;
TTransactionMap<TRowVersion> CommittedTransactions;
TTransactionSet RemovedTransactions;
-};
-
-}
-}
+};
+
+}
+}
diff --git a/ydb/core/tablet_flat/flat_table_part_ut.cpp b/ydb/core/tablet_flat/flat_table_part_ut.cpp
index 1a871124b66..185265a95ef 100644
--- a/ydb/core/tablet_flat/flat_table_part_ut.cpp
+++ b/ydb/core/tablet_flat/flat_table_part_ut.cpp
@@ -3,8 +3,8 @@
#include <ydb/core/tablet_flat/test/libs/table/test_part.h>
#include <ydb/core/tablet_flat/test/libs/table/test_writer.h>
-#include "flat_stat_part.h"
-#include "flat_stat_table.h"
+#include "flat_stat_part.h"
+#include "flat_stat_table.h"
#include "flat_page_other.h"
#include <library/cpp/testing/unittest/registar.h>
@@ -37,167 +37,167 @@ Y_UNIT_TEST_SUITE(TLegacy) {
}
Y_UNIT_TEST(IndexIter) {
- TNullOutput devNull;
+ TNullOutput devNull;
IOutputStream& dbgOut = devNull; //*/ Cerr;
-
- NScheme::TTypeRegistry typeRegistry;
-
- TLayoutCook lay;
- lay
- .Col(0, 0, NScheme::NTypeIds::Uint64)
- .Col(0, 1, NScheme::NTypeIds::Uint32)
- .Col(0, 2, NScheme::NTypeIds::Uint32)
- .Key({ 0, 1});
-
+
+ NScheme::TTypeRegistry typeRegistry;
+
+ TLayoutCook lay;
+ lay
+ .Col(0, 0, NScheme::NTypeIds::Uint64)
+ .Col(0, 1, NScheme::NTypeIds::Uint32)
+ .Col(0, 2, NScheme::NTypeIds::Uint32)
+ .Key({ 0, 1});
+
TPartCook cook(lay, { true, 4096 });
-
- const ui64 X1 = 0, X2 = 3000;
-
- for (ui64 key1 = X1; key1 <= X2; key1++) {
- for (ui32 key2 = 0; key2 < 1 + key1/1000; key2++)
- cook.AddN(key1, key2, key2);
- }
-
- TPartEggs eggs = cook.Finish();
+
+ const ui64 X1 = 0, X2 = 3000;
+
+ for (ui64 key1 = X1; key1 <= X2; key1++) {
+ for (ui32 key2 = 0; key2 < 1 + key1/1000; key2++)
+ cook.AddN(key1, key2, key2);
+ }
+
+ TPartEggs eggs = cook.Finish();
UNIT_ASSERT_C(eggs.Parts.size() == 1,
"Unexpected " << eggs.Parts.size() << " results");
-
+
auto fnIterate = [&dbgOut, &typeRegistry] (TIntrusiveConstPtr<TPartStore> part, TIntrusiveConstPtr<TRowScheme> scheme) {
TPartIndexIterator idxIter(part, scheme->Keys);
-
- while (idxIter.IsValid()) {
- TDbTupleRef key = idxIter.GetCurrentKey();
- dbgOut << DbgPrintTuple(key, typeRegistry) << " " << idxIter.GetCurrentRowId() << " " << idxIter.GetCurrentDataSize() << Endl;
- idxIter.Next();
- }
- };
-
- dbgOut << "Iterate with the matching row scheme" << Endl;
+
+ while (idxIter.IsValid()) {
+ TDbTupleRef key = idxIter.GetCurrentKey();
+ dbgOut << DbgPrintTuple(key, typeRegistry) << " " << idxIter.GetCurrentRowId() << " " << idxIter.GetCurrentDataSize() << Endl;
+ idxIter.Next();
+ }
+ };
+
+ dbgOut << "Iterate with the matching row scheme" << Endl;
fnIterate(eggs.At(0), eggs.Scheme);
-
- // Add a column with default value to the key
- ui32 def10 = 121212;
- TLayoutCook newLay;
- newLay
- .Col(0, 0, NScheme::NTypeIds::Uint64)
- .Col(0, 1, NScheme::NTypeIds::Uint32)
- .Col(0, 2, NScheme::NTypeIds::Uint32)
- .Col(0, 10, NScheme::NTypeIds::Uint32, TCell((const char*)&def10, sizeof(def10)))
- .Key({ 0, 1, 10});
-
- dbgOut << "Iterate with added key column with default value" << Endl;
+
+ // Add a column with default value to the key
+ ui32 def10 = 121212;
+ TLayoutCook newLay;
+ newLay
+ .Col(0, 0, NScheme::NTypeIds::Uint64)
+ .Col(0, 1, NScheme::NTypeIds::Uint32)
+ .Col(0, 2, NScheme::NTypeIds::Uint32)
+ .Col(0, 10, NScheme::NTypeIds::Uint32, TCell((const char*)&def10, sizeof(def10)))
+ .Key({ 0, 1, 10});
+
+ dbgOut << "Iterate with added key column with default value" << Endl;
fnIterate(eggs.At(0), newLay.RowScheme());
- }
-
+ }
+
Y_UNIT_TEST(ScreenedIndexIter) {
- TNullOutput devNull;
+ TNullOutput devNull;
IOutputStream& dbgOut = devNull; //*/ Cerr;
-
- NScheme::TTypeRegistry typeRegistry;
-
- TLayoutCook lay;
- lay
- .Col(0, 0, NScheme::NTypeIds::Uint64)
- .Col(0, 1, NScheme::NTypeIds::Uint32)
- .Col(0, 2, NScheme::NTypeIds::Uint32)
- .Key({ 0, 1});
-
- const ui64 DATA_PAGE_SIZE = 4096;
+
+ NScheme::TTypeRegistry typeRegistry;
+
+ TLayoutCook lay;
+ lay
+ .Col(0, 0, NScheme::NTypeIds::Uint64)
+ .Col(0, 1, NScheme::NTypeIds::Uint32)
+ .Col(0, 2, NScheme::NTypeIds::Uint32)
+ .Key({ 0, 1});
+
+ const ui64 DATA_PAGE_SIZE = 4096;
TPartCook cook(lay, NPage::TConf(true, DATA_PAGE_SIZE));
-
- const ui64 X1 = 0, X2 = 3000;
-
- for (ui64 key1 = X1; key1 <= X2; key1++) {
- ui32 key2 = 3333;
- cook.AddN(key1, key2, key2);
- }
-
- TPartEggs eggs = cook.Finish();
+
+ const ui64 X1 = 0, X2 = 3000;
+
+ for (ui64 key1 = X1; key1 <= X2; key1++) {
+ ui32 key2 = 3333;
+ cook.AddN(key1, key2, key2);
+ }
+
+ TPartEggs eggs = cook.Finish();
UNIT_ASSERT_C(eggs.Parts.size() == 1,
"Unexpected " << eggs.Parts.size() << " results");
-
+
auto fnIterate = [&dbgOut, &typeRegistry] (TIntrusiveConstPtr<TPartStore> part, TIntrusiveConstPtr<TScreen> screen,
TIntrusiveConstPtr<TRowScheme> scheme, TIntrusiveConstPtr<NPage::TFrames> frames) -> std::pair<ui64, ui64> {
TScreenedPartIndexIterator idxIter(TPartView{part, screen, nullptr}, scheme->Keys, std::move(frames));
-
- ui64 rowCount = 0;
- ui64 size = 0;
- while (idxIter.IsValid()) {
- TDbTupleRef key = idxIter.GetCurrentKey();
- rowCount += idxIter.GetRowCountDelta();
- size += idxIter.GetDataSizeDelta();
- dbgOut << DbgPrintTuple(key, typeRegistry)
- << " " << idxIter.GetRowCountDelta() << " " << idxIter.GetDataSizeDelta()
- << " " << rowCount << " " << size << Endl;
- idxIter.Next();
- }
-
+
+ ui64 rowCount = 0;
+ ui64 size = 0;
+ while (idxIter.IsValid()) {
+ TDbTupleRef key = idxIter.GetCurrentKey();
+ rowCount += idxIter.GetRowCountDelta();
+ size += idxIter.GetDataSizeDelta();
+ dbgOut << DbgPrintTuple(key, typeRegistry)
+ << " " << idxIter.GetRowCountDelta() << " " << idxIter.GetDataSizeDelta()
+ << " " << rowCount << " " << size << Endl;
+ idxIter.Next();
+ }
+
rowCount += idxIter.GetRowCountDelta();
size += idxIter.GetDataSizeDelta();
- return {rowCount, size};
- };
-
- dbgOut << "Hide all" << Endl;
- {
+ return {rowCount, size};
+ };
+
+ dbgOut << "Hide all" << Endl;
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({});
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
- UNIT_ASSERT_VALUES_EQUAL_C(res.first, 0, "RowCount should be 0");
- UNIT_ASSERT_VALUES_EQUAL_C(res.second, 0, "DataSize should be 0");
- }
-
-        const ui64 ROWS_PER_PAGE = 169; // empirical
-        const ui64 REAL_PAGE_SIZE = 4076; // also empirical
+ UNIT_ASSERT_VALUES_EQUAL_C(res.first, 0, "RowCount should be 0");
+ UNIT_ASSERT_VALUES_EQUAL_C(res.second, 0, "DataSize should be 0");
+ }
+
+        const ui64 ROWS_PER_PAGE = 169; // empirical
+        const ui64 REAL_PAGE_SIZE = 4076; // also empirical
ui64 expectedRowCount = X2 + 1;
ui64 expectedTotalSize = 0;
ui64 expectedPageCount = (expectedRowCount + ROWS_PER_PAGE - 1) / ROWS_PER_PAGE;
for (ui32 pageId = 0; pageId < expectedPageCount; ++pageId) {
expectedTotalSize += eggs.At(0)->GetPageSize(pageId, {});
}
-
- dbgOut << "Hide none" << Endl;
- {
+
+ dbgOut << "Hide none" << Endl;
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({TScreen::THole(true)});
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
- UNIT_ASSERT_VALUES_EQUAL_C(res.first, expectedRowCount, "RowCount doesn't match");
+ UNIT_ASSERT_VALUES_EQUAL_C(res.first, expectedRowCount, "RowCount doesn't match");
UNIT_ASSERT_VALUES_EQUAL_C(res.second, expectedTotalSize, "DataSize doesn't match");
- }
-
- dbgOut << "Hide 2 pages" << Endl;
- {
+ }
+
+ dbgOut << "Hide 2 pages" << Endl;
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({TScreen::THole(0,150), TScreen::THole(550, 10000)});
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
UNIT_ASSERT_VALUES_EQUAL_C(res.first, expectedRowCount - 400, "RowCount doesn't match");
UNIT_ASSERT_VALUES_EQUAL_C(res.second, expectedTotalSize - REAL_PAGE_SIZE*2, "DataSize doesn't match");
- }
-
+ }
+
dbgOut << "Hide all except 3 pages" << Endl;
- {
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({TScreen::THole(150, 400)});
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
UNIT_ASSERT_VALUES_EQUAL_C(res.first, 250, "RowCount doesn't match");
UNIT_ASSERT_VALUES_EQUAL_C(res.second, REAL_PAGE_SIZE*3, "DataSize doesn't match");
- }
-
- dbgOut << "Hide 2 rows in one page - we just ignore this" << Endl;
- {
+ }
+
+ dbgOut << "Hide 2 rows in one page - we just ignore this" << Endl;
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({TScreen::THole(0,150), TScreen::THole(152, 10000)});
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
UNIT_ASSERT_VALUES_EQUAL_C(res.first, expectedRowCount - 2, "RowCount doesn't match");
UNIT_ASSERT_VALUES_EQUAL_C(res.second, expectedTotalSize, "DataSize doesn't match");
- }
-
+ }
+
dbgOut << "Hide 4 pages in 3 different ranges" << Endl;
- {
+ {
TIntrusiveConstPtr<TScreen> screen = new TScreen({
- TScreen::THole(400, 600),
+ TScreen::THole(400, 600),
TScreen::THole(850, 950),
- TScreen::THole(1200, 10000)
- });
+ TScreen::THole(1200, 10000)
+ });
auto res = fnIterate(eggs.At(0), screen, eggs.Scheme, nullptr);
UNIT_ASSERT_VALUES_EQUAL_C(res.first, expectedRowCount - 400 - 250 - 250, "RowCount doesn't match");
UNIT_ASSERT_VALUES_EQUAL_C(res.second, expectedTotalSize - REAL_PAGE_SIZE*4, "DataSize doesn't match");
- }
+ }
dbgOut << "Attach outer pages to index with screen" << Endl;
{
@@ -214,104 +214,104 @@ Y_UNIT_TEST_SUITE(TLegacy) {
auto res1 = fnIterate(eggs.At(0), screen, eggs.Scheme, frames);
UNIT_ASSERT_VALUES_EQUAL_C(res1.second, REAL_PAGE_SIZE*2 + 1800, "DataSize doesn't match with a screen");
}
- }
-
+ }
+
Y_UNIT_TEST(StatsIter) {
- TNullOutput devNull;
+ TNullOutput devNull;
IOutputStream& dbgOut = devNull; //*/ Cerr;
-
- NScheme::TTypeRegistry typeRegistry;
-
- TLayoutCook lay1;
- lay1
- .Col(0, 0, NScheme::NTypeIds::Uint64)
- .Col(0, 1, NScheme::NTypeIds::Uint32)
- .Col(0, 2, NScheme::NTypeIds::Uint32)
- .Key({ 0, 1});
-
+
+ NScheme::TTypeRegistry typeRegistry;
+
+ TLayoutCook lay1;
+ lay1
+ .Col(0, 0, NScheme::NTypeIds::Uint64)
+ .Col(0, 1, NScheme::NTypeIds::Uint32)
+ .Col(0, 2, NScheme::NTypeIds::Uint32)
+ .Key({ 0, 1});
+
TPartCook cook1(lay1, { true, 4096 });
-
- {
- const ui64 X1 = 0, X2 = 3000;
-
- for (ui64 key1 = X1; key1 <= X2; key1++) {
- for (ui32 key2 = 0; key2 < 10; key2++)
- cook1.AddN(key1, key2, key2);
- }
- }
-
- TPartEggs eggs1 = cook1.Finish();
+
+ {
+ const ui64 X1 = 0, X2 = 3000;
+
+ for (ui64 key1 = X1; key1 <= X2; key1++) {
+ for (ui32 key2 = 0; key2 < 10; key2++)
+ cook1.AddN(key1, key2, key2);
+ }
+ }
+
+ TPartEggs eggs1 = cook1.Finish();
UNIT_ASSERT_C(eggs1.Parts.size() == 1,
"Unexpected " << eggs1.Parts.size() << " results");
-
- // Add a column with default value to the key
- ui32 def10 = 3;
- TLayoutCook lay2;
- lay2
- .Col(0, 0, NScheme::NTypeIds::Uint64)
- .Col(0, 1, NScheme::NTypeIds::Uint32)
- .Col(0, 2, NScheme::NTypeIds::Uint32)
- .Col(0, 10, NScheme::NTypeIds::Uint32, TCell((const char*)&def10, sizeof(def10)))
- .Key({ 0, 1, 10});
-
-
+
+ // Add a column with default value to the key
+ ui32 def10 = 3;
+ TLayoutCook lay2;
+ lay2
+ .Col(0, 0, NScheme::NTypeIds::Uint64)
+ .Col(0, 1, NScheme::NTypeIds::Uint32)
+ .Col(0, 2, NScheme::NTypeIds::Uint32)
+ .Col(0, 10, NScheme::NTypeIds::Uint32, TCell((const char*)&def10, sizeof(def10)))
+ .Key({ 0, 1, 10});
+
+
TPartCook cook2(lay2, { true, 4096 });
-
- {
- const ui64 X1 = 2000, X2 = 5000;
-
- for (ui64 key1 = X1; key1 <= X2; key1++) {
- for (ui32 key2 = 0; key2 < key1%10; key2++)
- cook2.AddN(key1, key2, key2, key2);
- }
- }
-
- TPartEggs eggs2 = cook2.Finish();
+
+ {
+ const ui64 X1 = 2000, X2 = 5000;
+
+ for (ui64 key1 = X1; key1 <= X2; key1++) {
+ for (ui32 key2 = 0; key2 < key1%10; key2++)
+ cook2.AddN(key1, key2, key2, key2);
+ }
+ }
+
+ TPartEggs eggs2 = cook2.Finish();
UNIT_ASSERT_C(eggs2.Parts.size() == 1,
"Unexpected " << eggs2.Parts.size() << " results");
-
+
TIntrusiveConstPtr<TScreen> screen1 = new TScreen({
- TScreen::THole(400, 600),
- TScreen::THole(700, 800),
- TScreen::THole(1200, 100000)
- });
-
+ TScreen::THole(400, 600),
+ TScreen::THole(700, 800),
+ TScreen::THole(1200, 100000)
+ });
+
TIntrusiveConstPtr<TScreen> screen2 = new TScreen({
- TScreen::THole(2400, 2600),
- TScreen::THole(2700, 2800),
- TScreen::THole(4200, 100000)
- });
-
- TStatsIterator stIter(lay2.RowScheme()->Keys);
+ TScreen::THole(2400, 2600),
+ TScreen::THole(2700, 2800),
+ TScreen::THole(4200, 100000)
+ });
+
+ TStatsIterator stIter(lay2.RowScheme()->Keys);
stIter.Add(MakeHolder<TScreenedPartIndexIterator>(TPartView{eggs2.At(0), screen2, nullptr}, lay2.RowScheme()->Keys, nullptr));
stIter.Add(MakeHolder<TScreenedPartIndexIterator>(TPartView{eggs1.At(0), screen1, nullptr}, lay2.RowScheme()->Keys, nullptr));
-
- UNIT_ASSERT(stIter.IsValid());
- UNIT_ASSERT(stIter.GetCurrentRowCount() == 0);
- UNIT_ASSERT(stIter.GetCurrentDataSize() == 0);
- stIter.Next();
-
- TSerializedCellVec prevKey;
- ui64 prevRowCount = 0;
- ui64 prevDataSize = 0;
- while (stIter.IsValid()) {
- TDbTupleRef key = stIter.GetCurrentKey();
- UNIT_ASSERT_C(CompareTypedCellVectors(key.Columns, prevKey.GetCells().data(), key.Types, key.ColumnCount, prevKey.GetCells().size()) > 0,
- "Keys must be sorted");
-
- UNIT_ASSERT(prevRowCount < stIter.GetCurrentRowCount());
- UNIT_ASSERT(prevDataSize < stIter.GetCurrentDataSize());
-
- dbgOut << DbgPrintTuple(key, typeRegistry)
- << " " << stIter.GetCurrentRowCount() << " " << stIter.GetCurrentDataSize() << Endl;
-
+
+ UNIT_ASSERT(stIter.IsValid());
+ UNIT_ASSERT(stIter.GetCurrentRowCount() == 0);
+ UNIT_ASSERT(stIter.GetCurrentDataSize() == 0);
+ stIter.Next();
+
+ TSerializedCellVec prevKey;
+ ui64 prevRowCount = 0;
+ ui64 prevDataSize = 0;
+ while (stIter.IsValid()) {
+ TDbTupleRef key = stIter.GetCurrentKey();
+ UNIT_ASSERT_C(CompareTypedCellVectors(key.Columns, prevKey.GetCells().data(), key.Types, key.ColumnCount, prevKey.GetCells().size()) > 0,
+ "Keys must be sorted");
+
+ UNIT_ASSERT(prevRowCount < stIter.GetCurrentRowCount());
+ UNIT_ASSERT(prevDataSize < stIter.GetCurrentDataSize());
+
+ dbgOut << DbgPrintTuple(key, typeRegistry)
+ << " " << stIter.GetCurrentRowCount() << " " << stIter.GetCurrentDataSize() << Endl;
+
prevKey = TSerializedCellVec(TSerializedCellVec::Serialize(TConstArrayRef<TCell>(key.Columns, key.ColumnCount)));
- prevRowCount = stIter.GetCurrentRowCount();
- prevDataSize = stIter.GetCurrentDataSize();
- stIter.Next();
- }
- }
-
+ prevRowCount = stIter.GetCurrentRowCount();
+ prevDataSize = stIter.GetCurrentDataSize();
+ stIter.Next();
+ }
+ }
+
}
} // namespace NTest
diff --git a/ydb/core/tablet_flat/flat_table_subset.h b/ydb/core/tablet_flat/flat_table_subset.h
index 10859f05efb..bffbf5a1271 100644
--- a/ydb/core/tablet_flat/flat_table_subset.h
+++ b/ydb/core/tablet_flat/flat_table_subset.h
@@ -56,7 +56,7 @@ namespace NTable {
}
ui64 MaxRows() const noexcept
- {
+ {
ui64 rows = 0;
for (const auto &memTable : Frozen)
@@ -71,8 +71,8 @@ namespace NTable {
}
return rows;
- }
-
+ }
+
TRowVersion MinRowVersion() const noexcept
{
TRowVersion minVersion = TRowVersion::Max();
diff --git a/ydb/core/tablet_flat/flat_update_op.h b/ydb/core/tablet_flat/flat_update_op.h
index 5977b0b582b..9d7cb269efa 100644
--- a/ydb/core/tablet_flat/flat_update_op.h
+++ b/ydb/core/tablet_flat/flat_update_op.h
@@ -4,8 +4,8 @@
#include "flat_row_column.h"
#include <ydb/core/scheme/scheme_tablecell.h>
#include <ydb/core/scheme_types/scheme_raw_type_value.h>
-#include <util/string/printf.h>
-
+#include <util/string/printf.h>
+
namespace NKikimr {
namespace NTable {
@@ -22,8 +22,8 @@ namespace NTable {
default:
return "!!unexpected op!!";
}
- }
-
+ }
+
struct TUpdateOp {
TUpdateOp() = default;
@@ -55,6 +55,6 @@ namespace NTable {
TRawTypeValue Value;
};
-}
-
+}
+
}
diff --git a/ydb/core/tablet_flat/shared_sausagecache.cpp b/ydb/core/tablet_flat/shared_sausagecache.cpp
index 6fad54f1b03..c60c30202ba 100644
--- a/ydb/core/tablet_flat/shared_sausagecache.cpp
+++ b/ydb/core/tablet_flat/shared_sausagecache.cpp
@@ -169,8 +169,8 @@ class TSharedPageCache : public TActor<TSharedPageCache> {
TCacheCache<TPage, TPage::TWeight> Cache;
- TControlWrapper SizeOverride;
-
+ TControlWrapper SizeOverride;
+
ui64 StatBioReqs = 0;
ui64 StatHitPages = 0;
ui64 StatHitBytes = 0;
@@ -184,7 +184,7 @@ class TSharedPageCache : public TActor<TSharedPageCache> {
Owner = owner;
Logger = new NUtil::TLogger(sys, NKikimrServices::TABLET_SAUSAGECACHE);
- sys->AppData<TAppData>()->Icb->RegisterSharedControl(SizeOverride, Config->CacheName + "_Size");
+ sys->AppData<TAppData>()->Icb->RegisterSharedControl(SizeOverride, Config->CacheName + "_Size");
}
void TakePoison()
@@ -1010,7 +1010,7 @@ public:
: TActor(&TThis::StateFunc)
, Config(config)
, Cache(*config->CacheConfig)
- , SizeOverride(config->CacheConfig->Limit, 1, Max<i64>())
+ , SizeOverride(config->CacheConfig->Limit, 1, Max<i64>())
{
AsyncRequests.Limit = Config->TotalAsyncQueueInFlyLimit;
ScanRequests.Limit = Config->TotalScanQueueInFlyLimit;
diff --git a/ydb/core/tablet_flat/shared_sausagecache.h b/ydb/core/tablet_flat/shared_sausagecache.h
index d1adbdf1d8c..f7afb2ee999 100644
--- a/ydb/core/tablet_flat/shared_sausagecache.h
+++ b/ydb/core/tablet_flat/shared_sausagecache.h
@@ -43,7 +43,7 @@ struct TSharedPageCacheConfig final : public TAtomicRefCount<TSharedPageCacheCon
TIntrusivePtr<TCacheCacheConfig> CacheConfig;
ui64 TotalScanQueueInFlyLimit = 512 * 1024 * 1024;
ui64 TotalAsyncQueueInFlyLimit = 512 * 1024 * 1024;
- TString CacheName = "SharedPageCache";
+ TString CacheName = "SharedPageCache";
TIntrusivePtr<TSharedPageCacheCounters> Counters;
};
diff --git a/ydb/core/tablet_flat/tablet_flat_executed.cpp b/ydb/core/tablet_flat/tablet_flat_executed.cpp
index c0d1b68ef2d..4b3c15490c5 100644
--- a/ydb/core/tablet_flat/tablet_flat_executed.cpp
+++ b/ydb/core/tablet_flat/tablet_flat_executed.cpp
@@ -122,18 +122,18 @@ void TTabletExecutedFlat::HandleLocalMKQL(TEvTablet::TEvLocalMKQL::TPtr &ev, con
Execute(Factory->Make(ev), ctx);
}
-void TTabletExecutedFlat::HandleLocalSchemeTx(TEvTablet::TEvLocalSchemeTx::TPtr &ev, const TActorContext &ctx) {
+void TTabletExecutedFlat::HandleLocalSchemeTx(TEvTablet::TEvLocalSchemeTx::TPtr &ev, const TActorContext &ctx) {
Y_VERIFY(Factory, "Need IMiniKQLFactory to execute scheme query");
Execute(Factory->Make(ev), ctx);
-}
-
-void TTabletExecutedFlat::HandleLocalReadColumns(TEvTablet::TEvLocalReadColumns::TPtr &ev, const TActorContext &ctx) {
- Y_VERIFY(Factory, "Need IMiniKQLFactory to execute read columns query");
-
- Execute(Factory->Make(ev), ctx);
-}
-
+}
+
+void TTabletExecutedFlat::HandleLocalReadColumns(TEvTablet::TEvLocalReadColumns::TPtr &ev, const TActorContext &ctx) {
+ Y_VERIFY(Factory, "Need IMiniKQLFactory to execute read columns query");
+
+ Execute(Factory->Make(ev), ctx);
+}
+
void TTabletExecutedFlat::SignalTabletActive(const TActorContext &ctx) {
ctx.Send(Tablet(), new TEvTablet::TEvTabletActive());
}
@@ -253,8 +253,8 @@ bool TTabletExecutedFlat::HandleDefaultEvents(STFUNC_SIG) {
HFunc(TEvTablet::TEvTabletStop, HandleTabletStop);
HFunc(TEvTablet::TEvTabletDead, HandleTabletDead);
HFunc(TEvTablet::TEvLocalMKQL, HandleLocalMKQL);
- HFunc(TEvTablet::TEvLocalSchemeTx, HandleLocalSchemeTx);
- HFunc(TEvTablet::TEvLocalReadColumns, HandleLocalReadColumns);
+ HFunc(TEvTablet::TEvLocalSchemeTx, HandleLocalSchemeTx);
+ HFunc(TEvTablet::TEvLocalReadColumns, HandleLocalReadColumns);
hFunc(TEvTablet::TEvGetCounters, HandleGetCounters);
hFunc(TEvTablet::TEvUpdateConfig, Handle);
HFunc(NMon::TEvRemoteHttpInfo, RenderHtmlPage);
diff --git a/ydb/core/tablet_flat/tablet_flat_executed.h b/ydb/core/tablet_flat/tablet_flat_executed.h
index 1d9935de96a..117d7532bb3 100644
--- a/ydb/core/tablet_flat/tablet_flat_executed.h
+++ b/ydb/core/tablet_flat/tablet_flat_executed.h
@@ -56,8 +56,8 @@ protected:
void HandleTabletStop(TEvTablet::TEvTabletStop::TPtr &ev, const TActorContext &ctx);
void HandleTabletDead(TEvTablet::TEvTabletDead::TPtr &ev, const TActorContext &ctx);
void HandleLocalMKQL(TEvTablet::TEvLocalMKQL::TPtr &ev, const TActorContext &ctx);
- void HandleLocalSchemeTx(TEvTablet::TEvLocalSchemeTx::TPtr &ev, const TActorContext &ctx);
- void HandleLocalReadColumns(TEvTablet::TEvLocalReadColumns::TPtr &ev, const TActorContext &ctx);
+ void HandleLocalSchemeTx(TEvTablet::TEvLocalSchemeTx::TPtr &ev, const TActorContext &ctx);
+ void HandleLocalReadColumns(TEvTablet::TEvLocalReadColumns::TPtr &ev, const TActorContext &ctx);
void HandleGetCounters(TEvTablet::TEvGetCounters::TPtr &ev);
STFUNC(StateInitImpl);
diff --git a/ydb/core/tablet_flat/test/libs/exec/runner.h b/ydb/core/tablet_flat/test/libs/exec/runner.h
index 1885d12e3c4..3b58ab814b4 100644
--- a/ydb/core/tablet_flat/test/libs/exec/runner.h
+++ b/ydb/core/tablet_flat/test/libs/exec/runner.h
@@ -44,7 +44,7 @@ namespace NFake {
SetupStaticServices();
auto *types = NTable::NTest::DbgRegistry();
- auto *app = new TAppData(0, 0, 0, 0, { }, types, nullptr, nullptr, nullptr);
+ auto *app = new TAppData(0, 0, 0, 0, { }, types, nullptr, nullptr, nullptr);
Env.Initialize({ app, nullptr, nullptr });
Env.SetDispatchTimeout(DEFAULT_DISPATCH_TIMEOUT);
diff --git a/ydb/core/tablet_flat/test/libs/rows/misc.h b/ydb/core/tablet_flat/test/libs/rows/misc.h
index a9d1b6653e3..955a34d4a7e 100644
--- a/ydb/core/tablet_flat/test/libs/rows/misc.h
+++ b/ydb/core/tablet_flat/test/libs/rows/misc.h
@@ -11,8 +11,8 @@ namespace NTest{
class TGrowHeap: public TAtomicRefCount<TGrowHeap> {
public:
- explicit TGrowHeap(size_t bytes)
- : Pool(bytes, TMemoryPool::TLinearGrow::Instance())
+ explicit TGrowHeap(size_t bytes)
+ : Pool(bytes, TMemoryPool::TLinearGrow::Instance())
{
}
diff --git a/ydb/core/tablet_flat/test/libs/table/test_part.h b/ydb/core/tablet_flat/test/libs/table/test_part.h
index 0d155c4b879..e1dde28eeaa 100644
--- a/ydb/core/tablet_flat/test/libs/table/test_part.h
+++ b/ydb/core/tablet_flat/test/libs/table/test_part.h
@@ -81,11 +81,11 @@ namespace NTest {
return { true, Get(part, room, ref) };
}
-
+
const TSharedData* TryGetPage(const TPart *part, TPageId ref, TGroupId groupId) override
- {
+ {
return Get(part, groupId.Index, ref);
- }
+ }
private:
const TSharedData* Get(const TPart *part, ui32 room, ui32 ref) const
diff --git a/ydb/core/tablet_flat/ut/flat_database_ut_common.h b/ydb/core/tablet_flat/ut/flat_database_ut_common.h
index d67f46462c1..966dc0ba351 100644
--- a/ydb/core/tablet_flat/ut/flat_database_ut_common.h
+++ b/ydb/core/tablet_flat/ut/flat_database_ut_common.h
@@ -1,5 +1,5 @@
-#pragma once
-
+#pragma once
+
#include "flat_test_db_helpers.h"
#include <ydb/core/tablet_flat/flat_dbase_scheme.h>
diff --git a/ydb/core/tablet_flat/ut/flat_test_db.cpp b/ydb/core/tablet_flat/ut/flat_test_db.cpp
index a3f39734e49..b65757d4bbd 100644
--- a/ydb/core/tablet_flat/ut/flat_test_db.cpp
+++ b/ydb/core/tablet_flat/ut/flat_test_db.cpp
@@ -1,53 +1,53 @@
-#include "flat_test_db.h"
-#include "flat_database_ut_common.h"
-
+#include "flat_test_db.h"
+#include "flat_database_ut_common.h"
+
#include <ydb/core/tablet_flat/flat_database.h>
#include <ydb/core/tablet_flat/test/libs/rows/tool.h>
#include <ydb/core/scheme_types/scheme_types.h>
#include <ydb/core/util/pb.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NTable {
-
-struct TFakeKey {
+
+struct TFakeKey {
TVector<TFakeTableCell> Columns;
-};
-
-struct TFakeKeyComparator {
- bool operator () (const TFakeKey& a, const TFakeKey& b) const {
+};
+
+struct TFakeKeyComparator {
+ bool operator () (const TFakeKey& a, const TFakeKey& b) const {
Y_ASSERT(a.Columns.size() == b.Columns.size());
- for (ui32 i = 0; i < a.Columns.size(); ++i) {
- const TRawTypeValue& av = a.Columns[i].Get();
- const TRawTypeValue& bv = b.Columns[i].Get();
- if (av.IsEmpty() && !bv.IsEmpty())
- return true;
- if (bv.IsEmpty())
- return false;
+ for (ui32 i = 0; i < a.Columns.size(); ++i) {
+ const TRawTypeValue& av = a.Columns[i].Get();
+ const TRawTypeValue& bv = b.Columns[i].Get();
+ if (av.IsEmpty() && !bv.IsEmpty())
+ return true;
+ if (bv.IsEmpty())
+ return false;
Y_ASSERT(av.Type() == bv.Type());
- int res = CompareTypedCells(TCell(&av), TCell(&bv), av.Type());
- if (res)
- return res < 0;
- }
- return false;
- }
-};
-
-struct TFakeVal {
- bool IsDeleted;
+ int res = CompareTypedCells(TCell(&av), TCell(&bv), av.Type());
+ if (res)
+ return res < 0;
+ }
+ return false;
+ }
+};
+
+struct TFakeVal {
+ bool IsDeleted;
TMap<TTag, TFakeTableCell> Columns;
-
+
void ApplyUpdate(const TFakeVal& other)
{
if (IsDeleted = other.IsDeleted) {
- Columns.clear();
+ Columns.clear();
} else {
for (auto &val: other.Columns)
Columns[val.first] = val.second;
- }
+ }
}
-
+
TCell Read(
TArrayRef<const TFakeTableCell> key, const TColumn &col)
const noexcept
@@ -58,59 +58,59 @@ struct TFakeVal {
auto *val = Columns.FindPtr(col.Id);
return val ? **val : col.Null;
- }
- }
-
- void Swap(TFakeVal& other) {
- std::swap(IsDeleted, other.IsDeleted);
- Columns.swap(other.Columns);
- }
-};
-
+ }
+ }
+
+ void Swap(TFakeVal& other) {
+ std::swap(IsDeleted, other.IsDeleted);
+ Columns.swap(other.Columns);
+ }
+};
+
typedef TMap<TFakeKey, TFakeVal, TFakeKeyComparator> TFakeTable;
-
-
-class TFakeDbIterator : public ITestIterator {
+
+
+class TFakeDbIterator : public ITestIterator {
TVector<ui32> ValueTags;
TVector<const TColumn*> Cols;
- TFakeTable::const_iterator RowIt;
- TFakeTable::const_iterator RowEnd;
-
+ TFakeTable::const_iterator RowIt;
+ TFakeTable::const_iterator RowEnd;
+
TVector<NScheme::TTypeId> KeyTypes;
TVector<TCell> KeyCells;
-
+
TVector<NScheme::TTypeId> ValueTypes;
TVector<TCell> ValueCells;
bool First = true;
-
-public:
+
+public:
explicit TFakeDbIterator(const TScheme& scheme, ui32 root, TTagsRef tags, TFakeTable::const_iterator rowIt, TFakeTable::const_iterator rowEnd)
: ValueTags(tags.begin(), tags.end())
- , RowIt(rowIt)
- , RowEnd(rowEnd)
- {
+ , RowIt(rowIt)
+ , RowEnd(rowEnd)
+ {
Cols.reserve(tags.size());
ValueTypes.reserve(tags.size());
ValueCells.reserve(tags.size());
- for (auto tag : ValueTags) {
+ for (auto tag : ValueTags) {
auto *column = scheme.GetColumnInfo(root, tag);
Cols.push_back(column);
ValueTypes.push_back(column->PType);
- }
-
+ }
+
for (auto tag : scheme.GetTableInfo(root)->KeyColumns) {
KeyTypes.push_back(scheme.GetColumnInfo(root, tag)->PType);
- }
-
- KeyCells.reserve(KeyTypes.size());
- }
-
- const TFakeKey& GetFakeKey() const {
- return RowIt->first;
- }
-
+ }
+
+ KeyCells.reserve(KeyTypes.size());
+ }
+
+ const TFakeKey& GetFakeKey() const {
+ return RowIt->first;
+ }
+
EReady Next(ENext mode) override
{
/* Should position to the first row on first Next() to conform db
@@ -126,110 +126,110 @@ public:
break;
}
- FillCells();
+ FillCells();
return IsValid () ? EReady::Data : EReady::Gone;
- }
-
+ }
+
bool IsValid() override {
- return RowIt != RowEnd;
- }
-
+ return RowIt != RowEnd;
+ }
+
bool IsRowDeleted() override {
Y_ASSERT(IsValid());
- return RowIt->second.IsDeleted;
- }
-
+ return RowIt->second.IsDeleted;
+ }
+
TDbTupleRef GetKey() override {
- if (!IsValid())
- return TDbTupleRef();
-
- return TDbTupleRef(&KeyTypes[0], &KeyCells[0], KeyCells.size());
- }
-
+ if (!IsValid())
+ return TDbTupleRef();
+
+ return TDbTupleRef(&KeyTypes[0], &KeyCells[0], KeyCells.size());
+ }
+
TDbTupleRef GetValues() override {
- if (!IsValid())
- return TDbTupleRef();
-
- if (RowIt->second.IsDeleted)
- return TDbTupleRef();
-
- return TDbTupleRef(&ValueTypes[0], &ValueCells[0], ValueCells.size());
- }
-
+ if (!IsValid())
+ return TDbTupleRef();
+
+ if (RowIt->second.IsDeleted)
+ return TDbTupleRef();
+
+ return TDbTupleRef(&ValueTypes[0], &ValueCells[0], ValueCells.size());
+ }
+
TCell GetValue(ui32 idx) override {
- if (!IsValid())
- return TCell();
-
- return ValueCells[idx];
- }
-
-private:
- void FillCells() {
- KeyCells.clear();
- ValueCells.clear();
- if (!IsValid())
- return;
-
+ if (!IsValid())
+ return TCell();
+
+ return ValueCells[idx];
+ }
+
+private:
+ void FillCells() {
+ KeyCells.clear();
+ ValueCells.clear();
+ if (!IsValid())
+ return;
+
for (const TFakeTableCell& kc : RowIt->first.Columns)
KeyCells.push_back(*kc);
-
+
for (auto *col: Cols)
ValueCells.emplace_back(RowIt->second.Read(RowIt->first.Columns, *col));
- }
-};
-
+ }
+};
+
// Simple test implementation of a DB using TMap<>
-class TFakeDb : public ITestDb {
-private:
- TAutoPtr<TScheme> Scheme;
-
-public:
+class TFakeDb : public ITestDb {
+private:
+ TAutoPtr<TScheme> Scheme;
+
+public:
void Init(const TScheme& scheme) override {
- Scheme.Reset(new TScheme(scheme));
- }
-
+ Scheme.Reset(new TScheme(scheme));
+ }
+
const TScheme& GetScheme() const override {
- return *Scheme;
- }
-
+ return *Scheme;
+ }
+
TString FinishTransaction(bool commit) override {
- if (commit) {
+ if (commit) {
TAutoPtr<TSchemeChanges> schemeDeltaRecs = SchemeChanges.Flush();
TSchemeModifier(*Scheme).Apply(*schemeDeltaRecs);
- // Apply scheme operations that affect existing data
- for (ui32 i = 0; i < schemeDeltaRecs->DeltaSize(); ++i) {
+ // Apply scheme operations that affect existing data
+ for (ui32 i = 0; i < schemeDeltaRecs->DeltaSize(); ++i) {
const TAlterRecord& rec = schemeDeltaRecs->GetDelta(i);
- switch (rec.GetDeltaType()) {
+ switch (rec.GetDeltaType()) {
case TAlterRecord::AddTable:
- Tables[rec.GetTableId()] = TFakeTable();
- break;
+ Tables[rec.GetTableId()] = TFakeTable();
+ break;
case TAlterRecord::DropTable:
- Tables.erase(rec.GetTableId());
- break;
+ Tables.erase(rec.GetTableId());
+ break;
case TAlterRecord::DropColumn:
- EraseColumnValues(rec.GetTableId(), rec.GetColumnId());
- break;
- default:
- break;
- }
- }
-
- // Apply data modifications
- for (auto& table : TxChanges) {
- ui32 rootId = table.first;
- for (auto& row : table.second) {
- TFakeVal& fv = Tables[rootId][row.first];
- // Replace the whole row with the updated
- fv.Swap(row.second);
- }
- }
- }
- TxChanges.clear();
+ EraseColumnValues(rec.GetTableId(), rec.GetColumnId());
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Apply data modifications
+ for (auto& table : TxChanges) {
+ ui32 rootId = table.first;
+ for (auto& row : table.second) {
+ TFakeVal& fv = Tables[rootId][row.first];
+ // Replace the whole row with the updated
+ fv.Swap(row.second);
+ }
+ }
+ }
+ TxChanges.clear();
SchemeChanges.Flush();
return TString();
- }
-
+ }
+
void Update(ui32 root, ERowOp rop, TRawVals key, TArrayRef<const TUpdateOp> ops) override
{
if (rop == ERowOp::Upsert) {
@@ -241,99 +241,99 @@ public:
void UpdateRow(ui32 root, TRawVals key, TArrayRef<const TUpdateOp> ops)
{
- TFakeKey fk;
+ TFakeKey fk;
FillKey(root, fk, key, true);
-
- // Copy previous value from the commited data
+
+ // Copy previous value from the committed data
if (!TxChanges[root].contains(fk) && Tables[root].contains(fk)) {
- TxChanges[root][fk] = Tables[root][fk];
- }
-
- // Apply changes
- TFakeVal& fv = TxChanges[root][fk];
- fv.IsDeleted = false;
+ TxChanges[root][fk] = Tables[root][fk];
+ }
+
+ // Apply changes
+ TFakeVal& fv = TxChanges[root][fk];
+ fv.IsDeleted = false;
for (auto &one: ops) {
if (one.Op == ECellOp::Null || !one.Value)
fv.Columns[one.Tag].Set({ });
- else
+ else
fv.Columns[one.Tag].Set(one.Value);
- }
- }
-
+ }
+ }
+
void EraseRow(ui32 root, TRawVals key) {
- TFakeKey fk;
+ TFakeKey fk;
FillKey(root, fk, key, true);
- TFakeVal fv;
- fv.IsDeleted = true;
- TxChanges[root][fk] = fv;
- }
-
+ TFakeVal fv;
+ fv.IsDeleted = true;
+ TxChanges[root][fk] = fv;
+ }
+
void Precharge(ui32 root,
TRawVals keyFrom, TRawVals keyTo,
TTagsRef tags, ui32 flags) override {
- Y_UNUSED(root);
- Y_UNUSED(keyFrom);
- Y_UNUSED(keyTo);
- Y_UNUSED(tags);
- Y_UNUSED(flags);
- }
-
+ Y_UNUSED(root);
+ Y_UNUSED(keyFrom);
+ Y_UNUSED(keyTo);
+ Y_UNUSED(tags);
+ Y_UNUSED(flags);
+ }
+
ITestIterator* Iterate(ui32 root, TRawVals key, TTagsRef tags, ELookup mode) noexcept override {
if (!key) {
return new TFakeDbIterator(GetScheme(), root, tags, Tables[root].begin(), Tables[root].end());
- }
-
- TFakeKey fk;
+ }
+
+ TFakeKey fk;
FillKey(root, fk, key, false);
if (mode == ELookup::ExactMatch) {
auto begin = Tables[root].find(fk);
decltype(begin) end = begin;
return new TFakeDbIterator(GetScheme(), root, tags, begin, end == Tables[root].end() ? end : ++end);
- } else {
+ } else {
auto begin = Tables[root].lower_bound(fk);
if (mode == ELookup::GreaterThan && begin != Tables[root].end()) {
- TFakeKeyComparator keyLess;
- // proceed to the next row if keys are equal
+ TFakeKeyComparator keyLess;
+ // proceed to the next row if keys are equal
if (!keyLess(fk, begin->first) && !keyLess(begin->first, fk))
++begin;
- }
+ }
return new TFakeDbIterator(GetScheme(), root, tags, begin, Tables[root].end());
- }
- }
-
+ }
+ }
+
void Apply(const TSchemeChanges &delta) override
{
SchemeChanges.Merge(delta);
}
-private:
+private:
void FillKey(ui32, TFakeKey& fk, TRawVals key, bool) const
{
fk.Columns.resize(key.size());
for (ui32 on = 0; on < fk.Columns.size(); on++) {
fk.Columns[on].Set(key[on]);
- }
- }
-
- void EraseColumnValues(ui32 tableId, ui32 colId) {
- for (auto& row : Tables[tableId]) {
- row.second.Columns.erase(colId);
- }
- }
-
-private:
+ }
+ }
+
+ void EraseColumnValues(ui32 tableId, ui32 colId) {
+ for (auto& row : Tables[tableId]) {
+ row.second.Columns.erase(colId);
+ }
+ }
+
+private:
THashMap<ui32, TFakeTable> Tables;
THashMap<ui32, TFakeTable> TxChanges;
TAlter SchemeChanges;
-};
-
-
-TAutoPtr<ITestDb> CreateFakeDb() {
- return new TFakeDb();
-}
-
+};
+
+
+TAutoPtr<ITestDb> CreateFakeDb() {
+ return new TFakeDb();
+}
+
} // namespace NTable
-} // namespace NKikimr
+} // namespace NKikimr
diff --git a/ydb/core/tablet_flat/ut/flat_test_db.h b/ydb/core/tablet_flat/ut/flat_test_db.h
index 69b15e3635b..ed0af81c68a 100644
--- a/ydb/core/tablet_flat/ut/flat_test_db.h
+++ b/ydb/core/tablet_flat/ut/flat_test_db.h
@@ -1,316 +1,316 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/tablet_flat/test/libs/table/test_pretty.h>
#include <ydb/core/tablet_flat/flat_database.h>
#include <ydb/core/tablet_flat/flat_dbase_scheme.h>
#include <ydb/core/tablet_flat/flat_update_op.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NTable {
-
-// Unified interface for iterator
-class ITestIterator {
-public:
- virtual ~ITestIterator() {}
-
+
+// Unified interface for iterator
+class ITestIterator {
+public:
+ virtual ~ITestIterator() {}
+
virtual EReady Next(ENext mode) = 0;
- virtual bool IsValid() = 0;
- virtual bool IsRowDeleted() = 0;
- virtual TDbTupleRef GetKey() = 0;
- virtual TDbTupleRef GetValues() = 0;
- virtual TCell GetValue(ui32 idx) = 0;
-};
-
-// Unified interface for real DB implementation and simple reference implementation
-class ITestDb {
-public:
+ virtual bool IsValid() = 0;
+ virtual bool IsRowDeleted() = 0;
+ virtual TDbTupleRef GetKey() = 0;
+ virtual TDbTupleRef GetValues() = 0;
+ virtual TCell GetValue(ui32 idx) = 0;
+};
+
+// Unified interface for real DB implementation and simple reference implementation
+class ITestDb {
+public:
using ECodec = NPage::ECodec;
using ECache = NPage::ECache;
- virtual ~ITestDb() {}
-
- virtual void Init(const TScheme& scheme) = 0;
- virtual const TScheme& GetScheme() const = 0;
+ virtual ~ITestDb() {}
+
+ virtual void Init(const TScheme& scheme) = 0;
+ virtual const TScheme& GetScheme() const = 0;
virtual TString FinishTransaction(bool commit) = 0;
virtual void Update(ui32 root, ERowOp, TRawVals key, TArrayRef<const TUpdateOp> ops) = 0;
-
- virtual void Precharge(ui32 root,
+
+ virtual void Precharge(ui32 root,
TRawVals keyFrom, TRawVals keyTo,
TTagsRef tags, ui32 flags) = 0;
virtual ITestIterator* Iterate(ui32 root, TRawVals key, TTagsRef tags, ELookup) = 0;
virtual void Apply(const TSchemeChanges&) = 0;
-};
-
-// Means that iterator needs pages to be loaded
-class TIteratorNotReady : public yexception {
-public:
- TIteratorNotReady() : yexception() {}
-};
-
-class TFlatDbIterator : public ITestIterator {
+};
+
+// Means that iterator needs pages to be loaded
+class TIteratorNotReady : public yexception {
+public:
+ TIteratorNotReady() : yexception() {}
+};
+
+class TFlatDbIterator : public ITestIterator {
TAutoPtr<TTableIt> Iter;
-
-public:
+
+public:
explicit TFlatDbIterator(TAutoPtr<TTableIt> iter)
- : Iter(iter)
- {}
-
+ : Iter(iter)
+ {}
+
EReady Next(ENext mode) override
{
const auto ready = Iter->Next(mode);
if (ready == EReady::Page)
- throw TIteratorNotReady();
+ throw TIteratorNotReady();
return ready;
- }
-
+ }
+
bool IsValid() override {
return Iter->Last() == EReady::Data;
- }
-
+ }
+
bool IsRowDeleted() override {
return Iter->Row().GetRowState() == ERowOp::Erase;
- }
-
+ }
+
TDbTupleRef GetKey() override {
- return Iter->GetKey();
- }
-
+ return Iter->GetKey();
+ }
+
TDbTupleRef GetValues() override {
- return Iter->GetValues();
- }
-
+ return Iter->GetValues();
+ }
+
TCell GetValue(ui32 idx) override {
return Iter->Row().Get(idx);
- }
-};
-
-class TFlatDbWrapper : public ITestDb {
-private:
- TDatabase* Db;
-
-public:
-
- TFlatDbWrapper()
- : Db(nullptr)
- {}
-
- void SetDb(TDatabase* db) {
- Db = db;
- }
-
- const TDatabase* GetDb() const {
- return Db;
- }
-
+ }
+};
+
+class TFlatDbWrapper : public ITestDb {
+private:
+ TDatabase* Db;
+
+public:
+
+ TFlatDbWrapper()
+ : Db(nullptr)
+ {}
+
+ void SetDb(TDatabase* db) {
+ Db = db;
+ }
+
+ const TDatabase* GetDb() const {
+ return Db;
+ }
+
void Init(const TScheme& scheme) override {
Y_UNUSED(scheme);
Y_VERIFY("Not supported by flat db wrapper");
- }
-
+ }
+
const TScheme& GetScheme() const override {
return Db->GetScheme();
- }
-
+ }
+
TString FinishTransaction(bool commit) override {
Y_UNUSED(commit);
Y_VERIFY("Not supported by flat db wrapper");
- return "42";
- }
-
+ return "42";
+ }
+
void Update(ui32 root, ERowOp rop, TRawVals key, TArrayRef<const TUpdateOp> ops) override
{
Db->Update(root, rop, key, ops);
- }
-
+ }
+
void Precharge(ui32 root,
TRawVals keyFrom, TRawVals keyTo,
TTagsRef tags, ui32 flags) override {
bool res = Db->Precharge(root, keyFrom, keyTo, tags, flags, -1, -1);
- if (!res)
- throw TIteratorNotReady();
- }
-
+ if (!res)
+ throw TIteratorNotReady();
+ }
+
ITestIterator* Iterate(ui32 root, TRawVals key, TTagsRef tags, ELookup mode) override {
if (auto res = Db->Iterate(root, key, tags, mode))
return new TFlatDbIterator(res);
throw TIteratorNotReady();
- }
-
+ }
+
void Apply(const TSchemeChanges &delta) override
{
Db->Alter().Merge(delta);
}
-};
-
-
-// Create a simple refenrce DB implementation wrapped in ITestDB interface
-TAutoPtr<ITestDb> CreateFakeDb();
-
-
-// Iterator for TDbPair
-class TIteratorPair : public ITestIterator {
- TAutoPtr<ITestIterator> It1;
- TAutoPtr<ITestIterator> It2;
-
-public:
- TIteratorPair(TAutoPtr<ITestIterator> it1, TAutoPtr<ITestIterator> it2)
- : It1(it1)
- , It2(it2)
- {}
-
+};
+
+
+// Create a simple reference DB implementation wrapped in the ITestDb interface
+TAutoPtr<ITestDb> CreateFakeDb();
+
+
+// Iterator for TDbPair
+class TIteratorPair : public ITestIterator {
+ TAutoPtr<ITestIterator> It1;
+ TAutoPtr<ITestIterator> It2;
+
+public:
+ TIteratorPair(TAutoPtr<ITestIterator> it1, TAutoPtr<ITestIterator> it2)
+ : It1(it1)
+ , It2(it2)
+ {}
+
EReady Next(ENext mode) override
{
const auto one = It1->Next(mode);
const auto two = It2->Next(mode);
UNIT_ASSERT(one == two);
return one;
- }
-
+ }
+
bool IsValid() override {
- return It1->IsValid();
- }
-
+ return It1->IsValid();
+ }
+
bool IsRowDeleted() override {
- return It2->IsRowDeleted();
- }
-
+ return It2->IsRowDeleted();
+ }
+
TDbTupleRef GetKey() override {
- TDbTupleRef keyTuple1 = It1->GetKey();
- TDbTupleRef keyTuple2 = It2->GetKey();
- VerifyEqualTuples(keyTuple1, keyTuple2);
- return keyTuple1;
- }
-
+ TDbTupleRef keyTuple1 = It1->GetKey();
+ TDbTupleRef keyTuple2 = It2->GetKey();
+ VerifyEqualTuples(keyTuple1, keyTuple2);
+ return keyTuple1;
+ }
+
TDbTupleRef GetValues() override {
- TDbTupleRef tuple1 = It1->GetValues();
- TDbTupleRef tuple2 = It2->GetValues();
- VerifyEqualTuples(tuple1, tuple2);
- return tuple1;
- }
-
+ TDbTupleRef tuple1 = It1->GetValues();
+ TDbTupleRef tuple2 = It2->GetValues();
+ VerifyEqualTuples(tuple1, tuple2);
+ return tuple1;
+ }
+
TCell GetValue(ui32 idx) override {
- TCell c1 = It1->GetValue(idx);
- TCell c2 = It2->GetValue(idx);
- UNIT_ASSERT(c1.IsNull() == c2.IsNull());
- if (!c1.IsNull()) {
- UNIT_ASSERT(c1.Size() == c2.Size());
- UNIT_ASSERT(0 == memcmp(c1.Data(), c2.Data(), c1.Size()));
- }
- return c1;
- }
-
-private:
- static void VerifyEqualTuples(const TDbTupleRef& a, const TDbTupleRef& b) {
+ TCell c1 = It1->GetValue(idx);
+ TCell c2 = It2->GetValue(idx);
+ UNIT_ASSERT(c1.IsNull() == c2.IsNull());
+ if (!c1.IsNull()) {
+ UNIT_ASSERT(c1.Size() == c2.Size());
+ UNIT_ASSERT(0 == memcmp(c1.Data(), c2.Data(), c1.Size()));
+ }
+ return c1;
+ }
+
+private:
+ static void VerifyEqualTuples(const TDbTupleRef& a, const TDbTupleRef& b) {
TString aStr = PrintRow(a);
TString bStr = PrintRow(b);
- UNIT_ASSERT_NO_DIFF(aStr, bStr);
- }
-};
-
-// Applies operations to 2 DBs and compares results
-class TDbPair : public ITestDb {
-private:
- ITestDb* Db1;
- ITestDb* Db2;
-
-public:
-
- TDbPair(ITestDb& db1, ITestDb& db2)
- : Db1(&db1)
- , Db2(&db2)
- {}
-
+ UNIT_ASSERT_NO_DIFF(aStr, bStr);
+ }
+};
+
+// Applies operations to 2 DBs and compares results
+class TDbPair : public ITestDb {
+private:
+ ITestDb* Db1;
+ ITestDb* Db2;
+
+public:
+
+ TDbPair(ITestDb& db1, ITestDb& db2)
+ : Db1(&db1)
+ , Db2(&db2)
+ {}
+
void Init(const TScheme& scheme) override {
- Db1->Init(scheme);
- Db2->Init(scheme);
- }
-
+ Db1->Init(scheme);
+ Db2->Init(scheme);
+ }
+
const TScheme& GetScheme() const override {
- return Db1->GetScheme();
- }
-
- const ITestDb* GetDb1() const {
- return Db1;
- }
-
- const ITestDb* GetDb2() const {
- return Db2;
- }
-
+ return Db1->GetScheme();
+ }
+
+ const ITestDb* GetDb1() const {
+ return Db1;
+ }
+
+ const ITestDb* GetDb2() const {
+ return Db2;
+ }
+
TString FinishTransaction(bool commit) override {
- auto res = Db1->FinishTransaction(commit);
- Db2->FinishTransaction(commit);
- return res;
- }
-
+ auto res = Db1->FinishTransaction(commit);
+ Db2->FinishTransaction(commit);
+ return res;
+ }
+
void Update(ui32 root, ERowOp rop, TRawVals key, TArrayRef<const TUpdateOp> ops) override {
Db1->Update(root, rop, key, ops);
Db2->Update(root, rop, key, ops);
- }
-
+ }
+
void Precharge(ui32 root,
TRawVals keyFrom, TRawVals keyTo,
TTagsRef tags, ui32 flags) override {
Db1->Precharge(root, keyFrom, keyTo, tags, flags);
Db2->Precharge(root, keyFrom, keyTo, tags, flags);
- }
-
+ }
+
ITestIterator* Iterate(ui32 root, TRawVals key, TTagsRef tags, ELookup mode) override {
- return new TIteratorPair(
+ return new TIteratorPair(
Db1->Iterate(root, key, tags, mode),
Db2->Iterate(root, key, tags, mode));
- }
-
+ }
+
void Apply(const TSchemeChanges &delta) override
{
Db1->Apply(delta);
Db2->Apply(delta);
}
- bool CompareDBs() {
- bool res = true;
- for (auto& ti : Db1->GetScheme().Tables) {
- bool tabRes = CompareTables(ti.first);
- if (!tabRes) {
- Cerr << "Tables " << ti.first << " are different!" << Endl;
- res = false;
- }
- }
- return res;
- }
-
-private:
- bool CompareTables(ui32 root) {
+ bool CompareDBs() {
+ bool res = true;
+ for (auto& ti : Db1->GetScheme().Tables) {
+ bool tabRes = CompareTables(ti.first);
+ if (!tabRes) {
+ Cerr << "Tables " << ti.first << " are different!" << Endl;
+ res = false;
+ }
+ }
+ return res;
+ }
+
+private:
+ bool CompareTables(ui32 root) {
TVector<ui32> valTags;
- for (const auto& ci : Db1->GetScheme().GetTableInfo(root)->Columns) {
- valTags.push_back(ci.first);
- }
-
+ for (const auto& ci : Db1->GetScheme().GetTableInfo(root)->Columns) {
+ valTags.push_back(ci.first);
+ }
+
TAutoPtr<ITestIterator> it1 = Db1->Iterate(root, {}, valTags, ELookup::GreaterOrEqualThan);
TAutoPtr<ITestIterator> it2 = Db2->Iterate(root, {}, valTags, ELookup::GreaterOrEqualThan);
-
+
ui64 errors = 0;
int cmp = 0;
EReady rdy1 = EReady::Gone;
EReady rdy2 = EReady::Gone;
- while (true) {
+ while (true) {
TDbTupleRef key1, key2, val1, val2;
-
+
if ((rdy1 = cmp <= 0 ? it1->Next(ENext::Data) : rdy1) == EReady::Data) {
- key1 = it1->GetKey();
- val1 = it1->GetValues();
- }
-
+ key1 = it1->GetKey();
+ val1 = it1->GetValues();
+ }
+
if ((rdy2 = cmp >= 0 ? it2->Next(ENext::Data) : rdy2) == EReady::Data) {
- key2 = it2->GetKey();
- val2 = it2->GetValues();
- }
-
+ key2 = it2->GetKey();
+ val2 = it2->GetValues();
+ }
+
if (rdy1 == rdy2 && rdy2 == EReady::Gone) {
break;
} else if (rdy1 == EReady::Data && rdy2 != EReady::Data) {
@@ -320,9 +320,9 @@ private:
} else {
cmp = CmpTupleRefs(key1, key2);
}
-
+
errors += (cmp != 0) ? 1 : 0;
-
+
if (cmp > 0) {
Cerr << "Missing row in DB #1:" << Endl
<< PrintRow(key2) << " : "<< PrintRow(val2) << Endl << Endl;
@@ -334,11 +334,11 @@ private:
Cerr << "Different values for key " << PrintRow(key1) << Endl;
Cerr << PrintRow(val1) << Endl
<< PrintRow(val2) << Endl << Endl;
- }
- }
+ }
+ }
return errors == 0;
- }
+ }
static int CmpTupleRefs(const TDbTupleRef &one, const TDbTupleRef &two) {
const auto num = one.ColumnCount;
@@ -351,7 +351,7 @@ private:
return CompareTypedCellVectors(one.Columns, two.Columns, one.Types, num);
}
}
-};
-
-} // namspace NTabletFlatExecutor
-} // namespace NKikimr
+};
+
+} // namespace NTable
+} // namespace NKikimr
diff --git a/ydb/core/tablet_flat/ut/flat_test_db_helpers.h b/ydb/core/tablet_flat/ut/flat_test_db_helpers.h
index 83d2bc73bd1..ffc9a36daac 100644
--- a/ydb/core/tablet_flat/ut/flat_test_db_helpers.h
+++ b/ydb/core/tablet_flat/ut/flat_test_db_helpers.h
@@ -1,44 +1,44 @@
-#pragma once
-
-#include "flat_test_db.h"
-
+#pragma once
+
+#include "flat_test_db.h"
+
#include <ydb/core/tablet_flat/flat_dbase_apply.h>
#include <ydb/core/tablet_flat/flat_row_scheme.h>
#include <ydb/core/tablet_flat/flat_mem_warm.h>
#include <ydb/core/scheme/scheme_type_id.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NTable {
-
-struct TFakeTableCell {
-private:
+
+struct TFakeTableCell {
+private:
ECellOp Op = ECellOp::Set;
- TRawTypeValue Val;
+ TRawTypeValue Val;
TString Buf;
-public:
- TFakeTableCell() {}
-
- TFakeTableCell(const TFakeTableCell& other) {
- Set(other.Get());
- }
-
- const TFakeTableCell& operator = (const TFakeTableCell& other) {
- if (this != &other)
- Set(other.Get());
- return *this;
- }
-
- void Set(const TRawTypeValue& v) {
- if (!v.IsEmpty()) {
+public:
+ TFakeTableCell() {}
+
+ TFakeTableCell(const TFakeTableCell& other) {
+ Set(other.Get());
+ }
+
+ const TFakeTableCell& operator = (const TFakeTableCell& other) {
+ if (this != &other)
+ Set(other.Get());
+ return *this;
+ }
+
+ void Set(const TRawTypeValue& v) {
+ if (!v.IsEmpty()) {
Buf = TString((const char*)v.Data(), v.Size());
Val = TRawTypeValue(Buf.data(), Buf.size(), v.Type());
- } else {
- Val = TRawTypeValue();
- }
- }
-
+ } else {
+ Val = TRawTypeValue();
+ }
+ }
+
void SetOp(ECellOp op) {
Op = op;
}
@@ -48,34 +48,34 @@ public:
return TCell((const char*)Val.Data(), Val.Size());
}
- const TRawTypeValue& Get() const {
- return Val;
- }
+ const TRawTypeValue& Get() const {
+ return Val;
+ }
ECellOp GetOp() const {
return Op;
}
-};
-
-// TODO: properly support all types
-inline TFakeTableCell FromVal(NScheme::TTypeId t, i64 val) {
- TFakeTableCell c;
- ui32 sz = sizeof(val);
- switch (t) {
+};
+
+// TODO: properly support all types
+inline TFakeTableCell FromVal(NScheme::TTypeId t, i64 val) {
+ TFakeTableCell c;
+ ui32 sz = sizeof(val);
+ switch (t) {
case NScheme::NTypeIds::Byte:
case NScheme::NTypeIds::Bool:
- sz = 1;
- break;
+ sz = 1;
+ break;
case NScheme::NTypeIds::Int32:
case NScheme::NTypeIds::Uint32:
- sz = 4;
- break;
- }
-
- c.Set(TRawTypeValue(&val, sz, t));
- return c;
-}
-
+ sz = 4;
+ break;
+ }
+
+ c.Set(TRawTypeValue(&val, sz, t));
+ return c;
+}
+
inline TFakeTableCell MakeNull(ECellOp op) {
TFakeTableCell c;
c.SetOp(op);
@@ -87,70 +87,70 @@ inline TFakeTableCell FromVal(NScheme::TTypeId, std::nullptr_t) {
}
inline TFakeTableCell FromVal(NScheme::TTypeId t, TString val) {
- TFakeTableCell c;
+ TFakeTableCell c;
c.Set(TRawTypeValue(val.data(), val.size(), t));
- return c;
-}
-
-inline TFakeTableCell FromVal(NScheme::TTypeId t, const char* v) {
+ return c;
+}
+
+inline TFakeTableCell FromVal(NScheme::TTypeId t, const char* v) {
return FromVal(t, TString(v));
-}
-
-// Store table id and row key for an update operation
-class TDbRowOpBase {
-protected:
- const TScheme& Scheme;
-private:
- ui32 Root;
+}
+
+// Store table id and row key for an update operation
+class TDbRowOpBase {
+protected:
+ const TScheme& Scheme;
+private:
+ ui32 Root;
TVector<TFakeTableCell> KeyCells;
-public:
- TDbRowOpBase(const TScheme& scheme, ui32 root)
- : Scheme(scheme)
- , Root(root)
- {}
-
- template <typename... Tt>
- TDbRowOpBase& Key(Tt... tt) {
- AppendKeyColumn(Root, Scheme, KeyCells, tt...);
- return *this;
- }
-
- ui32 GetRoot() const {
- return Root;
- }
-
+public:
+ TDbRowOpBase(const TScheme& scheme, ui32 root)
+ : Scheme(scheme)
+ , Root(root)
+ {}
+
+ template <typename... Tt>
+ TDbRowOpBase& Key(Tt... tt) {
+ AppendKeyColumn(Root, Scheme, KeyCells, tt...);
+ return *this;
+ }
+
+ ui32 GetRoot() const {
+ return Root;
+ }
+
const TVector<TFakeTableCell>& GetKey() const {
- return KeyCells;
- }
-};
-
-// Accumulates row key and and a set of tag updates operations
-class TDbRowUpdate : public TDbRowOpBase {
+ return KeyCells;
+ }
+};
+
+// Accumulates the row key and a set of tag update operations
+class TDbRowUpdate : public TDbRowOpBase {
TMap<ui32, TFakeTableCell> TagOps;
-public:
- TDbRowUpdate(const TScheme& scheme, ui32 root)
- : TDbRowOpBase(scheme, root)
- {}
-
- template <typename... Tt>
- TDbRowUpdate& Key(Tt... tt) {
- TDbRowOpBase::Key(tt...);
- return *this;
- }
-
- template<typename T>
+public:
+ TDbRowUpdate(const TScheme& scheme, ui32 root)
+ : TDbRowOpBase(scheme, root)
+ {}
+
+ template <typename... Tt>
+ TDbRowUpdate& Key(Tt... tt) {
+ TDbRowOpBase::Key(tt...);
+ return *this;
+ }
+
+ template<typename T>
TDbRowUpdate& Set(TString tagName, const T& val) {
- const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(GetRoot());
+ const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(GetRoot());
Y_VERIFY(tableInfo, "Unknown table id %u", GetRoot());
- const ui32* tagId = tableInfo->ColumnNames.FindPtr(tagName);
+ const ui32* tagId = tableInfo->ColumnNames.FindPtr(tagName);
Y_VERIFY(tagId, "Unknown column \"%s\" in table %u", tagName.data(), GetRoot());
const auto *colInfo = Scheme.GetColumnInfo(GetRoot(), *tagId);
Y_VERIFY(colInfo, "Column info not found for table id %u, column id %u", GetRoot(), *tagId);
- NScheme::TTypeId type = colInfo->PType;
- TagOps[*tagId] = FromVal(type, val);
- return *this;
- }
-
+ NScheme::TTypeId type = colInfo->PType;
+ TagOps[*tagId] = FromVal(type, val);
+ return *this;
+ }
+
TDbRowUpdate& Erase(TString tagName) {
const TScheme::TTableInfo* tableInfo = Scheme.GetTableInfo(GetRoot());
Y_VERIFY(tableInfo, "Unknown table id %u", GetRoot());
@@ -163,96 +163,96 @@ public:
}
const TMap<ui32, TFakeTableCell>& GetTagOps() const {
- return TagOps;
- }
-};
-
-typedef TDbRowOpBase TDbRowErase;
-
-
-template <typename... Tt>
+ return TagOps;
+ }
+};
+
+typedef TDbRowOpBase TDbRowErase;
+
+
+template <typename... Tt>
void AppendKeyColumn(ui32 root, const TScheme& scheme, TVector<TFakeTableCell>& tuple) {
- // Extend the key according to scheme
- tuple.resize(scheme.GetTableInfo(root)->KeyColumns.size());
-}
-
-template <typename T, typename... Tt>
+ // Extend the key according to scheme
+ tuple.resize(scheme.GetTableInfo(root)->KeyColumns.size());
+}
+
+template <typename T, typename... Tt>
void AppendKeyColumn(ui32 root, const TScheme& scheme, TVector<TFakeTableCell>& tuple, T t, Tt... tt) {
- ui32 pos = tuple.size();
+ ui32 pos = tuple.size();
ui32 tag = scheme.GetTableInfo(root)->KeyColumns[pos];
NScheme::TTypeId type = scheme.GetColumnInfo(root, tag)->PType;
- tuple.push_back(FromVal(type, t));
- AppendKeyColumn(root, scheme, tuple, tt...);
-}
-
+ tuple.push_back(FromVal(type, t));
+ AppendKeyColumn(root, scheme, tuple, tt...);
+}
+
template <typename... Tt>
void AppendKeyColumn(ui32 root, const TScheme& scheme, TVector<TFakeTableCell>& tuple, nullptr_t, Tt... tt) {
tuple.push_back(MakeNull(ECellOp::Set));
AppendKeyColumn(root, scheme, tuple, tt...);
}
-// Helps to simplify test code that deals with ITestDb
-class TDbWrapper {
- ITestDb& Db;
+// Helps to simplify test code that deals with ITestDb
+class TDbWrapper {
+ ITestDb& Db;
TScheme Scheme;
-
-public:
+
+public:
using ECodec = NPage::ECodec;
using ECache = NPage::ECache;
- explicit TDbWrapper(ITestDb& db)
- : Db(db)
+ explicit TDbWrapper(ITestDb& db)
+ : Db(db)
, Scheme(Db.GetScheme())
- {}
-
+ {}
+
ITestDb* operator->() const { return &Db; }
- TDbRowUpdate Update(ui32 root) {
+ TDbRowUpdate Update(ui32 root) {
return TDbRowUpdate(Scheme, root);
- }
-
- TDbRowErase Erase(ui32 root) {
+ }
+
+ TDbRowErase Erase(ui32 root) {
return TDbRowErase(Scheme, root);
- }
-
- void Apply(const TDbRowUpdate& update) {
+ }
+
+ void Apply(const TDbRowUpdate& update) {
TVector<TRawTypeValue> key;
Y_VERIFY(!update.GetKey().empty());
- for (const auto& col : update.GetKey()) {
- key.push_back(col.Get());
- }
-
+ for (const auto& col : update.GetKey()) {
+ key.push_back(col.Get());
+ }
+
TVector<TUpdateOp> ops;
- for (const auto& op : update.GetTagOps()) {
+ for (const auto& op : update.GetTagOps()) {
ops.push_back(TUpdateOp(op.first, op.second.GetOp(), op.second.Get()));
- }
-
+ }
+
Db.Update(update.GetRoot(), ERowOp::Upsert, key, ops);
- }
-
- void Apply(const TDbRowErase& erase) {
+ }
+
+ void Apply(const TDbRowErase& erase) {
TVector<TRawTypeValue> key;
Y_VERIFY(!erase.GetKey().empty());
- for (const auto& col : erase.GetKey()) {
- key.push_back(col.Get());
- }
-
+ for (const auto& col : erase.GetKey()) {
+ key.push_back(col.Get());
+ }
+
Db.Update(erase.GetRoot(), ERowOp::Erase, key, { });
- }
-
+ }
+
void Apply(const TSchemeChanges &delta)
{
Db.Apply(delta);
TSchemeModifier(Scheme).Apply(delta);
}
- void FinishTransaction(bool commit) {
- Db.FinishTransaction(commit);
+ void FinishTransaction(bool commit) {
+ Db.FinishTransaction(commit);
Scheme = Db.GetScheme();
- }
-
-};
-
-} // namspace NTabletFlatExecutor
-} // namespace NKikimr
-
+ }
+
+};
+
+} // namespace NTable
+} // namespace NKikimr
+
diff --git a/ydb/core/tablet_flat/ut/ut_db_scheme.cpp b/ydb/core/tablet_flat/ut/ut_db_scheme.cpp
index 650f5ae21b0..ee12efa9032 100644
--- a/ydb/core/tablet_flat/ut/ut_db_scheme.cpp
+++ b/ydb/core/tablet_flat/ut/ut_db_scheme.cpp
@@ -166,7 +166,7 @@ Y_UNIT_TEST_SUITE(TScheme) {
Y_UNIT_TEST(Policy)
{
auto delta = TModel().Build();
-
+
TIntrusivePtr<TCompactionPolicy> policy = new TCompactionPolicy();
policy->InMemSizeToSnapshot = 1234;
policy->InMemStepsToSnapshot = 100;
@@ -177,17 +177,17 @@ Y_UNIT_TEST_SUITE(TScheme) {
policy->ReadAheadLoThreshold = 50000;
policy->Generations.push_back({150, 3, 4, 250, NLocalDb::LegacyQueueIdToTaskName(1), true});
policy->Generations.push_back({550, 7, 8, 950, NLocalDb::LegacyQueueIdToTaskName(2), false});
-
+
delta.SetCompactionPolicy(TModel::TableId, *policy);
TAutoPtr<TScheme> scheme = new TScheme();
TSchemeModifier(*scheme).Apply(*delta.Flush());
-
- auto snapshot = scheme->GetSnapshot();
-
- TAutoPtr<TScheme> scheme2 = new TScheme();
+
+ auto snapshot = scheme->GetSnapshot();
+
+ TAutoPtr<TScheme> scheme2 = new TScheme();
TSchemeModifier(*scheme2).Apply(*snapshot);
-
+
if (auto &policy = scheme2->GetTableInfo(TModel::TableId)->CompactionPolicy) {
UNIT_ASSERT_VALUES_EQUAL(policy->InMemSizeToSnapshot, 1234);
UNIT_ASSERT_VALUES_EQUAL(policy->InMemStepsToSnapshot, 100);
@@ -203,7 +203,7 @@ Y_UNIT_TEST_SUITE(TScheme) {
} else {
UNIT_ASSERT(false);
}
- }
+ }
}
}
diff --git a/ydb/core/tablet_flat/ut/ya.make b/ydb/core/tablet_flat/ut/ya.make
index ed7588a97c5..fd66d042026 100644
--- a/ydb/core/tablet_flat/ut/ya.make
+++ b/ydb/core/tablet_flat/ut/ya.make
@@ -1,5 +1,5 @@
UNITTEST_FOR(ydb/core/tablet_flat)
-
+
FORK_SUBTESTS()
IF (WITH_VALGRIND)
@@ -12,23 +12,23 @@ ELSE()
ENDIF()
OWNER(g:kikimr)
-
-SRCS(
+
+SRCS(
datetime_ut.cpp
decimal_ut.cpp
flat_cxx_database_ut.cpp
ut_db_iface.cpp
ut_db_scheme.cpp
flat_executor_ut.cpp
- flat_executor_database_ut.cpp
- flat_executor_gclogic_ut.cpp
+ flat_executor_database_ut.cpp
+ flat_executor_gclogic_ut.cpp
flat_range_cache_ut.cpp
flat_row_versions_ut.cpp
flat_sausagecache_ut.cpp
flat_table_part_ut.cpp
- flat_test_db.h
- flat_test_db.cpp
- flat_test_db_helpers.h
+ flat_test_db.h
+ flat_test_db.cpp
+ flat_test_db_helpers.h
shared_handle_ut.cpp
ut_self.cpp
ut_iterator.cpp
@@ -52,19 +52,19 @@ SRCS(
ut_slice.cpp
ut_slice_loader.cpp
ut_versions.cpp
-)
-
+)
+
RESOURCE(
../test/data/002_full_part.pages abi/002_full_part.pages
../test/data/008_basics_db.redo abi/008_basics_db.redo
)
-PEERDIR(
+PEERDIR(
library/cpp/resource
ydb/core/scheme
ydb/core/tablet_flat/test/libs/exec
ydb/core/tablet_flat/test/libs/table
ydb/library/yql/public/udf/service/exception_policy
-)
-
-END()
+)
+
+END()
diff --git a/ydb/core/tablet_flat/ya.make b/ydb/core/tablet_flat/ya.make
index 250a3a41feb..6b1226bf01c 100644
--- a/ydb/core/tablet_flat/ya.make
+++ b/ydb/core/tablet_flat/ya.make
@@ -27,8 +27,8 @@ SRCS(
flat_executor_bootlogic.h
flat_executor_borrowlogic.cpp
flat_executor_borrowlogic.h
- flat_executor_compaction_logic.cpp
- flat_executor_compaction_logic.h
+ flat_executor_compaction_logic.cpp
+ flat_executor_compaction_logic.h
flat_executor_counters.cpp
flat_executor_counters.h
flat_executor_db_mon.cpp
@@ -54,12 +54,12 @@ SRCS(
flat_range_cache.cpp
flat_row_versions.cpp
flat_stat_part.cpp
- flat_stat_part.h
- flat_stat_table.h
- flat_stat_table.cpp
+ flat_stat_part.h
+ flat_stat_table.h
+ flat_stat_table.cpp
flat_store_hotdog.cpp
flat_table.cpp
- flat_table.h
+ flat_table.h
flat_table_part.cpp
flat_table_part.h
flat_table_misc.cpp
diff --git a/ydb/core/testlib/actors/test_runtime.cpp b/ydb/core/testlib/actors/test_runtime.cpp
index 8b4ff3d0aa1..63f0436bff7 100644
--- a/ydb/core/testlib/actors/test_runtime.cpp
+++ b/ydb/core/testlib/actors/test_runtime.cpp
@@ -97,8 +97,8 @@ namespace NActors {
MonPorts.clear();
for (ui32 nodeIndex = 0; nodeIndex < NodeCount; ++nodeIndex) {
- ui32 nodeId = FirstNodeId + nodeIndex;
- auto* node = GetNodeById(nodeId);
+ ui32 nodeId = FirstNodeId + nodeIndex;
+ auto* node = GetNodeById(nodeId);
const auto* app0 = App0.Get();
if (!SingleSysEnv) {
const TIntrusivePtr<NMonitoring::TDynamicCounters> profilerCounters = NKikimr::GetServiceCounters(node->DynamicCounters, "utils");
@@ -116,7 +116,7 @@ namespace NActors {
node->AppData0.reset(new NKikimr::TAppData(0, 1, 2, 3, { }, app0->TypeRegistry, app0->FunctionRegistry, app0->FormatFactory, nullptr));
node->ActorSystem = MakeActorSystem(nodeIndex, node);
}
- node->LogSettings->MessagePrefix = " node " + ToString(nodeId);
+ node->LogSettings->MessagePrefix = " node " + ToString(nodeId);
auto* nodeAppData = node->GetAppData<NKikimr::TAppData>();
nodeAppData->DataShardExportFactory = app0->DataShardExportFactory;
diff --git a/ydb/core/testlib/actors/test_runtime_ut.cpp b/ydb/core/testlib/actors/test_runtime_ut.cpp
index db555584e95..fce7f192934 100644
--- a/ydb/core/testlib/actors/test_runtime_ut.cpp
+++ b/ydb/core/testlib/actors/test_runtime_ut.cpp
@@ -12,7 +12,7 @@ Y_UNIT_TEST_SUITE(TActorTest) {
TTestActorRuntime::TEgg MakeEgg()
{
return
- { new TAppData(0, 0, 0, 0, { }, nullptr, nullptr, nullptr, nullptr), nullptr, nullptr };
+ { new TAppData(0, 0, 0, 0, { }, nullptr, nullptr, nullptr, nullptr), nullptr, nullptr };
}
Y_UNIT_TEST(TestHandleEvent) {
diff --git a/ydb/core/testlib/basics/appdata.h b/ydb/core/testlib/basics/appdata.h
index 3dbb6d934cf..409869161f5 100644
--- a/ydb/core/testlib/basics/appdata.h
+++ b/ydb/core/testlib/basics/appdata.h
@@ -38,7 +38,7 @@ namespace NKikimr {
struct TMine : public NActors::IDestructable {
TIntrusivePtr<NScheme::TTypeRegistry> Types;
TIntrusivePtr<NMiniKQL::IFunctionRegistry> Funcs;
- TIntrusivePtr<TFormatFactory> Formats;
+ TIntrusivePtr<TFormatFactory> Formats;
std::shared_ptr<NDataShard::IExportFactory> DataShardExportFactory;
std::shared_ptr<NPDisk::IIoContextFactory> IoContext;
diff --git a/ydb/core/testlib/basics/services.cpp b/ydb/core/testlib/basics/services.cpp
index ab58d982754..b4585ad96c6 100644
--- a/ydb/core/testlib/basics/services.cpp
+++ b/ydb/core/testlib/basics/services.cpp
@@ -140,16 +140,16 @@ namespace NPDisk {
nodeIndex);
}
- void SetupBlobCache(TTestActorRuntime& runtime, ui32 nodeIndex)
- {
+ void SetupBlobCache(TTestActorRuntime& runtime, ui32 nodeIndex)
+ {
runtime.AddLocalService(NBlobCache::MakeBlobCacheServiceId(),
- TActorSetupCmd(
+ TActorSetupCmd(
NBlobCache::CreateBlobCache(20<<20, runtime.GetDynamicCounters(nodeIndex)),
- TMailboxType::ReadAsFilled,
- 0),
- nodeIndex);
- }
-
+ TMailboxType::ReadAsFilled,
+ 0),
+ nodeIndex);
+ }
+
template<size_t N>
static TIntrusivePtr<TStateStorageInfo> GenerateStateStorageInfo(const TActorId (&replicas)[N], ui64 stateStorageGroup)
{
@@ -266,7 +266,7 @@ namespace NPDisk {
SetupTabletPipePeNodeCaches(runtime, nodeIndex);
SetupResourceBroker(runtime, nodeIndex);
SetupSharedPageCache(runtime, nodeIndex, caches);
- SetupBlobCache(runtime, nodeIndex);
+ SetupBlobCache(runtime, nodeIndex);
SetupQuoterService(runtime, nodeIndex);
if (factory)
diff --git a/ydb/core/testlib/fake_coordinator.h b/ydb/core/testlib/fake_coordinator.h
index 2339d3c6566..b3098d60a59 100644
--- a/ydb/core/testlib/fake_coordinator.h
+++ b/ydb/core/testlib/fake_coordinator.h
@@ -22,31 +22,31 @@ namespace NKikimr {
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TX_COORDINATOR_ACTOR;
- }
-
+ }
+
TFakeCoordinator(const TActorId &tablet, TTabletStorageInfo *info, TState::TPtr state)
: TActor<TFakeCoordinator>(&TFakeCoordinator::StateInit)
, TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory)
, State(state)
- , Pipes(NTabletPipe::CreateUnboundedClientCache(GetPipeClientConfig()))
+ , Pipes(NTabletPipe::CreateUnboundedClientCache(GetPipeClientConfig()))
{
}
- static NTabletPipe::TClientConfig GetPipeClientConfig() {
- NTabletPipe::TClientConfig config;
- config.CheckAliveness = true;
+ static NTabletPipe::TClientConfig GetPipeClientConfig() {
+ NTabletPipe::TClientConfig config;
+ config.CheckAliveness = true;
config.RetryPolicy = {
.RetryLimitCount = 3,
.MinRetryTime = TDuration::MilliSeconds(10),
.MaxRetryTime = TDuration::MilliSeconds(500),
.BackoffMultiplier = 2
};
- return config;
- }
-
+ return config;
+ }
+
void OnActivateExecutor(const TActorContext &ctx) final {
Become(&TFakeCoordinator::StateWork);
- SendQueued(ctx);
+ SendQueued(ctx);
}
void OnDetach(const TActorContext &ctx) override {
@@ -110,12 +110,12 @@ namespace NKikimr {
void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) {
if (!Pipes->OnConnect(ev)) {
- if (ev->Get()->Dead) {
- AckPlanStepsForDeadTablet(ev->Get()->TabletId);
+ if (ev->Get()->Dead) {
+ AckPlanStepsForDeadTablet(ev->Get()->TabletId);
AdvancePlan(ctx);
} else {
SendQueued(ctx, ev->Get()->TabletId);
- }
+ }
}
}
@@ -152,22 +152,22 @@ namespace NKikimr {
}
}
- void AckPlanStepsForDeadTablet(ui64 tabletId) {
- auto pit = State->QueuedPlans.lower_bound(std::make_pair(tabletId, 0));
- while (pit != State->QueuedPlans.end() && pit->first.first == tabletId) {
+ void AckPlanStepsForDeadTablet(ui64 tabletId) {
+ auto pit = State->QueuedPlans.lower_bound(std::make_pair(tabletId, 0));
+ while (pit != State->QueuedPlans.end() && pit->first.first == tabletId) {
Cerr << "FAKE_COORDINATOR: forgetting step " << pit->first.second << " for dead tablet " << pit->first.first << Endl;
- auto toErase = pit;
- ++pit;
- for (const auto& evPlan : toErase->second) {
- for (const auto& mediatorTx : evPlan->Record.GetTransactions()) {
- ui64 txId = mediatorTx.GetTxId();
- UnlinkTx(tabletId, txId);
- }
- }
- State->QueuedPlans.erase(toErase);
- }
- }
-
+ auto toErase = pit;
+ ++pit;
+ for (const auto& evPlan : toErase->second) {
+ for (const auto& mediatorTx : evPlan->Record.GetTransactions()) {
+ ui64 txId = mediatorTx.GetTxId();
+ UnlinkTx(tabletId, txId);
+ }
+ }
+ State->QueuedPlans.erase(toErase);
+ }
+ }
+
ui64 GetMinStep() const {
ui64 minStep = Max<ui64>();
for (auto& kv : State->QueuedPlans) {
diff --git a/ydb/core/testlib/fake_scheme_shard.h b/ydb/core/testlib/fake_scheme_shard.h
index 95e17f4e052..8c388d49dd0 100644
--- a/ydb/core/testlib/fake_scheme_shard.h
+++ b/ydb/core/testlib/fake_scheme_shard.h
@@ -32,8 +32,8 @@ public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::FLAT_SCHEMESHARD_ACTOR;
- }
-
+ }
+
TFakeSchemeShard(const TActorId &tablet, TTabletStorageInfo *info, TState::TPtr state)
: TActor<TFakeSchemeShard>(&TFakeSchemeShard::StateInit)
, NTabletFlatExecutor::TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory)
diff --git a/ydb/core/testlib/tablet_helpers.cpp b/ydb/core/testlib/tablet_helpers.cpp
index 428cae40050..a853040b932 100644
--- a/ydb/core/testlib/tablet_helpers.cpp
+++ b/ydb/core/testlib/tablet_helpers.cpp
@@ -70,8 +70,8 @@ namespace NKikimr {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TX_MEDIATOR_ACTOR;
- }
-
+ }
+
TFakeMediatorTimecastProxy()
: TActor(&TFakeMediatorTimecastProxy::StateFunc)
{}
@@ -182,12 +182,12 @@ namespace NKikimr {
TabletRelatedActors[info->CurrentLeaderTablet] = info->TabletID;
}
- } else if (event->GetTypeRewrite() == TEvFakeHive::EvNotifyTabletDeleted) {
- auto notifyEv = static_cast<TEvFakeHive::TEvNotifyTabletDeleted*>(event->GetBase());
- ui64 tabletId = notifyEv->TabletId;
- DeletedTablets.insert(tabletId);
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cerr << "Forgetting tablet " << tabletId << Endl;
+ } else if (event->GetTypeRewrite() == TEvFakeHive::EvNotifyTabletDeleted) {
+ auto notifyEv = static_cast<TEvFakeHive::TEvNotifyTabletDeleted*>(event->GetBase());
+ ui64 tabletId = notifyEv->TabletId;
+ DeletedTablets.insert(tabletId);
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cerr << "Forgetting tablet " << tabletId << Endl;
}
}
@@ -215,8 +215,8 @@ namespace NKikimr {
bool IsTabletEvent(const TAutoPtr<IEventHandle>& event, ui64 tabletId) const {
if (DeletedTablets.contains(tabletId))
- return false;
-
+ return false;
+
auto it = TabletLeaders.find(tabletId);
if (it != TabletLeaders.end() && event->GetRecipientRewrite() == it->second) {
return true;
@@ -225,26 +225,26 @@ namespace NKikimr {
return false;
}
- bool IsCommitResult(const TAutoPtr<IEventHandle>& event) const {
- // TEvCommitResult is sent to Executor actor not the Tablet actor
- if (event->GetTypeRewrite() == TEvTablet::TEvCommitResult::EventType) {
- return true;
- }
-
- return false;
- }
-
- bool IsCommitResult(const TAutoPtr<IEventHandle>& event, ui64 tabletId) const {
- // TEvCommitResult is sent to Executor actor not the Tablet actor
- if (event->GetTypeRewrite() == TEvTablet::TEvCommitResult::EventType &&
- event->Get<TEvTablet::TEvCommitResult>()->TabletID == tabletId)
- {
- return true;
- }
-
- return false;
- }
-
+ bool IsCommitResult(const TAutoPtr<IEventHandle>& event) const {
+ // TEvCommitResult is sent to Executor actor not the Tablet actor
+ if (event->GetTypeRewrite() == TEvTablet::TEvCommitResult::EventType) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool IsCommitResult(const TAutoPtr<IEventHandle>& event, ui64 tabletId) const {
+ // TEvCommitResult is sent to Executor actor not the Tablet actor
+ if (event->GetTypeRewrite() == TEvTablet::TEvCommitResult::EventType &&
+ event->Get<TEvTablet::TEvCommitResult>()->TabletID == tabletId)
+ {
+ return true;
+ }
+
+ return false;
+ }
+
bool IsTabletRelatedEvent(const TAutoPtr<IEventHandle>& event) {
auto it = TabletRelatedActors.find(event->GetRecipientRewrite());
if (it != TabletRelatedActors.end()) {
@@ -257,7 +257,7 @@ namespace NKikimr {
protected:
TMap<ui64, TActorId> TabletLeaders;
TMap<TActorId, ui64> TabletRelatedActors;
- TSet<ui64> DeletedTablets;
+ TSet<ui64> DeletedTablets;
bool& TracingActive;
const TVector<ui64> TabletIds;
};
@@ -265,12 +265,12 @@ namespace NKikimr {
class TRebootTabletObserver : public TTabletTracer {
public:
TRebootTabletObserver(ui32 tabletEventCountBeforeReboot, ui64 tabletId, bool& tracingActive, const TVector<ui64>& tabletIds,
- TTestActorRuntime::TEventFilter filter, bool killOnCommit)
+ TTestActorRuntime::TEventFilter filter, bool killOnCommit)
: TTabletTracer(tracingActive, tabletIds)
, TabletEventCountBeforeReboot(tabletEventCountBeforeReboot)
, TabletId(tabletId)
, Filter(filter)
- , KillOnCommit(killOnCommit)
+ , KillOnCommit(killOnCommit)
, CurrentEventCount(0)
, HasReboot0(false)
{
@@ -278,23 +278,23 @@ namespace NKikimr {
TTestActorRuntime::EEventAction OnEvent(TTestActorRuntime& runtime, TAutoPtr<IEventHandle>& event) {
TTabletTracer::OnEvent(runtime, event);
-
+
TActorId actor = event->Recipient;
if (KillOnCommit && IsCommitResult(event) && HideCommitsFrom.contains(actor)) {
- // We dropped one of the previous TEvCommitResult coming to this Executore actor
- // after that we must drop all TEvCommitResult until this Executor dies
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cerr << "!Hidden TEvCommitResult" << Endl;
- return TTestActorRuntime::EEventAction::DROP;
- }
-
+ // We dropped one of the previous TEvCommitResult events coming to this Executor actor;
+ // after that we must drop all TEvCommitResult events until this Executor dies
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cerr << "!Hidden TEvCommitResult" << Endl;
+ return TTestActorRuntime::EEventAction::DROP;
+ }
+
if (!TracingActive)
return TTestActorRuntime::EEventAction::PROCESS;
if (Filter(runtime, event))
return TTestActorRuntime::EEventAction::PROCESS;
- if (!IsTabletEvent(event, TabletId) && !(KillOnCommit && IsCommitResult(event, TabletId)))
+ if (!IsTabletEvent(event, TabletId) && !(KillOnCommit && IsCommitResult(event, TabletId)))
return TTestActorRuntime::EEventAction::PROCESS;
if (CurrentEventCount++ != TabletEventCountBeforeReboot)
@@ -302,36 +302,36 @@ namespace NKikimr {
HasReboot0 = true;
TString eventType = (event->HasEvent() && event->GetBase()) ? TypeName(*event->GetBase()) : "nullptr";
-
- if (KillOnCommit && IsCommitResult(event)) {
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cerr << "!Drop TEvCommitResult and kill " << TabletId << Endl;
- // We are going to drop current TEvCommitResult event so we must drop all
- // the following TEvCommitResult events in order not to break Tx order
- HideCommitsFrom.insert(actor);
- } else {
- runtime.PushFront(event);
- }
-
+
+ if (KillOnCommit && IsCommitResult(event)) {
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cerr << "!Drop TEvCommitResult and kill " << TabletId << Endl;
+ // We are going to drop current TEvCommitResult event so we must drop all
+ // the following TEvCommitResult events in order not to break Tx order
+ HideCommitsFrom.insert(actor);
+ } else {
+ runtime.PushFront(event);
+ }
+
TActorId targetActorId = TabletLeaders[TabletId];
if (targetActorId == TActorId()) {
if (ENABLE_REBOOT_DISPATCH_LOG)
Cerr << "!IGNORE " << TabletId << " event " << eventType << " because actor is null!\n";
- return TTestActorRuntime::EEventAction::DROP;
+ return TTestActorRuntime::EEventAction::DROP;
}
-
+
if (ENABLE_REBOOT_DISPATCH_LOG)
- Cerr << "!Reboot " << TabletId << " (actor " << targetActorId << ") on event " << eventType << " !\n";
-
- // Wait for the tablet to boot or to become deleted
+ Cerr << "!Reboot " << TabletId << " (actor " << targetActorId << ") on event " << eventType << " !\n";
+
+ // Wait for the tablet to boot or to become deleted
runtime.Send(new IEventHandle(targetActorId, TActorId(), new TEvents::TEvPoisonPill()));
TDispatchOptions rebootOptions;
rebootOptions.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvRestored, 2));
- rebootOptions.CustomFinalCondition = [this]() -> bool {
+ rebootOptions.CustomFinalCondition = [this]() -> bool {
return DeletedTablets.contains(TabletId);
- };
+ };
runtime.DispatchEvents(rebootOptions);
if (ENABLE_REBOOT_DISPATCH_LOG)
@@ -356,65 +356,65 @@ namespace NKikimr {
const ui32 TabletEventCountBeforeReboot;
const ui64 TabletId;
const TTestActorRuntime::TEventFilter Filter;
- const bool KillOnCommit; // Kill tablet after log is committed but before Complete() is called for Tx's
+ const bool KillOnCommit; // Kill tablet after log is committed but before Complete() is called for Tx's
ui32 CurrentEventCount;
bool HasReboot0;
TSet<TActorId> HideCommitsFrom;
};
- // Breaks pipe after the specified number of events
+ // Breaks pipe after the specified number of events
class TPipeResetObserver : public TTabletTracer {
- public:
+ public:
TPipeResetObserver(ui32 eventCountBeforeReboot, bool& tracingActive, TTestActorRuntime::TEventFilter filter, const TVector<ui64>& tabletIds)
: TTabletTracer(tracingActive, tabletIds)
, EventCountBeforeReboot(eventCountBeforeReboot)
- , TracingActive(tracingActive)
- , Filter(filter)
- , CurrentEventCount(0)
- , HasReset0(false)
- {}
-
- TTestActorRuntime::EEventAction OnEvent(TTestActorRuntime& runtime, TAutoPtr<IEventHandle>& event) {
+ , TracingActive(tracingActive)
+ , Filter(filter)
+ , CurrentEventCount(0)
+ , HasReset0(false)
+ {}
+
+ TTestActorRuntime::EEventAction OnEvent(TTestActorRuntime& runtime, TAutoPtr<IEventHandle>& event) {
TTabletTracer::OnEvent(runtime, event);
- if (!TracingActive)
- return TTestActorRuntime::EEventAction::PROCESS;
-
- if (Filter(runtime, event))
- return TTestActorRuntime::EEventAction::PROCESS;
-
- // Intercept only EvSend and EvPush
- if (event->GetTypeRewrite() != TEvTabletPipe::EvSend && event->GetTypeRewrite() != TEvTabletPipe::EvPush)
- return TTestActorRuntime::EEventAction::PROCESS;
-
- if (CurrentEventCount++ != EventCountBeforeReboot)
- return TTestActorRuntime::EEventAction::PROCESS;
-
- HasReset0 = true;
-
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cerr << "!Reset pipe\n";
-
- // Replace the event with PoisonPill in order to kill PipeClient or PipeServer
+ if (!TracingActive)
+ return TTestActorRuntime::EEventAction::PROCESS;
+
+ if (Filter(runtime, event))
+ return TTestActorRuntime::EEventAction::PROCESS;
+
+ // Intercept only EvSend and EvPush
+ if (event->GetTypeRewrite() != TEvTabletPipe::EvSend && event->GetTypeRewrite() != TEvTabletPipe::EvPush)
+ return TTestActorRuntime::EEventAction::PROCESS;
+
+ if (CurrentEventCount++ != EventCountBeforeReboot)
+ return TTestActorRuntime::EEventAction::PROCESS;
+
+ HasReset0 = true;
+
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cerr << "!Reset pipe\n";
+
+ // Replace the event with PoisonPill in order to kill PipeClient or PipeServer
TActorId targetActorId = event->GetRecipientRewrite();
runtime.Send(new IEventHandle(targetActorId, TActorId(), new TEvents::TEvPoisonPill()));
-
- return TTestActorRuntime::EEventAction::DROP;
- }
-
- bool HasReset() const {
- return HasReset0;
- }
-
- private:
- const ui32 EventCountBeforeReboot;
- bool& TracingActive;
- const TTestActorRuntime::TEventFilter Filter;
- ui32 CurrentEventCount;
- bool HasReset0;
- };
-
-
+
+ return TTestActorRuntime::EEventAction::DROP;
+ }
+
+ bool HasReset() const {
+ return HasReset0;
+ }
+
+ private:
+ const ui32 EventCountBeforeReboot;
+ bool& TracingActive;
+ const TTestActorRuntime::TEventFilter Filter;
+ ui32 CurrentEventCount;
+ bool HasReset0;
+ };
+
+
class TDelayingObserver : public TTabletTracer {
public:
TDelayingObserver(bool& tracingActive, double delayInjectionProbability, const TVector<ui64>& tabletIds)
@@ -644,12 +644,12 @@ namespace NKikimr {
return storagePoolKinds;
}
- i64 SetSplitMergePartCountLimit(TTestActorRuntime* runtime, i64 val) {
- TAtomic prev;
- runtime->GetAppData().Icb->SetValue("SchemeShard_SplitMergePartCountLimit", val, prev);
- return prev;
- }
-
+ i64 SetSplitMergePartCountLimit(TTestActorRuntime* runtime, i64 val) {
+ TAtomic prev;
+ runtime->GetAppData().Icb->SetValue("SchemeShard_SplitMergePartCountLimit", val, prev);
+ return prev;
+ }
+
bool SetAllowServerlessStorageBilling(TTestActorRuntime* runtime, bool isAllow) {
TAtomic prev;
runtime->GetAppData().Icb->SetValue("SchemeShard_AllowServerlessStorageBilling", isAllow, prev);
@@ -751,7 +751,7 @@ namespace NKikimr {
void RunTestWithReboots(const TVector<ui64>& tabletIds, std::function<TTestActorRuntime::TEventFilter()> filterFactory,
std::function<void(const TString& dispatchPass, std::function<void(TTestActorRuntime&)> setup, bool& activeZone)> testFunc,
- ui32 selectedReboot, ui64 selectedTablet, ui32 bucket, ui32 totalBuckets, bool killOnCommit) {
+ ui32 selectedReboot, ui64 selectedTablet, ui32 bucket, ui32 totalBuckets, bool killOnCommit) {
bool activeZone = false;
if (selectedReboot == Max<ui32>())
@@ -813,7 +813,7 @@ namespace NKikimr {
++runCount;
activeZone = false;
TTestActorRuntime::TEventFilter filter = filterFactory();
- TRebootTabletObserver rebootingObserver(tabletEventCountBeforeReboot, tabletId, activeZone, tabletIds, filter, killOnCommit);
+ TRebootTabletObserver rebootingObserver(tabletEventCountBeforeReboot, tabletId, activeZone, tabletIds, filter, killOnCommit);
TTabletScheduledFilter scheduledFilter(rebootingObserver);
testFunc(dispatchName,
[&](TTestActorRuntime& runtime) {
@@ -852,14 +852,14 @@ namespace NKikimr {
void RunTestWithPipeResets(const TVector<ui64>& tabletIds, std::function<TTestActorRuntime::TEventFilter()> filterFactory,
std::function<void(const TString& dispatchPass, std::function<void(TTestActorRuntime&)> setup, bool& activeZone)> testFunc,
- ui32 selectedReboot, ui32 bucket, ui32 totalBuckets) {
- bool activeZone = false;
-
- if (selectedReboot == Max<ui32>()) {
+ ui32 selectedReboot, ui32 bucket, ui32 totalBuckets) {
+ bool activeZone = false;
+
+ if (selectedReboot == Max<ui32>()) {
TTabletTracer tabletTracer(activeZone, tabletIds);
TTabletScheduledFilter scheduledFilter(tabletTracer);
-
- testFunc(INITIAL_TEST_DISPATCH_NAME, [&](TTestActorRuntime& runtime) {
+
+ testFunc(INITIAL_TEST_DISPATCH_NAME, [&](TTestActorRuntime& runtime) {
runtime.SetObserverFunc([&](TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
tabletTracer.OnEvent(AsKikimrRuntime(runtime), event);
return TTestActorRuntime::EEventAction::PROCESS;
@@ -870,43 +870,43 @@ namespace NKikimr {
});
runtime.SetScheduledEventFilter([&](TTestActorRuntimeBase& r, TAutoPtr<IEventHandle>& event,
- TDuration delay, TInstant& deadline) {
+ TDuration delay, TInstant& deadline) {
auto& runtime = AsKikimrRuntime(r);
return scheduledFilter(runtime, event, delay, deadline) && TTestActorRuntime::DefaultScheduledFilterFunc(runtime, event, delay, deadline);
- });
-
- runtime.SetScheduledEventsSelectorFunc(&TTestActorRuntime::CollapsedTimeScheduledEventsSelector);
- }, activeZone);
- }
-
- if (SUPPRESS_REBOOTS || GetEnv("FAST_UT")=="1")
- return;
-
- ui32 runCount = 0;
- ui32 eventCountBeforeReboot = 0;
- if (selectedReboot != Max<ui32>()) {
- eventCountBeforeReboot = selectedReboot;
- }
-
- bool hasReboot = true;
- while (hasReboot) {
- if (totalBuckets && ((eventCountBeforeReboot % totalBuckets) != bucket)) {
- ++eventCountBeforeReboot;
- continue;
- }
-
+ });
+
+ runtime.SetScheduledEventsSelectorFunc(&TTestActorRuntime::CollapsedTimeScheduledEventsSelector);
+ }, activeZone);
+ }
+
+ if (SUPPRESS_REBOOTS || GetEnv("FAST_UT")=="1")
+ return;
+
+ ui32 runCount = 0;
+ ui32 eventCountBeforeReboot = 0;
+ if (selectedReboot != Max<ui32>()) {
+ eventCountBeforeReboot = selectedReboot;
+ }
+
+ bool hasReboot = true;
+ while (hasReboot) {
+ if (totalBuckets && ((eventCountBeforeReboot % totalBuckets) != bucket)) {
+ ++eventCountBeforeReboot;
+ continue;
+ }
+
TString dispatchName = Sprintf("Pipe reset at event #%" PRIu32, eventCountBeforeReboot);
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cout << "===> BEGIN dispatch: " << dispatchName << "\n";
-
- try {
- ++runCount;
- activeZone = false;
- TTestActorRuntime::TEventFilter filter = filterFactory();
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cout << "===> BEGIN dispatch: " << dispatchName << "\n";
+
+ try {
+ ++runCount;
+ activeZone = false;
+ TTestActorRuntime::TEventFilter filter = filterFactory();
TPipeResetObserver pipeResetingObserver(eventCountBeforeReboot, activeZone, filter, tabletIds);
TTabletScheduledFilter scheduledFilter(pipeResetingObserver);
- testFunc(dispatchName,
+ testFunc(dispatchName,
[&](TTestActorRuntime& runtime) {
runtime.SetObserverFunc([&](TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
return pipeResetingObserver.OnEvent(AsKikimrRuntime(runtime), event);
@@ -925,21 +925,21 @@ namespace NKikimr {
runtime.SetScheduledEventsSelectorFunc(&TTestActorRuntime::CollapsedTimeScheduledEventsSelector);
}, activeZone);
- hasReboot = pipeResetingObserver.HasReset();
- }
- catch (yexception& e) {
- UNIT_FAIL("Failed at dispatch " << dispatchName << " with exception " << e.what() << "\n");
- }
-
- if (ENABLE_REBOOT_DISPATCH_LOG)
- Cout << "===> END dispatch: " << dispatchName << "\n";
-
- ++eventCountBeforeReboot;
- if (selectedReboot != Max<ui32>())
- break;
- }
- }
-
+ hasReboot = pipeResetingObserver.HasReset();
+ }
+ catch (yexception& e) {
+ UNIT_FAIL("Failed at dispatch " << dispatchName << " with exception " << e.what() << "\n");
+ }
+
+ if (ENABLE_REBOOT_DISPATCH_LOG)
+ Cout << "===> END dispatch: " << dispatchName << "\n";
+
+ ++eventCountBeforeReboot;
+ if (selectedReboot != Max<ui32>())
+ break;
+ }
+ }
+
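For reference, a minimal usage sketch of RunTestWithPipeResets as implemented above. Everything below is illustrative and not part of this commit: the test is assumed to sit inside a Y_UNIT_TEST_SUITE, and the test name, tablet id, runtime bootstrap and scenario body are placeholders.

    #include <ydb/core/testlib/tablet_helpers.h>

    using namespace NKikimr;

    Y_UNIT_TEST(ScenarioSurvivesPipeResets) {                  // hypothetical test
        const ui64 tabletId = 72057594046678944ull;            // placeholder tablet id
        RunTestWithPipeResets(
            /* tabletIds */     {tabletId},
            /* filterFactory */ [] {
                // Returning true exempts an event from interception; here nothing is exempt.
                return [](auto& /*runtime*/, auto& /*event*/) { return false; };
            },
            /* testFunc */ [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& activeZone) {
                Y_UNUSED(dispatchName);
                TTestBasicRuntime runtime;   // placeholder environment bootstrap
                setup(runtime);              // installs the TPipeResetObserver for this pass
                activeZone = true;           // from here on, the selected EvSend/EvPush is replaced with a PoisonPill
                // ... drive the scenario under test and assert its invariants ...
                activeZone = false;
            });
    }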
void RunTestWithDelays(const TRunWithDelaysConfig& config, const TVector<ui64>& tabletIds,
std::function<void(const TString& dispatchPass, std::function<void(TTestActorRuntime&)> setup, bool& activeZone)> testFunc) {
if (SUPPRESS_DELAYS || GetEnv("FAST_UT")=="1")
@@ -1087,8 +1087,8 @@ namespace NKikimr {
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::HIVE_ACTOR;
- }
-
+ }
+
TFakeHive(const TActorId &tablet, TTabletStorageInfo *info, TState::TPtr state,
TGetTabletCreationFunc getTabletCreationFunc)
: TActor<TFakeHive>(&TFakeHive::StateInit)
@@ -1100,12 +1100,12 @@ namespace NKikimr {
void OnActivateExecutor(const TActorContext &ctx) final {
Become(&TFakeHive::StateWork);
-
- while (!InitialEventsQueue.empty()) {
- TAutoPtr<IEventHandle> &ev = InitialEventsQueue.front();
- ctx.ExecutorThread.Send(ev.Release());
- InitialEventsQueue.pop_front();
- }
+
+ while (!InitialEventsQueue.empty()) {
+ TAutoPtr<IEventHandle> &ev = InitialEventsQueue.front();
+ ctx.ExecutorThread.Send(ev.Release());
+ InitialEventsQueue.pop_front();
+ }
}
void OnDetach(const TActorContext &ctx) override {
@@ -1117,11 +1117,11 @@ namespace NKikimr {
Die(ctx);
}
- void Enqueue(STFUNC_SIG) override {
- Y_UNUSED(ctx);
- InitialEventsQueue.push_back(ev);
- }
-
+ void Enqueue(STFUNC_SIG) override {
+ Y_UNUSED(ctx);
+ InitialEventsQueue.push_back(ev);
+ }
+
void StateInit(STFUNC_SIG) {
StateInitImpl(ev, ctx);
}
@@ -1133,9 +1133,9 @@ namespace NKikimr {
HFunc(TEvHive::TEvAdoptTablet, Handle);
HFunc(TEvHive::TEvDeleteTablet, Handle);
HFunc(TEvHive::TEvDeleteOwnerTablets, Handle);
- HFunc(TEvHive::TEvRequestHiveInfo, Handle);
+ HFunc(TEvHive::TEvRequestHiveInfo, Handle);
HFunc(TEvHive::TEvInitiateTabletExternalBoot, Handle);
- HFunc(TEvFakeHive::TEvSubscribeToTabletDeletion, Handle);
+ HFunc(TEvFakeHive::TEvSubscribeToTabletDeletion, Handle);
HFunc(TEvents::TEvPoisonPill, Handle);
}
}
@@ -1159,21 +1159,21 @@ namespace NKikimr {
if (bootMode == NKikimrHive::TABLET_BOOT_MODE_EXTERNAL) {
// don't boot anything
} else if (auto x = GetTabletCreationFunc(type)) {
- bootstrapperActorId = Boot(ctx, type, x, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, x, DataGroupErasure);
} else if (type == defaultTabletTypes.DataShard) {
bootstrapperActorId = Boot(ctx, type, &CreateDataShard, DataGroupErasure);
} else if (type == defaultTabletTypes.KeyValue) {
- bootstrapperActorId = Boot(ctx, type, &CreateKeyValueFlat, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, &CreateKeyValueFlat, DataGroupErasure);
} else if (type == defaultTabletTypes.ColumnShard) {
bootstrapperActorId = Boot(ctx, type, &CreateColumnShard, DataGroupErasure);
} else if (type == defaultTabletTypes.PersQueue) {
- bootstrapperActorId = Boot(ctx, type, &CreatePersQueue, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, &CreatePersQueue, DataGroupErasure);
} else if (type == defaultTabletTypes.PersQueueReadBalancer) {
- bootstrapperActorId = Boot(ctx, type, &CreatePersQueueReadBalancer, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, &CreatePersQueueReadBalancer, DataGroupErasure);
} else if (type == defaultTabletTypes.Coordinator) {
- bootstrapperActorId = Boot(ctx, type, &CreateFlatTxCoordinator, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, &CreateFlatTxCoordinator, DataGroupErasure);
} else if (type == defaultTabletTypes.Mediator) {
- bootstrapperActorId = Boot(ctx, type, &CreateTxMediator, DataGroupErasure);
+ bootstrapperActorId = Boot(ctx, type, &CreateTxMediator, DataGroupErasure);
} else if (type == defaultTabletTypes.SchemeShard) {
bootstrapperActorId = Boot(ctx, type, &CreateFlatTxSchemeShard, DataGroupErasure);
} else if (type == defaultTabletTypes.Kesus) {
@@ -1195,8 +1195,8 @@ namespace NKikimr {
if (status == NKikimrProto::OK) {
ui64 tabletId = State->AllocateTabletId();
- it = State->Tablets.insert(std::make_pair(key, TTabletInfo(type, tabletId, bootstrapperActorId))).first;
- State->TabletIdToOwner[tabletId] = key;
+ it = State->Tablets.insert(std::make_pair(key, TTabletInfo(type, tabletId, bootstrapperActorId))).first;
+ State->TabletIdToOwner[tabletId] = key;
}
} else {
if (it->second.Type != type) {
@@ -1314,13 +1314,13 @@ namespace NKikimr {
}
ctx.Send(ev->Sender, new TEvHive::TEvDeleteTabletReply(NKikimrProto::OK, TabletID(), rec.GetTxId_Deprecated(), rec.GetShardOwnerId(), deletedIdx));
}
-
+
void Handle(TEvHive::TEvDeleteOwnerTablets::TPtr &ev, const TActorContext &ctx) {
NKikimrHive::TEvDeleteOwnerTablets& rec = ev->Get()->Record;
Cout << "FAKEHIVE " << TabletID() << " TEvDeleteOwnerTablets " << rec.ShortDebugString() << Endl;
auto ownerId = rec.GetOwner();
TVector<ui64> toDelete;
-
+
if (ownerId == 0) {
ctx.Send(ev->Sender, new TEvHive::TEvDeleteOwnerTabletsReply(NKikimrProto::ERROR, TabletID(), ownerId, rec.GetTxId()));
return;
@@ -1331,10 +1331,10 @@ namespace NKikimr {
if (id.first != ownerId) {
continue;
- }
+ }
toDelete.push_back(id.second);
- }
+ }
if (toDelete.empty()) {
ctx.Send(ev->Sender, new TEvHive::TEvDeleteOwnerTabletsReply(NKikimrProto::ALREADY, TabletID(), ownerId, rec.GetTxId()));
@@ -1349,25 +1349,25 @@ namespace NKikimr {
ctx.Send(ev->Sender, new TEvHive::TEvDeleteOwnerTabletsReply(NKikimrProto::OK, TabletID(), ownerId, rec.GetTxId()));
}
- void Handle(TEvHive::TEvRequestHiveInfo::TPtr &ev, const TActorContext &ctx) {
- const auto& record = ev->Get()->Record;
- TAutoPtr<TEvHive::TEvResponseHiveInfo> response = new TEvHive::TEvResponseHiveInfo();
-
- if (record.HasTabletID()) {
- auto it = State->TabletIdToOwner.find(record.GetTabletID());
- FillTabletInfo(response->Record, record.GetTabletID(), it == State->TabletIdToOwner.end() ? nullptr : State->Tablets.FindPtr(it->second));
- } else {
- response->Record.MutableTablets()->Reserve(State->Tablets.size());
- for (auto it = State->Tablets.begin(); it != State->Tablets.end(); ++it) {
- if (record.HasTabletType() && record.GetTabletType() != it->second.Type)
- continue;
- FillTabletInfo(response->Record, it->second.TabletId, &it->second);
- }
- }
-
- ctx.Send(ev->Sender, response.Release());
- }
-
+ void Handle(TEvHive::TEvRequestHiveInfo::TPtr &ev, const TActorContext &ctx) {
+ const auto& record = ev->Get()->Record;
+ TAutoPtr<TEvHive::TEvResponseHiveInfo> response = new TEvHive::TEvResponseHiveInfo();
+
+ if (record.HasTabletID()) {
+ auto it = State->TabletIdToOwner.find(record.GetTabletID());
+ FillTabletInfo(response->Record, record.GetTabletID(), it == State->TabletIdToOwner.end() ? nullptr : State->Tablets.FindPtr(it->second));
+ } else {
+ response->Record.MutableTablets()->Reserve(State->Tablets.size());
+ for (auto it = State->Tablets.begin(); it != State->Tablets.end(); ++it) {
+ if (record.HasTabletType() && record.GetTabletType() != it->second.Type)
+ continue;
+ FillTabletInfo(response->Record, it->second.TabletId, &it->second);
+ }
+ }
+
+ ctx.Send(ev->Sender, response.Release());
+ }
+
void Handle(TEvHive::TEvInitiateTabletExternalBoot::TPtr &ev, const TActorContext &ctx) {
ui64 tabletId = ev->Get()->Record.GetTabletID();
if (!State->TabletIdToOwner.contains(tabletId)) {
@@ -1383,22 +1383,22 @@ namespace NKikimr {
ctx.Send(ev->Sender, new TEvLocal::TEvBootTablet(*tabletInfo.Get(), 0), 0, ev->Cookie);
}
- void Handle(TEvFakeHive::TEvSubscribeToTabletDeletion::TPtr &ev, const TActorContext &ctx) {
- ui64 tabletId = ev->Get()->TabletId;
- auto it = State->TabletIdToOwner.find(tabletId);
- if (it == State->TabletIdToOwner.end()) {
- SendDeletionNotification(tabletId, ev->Sender, ctx);
- } else {
- State->Tablets.FindPtr(it->second)->DeletionWaiters.insert(ev->Sender);
- }
- }
-
+ void Handle(TEvFakeHive::TEvSubscribeToTabletDeletion::TPtr &ev, const TActorContext &ctx) {
+ ui64 tabletId = ev->Get()->TabletId;
+ auto it = State->TabletIdToOwner.find(tabletId);
+ if (it == State->TabletIdToOwner.end()) {
+ SendDeletionNotification(tabletId, ev->Sender, ctx);
+ } else {
+ State->Tablets.FindPtr(it->second)->DeletionWaiters.insert(ev->Sender);
+ }
+ }
+
void SendDeletionNotification(ui64 tabletId, TActorId waiter, const TActorContext& ctx) {
- TAutoPtr<TEvHive::TEvResponseHiveInfo> response = new TEvHive::TEvResponseHiveInfo();
- FillTabletInfo(response->Record, tabletId, nullptr);
- ctx.Send(waiter, response.Release());
- }
-
+ TAutoPtr<TEvHive::TEvResponseHiveInfo> response = new TEvHive::TEvResponseHiveInfo();
+ FillTabletInfo(response->Record, tabletId, nullptr);
+ ctx.Send(waiter, response.Release());
+ }
+
void Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
Become(&TThis::BrokenState);
@@ -1410,21 +1410,21 @@ namespace NKikimr {
TBlobStorageGroupType::EErasureSpecies erasure) {
TIntrusivePtr<TBootstrapperInfo> bi(new TBootstrapperInfo(new TTabletSetupInfo(op, TMailboxType::Simple, 0,
TMailboxType::Simple, 0)));
- return ctx.ExecutorThread.RegisterActor(CreateBootstrapper(
+ return ctx.ExecutorThread.RegisterActor(CreateBootstrapper(
CreateTestTabletInfo(State->NextTabletId, tabletType, erasure), bi.Get()));
}
- void FillTabletInfo(NKikimrHive::TEvResponseHiveInfo& response, ui64 tabletId, const TFakeHiveTabletInfo *info) {
- auto& tabletInfo = *response.AddTablets();
- tabletInfo.SetTabletID(tabletId);
- if (info) {
- tabletInfo.SetTabletType(info->Type);
+ void FillTabletInfo(NKikimrHive::TEvResponseHiveInfo& response, ui64 tabletId, const TFakeHiveTabletInfo *info) {
+ auto& tabletInfo = *response.AddTablets();
+ tabletInfo.SetTabletID(tabletId);
+ if (info) {
+ tabletInfo.SetTabletType(info->Type);
tabletInfo.SetState(200); // THive::ReadyToWork
-
+
// TODO: fill other fields when needed
- }
- }
-
+ }
+ }
+
private:
TState::TPtr State;
TGetTabletCreationFunc GetTabletCreationFunc;
diff --git a/ydb/core/testlib/tablet_helpers.h b/ydb/core/testlib/tablet_helpers.h
index 9be9ec9e8ac..81a7c614aaa 100644
--- a/ydb/core/testlib/tablet_helpers.h
+++ b/ydb/core/testlib/tablet_helpers.h
@@ -15,7 +15,7 @@
namespace NKikimr {
struct TAppPrepare;
-
+
const TBlobStorageGroupType::EErasureSpecies DataGroupErasure = TBlobStorageGroupType::ErasureNone;
TActorId FollowerTablet(TTestActorRuntime &runtime, const TActorId &launcher, TTabletStorageInfo *info,
@@ -41,20 +41,20 @@ namespace NKikimr {
void SetupChannelProfiles(TAppPrepare &app, ui32 domainId = 0, ui32 nchannels = 3);
TDomainsInfo::TDomain::TStoragePoolKinds DefaultPoolKinds(ui32 count = 1);
- i64 SetSplitMergePartCountLimit(TTestActorRuntime* runtime, i64 val);
+ i64 SetSplitMergePartCountLimit(TTestActorRuntime* runtime, i64 val);
bool SetAllowServerlessStorageBilling(TTestActorRuntime* runtime, bool isAllow);
-
+
const TString INITIAL_TEST_DISPATCH_NAME = "Trace";
void RunTestWithReboots(const TVector<ui64>& tabletIds, std::function<TTestActorRuntime::TEventFilter()> filterFactory,
std::function<void(const TString& dispatchPass, std::function<void(TTestActorRuntime&)> setup, bool& activeZone)> testFunc,
- ui32 selectedReboot = Max<ui32>(), ui64 selectedTablet = Max<ui64>(), ui32 bucket = 0, ui32 totalBuckets = 0, bool killOnCommit = false);
+ ui32 selectedReboot = Max<ui32>(), ui64 selectedTablet = Max<ui64>(), ui32 bucket = 0, ui32 totalBuckets = 0, bool killOnCommit = false);
- // Resets pipe when receiving client events
+ // Resets pipe when receiving client events
void RunTestWithPipeResets(const TVector<ui64>& tabletIds, std::function<TTestActorRuntime::TEventFilter()> filterFactory,
std::function<void(const TString& dispatchPass, std::function<void(TTestActorRuntime&)> setup, bool& activeZone)> testFunc,
- ui32 selectedReboot = Max<ui32>(), ui32 bucket = 0, ui32 totalBuckets = 0);
-
+ ui32 selectedReboot = Max<ui32>(), ui32 bucket = 0, ui32 totalBuckets = 0);
+
struct TRunWithDelaysConfig {
double DelayInjectionProbability;
TDuration ReschedulingDelay;
@@ -85,30 +85,30 @@ namespace NKikimr {
void WaitScheduledEvents(TTestActorRuntime &runtime, TDuration delay, const TActorId &sender, ui32 nodeIndex = 0);
-
- struct TEvFakeHive {
- enum EEv {
- EvSubscribeToTabletDeletion = TEvHive::EvEnd + 1,
- EvNotifyTabletDeleted
- };
-
- struct TEvSubscribeToTabletDeletion : public TEventLocal<TEvSubscribeToTabletDeletion, EvSubscribeToTabletDeletion> {
- ui64 TabletId;
-
- explicit TEvSubscribeToTabletDeletion(ui64 tabletId)
- : TabletId(tabletId)
- {}
- };
-
- struct TEvNotifyTabletDeleted : public TEventLocal<TEvNotifyTabletDeleted, EvNotifyTabletDeleted> {
- ui64 TabletId;
-
- explicit TEvNotifyTabletDeleted(ui64 tabletId)
- : TabletId(tabletId)
- {}
- };
- };
-
+
+ struct TEvFakeHive {
+ enum EEv {
+ EvSubscribeToTabletDeletion = TEvHive::EvEnd + 1,
+ EvNotifyTabletDeleted
+ };
+
+ struct TEvSubscribeToTabletDeletion : public TEventLocal<TEvSubscribeToTabletDeletion, EvSubscribeToTabletDeletion> {
+ ui64 TabletId;
+
+ explicit TEvSubscribeToTabletDeletion(ui64 tabletId)
+ : TabletId(tabletId)
+ {}
+ };
+
+ struct TEvNotifyTabletDeleted : public TEventLocal<TEvNotifyTabletDeleted, EvNotifyTabletDeleted> {
+ ui64 TabletId;
+
+ explicit TEvNotifyTabletDeleted(ui64 tabletId)
+ : TabletId(tabletId)
+ {}
+ };
+ };
+
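A short sketch of how a test can use these events against the fake hive handlers shown above; fakeHiveTabletId, tabletId and the runtime variable in scope are placeholders, not from this commit.

    // Subscribe and block until the fake hive reports the tablet as deleted.
    TActorId edge = runtime.AllocateEdgeActor();
    runtime.SendToPipe(fakeHiveTabletId, edge,
                       new TEvFakeHive::TEvSubscribeToTabletDeletion(tabletId));
    // The fake hive replies at once if the tablet is already unknown to it,
    // otherwise once the corresponding tablet has been removed (e.g. via TEvDeleteTablet).
    TAutoPtr<IEventHandle> handle;
    runtime.GrabEdgeEventRethrow<TEvHive::TEvResponseHiveInfo>(handle);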
struct TFakeHiveTabletInfo {
const TTabletTypes::EType Type;
const ui64 TabletId;
@@ -118,11 +118,11 @@ namespace NKikimr {
ui32 ChannelsProfile;
THashSet<TActorId> DeletionWaiters;
-
+
TFakeHiveTabletInfo(TTabletTypes::EType type, ui64 tabletId, TActorId bootstrapperActorId)
: Type(type)
, TabletId(tabletId)
- , BootstrapperActorId(bootstrapperActorId)
+ , BootstrapperActorId(bootstrapperActorId)
{}
TFakeHiveTabletInfo(const TFakeHiveTabletInfo& info) = default;
diff --git a/ydb/core/testlib/tenant_runtime.cpp b/ydb/core/testlib/tenant_runtime.cpp
index b7494fb1087..974cc166d0d 100644
--- a/ydb/core/testlib/tenant_runtime.cpp
+++ b/ydb/core/testlib/tenant_runtime.cpp
@@ -195,8 +195,8 @@ class TFakeSchemeShard : public TActor<TFakeSchemeShard>, public TTabletExecuted
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::FLAT_SCHEMESHARD_ACTOR;
- }
-
+ }
+
TFakeSchemeShard(const TActorId &tablet, TTabletStorageInfo *info,
TActorId sender, const TVector<std::pair<TString, ui64>> &subDomains)
: TActor(&TThis::StateInit)
@@ -638,8 +638,8 @@ class TFakeHive : public TActor<TFakeHive>, public TTabletExecutedFlat {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::HIVE_ACTOR;
- }
-
+ }
+
TFakeHive(const TActorId &tablet, TTabletStorageInfo *info, TActorId sender,
ui64 hiveId, const THashMap<TSubDomainKey, TString> &subDomainKeys)
: TActor(&TThis::StateInit)
diff --git a/ydb/core/testlib/test_client.cpp b/ydb/core/testlib/test_client.cpp
index a018abe6a56..d4907c26f1e 100644
--- a/ydb/core/testlib/test_client.cpp
+++ b/ydb/core/testlib/test_client.cpp
@@ -193,7 +193,7 @@ namespace Tests {
app.AddHive(Settings->Domain, ChangeStateStorage(Hive, Settings->Domain));
app.SetFnRegistry(Settings->FrFactory);
- app.SetFormatsFactory(Settings->Formats);
+ app.SetFormatsFactory(Settings->Formats);
if (Settings->Formats) {
NKikHouse::RegisterFormat(*Settings->Formats);
@@ -252,33 +252,33 @@ namespace Tests {
auto grpcRequestProxy = NGRpcService::CreateGRpcRequestProxy(Settings->AppConfig);
auto grpcRequestProxyId = system->Register(grpcRequestProxy, TMailboxType::ReadAsFilled);
system->RegisterLocalService(NGRpcService::CreateGRpcRequestProxyId(), grpcRequestProxyId);
- auto grpcMon = system->Register(NGRpcService::CreateGrpcMonService(), TMailboxType::ReadAsFilled);
- system->RegisterLocalService(NGRpcService::GrpcMonServiceId(), grpcMon);
+ auto grpcMon = system->Register(NGRpcService::CreateGrpcMonService(), TMailboxType::ReadAsFilled);
+ system->RegisterLocalService(NGRpcService::GrpcMonServiceId(), grpcMon);
GRpcServerRootCounters = MakeIntrusive<NMonitoring::TDynamicCounters>();
auto& counters = GRpcServerRootCounters;
- auto& appData = Runtime->GetAppData();
-
- // Setup discovery for typically used services on the node
- {
- TIntrusivePtr<NGRpcService::TGrpcEndpointDescription> desc = new NGRpcService::TGrpcEndpointDescription();
- desc->Address = options.Host;
- desc->Port = options.Port;
- desc->Ssl = !options.SslData.Empty();
-
- TVector<TString> rootDomains;
- for (auto &domain : appData.DomainsInfo->Domains) {
- rootDomains.emplace_back("/" + domain.second->Name);
- }
- desc->ServedDatabases.insert(desc->ServedDatabases.end(), rootDomains.begin(), rootDomains.end());
-
+ auto& appData = Runtime->GetAppData();
+
+ // Setup discovery for typically used services on the node
+ {
+ TIntrusivePtr<NGRpcService::TGrpcEndpointDescription> desc = new NGRpcService::TGrpcEndpointDescription();
+ desc->Address = options.Host;
+ desc->Port = options.Port;
+ desc->Ssl = !options.SslData.Empty();
+
+ TVector<TString> rootDomains;
+ for (auto &domain : appData.DomainsInfo->Domains) {
+ rootDomains.emplace_back("/" + domain.second->Name);
+ }
+ desc->ServedDatabases.insert(desc->ServedDatabases.end(), rootDomains.begin(), rootDomains.end());
+
TVector<TString> grpcServices = {"yql", "clickhouse_internal", "datastreams", "table_service", "scripting", "experimental", "discovery", "pqcd", "pq", "pqv1" };
- desc->ServedServices.insert(desc->ServedServices.end(), grpcServices.begin(), grpcServices.end());
-
- system->Register(NGRpcService::CreateGrpcEndpointPublishActor(desc.Get()), TMailboxType::ReadAsFilled, appData.UserPoolId);
- }
-
+ desc->ServedServices.insert(desc->ServedServices.end(), grpcServices.begin(), grpcServices.end());
+
+ system->Register(NGRpcService::CreateGrpcEndpointPublishActor(desc.Get()), TMailboxType::ReadAsFilled, appData.UserPoolId);
+ }
+
auto future = grpcService->Prepare(
system,
NMsgBusProxy::CreatePersQueueMetaCacheV2Id(),
@@ -311,9 +311,9 @@ namespace Tests {
GRpcServer->AddService(new NKesus::TKesusGRpcService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NGRpcService::TGRpcCmsService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NGRpcService::TGRpcDiscoveryService(system, counters, grpcRequestProxyId));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbExperimentalService(system, counters, grpcRequestProxyId));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxyId));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbS3InternalService(system, counters, grpcRequestProxyId));
+ GRpcServer->AddService(new NGRpcService::TGRpcYdbExperimentalService(system, counters, grpcRequestProxyId));
+ GRpcServer->AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxyId));
+ GRpcServer->AddService(new NGRpcService::TGRpcYdbS3InternalService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NQuoter::TRateLimiterGRpcService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NGRpcService::TGRpcYdbLongTxService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NGRpcService::TGRpcDataStreamsService(system, counters, grpcRequestProxyId));
@@ -328,7 +328,7 @@ namespace Tests {
GRpcServer->AddService(service);
}
}
- GRpcServer->AddService(new NGRpcService::TGRpcYdbLogStoreService(system, counters, grpcRequestProxyId));
+ GRpcServer->AddService(new NGRpcService::TGRpcYdbLogStoreService(system, counters, grpcRequestProxyId));
GRpcServer->AddService(new NGRpcService::TGRpcAuthService(system, counters, grpcRequestProxyId));
GRpcServer->Start();
}
@@ -936,7 +936,7 @@ namespace Tests {
}
bool TClient::LoadTypes() {
- TAutoPtr<NMsgBusProxy::TBusTypesRequest> request(new NMsgBusProxy::TBusTypesRequest());
+ TAutoPtr<NMsgBusProxy::TBusTypesRequest> request(new NMsgBusProxy::TBusTypesRequest());
if (TypesEtag.Defined()) {
request->Record.SetETag(*TypesEtag.Get());
}
@@ -1024,7 +1024,7 @@ namespace Tests {
#ifndef NDEBUG
Cout << PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()) << Endl;
#endif
- return reply;
+ return reply;
}
void TClient::InitRootScheme() {
@@ -1040,7 +1040,7 @@ namespace Tests {
void TClient::ExecuteTraceCommand(NKikimrClient::TMessageBusTraceRequest::ECommand command, const TString &path) {
- TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
+ TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
request->Record.SetCommand(command);
if (path)
request->Record.SetPath(path);
@@ -1049,7 +1049,7 @@ namespace Tests {
}
TString TClient::StartTrace(const TString &path) {
- TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
+ TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
request->Record.SetCommand(NKikimrClient::TMessageBusTraceRequest::START);
if (path)
request->Record.SetPath(path);
@@ -1064,7 +1064,7 @@ namespace Tests {
}
void TClient::StopTrace() {
- TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
+ TAutoPtr<NMsgBusProxy::TBusMessageBusTraceRequest> request(new NMsgBusProxy::TBusMessageBusTraceRequest());
request->Record.SetCommand(NKikimrClient::TMessageBusTraceRequest::STOP);
TAutoPtr<NBus::TBusMessage> reply;
UNIT_ASSERT_VALUES_EQUAL(SyncCall(request, reply), NBus::MESSAGE_OK);
@@ -1075,7 +1075,7 @@ namespace Tests {
TDuration timeout)
{
auto deadline = TInstant::Now() + timeout;
-
+
NBus::EMessageStatus status;
const NKikimrClient::TResponse* response = nullptr;
do {
@@ -1087,15 +1087,15 @@ namespace Tests {
#ifndef NDEBUG
Cerr << "waiting..." << Endl;
#endif
- status = SyncCall(msg, reply);
- if (status != NBus::MESSAGE_OK) {
- const char *description = NBus::MessageStatusDescription(status);
- Cerr << description << Endl;
- return status;
- }
+ status = SyncCall(msg, reply);
+ if (status != NBus::MESSAGE_OK) {
+ const char *description = NBus::MessageStatusDescription(status);
+ Cerr << description << Endl;
+ return status;
+ }
if (reply->GetHeader()->Type != NMsgBusProxy::MTYPE_CLIENT_RESPONSE) {
- break;
- }
+ break;
+ }
response = &static_cast<NMsgBusProxy::TBusResponse*>(reply.Get())->Record;
} while (response->GetStatus() == NMsgBusProxy::MSTATUS_INPROGRESS && deadline >= TInstant::Now());
@@ -1109,7 +1109,7 @@ namespace Tests {
if (status != NBus::MESSAGE_OK) {
return status;
- }
+ }
const NMsgBusProxy::TBusResponse* flatResponse = dynamic_cast<const NMsgBusProxy::TBusResponse*>(reply.Get());
if (!flatResponse)
@@ -1117,9 +1117,9 @@ namespace Tests {
const NKikimrClient::TResponse* response = &flatResponse->Record;
- if (response->HasErrorReason()) {
+ if (response->HasErrorReason()) {
Cerr << "reason: " << response->GetErrorReason() << Endl;
- }
+ }
if (response->GetStatus() != NMsgBusProxy::MSTATUS_INPROGRESS) {
return status;
@@ -1127,25 +1127,25 @@ namespace Tests {
NKikimrClient::TFlatTxId txId = response->GetFlatTxId();
return WaitCompletion(txId.GetTxId(), txId.GetSchemeShardTabletId(), txId.GetPathId(), reply, timeout);
- }
-
+ }
+
NMsgBusProxy::EResponseStatus TClient::MkDir(const TString& parent, const TString& name, const TApplyIf& applyIf) {
NMsgBusProxy::TBusSchemeOperation* request(new NMsgBusProxy::TBusSchemeOperation());
- auto* mkDirTx = request->Record.MutableTransaction()->MutableModifyScheme();
- mkDirTx->SetWorkingDir(parent);
+ auto* mkDirTx = request->Record.MutableTransaction()->MutableModifyScheme();
+ mkDirTx->SetWorkingDir(parent);
mkDirTx->SetOperationType(NKikimrSchemeOp::ESchemeOpMkDir);
- mkDirTx->MutableMkDir()->SetName(name);
+ mkDirTx->MutableMkDir()->SetName(name);
SetApplyIf(*mkDirTx, applyIf);
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus msgStatus = SendAndWaitCompletion(request, reply);
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus msgStatus = SendAndWaitCompletion(request, reply);
#ifndef NDEBUG
Cout << PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()) << Endl;
#endif
- UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
+ UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- return (NMsgBusProxy::EResponseStatus)response.GetStatus();
- }
-
+ return (NMsgBusProxy::EResponseStatus)response.GetStatus();
+ }
+
NMsgBusProxy::EResponseStatus TClient::RmDir(const TString& parent, const TString& name, const TApplyIf& applyIf) {
NMsgBusProxy::TBusSchemeOperation* request(new NMsgBusProxy::TBusSchemeOperation());
auto* mkDirTx = request->Record.MutableTransaction()->MutableModifyScheme();
@@ -1326,13 +1326,13 @@ namespace Tests {
op->SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpCreateTable);
op->SetWorkingDir(parent);
op->MutableCreateTable()->CopyFrom(table);
- TAutoPtr<NBus::TBusMessage> reply;
+ TAutoPtr<NBus::TBusMessage> reply;
NBus::EMessageStatus status = SendAndWaitCompletion(request.Release(), reply, timeout);
UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- return (NMsgBusProxy::EResponseStatus)response.GetStatus();
- }
-
+ return (NMsgBusProxy::EResponseStatus)response.GetStatus();
+ }
+
NMsgBusProxy::EResponseStatus TClient::CreateTableWithUniformShardedIndex(const TString& parent,
const NKikimrSchemeOp::TTableDescription &table, const TString& indexName, const TVector<TString> indexColumns, TDuration timeout)
{
@@ -1502,24 +1502,24 @@ namespace Tests {
op->SetWorkingDir(parent);
op->MutableAlterTable()->CopyFrom(alter);
TAutoPtr<NBus::TBusMessage> reply;
- if (userToken) {
- request->Record.SetSecurityToken(userToken);
- }
+ if (userToken) {
+ request->Record.SetSecurityToken(userToken);
+ }
NBus::EMessageStatus status = SendAndWaitCompletion(request.Release(), reply);
UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
- return dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Release());
- }
-
- TAutoPtr<NMsgBusProxy::TBusResponse> TClient::AlterTable(const TString& parent, const TString& alter, const TString& userToken) {
+ return dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Release());
+ }
+
+ TAutoPtr<NMsgBusProxy::TBusResponse> TClient::AlterTable(const TString& parent, const TString& alter, const TString& userToken) {
NKikimrSchemeOp::TTableDescription table;
- bool parseOk = ::google::protobuf::TextFormat::ParseFromString(alter, &table);
- UNIT_ASSERT(parseOk);
- return AlterTable(parent, table, userToken);
- }
-
+ bool parseOk = ::google::protobuf::TextFormat::ParseFromString(alter, &table);
+ UNIT_ASSERT(parseOk);
+ return AlterTable(parent, table, userToken);
+ }
+
NMsgBusProxy::EResponseStatus TClient::AlterTable(const TString& parent, const NKikimrSchemeOp::TTableDescription& alter) {
- TAutoPtr<NMsgBusProxy::TBusResponse> reply = AlterTable(parent, alter, TString());
- const NKikimrClient::TResponse &response = reply->Record;
+ TAutoPtr<NMsgBusProxy::TBusResponse> reply = AlterTable(parent, alter, TString());
+ const NKikimrClient::TResponse &response = reply->Record;
return (NMsgBusProxy::EResponseStatus)response.GetStatus();
}
@@ -1553,7 +1553,7 @@ namespace Tests {
NBus::EMessageStatus status = SendAndWaitCompletion(request.Release(), reply);
UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- return (NMsgBusProxy::EResponseStatus)response.GetStatus();
+ return (NMsgBusProxy::EResponseStatus)response.GetStatus();
}
NMsgBusProxy::EResponseStatus TClient::DeleteTopic(const TString& parent, const TString& name) {
@@ -1609,17 +1609,17 @@ namespace Tests {
TAutoPtr<NMsgBusProxy::TBusResponse> TClient::LsImpl(const TString& path) {
TAutoPtr<NMsgBusProxy::TBusSchemeDescribe> request(new NMsgBusProxy::TBusSchemeDescribe());
- request->Record.SetPath(path);
- request->Record.MutableOptions()->SetShowPrivateTable(true);
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus msgStatus = SendWhenReady(request, reply);
- UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
+ request->Record.SetPath(path);
+ request->Record.MutableOptions()->SetShowPrivateTable(true);
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus msgStatus = SendWhenReady(request, reply);
+ UNIT_ASSERT_VALUES_EQUAL(msgStatus, NBus::MESSAGE_OK);
#ifndef NDEBUG
Cerr << "TClient::Ls: " << PrintResult<NMsgBusProxy::TBusResponse>(reply.Get()) << Endl;
#endif
return dynamic_cast<NMsgBusProxy::TBusResponse*>(reply.Release());
- }
-
+ }
+
TAutoPtr<NMsgBusProxy::TBusResponse> TClient::Ls(const TString& path) {
return LsImpl(path).Release();
}
@@ -1813,16 +1813,16 @@ namespace Tests {
bool TClient::LocalQuery(const ui64 tabletId, const TString &pgmText, NKikimrMiniKQL::TResult& result) {
TAutoPtr<NMsgBusProxy::TBusTabletLocalMKQL> request = new NMsgBusProxy::TBusTabletLocalMKQL();
request->Record.SetTabletID(ChangeStateStorage(tabletId, Domain));
- request->Record.SetWithRetry(true);
+ request->Record.SetWithRetry(true);
auto *mkql = request->Record.MutableProgram();
mkql->MutableProgram()->SetText(pgmText);
TAutoPtr<NBus::TBusMessage> reply;
- auto status = SyncCall(request, reply);
- UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
+ auto status = SyncCall(request, reply);
+ UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
if (response.HasExecutionEngineEvaluatedResponse())
result.CopyFrom(response.GetExecutionEngineEvaluatedResponse());
@@ -1830,49 +1830,49 @@ namespace Tests {
return response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Complete);
}
- bool TClient::LocalSchemeTx(const ui64 tabletId, const NTabletFlatScheme::TSchemeChanges& changes, bool dryRun,
+ bool TClient::LocalSchemeTx(const ui64 tabletId, const NTabletFlatScheme::TSchemeChanges& changes, bool dryRun,
NTabletFlatScheme::TSchemeChanges& scheme, TString& err) {
- TAutoPtr<NMsgBusProxy::TBusTabletLocalSchemeTx> request = new NMsgBusProxy::TBusTabletLocalSchemeTx();
+ TAutoPtr<NMsgBusProxy::TBusTabletLocalSchemeTx> request = new NMsgBusProxy::TBusTabletLocalSchemeTx();
request->Record.SetTabletID(ChangeStateStorage(tabletId, Domain));
- request->Record.SetDryRun(dryRun);
- auto *schemeChanges = request->Record.MutableSchemeChanges();
- schemeChanges->CopyFrom(changes);
-
- TAutoPtr<NBus::TBusMessage> reply;
- auto status = SyncCall(request, reply);
- UNIT_ASSERT_EQUAL(status, NBus::MESSAGE_OK);
-
+ request->Record.SetDryRun(dryRun);
+ auto *schemeChanges = request->Record.MutableSchemeChanges();
+ schemeChanges->CopyFrom(changes);
+
+ TAutoPtr<NBus::TBusMessage> reply;
+ auto status = SyncCall(request, reply);
+ UNIT_ASSERT_EQUAL(status, NBus::MESSAGE_OK);
+
const NKikimrClient::TResponse &response = dynamic_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
- UNIT_ASSERT_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
-
- err = response.GetErrorReason();
- scheme.CopyFrom(response.GetLocalDbScheme());
-
+ UNIT_ASSERT_EQUAL(response.GetStatus(), NMsgBusProxy::MSTATUS_OK);
+
+ err = response.GetErrorReason();
+ scheme.CopyFrom(response.GetLocalDbScheme());
+
return err.empty();
- }
-
+ }
+
bool TClient::LocalSchemeTx(const ui64 tabletId, const TString &schemeChangesStr, bool dryRun,
NTabletFlatScheme::TSchemeChanges& scheme, TString& err) {
- NTabletFlatScheme::TSchemeChanges schemeChanges;
- ::google::protobuf::TextFormat::ParseFromString(schemeChangesStr, &schemeChanges);
- return LocalSchemeTx(tabletId, schemeChanges, dryRun, scheme, err);
- }
-
+ NTabletFlatScheme::TSchemeChanges schemeChanges;
+ ::google::protobuf::TextFormat::ParseFromString(schemeChangesStr, &schemeChanges);
+ return LocalSchemeTx(tabletId, schemeChanges, dryRun, scheme, err);
+ }
+
bool TClient::Compile(const TString &mkql, TString &compiled) {
TAutoPtr<NMsgBusProxy::TBusRequest> request = new NMsgBusProxy::TBusRequest();
- auto* mkqlTx = request->Record.MutableTransaction()->MutableMiniKQLTransaction();
- mkqlTx->MutableProgram()->SetText(mkql);
- mkqlTx->SetFlatMKQL(true);
+ auto* mkqlTx = request->Record.MutableTransaction()->MutableMiniKQLTransaction();
+ mkqlTx->MutableProgram()->SetText(mkql);
+ mkqlTx->SetFlatMKQL(true);
mkqlTx->SetMode(NKikimrTxUserProxy::TMiniKQLTransaction::COMPILE);
- TAutoPtr<NBus::TBusMessage> reply;
- NBus::EMessageStatus msgStatus = SyncCall(request, reply);
- UNIT_ASSERT_EQUAL(msgStatus, NBus::MESSAGE_OK);
-
+ TAutoPtr<NBus::TBusMessage> reply;
+ NBus::EMessageStatus msgStatus = SyncCall(request, reply);
+ UNIT_ASSERT_EQUAL(msgStatus, NBus::MESSAGE_OK);
+
const NKikimrClient::TResponse &response = static_cast<NMsgBusProxy::TBusResponse *>(reply.Get())->Record;
if (!response.HasMiniKQLCompileResults())
return false;
-
+
const auto &compileRes = response.GetMiniKQLCompileResults();
if (compileRes.ProgramCompileErrorsSize()) {
NYql::TIssues issues;
@@ -1901,8 +1901,8 @@ namespace Tests {
if (opts.Params)
mkqlTx->MutableParams()->SetText(opts.Params);
mkqlTx->SetFlatMKQL(true);
- if (opts.CollectStats)
- mkqlTx->SetCollectStats(true);
+ if (opts.CollectStats)
+ mkqlTx->SetCollectStats(true);
}
TAutoPtr<NBus::TBusMessage> reply;
@@ -1912,7 +1912,7 @@ namespace Tests {
NMsgBusProxy::TBusResponse * ret = static_cast<NMsgBusProxy::TBusResponse *>(reply.Get());
ui32 responseStatus = ret->Record.GetStatus();
if (responseStatus == NMsgBusProxy::MSTATUS_NOTREADY ||
- responseStatus == NMsgBusProxy::MSTATUS_TIMEOUT ||
+ responseStatus == NMsgBusProxy::MSTATUS_TIMEOUT ||
responseStatus == NMsgBusProxy::MSTATUS_INPROGRESS)
continue;
@@ -1922,33 +1922,33 @@ namespace Tests {
UNIT_ASSERT(retryCnt > 0);
return response.GetStatus();
- }
+ }
bool TClient::FlatQuery(const TString &query, TFlatQueryOptions& opts, NKikimrMiniKQL::TResult &result, const NKikimrClient::TResponse& expectedResponse) {
NKikimrClient::TResponse response;
- FlatQueryRaw(query, opts, response);
-
+ FlatQueryRaw(query, opts, response);
+
if (!response.GetDataShardErrors().empty()) {
Cerr << "DataShardErrors:" << Endl << response.GetDataShardErrors() << Endl;
- }
+ }
if (!response.GetMiniKQLErrors().empty()) {
Cerr << "MiniKQLErrors:" << Endl << response.GetMiniKQLErrors() << Endl;
- }
- if (response.HasProxyErrorCode()) {
- if (response.GetProxyErrorCode() != TEvTxUserProxy::TResultStatus::ExecComplete)
- Cerr << "proxy error code: " << static_cast<TEvTxUserProxy::TResultStatus::EStatus>(response.GetProxyErrorCode()) << Endl;
+ }
+ if (response.HasProxyErrorCode()) {
+ if (response.GetProxyErrorCode() != TEvTxUserProxy::TResultStatus::ExecComplete)
+ Cerr << "proxy error code: " << static_cast<TEvTxUserProxy::TResultStatus::EStatus>(response.GetProxyErrorCode()) << Endl;
if (expectedResponse.HasProxyErrorCode()) {
UNIT_ASSERT_VALUES_EQUAL(response.GetProxyErrorCode(), expectedResponse.GetProxyErrorCode());
}
- }
- if (response.HasProxyErrors()) {
- Cerr << "proxy errors: " << response.GetProxyErrors() << Endl;
- }
- if (response.UnresolvedKeysSize() > 0) {
- for (size_t i = 0, end = response.UnresolvedKeysSize(); i < end; ++i) {
- Cerr << response.GetUnresolvedKeys(i) << Endl;
- }
- }
+ }
+ if (response.HasProxyErrors()) {
+ Cerr << "proxy errors: " << response.GetProxyErrors() << Endl;
+ }
+ if (response.UnresolvedKeysSize() > 0) {
+ for (size_t i = 0, end = response.UnresolvedKeysSize(); i < end; ++i) {
+ Cerr << response.GetUnresolvedKeys(i) << Endl;
+ }
+ }
if (response.HasMiniKQLCompileResults()) {
const auto &compileRes = response.GetMiniKQLCompileResults();
if (compileRes.ProgramCompileErrorsSize()) {
@@ -1969,24 +1969,24 @@ namespace Tests {
if (response.HasHadFollowerReads() && response.GetHadFollowerReads()) {
Cerr << "had follower reads" << Endl;
}
-
+
if (expectedResponse.HasStatus()) {
UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), expectedResponse.GetStatus());
}
if (expectedResponse.GetStatus() != NMsgBusProxy::MSTATUS_OK)
return false;
- UNIT_ASSERT(response.HasTxId());
+ UNIT_ASSERT(response.HasTxId());
UNIT_ASSERT(response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Complete)
|| response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Aborted));
-
- if (response.HasExecutionEngineEvaluatedResponse()) {
- result.Swap(response.MutableExecutionEngineEvaluatedResponse());
- }
-
+
+ if (response.HasExecutionEngineEvaluatedResponse()) {
+ result.Swap(response.MutableExecutionEngineEvaluatedResponse());
+ }
+
return response.GetExecutionEngineResponseStatus() == ui32(NMiniKQL::IEngineFlat::EStatus::Complete);
- }
-
+ }
+
bool TClient::FlatQuery(const TString &query, TFlatQueryOptions& opts, NKikimrMiniKQL::TResult &result, ui32 expectedStatus) {
NKikimrClient::TResponse expectedResponse;
expectedResponse.SetStatus(expectedStatus);
@@ -2008,46 +2008,46 @@ namespace Tests {
TString TClient::SendTabletMonQuery(TTestActorRuntime* runtime, ui64 tabletId, TString query) {
TActorId sender = runtime->AllocateEdgeActor(0);
ForwardToTablet(*runtime, tabletId, sender, new NActors::NMon::TEvRemoteHttpInfo(query), 0);
- TAutoPtr<IEventHandle> handle;
- // Timeout for DEBUG purposes only
- runtime->GrabEdgeEvent<NMon::TEvRemoteJsonInfoRes>(handle);
+ TAutoPtr<IEventHandle> handle;
+ // Timeout for DEBUG purposes only
+ runtime->GrabEdgeEvent<NMon::TEvRemoteJsonInfoRes>(handle);
TString res = handle->Get<NMon::TEvRemoteJsonInfoRes>()->Json;
#ifndef NDEBUG
- Cerr << res << Endl;
+ Cerr << res << Endl;
#endif
- return res;
- }
-
+ return res;
+ }
+
TString TClient::MarkNodeInHive(TTestActorRuntime* runtime, ui32 nodeIdx, bool up) {
- ui32 nodeId = runtime->GetNodeId(nodeIdx);
+ ui32 nodeId = runtime->GetNodeId(nodeIdx);
ui64 hive = ChangeStateStorage(Tests::Hive, Domain);
TInstant deadline = TInstant::Now() + TIMEOUT;
- while (TInstant::Now() <= deadline) {
+ while (TInstant::Now() <= deadline) {
TString res = SendTabletMonQuery(runtime, hive, TString("/app?page=SetDown&node=") + ToString(nodeId) + "&down=" + (up ? "0" : "1"));
if (!res.empty() && !res.Contains("Error"))
- return res;
+ return res;
- }
- UNIT_ASSERT_C(false, "Failed to mark node in hive");
+ }
+ UNIT_ASSERT_C(false, "Failed to mark node in hive");
return TString();
- }
-
+ }
+
TString TClient::KickNodeInHive(TTestActorRuntime* runtime, ui32 nodeIdx) {
- ui32 nodeId = runtime->GetNodeId(nodeIdx);
+ ui32 nodeId = runtime->GetNodeId(nodeIdx);
ui64 hive = ChangeStateStorage(Tests::Hive, Domain);
return SendTabletMonQuery(runtime, hive, TString("/app?page=KickNode&node=") + ToString(nodeId));
- }
-
+ }
+
bool TClient::WaitForTabletAlive(TTestActorRuntime* runtime, ui64 tabletId, bool leader, TDuration timeout) {
TActorId edge = runtime->AllocateEdgeActor();
- NTabletPipe::TClientConfig clientConfig;
+ NTabletPipe::TClientConfig clientConfig;
clientConfig.AllowFollower = !leader;
clientConfig.ForceFollower = !leader;
clientConfig.RetryPolicy = NTabletPipe::TClientRetryPolicy::WithRetries();
TActorId pipeClient = runtime->Register(NTabletPipe::CreateClient(edge, tabletId, clientConfig));
- TAutoPtr<IEventHandle> handle;
+ TAutoPtr<IEventHandle> handle;
const TInstant deadline = TInstant::Now() + timeout;
- bool res = false;
+ bool res = false;
try {
while (TInstant::Now() <= deadline) {
@@ -2059,13 +2059,13 @@ namespace Tests {
res = (ev->Status == NKikimrProto::OK);
break;
}
- }
+ }
} catch (TEmptyEventQueueException &) {}
runtime->Send(new IEventHandle(pipeClient, TActorId(), new TEvents::TEvPoisonPill()));
- return res;
- }
-
+ return res;
+ }
+
bool TClient::WaitForTabletDown(TTestActorRuntime* runtime, ui64 tabletId, bool leader, TDuration timeout) {
TActorId edge = runtime->AllocateEdgeActor();
NTabletPipe::TClientConfig clientConfig;
@@ -2112,46 +2112,46 @@ namespace Tests {
}
void TClient::GetTabletInfoFromHive(TTestActorRuntime* runtime, ui64 tabletId, bool returnFollowers, NKikimrHive::TEvResponseHiveInfo& res) {
- TAutoPtr<TEvHive::TEvRequestHiveInfo> ev(new TEvHive::TEvRequestHiveInfo);
- ev->Record.SetTabletID(tabletId);
+ TAutoPtr<TEvHive::TEvRequestHiveInfo> ev(new TEvHive::TEvRequestHiveInfo);
+ ev->Record.SetTabletID(tabletId);
ev->Record.SetReturnFollowers(returnFollowers);
-
+
ui64 hive = ChangeStateStorage(Tests::Hive, Domain);
TActorId edge = runtime->AllocateEdgeActor();
- runtime->SendToPipe(hive, edge, ev.Release());
- TAutoPtr<IEventHandle> handle;
- TEvHive::TEvResponseHiveInfo* response = runtime->GrabEdgeEventRethrow<TEvHive::TEvResponseHiveInfo>(handle);
- res.Swap(&response->Record);
- }
-
+ runtime->SendToPipe(hive, edge, ev.Release());
+ TAutoPtr<IEventHandle> handle;
+ TEvHive::TEvResponseHiveInfo* response = runtime->GrabEdgeEventRethrow<TEvHive::TEvResponseHiveInfo>(handle);
+ res.Swap(&response->Record);
+ }
+
ui32 TClient::GetLeaderNode(TTestActorRuntime* runtime, ui64 tabletId) {
- NKikimrHive::TEvResponseHiveInfo res;
- GetTabletInfoFromHive(runtime, tabletId, false, res);
- // Cerr << res << Endl;
-
- for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
+ NKikimrHive::TEvResponseHiveInfo res;
+ GetTabletInfoFromHive(runtime, tabletId, false, res);
+ // Cerr << res << Endl;
+
+ for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
if (tablet.GetTabletID() == tabletId && tablet.GetNodeID() != 0) {
- return NodeIdToIndex(runtime, tablet.GetNodeID());
- }
- }
-
+ return NodeIdToIndex(runtime, tablet.GetNodeID());
+ }
+ }
+
return Max<ui32>();
- }
-
+ }
+
bool TClient::TabletExistsInHive(TTestActorRuntime* runtime, ui64 tabletId, bool evenInDeleting) {
- NKikimrHive::TEvResponseHiveInfo res;
- GetTabletInfoFromHive(runtime, tabletId, false, res);
- // Cerr << res << Endl;
-
- for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
- if (tablet.GetTabletID() == tabletId) {
+ NKikimrHive::TEvResponseHiveInfo res;
+ GetTabletInfoFromHive(runtime, tabletId, false, res);
+ // Cerr << res << Endl;
+
+ for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
+ if (tablet.GetTabletID() == tabletId) {
return evenInDeleting || tablet.GetState() != (ui32)NHive::ETabletState::Deleting;
- }
- }
-
- return false;
- }
-
+ }
+ }
+
+ return false;
+ }
+
void TClient::GetTabletStorageInfoFromHive(TTestActorRuntime* runtime, ui64 tabletId, NKikimrHive::TEvGetTabletStorageInfoResult& res) {
TAutoPtr<TEvHive::TEvGetTabletStorageInfo> ev(new TEvHive::TEvGetTabletStorageInfo(tabletId));
@@ -2178,51 +2178,51 @@ namespace Tests {
}
TVector<ui32> TClient::GetFollowerNodes(TTestActorRuntime* runtime, ui64 tabletId) {
- NKikimrHive::TEvResponseHiveInfo res;
- GetTabletInfoFromHive(runtime, tabletId, true, res);
- // Cerr << res << Endl;
-
+ NKikimrHive::TEvResponseHiveInfo res;
+ GetTabletInfoFromHive(runtime, tabletId, true, res);
+ // Cerr << res << Endl;
+
TVector<ui32> followerNodes;
- for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
+ for (const NKikimrHive::TTabletInfo& tablet : res.GetTablets()) {
if (tablet.GetTabletID() == tabletId && tablet.HasFollowerID()) {
followerNodes.push_back(NodeIdToIndex(runtime, tablet.GetNodeID()));
- }
- }
-
+ }
+ }
+
return followerNodes;
- }
-
- void TClient::S3Listing(const TString& table, const TString& prefixColumnsPb,
- const TString& pathPrefix, const TString& pathDelimiter,
- const TString& startAfterSuffixColumnsPb,
+ }
+
+ void TClient::S3Listing(const TString& table, const TString& prefixColumnsPb,
+ const TString& pathPrefix, const TString& pathDelimiter,
+ const TString& startAfterSuffixColumnsPb,
const TVector<TString>& columnsToReturn, ui32 maxKeys,
- ui32 timeoutMillisec,
- NKikimrClient::TS3ListingResponse& res) {
- TAutoPtr<NMsgBusProxy::TBusS3ListingRequest> request = new NMsgBusProxy::TBusS3ListingRequest();
- request->Record.SetTableName(table);
- bool parseOk = ::google::protobuf::TextFormat::ParseFromString(prefixColumnsPb, request->Record.MutableKeyPrefix());
- UNIT_ASSERT_C(parseOk, "Failed to parse prefix columns: " + prefixColumnsPb);
- request->Record.SetPathColumnPrefix(pathPrefix);
- request->Record.SetPathColumnDelimiter(pathDelimiter);
- if (!startAfterSuffixColumnsPb.empty()) {
- parseOk = ::google::protobuf::TextFormat::ParseFromString(startAfterSuffixColumnsPb, request->Record.MutableStartAfterKeySuffix());
- UNIT_ASSERT_C(parseOk, "Failed to parse suffix columns: " + startAfterSuffixColumnsPb);
- }
- request->Record.SetMaxKeys(maxKeys);
- request->Record.SetTimeout(timeoutMillisec);
- for (const TString& c : columnsToReturn) {
- request->Record.AddColumnsToReturn(c);
- }
-
- TAutoPtr<NBus::TBusMessage> reply;
- auto status = SyncCall(request, reply);
- UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
-
- NKikimrClient::TS3ListingResponse& response = dynamic_cast<NMsgBusProxy::TBusS3ListingResponse*>(reply.Get())->Record;
-
- res.Swap(&response);
- }
-
+ ui32 timeoutMillisec,
+ NKikimrClient::TS3ListingResponse& res) {
+ TAutoPtr<NMsgBusProxy::TBusS3ListingRequest> request = new NMsgBusProxy::TBusS3ListingRequest();
+ request->Record.SetTableName(table);
+ bool parseOk = ::google::protobuf::TextFormat::ParseFromString(prefixColumnsPb, request->Record.MutableKeyPrefix());
+ UNIT_ASSERT_C(parseOk, "Failed to parse prefix columns: " + prefixColumnsPb);
+ request->Record.SetPathColumnPrefix(pathPrefix);
+ request->Record.SetPathColumnDelimiter(pathDelimiter);
+ if (!startAfterSuffixColumnsPb.empty()) {
+ parseOk = ::google::protobuf::TextFormat::ParseFromString(startAfterSuffixColumnsPb, request->Record.MutableStartAfterKeySuffix());
+ UNIT_ASSERT_C(parseOk, "Failed to parse suffix columns: " + startAfterSuffixColumnsPb);
+ }
+ request->Record.SetMaxKeys(maxKeys);
+ request->Record.SetTimeout(timeoutMillisec);
+ for (const TString& c : columnsToReturn) {
+ request->Record.AddColumnsToReturn(c);
+ }
+
+ TAutoPtr<NBus::TBusMessage> reply;
+ auto status = SyncCall(request, reply);
+ UNIT_ASSERT_VALUES_EQUAL(status, NBus::MESSAGE_OK);
+
+ NKikimrClient::TS3ListingResponse& response = dynamic_cast<NMsgBusProxy::TBusS3ListingResponse*>(reply.Get())->Record;
+
+ res.Swap(&response);
+ }
+
ui64 TClient::GetKesusTabletId(const TString& kesusPath) {
auto describeResult = Ls(kesusPath);
UNIT_ASSERT_C(describeResult->Record.GetPathDescription().HasKesus(), describeResult->Record);
diff --git a/ydb/core/testlib/test_client.h b/ydb/core/testlib/test_client.h
index 705d71dec0f..2064752ab45 100644
--- a/ydb/core/testlib/test_client.h
+++ b/ydb/core/testlib/test_client.h
@@ -26,7 +26,7 @@
#include <ydb/core/base/grpc_service_factory.h>
#include <google/protobuf/text_format.h>
-
+
#include <functional>
#include <algorithm>
@@ -102,7 +102,7 @@ namespace Tests {
NFake::TStorage CustomDiskParams;
TControls Controls;
TAppPrepare::TFnReg FrFactory = &DefaultFrFactory;
- TIntrusivePtr<TFormatFactory> Formats;
+ TIntrusivePtr<TFormatFactory> Formats;
bool EnableMockOnSingleNode = true;
TAutoPtr<TLogBackend> LogBackend;
TLoggerInitializer LoggerInitializer;
@@ -265,7 +265,7 @@ namespace Tests {
struct TFlatQueryOptions {
TString Params;
bool IsQueryCompiled = false;
- bool CollectStats = false;
+ bool CollectStats = false;
};
struct TPathVersion {
@@ -276,7 +276,7 @@ namespace Tests {
using TApplyIf = TVector<TPathVersion>;
TClient(const TServerSettings& settings);
- virtual ~TClient();
+ virtual ~TClient();
const NMsgBusProxy::TMsgBusClientConfig& GetClientConfig() const;
std::shared_ptr<NMsgBusProxy::TMsgBusClient> GetClient() const;
@@ -314,7 +314,7 @@ namespace Tests {
}
template <typename T>
- NBus::EMessageStatus SyncCall(TAutoPtr<T> msgHolder, TAutoPtr<NBus::TBusMessage> &reply) {
+ NBus::EMessageStatus SyncCall(TAutoPtr<T> msgHolder, TAutoPtr<NBus::TBusMessage> &reply) {
NBus::EMessageStatus msgbusStatus = NBus::EMessageStatus::MESSAGE_TIMEOUT;
const ui64 finishTimeMs = TInstant::Now().MilliSeconds() + TIME_LIMIT_MS;
PrepareRequest(msgHolder);
@@ -342,7 +342,7 @@ namespace Tests {
TString StartTrace(const TString &path);
void StopTrace();
- // Flat DB operations
+ // Flat DB operations
NMsgBusProxy::EResponseStatus WaitCreateTx(TTestActorRuntime* runtime, const TString& path, TDuration timeout);
NMsgBusProxy::EResponseStatus MkDir(const TString& parent, const TString& name, const TApplyIf& applyIf = {});
NMsgBusProxy::EResponseStatus RmDir(const TString& parent, const TString& name, const TApplyIf& applyIf = {});
@@ -373,7 +373,7 @@ namespace Tests {
NMsgBusProxy::EResponseStatus AlterTable(const TString& parent, const NKikimrSchemeOp::TTableDescription& update);
NMsgBusProxy::EResponseStatus AlterTable(const TString& parent, const TString& alter);
TAutoPtr<NMsgBusProxy::TBusResponse> AlterTable(const TString& parent, const NKikimrSchemeOp::TTableDescription& update, const TString& userToken);
- TAutoPtr<NMsgBusProxy::TBusResponse> AlterTable(const TString& parent, const TString& alter, const TString& userToken);
+ TAutoPtr<NMsgBusProxy::TBusResponse> AlterTable(const TString& parent, const TString& alter, const TString& userToken);
NMsgBusProxy::EResponseStatus CreateOlapStore(const TString& parent, const TString& scheme);
NMsgBusProxy::EResponseStatus CreateOlapStore(const TString& parent, const NKikimrSchemeOp::TColumnStoreDescription& store);
@@ -392,13 +392,13 @@ namespace Tests {
bool FlatQuery(const TString& mkql, TFlatQueryOptions& opts, NKikimrMiniKQL::TResult& result,
ui32 expectedStatus = NMsgBusProxy::MSTATUS_OK);
bool FlatQueryParams(const TString &query, const TString &params, bool queryCompiled, NKikimrMiniKQL::TResult &result);
-
- // returns NMsgBusProxy::MSTATUS_* and the raw response
+
+ // returns NMsgBusProxy::MSTATUS_* and the raw response
ui32 FlatQueryRaw(const TString &query, TFlatQueryOptions& opts, NKikimrClient::TResponse& response, int retryCnt = 10);
-
+
bool Compile(const TString &mkql, TString &compiled);
bool LocalQuery(ui64 tabletId, const TString &pgmText, NKikimrMiniKQL::TResult& result);
- bool LocalSchemeTx(const ui64 tabletId, const NTabletFlatScheme::TSchemeChanges& schemeChanges, bool dryRun,
+ bool LocalSchemeTx(const ui64 tabletId, const NTabletFlatScheme::TSchemeChanges& schemeChanges, bool dryRun,
NTabletFlatScheme::TSchemeChanges& scheme, TString& err);
bool LocalSchemeTx(const ui64 tabletId, const TString& schemeChanges, bool dryRun,
NTabletFlatScheme::TSchemeChanges& scheme, TString& err);
@@ -408,13 +408,13 @@ namespace Tests {
TString CreateStoragePool(const TString& poolKind, const TString& partOfName, ui32 groups = 1);
NKikimrBlobStorage::TDefineStoragePool DescribeStoragePool(const TString& name);
void RemoveStoragePool(const TString& name);
-
+
TAutoPtr<NMsgBusProxy::TBusResponse> HiveCreateTablet(ui32 domainUid, ui64 owner, ui64 owner_index, TTabletTypes::EType tablet_type,
const TVector<ui32>& allowed_node_ids, const TVector<TSubDomainKey>& allowed_domains = {}, const TChannelsBindings& binding = {});
- // Helper functions
+ // Helper functions
TString SendTabletMonQuery(TTestActorRuntime* runtime, ui64 tabletId, TString query);
TString MarkNodeInHive(TTestActorRuntime* runtime, ui32 nodeIdx, bool up);
TString KickNodeInHive(TTestActorRuntime* runtime, ui32 nodeIdx);
@@ -423,11 +423,11 @@ namespace Tests {
ui32 GetLeaderNode(TTestActorRuntime* runtime, ui64 tabletId);
bool TabletExistsInHive(TTestActorRuntime* runtime, ui64 tabletId, bool evenInDeleting = false);
TVector<ui32> GetFollowerNodes(TTestActorRuntime *runtime, ui64 tabletId);
- void S3Listing(const TString& table, const TString& prefixColumnsPb, const TString &pathPrefix,
- const TString &pathDelimiter, const TString& startAfterSuffixColumnsPb,
- const TVector<TString>& columnsToReturn, ui32 maxKeys, ui32 timeoutMillisec,
- NKikimrClient::TS3ListingResponse &res);
-
+ void S3Listing(const TString& table, const TString& prefixColumnsPb, const TString &pathPrefix,
+ const TString &pathDelimiter, const TString& startAfterSuffixColumnsPb,
+ const TVector<TString>& columnsToReturn, ui32 maxKeys, ui32 timeoutMillisec,
+ NKikimrClient::TS3ListingResponse &res);
+
void GetTabletInfoFromHive(TTestActorRuntime* runtime, ui64 tabletId, bool returnFollowers, NKikimrHive::TEvResponseHiveInfo& res);
void GetTabletStorageInfoFromHive(TTestActorRuntime* runtime, ui64 tabletId, NKikimrHive::TEvGetTabletStorageInfoResult& res);
@@ -440,60 +440,60 @@ namespace Tests {
THolder<NKesus::TEvKesus::TEvGetConfigResult> GetKesusConfig(TTestActorRuntime* runtime, const TString& kesusPath);
protected:
- template <class TMsg>
+ template <class TMsg>
TString PrintResult(NBus::TBusMessage* msg, size_t maxSz = 1000) {
- auto res = dynamic_cast<TMsg*>(msg);
+ auto res = dynamic_cast<TMsg*>(msg);
TString s;
- ::google::protobuf::TextFormat::PrintToString(res->Record, &s);
+ ::google::protobuf::TextFormat::PrintToString(res->Record, &s);
if (s.size() > maxSz) {
- s.resize(maxSz);
- s += "...\n(TRUNCATED)\n";
- }
- return s;
- }
-
- // Waits for kikimr server to become ready
- template <class TReq>
- NBus::EMessageStatus SendWhenReady(TAutoPtr<TReq> request, TAutoPtr<NBus::TBusMessage>& reply, const ui32 timeout = 5000) {
- TInstant deadline = TInstant::Now() + TDuration::MilliSeconds(timeout);
- NBus::EMessageStatus status = NBus::MESSAGE_UNKNOWN;
- // Server might not be ready
- do {
- TAutoPtr<TReq> msgCopy(new TReq());
- msgCopy->Record = request->Record;
- status = SyncCall(msgCopy, reply);
-
- if (status != NBus::MESSAGE_OK)
- return status;
-
- const NMsgBusProxy::TBusResponse* notReadyResp = dynamic_cast<const NMsgBusProxy::TBusResponse*>(reply.Get());
- if (!notReadyResp)
- break;
-
- if (notReadyResp->Record.GetStatus() != NMsgBusProxy::MSTATUS_NOTREADY)
+ s.resize(maxSz);
+ s += "...\n(TRUNCATED)\n";
+ }
+ return s;
+ }
+
+ // Waits for kikimr server to become ready
+ template <class TReq>
+ NBus::EMessageStatus SendWhenReady(TAutoPtr<TReq> request, TAutoPtr<NBus::TBusMessage>& reply, const ui32 timeout = 5000) {
+ TInstant deadline = TInstant::Now() + TDuration::MilliSeconds(timeout);
+ NBus::EMessageStatus status = NBus::MESSAGE_UNKNOWN;
+ // Server might not be ready
+ do {
+ TAutoPtr<TReq> msgCopy(new TReq());
+ msgCopy->Record = request->Record;
+ status = SyncCall(msgCopy, reply);
+
+ if (status != NBus::MESSAGE_OK)
+ return status;
+
+ const NMsgBusProxy::TBusResponse* notReadyResp = dynamic_cast<const NMsgBusProxy::TBusResponse*>(reply.Get());
+ if (!notReadyResp)
+ break;
+
+ if (notReadyResp->Record.GetStatus() != NMsgBusProxy::MSTATUS_NOTREADY)
break;
-
- // Retry if the server wasn't ready yet
- Sleep(TDuration::MilliSeconds(10));
- } while (TInstant::Now() < deadline);
-
- return status;
- }
-
- // Waits for scheme operation to complete
+
+ // Retry if the server wasn't ready yet
+ Sleep(TDuration::MilliSeconds(10));
+ } while (TInstant::Now() < deadline);
+
+ return status;
+ }
+
+ // Waits for scheme operation to complete
NBus::EMessageStatus WaitCompletion(ui64 txId, ui64 schemeshard, ui64 pathId,
TAutoPtr<NBus::TBusMessage>& reply,
TDuration timeout = TDuration::Seconds(1000));
NBus::EMessageStatus SendAndWaitCompletion(TAutoPtr<NMsgBusProxy::TBusSchemeOperation> request,
TAutoPtr<NBus::TBusMessage>& reply,
TDuration timeout = TDuration::Seconds(1000));
-
- ui32 NodeIdToIndex(TTestActorRuntime* runtime, ui32 id) {
- ui32 offset = runtime->GetNodeId(0);
+
+ ui32 NodeIdToIndex(TTestActorRuntime* runtime, ui32 id) {
+ ui32 offset = runtime->GetNodeId(0);
Y_VERIFY(id >= offset);
- return id - offset;
- }
-
+ return id - offset;
+ }
+
TAutoPtr<NMsgBusProxy::TBusResponse> LsImpl(const TString& path);
static void SetApplyIf(NKikimrSchemeOp::TModifyScheme& transaction, const TApplyIf& applyIf) {
@@ -504,7 +504,7 @@ namespace Tests {
}
}
- protected:
+ protected:
using TStoragePoolKinds = TDomainsInfo::TDomain::TStoragePoolKinds;
const ui32 Domain;
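A minimal usage sketch of the SendWhenReady() retry helper restored above, as it might be invoked from inside this test client; the TBusSchemeOperation request and the elided record contents are illustrative assumptions, not code from this commit:

    // Hypothetical call site inside the test client (sketch only).
    TAutoPtr<NMsgBusProxy::TBusSchemeOperation> request(new NMsgBusProxy::TBusSchemeOperation());
    // ... fill request->Record with the scheme transaction to run ...
    TAutoPtr<NBus::TBusMessage> reply;
    NBus::EMessageStatus status = SendWhenReady(request, reply, 5000 /* ms */);
    Y_VERIFY(status == NBus::MESSAGE_OK); // retries internally while the server reports MSTATUS_NOTREADY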
diff --git a/ydb/core/tx/columnshard/blob.cpp b/ydb/core/tx/columnshard/blob.cpp
index f088e2c1de3..b22d3fca73c 100644
--- a/ydb/core/tx/columnshard/blob.cpp
+++ b/ydb/core/tx/columnshard/blob.cpp
@@ -1,122 +1,122 @@
-#include "blob.h"
-#include "defs.h"
-
+#include "blob.h"
+#include "defs.h"
+
#include <ydb/core/tx/columnshard/engines/defs.h>
-
-#include <charconv>
-
+
+#include <charconv>
+
namespace NKikimr::NOlap {
-
-#define PARSE_INT_COMPONENT(fieldType, fieldName, endChar) \
- if (pos >= endPos) { \
- error = "Failed to parse " #fieldName " component"; \
- return TUnifiedBlobId(); \
- } \
- fieldType fieldName = -1; \
- { \
- auto [ptr, ec] { std::from_chars(str + pos, str + endPos, fieldName) }; \
- if (ec != std::errc()) { \
- error = "Failed to parse " #fieldName " component"; \
- return TUnifiedBlobId(); \
- } else { \
- pos = ptr - str; \
- } \
- if (str[pos++] != endChar) { \
- error = #endChar " not found after " #fieldName; \
- return TUnifiedBlobId(); \
- } \
- }
-
-// Format: "DS:group:logoBlobId"
-// Example: "DS:2181038103:[72075186224038245:51:31595:2:0:11952:0]"
-TUnifiedBlobId DoParseExtendedDsBlobId(const TString& s, TString& error) {
- Y_VERIFY(s.size() > 2);
- const char* str = s.c_str();
- Y_VERIFY(str[0] == 'D' && str[1] == 'S');
- i64 pos = 2;
- i64 endPos = s.size();
- if (str[pos++] != ':') {
- error = "Starting ':' not found";
- return TUnifiedBlobId();
- }
-
- PARSE_INT_COMPONENT(ui32, dsGroup, ':');
-
- TLogoBlobID logoBlobId;
- if (!TLogoBlobID::Parse(logoBlobId, s.substr(pos), error)) {
- return TUnifiedBlobId();
- }
-
- return TUnifiedBlobId(dsGroup, logoBlobId);
-}
-
-// Format: "SM[tabletId:generation:step:cookie:size]"
-// Example: "SM[72075186224038245:51:31184:0:2528]"
-TUnifiedBlobId DoParseSmallBlobId(const TString& s, TString& error) {
- Y_VERIFY(s.size() > 2);
- const char* str = s.c_str();
- Y_VERIFY(str[0] == 'S' && str[1] == 'M');
- i64 pos = 2;
- i64 endPos = s.size();
- if (str[pos++] != '[') {
- error = "opening [ not found";
- return TUnifiedBlobId();
- }
-
- PARSE_INT_COMPONENT(ui64, tabletId, ':');
- PARSE_INT_COMPONENT(ui32, gen, ':');
- PARSE_INT_COMPONENT(ui32, step, ':');
- PARSE_INT_COMPONENT(ui32, cookie, ':');
- PARSE_INT_COMPONENT(ui32, size, ']');
-
- if (pos != endPos) {
- error = "Extra characters after closing ]";
- return TUnifiedBlobId();
- }
-
- return TUnifiedBlobId(tabletId, gen, step, cookie, size);
-}
-
-TUnifiedBlobId TUnifiedBlobId::ParseFromString(const TString& str,
- const IBlobGroupSelector* dsGroupSelector, TString& error)
-{
- if (str.size() <= 2) {
- error = "String size is too small";
- return TUnifiedBlobId();
- }
-
- if (str[0] == '[') {
-        // If blobId starts with '[' it must be a logoBlobId; if the channel is set to FAKE_CHANNEL,
-        // this is a fake logoBlobId used for a small blob
- TLogoBlobID logoBlobId;
- bool parsed = TLogoBlobID::Parse(logoBlobId, str, error);
- if (!parsed) {
- error = "Cannot parse TLogoBlobID: " + error;
- return TUnifiedBlobId();
- }
- if (logoBlobId.Channel() == TSmallBlobId::FAKE_CHANNEL) {
- // Small blob
- return TUnifiedBlobId(logoBlobId.TabletID(), logoBlobId.Generation(), logoBlobId.Step(),
- logoBlobId.Cookie(), logoBlobId.BlobSize());
- } else {
- // DS blob
- if (!dsGroupSelector) {
- error = "Need TBlobGroupSelector to resolve DS group for the blob";
- return TUnifiedBlobId();
- }
- return TUnifiedBlobId(dsGroupSelector->GetGroup(logoBlobId), logoBlobId);
- }
- } else if (str[0] == 'D' && str[1] == 'S') {
- return DoParseExtendedDsBlobId(str, error);
- } else if (str[0] == 'S' && str[1] == 'M') {
- return DoParseSmallBlobId(str, error);
- } else if (str[0] == 'S' && str[1] == '3') {
- error = "S3 blob id parsing is not yet implemented";
- return TUnifiedBlobId();
- }
-
- error = Sprintf("Unknown blob id format: %c%c", str[0], str[1]);
- return TUnifiedBlobId();
-}
-
+
+#define PARSE_INT_COMPONENT(fieldType, fieldName, endChar) \
+ if (pos >= endPos) { \
+ error = "Failed to parse " #fieldName " component"; \
+ return TUnifiedBlobId(); \
+ } \
+ fieldType fieldName = -1; \
+ { \
+ auto [ptr, ec] { std::from_chars(str + pos, str + endPos, fieldName) }; \
+ if (ec != std::errc()) { \
+ error = "Failed to parse " #fieldName " component"; \
+ return TUnifiedBlobId(); \
+ } else { \
+ pos = ptr - str; \
+ } \
+ if (str[pos++] != endChar) { \
+ error = #endChar " not found after " #fieldName; \
+ return TUnifiedBlobId(); \
+ } \
+ }
+
+// Format: "DS:group:logoBlobId"
+// Example: "DS:2181038103:[72075186224038245:51:31595:2:0:11952:0]"
+TUnifiedBlobId DoParseExtendedDsBlobId(const TString& s, TString& error) {
+ Y_VERIFY(s.size() > 2);
+ const char* str = s.c_str();
+ Y_VERIFY(str[0] == 'D' && str[1] == 'S');
+ i64 pos = 2;
+ i64 endPos = s.size();
+ if (str[pos++] != ':') {
+ error = "Starting ':' not found";
+ return TUnifiedBlobId();
+ }
+
+ PARSE_INT_COMPONENT(ui32, dsGroup, ':');
+
+ TLogoBlobID logoBlobId;
+ if (!TLogoBlobID::Parse(logoBlobId, s.substr(pos), error)) {
+ return TUnifiedBlobId();
+ }
+
+ return TUnifiedBlobId(dsGroup, logoBlobId);
+}
+
+// Format: "SM[tabletId:generation:step:cookie:size]"
+// Example: "SM[72075186224038245:51:31184:0:2528]"
+TUnifiedBlobId DoParseSmallBlobId(const TString& s, TString& error) {
+ Y_VERIFY(s.size() > 2);
+ const char* str = s.c_str();
+ Y_VERIFY(str[0] == 'S' && str[1] == 'M');
+ i64 pos = 2;
+ i64 endPos = s.size();
+ if (str[pos++] != '[') {
+ error = "opening [ not found";
+ return TUnifiedBlobId();
+ }
+
+ PARSE_INT_COMPONENT(ui64, tabletId, ':');
+ PARSE_INT_COMPONENT(ui32, gen, ':');
+ PARSE_INT_COMPONENT(ui32, step, ':');
+ PARSE_INT_COMPONENT(ui32, cookie, ':');
+ PARSE_INT_COMPONENT(ui32, size, ']');
+
+ if (pos != endPos) {
+ error = "Extra characters after closing ]";
+ return TUnifiedBlobId();
+ }
+
+ return TUnifiedBlobId(tabletId, gen, step, cookie, size);
+}
+
+TUnifiedBlobId TUnifiedBlobId::ParseFromString(const TString& str,
+ const IBlobGroupSelector* dsGroupSelector, TString& error)
+{
+ if (str.size() <= 2) {
+ error = "String size is too small";
+ return TUnifiedBlobId();
+ }
+
+ if (str[0] == '[') {
+        // If blobId starts with '[' it must be a logoBlobId; if the channel is set to FAKE_CHANNEL,
+        // this is a fake logoBlobId used for a small blob
+ TLogoBlobID logoBlobId;
+ bool parsed = TLogoBlobID::Parse(logoBlobId, str, error);
+ if (!parsed) {
+ error = "Cannot parse TLogoBlobID: " + error;
+ return TUnifiedBlobId();
+ }
+ if (logoBlobId.Channel() == TSmallBlobId::FAKE_CHANNEL) {
+ // Small blob
+ return TUnifiedBlobId(logoBlobId.TabletID(), logoBlobId.Generation(), logoBlobId.Step(),
+ logoBlobId.Cookie(), logoBlobId.BlobSize());
+ } else {
+ // DS blob
+ if (!dsGroupSelector) {
+ error = "Need TBlobGroupSelector to resolve DS group for the blob";
+ return TUnifiedBlobId();
+ }
+ return TUnifiedBlobId(dsGroupSelector->GetGroup(logoBlobId), logoBlobId);
+ }
+ } else if (str[0] == 'D' && str[1] == 'S') {
+ return DoParseExtendedDsBlobId(str, error);
+ } else if (str[0] == 'S' && str[1] == 'M') {
+ return DoParseSmallBlobId(str, error);
+ } else if (str[0] == 'S' && str[1] == '3') {
+ error = "S3 blob id parsing is not yet implemented";
+ return TUnifiedBlobId();
+ }
+
+ error = Sprintf("Unknown blob id format: %c%c", str[0], str[1]);
+ return TUnifiedBlobId();
+}
+
}
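An illustrative round-trip through TUnifiedBlobId::ParseFromString() using the example strings quoted in the format comments above; this is a sketch assumed to sit inside namespace NKikimr::NOlap, not code from this commit. A null group selector suffices here because both formats carry their own addressing:

    TString error;
    TUnifiedBlobId dsBlob = TUnifiedBlobId::ParseFromString(
        "DS:2181038103:[72075186224038245:51:31595:2:0:11952:0]", nullptr, error);
    Y_VERIFY(dsBlob.IsDsBlob() && error.empty());
    Y_VERIFY(dsBlob.GetDsGroup() == 2181038103);

    TUnifiedBlobId smallBlob = TUnifiedBlobId::ParseFromString(
        "SM[72075186224038245:51:31184:0:2528]", nullptr, error);
    Y_VERIFY(smallBlob.IsSmallBlob());
    Y_VERIFY(smallBlob.ToStringNew() == "SM[72075186224038245:51:31184:0:2528]"); // round-trips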
diff --git a/ydb/core/tx/columnshard/blob.h b/ydb/core/tx/columnshard/blob.h
index d1a8609974f..ec83cff7601 100644
--- a/ydb/core/tx/columnshard/blob.h
+++ b/ydb/core/tx/columnshard/blob.h
@@ -1,271 +1,271 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/base/logoblob.h>
-
-#include <util/generic/string.h>
-
+
+#include <util/generic/string.h>
+
namespace NKikimr::NOlap {
-
-class IBlobGroupSelector;
-
-// Encapsulates different types of blob ids to simplify dealing with blobs for the
-// components that do not need to know where the blob is stored
-// Blob id formats:
-// * Old DS blob id: just "logoBlobId" e.g. "[72075186224038245:51:31595:2:0:11952:0]"
-// * DS blob id: "DS:dsGroup:logoBlobId" e.g. "DS:2181038103:[72075186224038245:51:31595:2:0:11952:0]"
-// * Small blob id: "SM[tabletId:generation:step:cookie:size]" e.g. "SM[72075186224038245:51:31184:0:2528]"
-class TUnifiedBlobId {
- struct TInvalid {
- bool operator == (const TInvalid&) const { return true; }
- };
-
- // Id of a blob in YDB distributed storage
- struct TDsBlobId {
- TLogoBlobID BlobId;
- ui32 DsGroup;
-
- bool operator == (const TDsBlobId& other) const {
- return BlobId == other.BlobId && DsGroup == other.DsGroup;
- }
-
- TString ToStringNew() const {
- return Sprintf( "DS:%" PRIu32 ":%s", DsGroup, BlobId.ToString().c_str());
- }
-
- TString ToStringLegacy() const {
- return BlobId.ToString();
- }
-
- ui64 Hash() const {
- return CombineHashes<ui64>(BlobId.Hash(), IntHash(DsGroup));
- }
- };
-
- // Id of a blob that is stored in Tablet local DB table
- struct TSmallBlobId {
- static constexpr ui8 FAKE_CHANNEL = 255; // Small blob id can be represented as
- // a fake TLogoBlobID with channel = FAKE_CHANNEL
-
- ui64 TabletId;
- ui32 Gen;
- ui32 Step;
- ui32 Cookie;
- ui32 Size;
-
- bool operator == (const TSmallBlobId& other) const {
- return TabletId == other.TabletId &&
- Gen == other.Gen &&
- Step == other.Step &&
- Cookie == other.Cookie &&
- Size == other.Size;
- }
-
- TString ToStringNew() const {
- return Sprintf( "SM[%" PRIu64 ":%" PRIu32 ":%" PRIu32 ":%" PRIu32 ":%" PRIu32 "]",
- TabletId, Gen, Step, Cookie, Size);
- }
-
- TString ToStringLegacy() const {
-            // For compatibility with the preproduction version, small blobs can also be
-            // addressed by a fake TLogoBlobID with channel = 255
- return TLogoBlobID(TabletId, Gen, Step, FAKE_CHANNEL, Size, Cookie).ToString();
- }
-
- ui64 Hash() const {
- ui64 hash = IntHash(TabletId);
- hash = CombineHashes<ui64>(hash, IntHash(Gen));
- hash = CombineHashes<ui64>(hash, IntHash(Step));
- hash = CombineHashes<ui64>(hash, IntHash(Cookie));
- hash = CombineHashes<ui64>(hash, IntHash(Size));
- return hash;
- }
- };
-
- std::variant<
- TInvalid,
- TDsBlobId,
- TSmallBlobId
- > Id;
-
-public:
- enum EBlobType {
- INVALID = 0,
- DS_BLOB = 1,
- TABLET_SMALL_BLOB = 2,
- };
-
- TUnifiedBlobId()
- : Id(TInvalid())
- {}
-
- // Initialize as DS blob Id
- TUnifiedBlobId(ui32 dsGroup, const TLogoBlobID& logoBlobId)
- : Id(TDsBlobId{logoBlobId, dsGroup})
- {}
-
- // Initialize as Small blob Id
- TUnifiedBlobId(ui64 tabletId, ui32 gen, ui32 step, ui32 cookie, ui32 size)
- : Id(TSmallBlobId{tabletId, gen, step, cookie, size})
- {}
-
- TUnifiedBlobId(const TUnifiedBlobId& other) = default;
- TUnifiedBlobId& operator = (const TUnifiedBlobId& logoBlobId) = default;
- TUnifiedBlobId(TUnifiedBlobId&& other) = default;
- TUnifiedBlobId& operator = (TUnifiedBlobId&& logoBlobId) = default;
-
- static TUnifiedBlobId ParseFromString(const TString& str,
- const IBlobGroupSelector* dsGroupSelector, TString& error);
-
- bool operator == (const TUnifiedBlobId& other) const {
- return Id == other.Id;
- }
-
- EBlobType GetType() const {
- return (EBlobType)Id.index();
- }
-
- bool IsValid() const {
- return Id.index() != INVALID;
- }
-
- size_t BlobSize() const {
- switch (Id.index()) {
- case DS_BLOB:
- return std::get<TDsBlobId>(Id).BlobId.BlobSize();
- case TABLET_SMALL_BLOB:
- return std::get<TSmallBlobId>(Id).Size;
- default:
- Y_FAIL("Invalid blob id");
- }
- }
-
- bool IsSmallBlob() const {
- return GetType() == TABLET_SMALL_BLOB;
- }
-
- bool IsDsBlob() const {
- return GetType() == DS_BLOB;
- }
-
- TLogoBlobID GetLogoBlobId() const {
- Y_VERIFY(IsDsBlob());
- return std::get<TDsBlobId>(Id).BlobId;
- }
-
- ui32 GetDsGroup() const {
- Y_VERIFY(IsDsBlob());
- return std::get<TDsBlobId>(Id).DsGroup;
- }
-
- ui64 GetTabletId() const {
- switch (Id.index()) {
- case DS_BLOB:
- return std::get<TDsBlobId>(Id).BlobId.TabletID();
- case TABLET_SMALL_BLOB:
- return std::get<TSmallBlobId>(Id).TabletId;
- default:
- Y_FAIL("No Tablet Id");
- }
- }
-
- ui64 Hash() const noexcept {
- switch (Id.index()) {
- case INVALID:
- return 0;
- case DS_BLOB:
- return std::get<TDsBlobId>(Id).Hash();
- case TABLET_SMALL_BLOB:
- return std::get<TSmallBlobId>(Id).Hash();
- default:
- Y_FAIL("Not implemented");
- }
- }
-
- // This is only implemented for DS for backward compatibility with persisted data.
-    // All new functionality should rather use the string blob id representation
- TString SerializeBinary() const {
- Y_VERIFY(IsDsBlob());
- return TString((const char*)GetLogoBlobId().GetRaw(), sizeof(TLogoBlobID));
- }
-
- TString ToStringLegacy() const {
- switch (Id.index()) {
- case DS_BLOB:
- return std::get<TDsBlobId>(Id).ToStringLegacy();
- case TABLET_SMALL_BLOB:
- return std::get<TSmallBlobId>(Id).ToStringLegacy();
- default:
- return "<Invalid blob id>";
- }
- }
-
- TString ToStringNew() const {
- switch (Id.index()) {
- case DS_BLOB:
- return std::get<TDsBlobId>(Id).ToStringNew();
- case TABLET_SMALL_BLOB:
- return std::get<TSmallBlobId>(Id).ToStringNew();
- default:
- return "<Invalid blob id>";
- }
- }
-};
-
-
-// Describes a range of bytes in a blob. It is used for read requests and for caching.
-struct TBlobRange {
- TUnifiedBlobId BlobId;
- ui32 Offset;
- ui32 Size;
-
- explicit TBlobRange(const TUnifiedBlobId& blobId = TUnifiedBlobId(), ui32 offset = 0, ui32 size = 0)
- : BlobId(blobId)
- , Offset(offset)
- , Size(size)
- {}
-
- bool operator == (const TBlobRange& other) const {
- return
- BlobId == other.BlobId &&
- Offset == other.Offset &&
- Size == other.Size;
- }
-
- ui64 Hash() const noexcept {
- ui64 hash = BlobId.Hash();
- hash = CombineHashes<ui64>(hash, IntHash(Offset));
- hash = CombineHashes<ui64>(hash, IntHash(Size));
- return hash;
- }
-
- TString ToString() const {
- return Sprintf("{ Blob: %s Offset: %" PRIu32 " Size: %" PRIu32 " }",
- BlobId.ToStringNew().c_str(), Offset, Size);
- }
-};
-
+
+class IBlobGroupSelector;
+
+// Encapsulates different types of blob ids to simplify dealing with blobs for the
+// components that do not need to know where the blob is stored
+// Blob id formats:
+// * Old DS blob id: just "logoBlobId" e.g. "[72075186224038245:51:31595:2:0:11952:0]"
+// * DS blob id: "DS:dsGroup:logoBlobId" e.g. "DS:2181038103:[72075186224038245:51:31595:2:0:11952:0]"
+// * Small blob id: "SM[tabletId:generation:step:cookie:size]" e.g. "SM[72075186224038245:51:31184:0:2528]"
+class TUnifiedBlobId {
+ struct TInvalid {
+ bool operator == (const TInvalid&) const { return true; }
+ };
+
+ // Id of a blob in YDB distributed storage
+ struct TDsBlobId {
+ TLogoBlobID BlobId;
+ ui32 DsGroup;
+
+ bool operator == (const TDsBlobId& other) const {
+ return BlobId == other.BlobId && DsGroup == other.DsGroup;
+ }
+
+ TString ToStringNew() const {
+ return Sprintf( "DS:%" PRIu32 ":%s", DsGroup, BlobId.ToString().c_str());
+ }
+
+ TString ToStringLegacy() const {
+ return BlobId.ToString();
+ }
+
+ ui64 Hash() const {
+ return CombineHashes<ui64>(BlobId.Hash(), IntHash(DsGroup));
+ }
+ };
+
+ // Id of a blob that is stored in Tablet local DB table
+ struct TSmallBlobId {
+ static constexpr ui8 FAKE_CHANNEL = 255; // Small blob id can be represented as
+ // a fake TLogoBlobID with channel = FAKE_CHANNEL
+
+ ui64 TabletId;
+ ui32 Gen;
+ ui32 Step;
+ ui32 Cookie;
+ ui32 Size;
+
+ bool operator == (const TSmallBlobId& other) const {
+ return TabletId == other.TabletId &&
+ Gen == other.Gen &&
+ Step == other.Step &&
+ Cookie == other.Cookie &&
+ Size == other.Size;
+ }
+
+ TString ToStringNew() const {
+ return Sprintf( "SM[%" PRIu64 ":%" PRIu32 ":%" PRIu32 ":%" PRIu32 ":%" PRIu32 "]",
+ TabletId, Gen, Step, Cookie, Size);
+ }
+
+ TString ToStringLegacy() const {
+            // For compatibility with the preproduction version, small blobs can also be
+            // addressed by a fake TLogoBlobID with channel = 255
+ return TLogoBlobID(TabletId, Gen, Step, FAKE_CHANNEL, Size, Cookie).ToString();
+ }
+
+ ui64 Hash() const {
+ ui64 hash = IntHash(TabletId);
+ hash = CombineHashes<ui64>(hash, IntHash(Gen));
+ hash = CombineHashes<ui64>(hash, IntHash(Step));
+ hash = CombineHashes<ui64>(hash, IntHash(Cookie));
+ hash = CombineHashes<ui64>(hash, IntHash(Size));
+ return hash;
+ }
+ };
+
+ std::variant<
+ TInvalid,
+ TDsBlobId,
+ TSmallBlobId
+ > Id;
+
+public:
+ enum EBlobType {
+ INVALID = 0,
+ DS_BLOB = 1,
+ TABLET_SMALL_BLOB = 2,
+ };
+
+ TUnifiedBlobId()
+ : Id(TInvalid())
+ {}
+
+ // Initialize as DS blob Id
+ TUnifiedBlobId(ui32 dsGroup, const TLogoBlobID& logoBlobId)
+ : Id(TDsBlobId{logoBlobId, dsGroup})
+ {}
+
+ // Initialize as Small blob Id
+ TUnifiedBlobId(ui64 tabletId, ui32 gen, ui32 step, ui32 cookie, ui32 size)
+ : Id(TSmallBlobId{tabletId, gen, step, cookie, size})
+ {}
+
+ TUnifiedBlobId(const TUnifiedBlobId& other) = default;
+ TUnifiedBlobId& operator = (const TUnifiedBlobId& logoBlobId) = default;
+ TUnifiedBlobId(TUnifiedBlobId&& other) = default;
+ TUnifiedBlobId& operator = (TUnifiedBlobId&& logoBlobId) = default;
+
+ static TUnifiedBlobId ParseFromString(const TString& str,
+ const IBlobGroupSelector* dsGroupSelector, TString& error);
+
+ bool operator == (const TUnifiedBlobId& other) const {
+ return Id == other.Id;
+ }
+
+ EBlobType GetType() const {
+ return (EBlobType)Id.index();
+ }
+
+ bool IsValid() const {
+ return Id.index() != INVALID;
+ }
+
+ size_t BlobSize() const {
+ switch (Id.index()) {
+ case DS_BLOB:
+ return std::get<TDsBlobId>(Id).BlobId.BlobSize();
+ case TABLET_SMALL_BLOB:
+ return std::get<TSmallBlobId>(Id).Size;
+ default:
+ Y_FAIL("Invalid blob id");
+ }
+ }
+
+ bool IsSmallBlob() const {
+ return GetType() == TABLET_SMALL_BLOB;
+ }
+
+ bool IsDsBlob() const {
+ return GetType() == DS_BLOB;
+ }
+
+ TLogoBlobID GetLogoBlobId() const {
+ Y_VERIFY(IsDsBlob());
+ return std::get<TDsBlobId>(Id).BlobId;
+ }
+
+ ui32 GetDsGroup() const {
+ Y_VERIFY(IsDsBlob());
+ return std::get<TDsBlobId>(Id).DsGroup;
+ }
+
+ ui64 GetTabletId() const {
+ switch (Id.index()) {
+ case DS_BLOB:
+ return std::get<TDsBlobId>(Id).BlobId.TabletID();
+ case TABLET_SMALL_BLOB:
+ return std::get<TSmallBlobId>(Id).TabletId;
+ default:
+ Y_FAIL("No Tablet Id");
+ }
+ }
+
+ ui64 Hash() const noexcept {
+ switch (Id.index()) {
+ case INVALID:
+ return 0;
+ case DS_BLOB:
+ return std::get<TDsBlobId>(Id).Hash();
+ case TABLET_SMALL_BLOB:
+ return std::get<TSmallBlobId>(Id).Hash();
+ default:
+ Y_FAIL("Not implemented");
+ }
+ }
+
+ // This is only implemented for DS for backward compatibility with persisted data.
+    // All new functionality should rather use the string blob id representation
+ TString SerializeBinary() const {
+ Y_VERIFY(IsDsBlob());
+ return TString((const char*)GetLogoBlobId().GetRaw(), sizeof(TLogoBlobID));
+ }
+
+ TString ToStringLegacy() const {
+ switch (Id.index()) {
+ case DS_BLOB:
+ return std::get<TDsBlobId>(Id).ToStringLegacy();
+ case TABLET_SMALL_BLOB:
+ return std::get<TSmallBlobId>(Id).ToStringLegacy();
+ default:
+ return "<Invalid blob id>";
+ }
+ }
+
+ TString ToStringNew() const {
+ switch (Id.index()) {
+ case DS_BLOB:
+ return std::get<TDsBlobId>(Id).ToStringNew();
+ case TABLET_SMALL_BLOB:
+ return std::get<TSmallBlobId>(Id).ToStringNew();
+ default:
+ return "<Invalid blob id>";
+ }
+ }
+};
+
+
+// Describes a range of bytes in a blob. It is used for read requests and for caching.
+struct TBlobRange {
+ TUnifiedBlobId BlobId;
+ ui32 Offset;
+ ui32 Size;
+
+ explicit TBlobRange(const TUnifiedBlobId& blobId = TUnifiedBlobId(), ui32 offset = 0, ui32 size = 0)
+ : BlobId(blobId)
+ , Offset(offset)
+ , Size(size)
+ {}
+
+ bool operator == (const TBlobRange& other) const {
+ return
+ BlobId == other.BlobId &&
+ Offset == other.Offset &&
+ Size == other.Size;
+ }
+
+ ui64 Hash() const noexcept {
+ ui64 hash = BlobId.Hash();
+ hash = CombineHashes<ui64>(hash, IntHash(Offset));
+ hash = CombineHashes<ui64>(hash, IntHash(Size));
+ return hash;
+ }
+
+ TString ToString() const {
+ return Sprintf("{ Blob: %s Offset: %" PRIu32 " Size: %" PRIu32 " }",
+ BlobId.ToStringNew().c_str(), Offset, Size);
+ }
+};
+
}
-
-inline
-IOutputStream& operator <<(IOutputStream& out, const NKikimr::NOlap::TUnifiedBlobId& blobId) {
- return out << blobId.ToStringNew();
-}
-
-inline
-IOutputStream& operator <<(IOutputStream& out, const NKikimr::NOlap::TBlobRange& blobRange) {
- return out << blobRange.ToString();
-}
-
-template<>
-struct ::THash<NKikimr::NOlap::TUnifiedBlobId> {
- inline ui64 operator()(const NKikimr::NOlap::TUnifiedBlobId& a) const {
- return a.Hash();
- }
-};
-
-template <>
-struct THash<NKikimr::NOlap::TBlobRange> {
- inline size_t operator() (const NKikimr::NOlap::TBlobRange& key) const {
- return key.Hash();
- }
-};
+
+inline
+IOutputStream& operator <<(IOutputStream& out, const NKikimr::NOlap::TUnifiedBlobId& blobId) {
+ return out << blobId.ToStringNew();
+}
+
+inline
+IOutputStream& operator <<(IOutputStream& out, const NKikimr::NOlap::TBlobRange& blobRange) {
+ return out << blobRange.ToString();
+}
+
+template<>
+struct ::THash<NKikimr::NOlap::TUnifiedBlobId> {
+ inline ui64 operator()(const NKikimr::NOlap::TUnifiedBlobId& a) const {
+ return a.Hash();
+ }
+};
+
+template <>
+struct THash<NKikimr::NOlap::TBlobRange> {
+ inline size_t operator() (const NKikimr::NOlap::TBlobRange& key) const {
+ return key.Hash();
+ }
+};
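A short sketch of keying a hash container by TBlobRange through the THash specialization and operators declared above; the ids and sizes are made up, and the pattern mirrors how the blob cache in the next file indexes ranges:

    using namespace NKikimr::NOlap;

    // Small-blob id built with the constructor shown above (illustrative values).
    TUnifiedBlobId blobId(/*tabletId*/ 72075186224038245ull, /*gen*/ 51, /*step*/ 31184, /*cookie*/ 0, /*size*/ 4096);
    TBlobRange range(blobId, /*offset*/ 0, /*size*/ 4096);

    THashMap<TBlobRange, TString> rangeData;  // hashes with THash<TBlobRange>
    rangeData[range] = TString(4096, 'x');
    Y_VERIFY(rangeData.contains(range));      // uses operator== and Hash()
    Cerr << range << Endl;                    // prints via the operator<< above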
diff --git a/ydb/core/tx/columnshard/blob_cache.cpp b/ydb/core/tx/columnshard/blob_cache.cpp
index 27c8b3cd89c..1d676cd21a1 100644
--- a/ydb/core/tx/columnshard/blob_cache.cpp
+++ b/ydb/core/tx/columnshard/blob_cache.cpp
@@ -1,600 +1,600 @@
-#include "blob_cache.h"
-#include "columnshard.h"
-
+#include "blob_cache.h"
+#include "columnshard.h"
+
#include <ydb/core/base/appdata.h>
#include <ydb/core/base/blobstorage.h>
#include <ydb/core/base/tablet_pipe.h>
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/cache/cache.h>
-
-#include <util/string/vector.h>
-
+
+#include <library/cpp/actors/core/actor.h>
+#include <library/cpp/actors/core/hfunc.h>
+#include <library/cpp/cache/cache.h>
+
+#include <util/string/vector.h>
+
namespace NKikimr::NBlobCache {
-
-using namespace NActors;
-
-class TBlobCache: public TActorBootstrapped<TBlobCache> {
-private:
- struct TReadInfo {
- bool Cache; // Put in cache after read?
- TList<TActorId> Waiting; // List of readers
-
- TReadInfo()
- : Cache(true)
- {}
- };
-
- static constexpr i64 MAX_IN_FLIGHT_BYTES = 250ll << 20;
- static constexpr i64 MAX_REQUEST_BYTES = 8ll << 20;
- static constexpr TDuration DEFAULT_READ_DEADLINE = TDuration::Seconds(5);
-
- TLRUCache<TBlobRange, TString> Cache;
- THashMap<TUnifiedBlobId, THashSet<TBlobRange>> CachedRanges; // List of cached ranges by blob id
- // It is used to remove all blob ranges from cache when
- // it gets a notification that a blob has been deleted
- TControlWrapper MaxCacheDataSize;
- TControlWrapper MaxInFlightDataSize;
- i64 CacheDataSize; // Current size of all blobs in cache
- ui64 ReadCookie;
- THashMap<ui64, std::vector<TBlobRange>> CookieToRange; // All in-flight requests
- THashMap<TBlobRange, TReadInfo> OutstandingReads; // All in-flight and enqueued reads
- TDeque<TBlobRange> ReadQueue; // Reads that are waiting to be sent
- // TODO: Consider making per-group queues
- i64 InFlightDataSize; // Current size of all in-flight blobs
-
- THashMap<ui64, TActorId> ShardPipes; // TabletId -> PipeClient for small blob read requests
- THashMap<ui64, THashSet<ui64>> InFlightSmallBlobRequests; // TabletId -> list to read cookies
-
- using TCounterPtr = NMonitoring::TDynamicCounters::TCounterPtr;
- const TCounterPtr SizeBytes;
- const TCounterPtr SizeBlobs;
- const TCounterPtr Hits;
- const TCounterPtr Misses;
- const TCounterPtr Evictions;
- const TCounterPtr Adds;
- const TCounterPtr Forgets;
- const TCounterPtr HitsBytes;
- const TCounterPtr EvictedBytes;
- const TCounterPtr ReadBytes;
- const TCounterPtr AddBytes;
- const TCounterPtr ForgetBytes;
- const TCounterPtr SizeBytesInFlight;
- const TCounterPtr SizeBlobsInFlight;
- const TCounterPtr ReadRequests;
- const TCounterPtr ReadsInQueue;
-
-public:
+
+using namespace NActors;
+
+class TBlobCache: public TActorBootstrapped<TBlobCache> {
+private:
+ struct TReadInfo {
+ bool Cache; // Put in cache after read?
+ TList<TActorId> Waiting; // List of readers
+
+ TReadInfo()
+ : Cache(true)
+ {}
+ };
+
+ static constexpr i64 MAX_IN_FLIGHT_BYTES = 250ll << 20;
+ static constexpr i64 MAX_REQUEST_BYTES = 8ll << 20;
+ static constexpr TDuration DEFAULT_READ_DEADLINE = TDuration::Seconds(5);
+
+ TLRUCache<TBlobRange, TString> Cache;
+ THashMap<TUnifiedBlobId, THashSet<TBlobRange>> CachedRanges; // List of cached ranges by blob id
+ // It is used to remove all blob ranges from cache when
+ // it gets a notification that a blob has been deleted
+ TControlWrapper MaxCacheDataSize;
+ TControlWrapper MaxInFlightDataSize;
+ i64 CacheDataSize; // Current size of all blobs in cache
+ ui64 ReadCookie;
+ THashMap<ui64, std::vector<TBlobRange>> CookieToRange; // All in-flight requests
+ THashMap<TBlobRange, TReadInfo> OutstandingReads; // All in-flight and enqueued reads
+ TDeque<TBlobRange> ReadQueue; // Reads that are waiting to be sent
+ // TODO: Consider making per-group queues
+ i64 InFlightDataSize; // Current size of all in-flight blobs
+
+ THashMap<ui64, TActorId> ShardPipes; // TabletId -> PipeClient for small blob read requests
+ THashMap<ui64, THashSet<ui64>> InFlightSmallBlobRequests; // TabletId -> list to read cookies
+
+ using TCounterPtr = NMonitoring::TDynamicCounters::TCounterPtr;
+ const TCounterPtr SizeBytes;
+ const TCounterPtr SizeBlobs;
+ const TCounterPtr Hits;
+ const TCounterPtr Misses;
+ const TCounterPtr Evictions;
+ const TCounterPtr Adds;
+ const TCounterPtr Forgets;
+ const TCounterPtr HitsBytes;
+ const TCounterPtr EvictedBytes;
+ const TCounterPtr ReadBytes;
+ const TCounterPtr AddBytes;
+ const TCounterPtr ForgetBytes;
+ const TCounterPtr SizeBytesInFlight;
+ const TCounterPtr SizeBlobsInFlight;
+ const TCounterPtr ReadRequests;
+ const TCounterPtr ReadsInQueue;
+
+public:
static constexpr auto ActorActivityType() {
- return NKikimrServices::TActivity::BLOB_CACHE_ACTOR;
- }
-
-public:
- explicit TBlobCache(ui64 maxSize, TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
- : TActorBootstrapped<TBlobCache>()
- , Cache(SIZE_MAX)
- , MaxCacheDataSize(maxSize, 0, 1ull << 40)
- , MaxInFlightDataSize(Min<i64>(MaxCacheDataSize, MAX_IN_FLIGHT_BYTES), 0, 10ull << 30)
- , CacheDataSize(0)
- , ReadCookie(1)
- , InFlightDataSize(0)
- , SizeBytes(counters->GetCounter("SizeBytes"))
- , SizeBlobs(counters->GetCounter("SizeBlobs"))
- , Hits(counters->GetCounter("Hits", true))
- , Misses(counters->GetCounter("Misses", true))
- , Evictions(counters->GetCounter("Evictions", true))
- , Adds(counters->GetCounter("Adds", true))
- , Forgets(counters->GetCounter("Forgets", true))
- , HitsBytes(counters->GetCounter("HitsBytes", true))
- , EvictedBytes(counters->GetCounter("EvictedBytes", true))
- , ReadBytes(counters->GetCounter("ReadBytes", true))
- , AddBytes(counters->GetCounter("AddBytes", true))
- , ForgetBytes(counters->GetCounter("ForgetBytes", true))
- , SizeBytesInFlight(counters->GetCounter("SizeBytesInFlight"))
- , SizeBlobsInFlight(counters->GetCounter("SizeBlobsInFlight"))
- , ReadRequests(counters->GetCounter("ReadRequests", true))
- , ReadsInQueue(counters->GetCounter("ReadsInQueue"))
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- AppData(ctx)->Icb->RegisterSharedControl(MaxCacheDataSize, "BlobCache.MaxCacheDataSize");
- AppData(ctx)->Icb->RegisterSharedControl(MaxInFlightDataSize, "BlobCache.MaxInFlightDataSize");
- Become(&TBlobCache::StateFunc);
- ScheduleWakeup();
- }
-
-private:
- STFUNC(StateFunc) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvents::TEvPoisonPill, Handle);
- HFunc(TEvents::TEvWakeup, Handle);
- HFunc(TEvBlobCache::TEvReadBlobRange, Handle);
- HFunc(TEvBlobCache::TEvReadBlobRangeBatch, Handle);
- HFunc(TEvBlobCache::TEvCacheBlobRange, Handle);
- HFunc(TEvBlobCache::TEvForgetBlob, Handle);
- HFunc(TEvBlobStorage::TEvGetResult, Handle);
- HFunc(TEvTabletPipe::TEvClientConnected, Handle);
- HFunc(TEvTabletPipe::TEvClientDestroyed, Handle);
- HFunc(TEvColumnShard::TEvReadBlobRangesResult, Handle);
- default:
+ return NKikimrServices::TActivity::BLOB_CACHE_ACTOR;
+ }
+
+public:
+ explicit TBlobCache(ui64 maxSize, TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
+ : TActorBootstrapped<TBlobCache>()
+ , Cache(SIZE_MAX)
+ , MaxCacheDataSize(maxSize, 0, 1ull << 40)
+ , MaxInFlightDataSize(Min<i64>(MaxCacheDataSize, MAX_IN_FLIGHT_BYTES), 0, 10ull << 30)
+ , CacheDataSize(0)
+ , ReadCookie(1)
+ , InFlightDataSize(0)
+ , SizeBytes(counters->GetCounter("SizeBytes"))
+ , SizeBlobs(counters->GetCounter("SizeBlobs"))
+ , Hits(counters->GetCounter("Hits", true))
+ , Misses(counters->GetCounter("Misses", true))
+ , Evictions(counters->GetCounter("Evictions", true))
+ , Adds(counters->GetCounter("Adds", true))
+ , Forgets(counters->GetCounter("Forgets", true))
+ , HitsBytes(counters->GetCounter("HitsBytes", true))
+ , EvictedBytes(counters->GetCounter("EvictedBytes", true))
+ , ReadBytes(counters->GetCounter("ReadBytes", true))
+ , AddBytes(counters->GetCounter("AddBytes", true))
+ , ForgetBytes(counters->GetCounter("ForgetBytes", true))
+ , SizeBytesInFlight(counters->GetCounter("SizeBytesInFlight"))
+ , SizeBlobsInFlight(counters->GetCounter("SizeBlobsInFlight"))
+ , ReadRequests(counters->GetCounter("ReadRequests", true))
+ , ReadsInQueue(counters->GetCounter("ReadsInQueue"))
+ {}
+
+ void Bootstrap(const TActorContext& ctx) {
+ AppData(ctx)->Icb->RegisterSharedControl(MaxCacheDataSize, "BlobCache.MaxCacheDataSize");
+ AppData(ctx)->Icb->RegisterSharedControl(MaxInFlightDataSize, "BlobCache.MaxInFlightDataSize");
+ Become(&TBlobCache::StateFunc);
+ ScheduleWakeup();
+ }
+
+private:
+ STFUNC(StateFunc) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvents::TEvPoisonPill, Handle);
+ HFunc(TEvents::TEvWakeup, Handle);
+ HFunc(TEvBlobCache::TEvReadBlobRange, Handle);
+ HFunc(TEvBlobCache::TEvReadBlobRangeBatch, Handle);
+ HFunc(TEvBlobCache::TEvCacheBlobRange, Handle);
+ HFunc(TEvBlobCache::TEvForgetBlob, Handle);
+ HFunc(TEvBlobStorage::TEvGetResult, Handle);
+ HFunc(TEvTabletPipe::TEvClientConnected, Handle);
+ HFunc(TEvTabletPipe::TEvClientDestroyed, Handle);
+ HFunc(TEvColumnShard::TEvReadBlobRangesResult, Handle);
+ default:
LOG_S_WARN("Unhandled event type: " << ev->GetTypeRewrite()
- << " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
- ctx.Send(ev->ForwardOnNondelivery(TEvents::TEvUndelivered::ReasonActorUnknown));
- break;
- };
- }
-
- void ScheduleWakeup() {
- Schedule(TDuration::MilliSeconds(100), new TEvents::TEvWakeup());
- }
-
- void Handle(TEvents::TEvWakeup::TPtr& ev, const TActorContext& ctx) {
- Y_UNUSED(ev);
- Evict(ctx); // Max cache size might have changed
- ScheduleWakeup();
- }
-
- void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx) {
- Y_UNUSED(ev);
- Die(ctx);
- }
-
- void Handle(TEvBlobCache::TEvReadBlobRange::TPtr& ev, const TActorContext& ctx) {
- const TBlobRange& blobRange = ev->Get()->BlobRange;
- const bool promote = ev->Get()->Cache;
-
- LOG_S_DEBUG("Read request: " << blobRange);
-
- HandleSingleRangeRead(blobRange, promote, ev->Sender, ctx);
-
- MakeReadRequests(ctx);
- }
-
- void HandleSingleRangeRead(const TBlobRange& blobRange,
- const bool promoteInCache, const TActorId& sender, const TActorContext& ctx)
- {
- // Is in cache?
- auto it = promoteInCache ? Cache.Find(blobRange) : Cache.FindWithoutPromote(blobRange);
- if (it != Cache.End()) {
- Hits->Inc();
- HitsBytes->Add(blobRange.Size);
- return SendResult(sender, blobRange, NKikimrProto::OK, it.Value(), ctx);
- }
-
- Misses->Inc();
-
-        // Is outstanding?
- auto readIt = OutstandingReads.find(blobRange);
- if (readIt != OutstandingReads.end()) {
- readIt->second.Waiting.push_back(sender);
- readIt->second.Cache |= promoteInCache;
- return;
- }
-
- EnqueueRead(blobRange, promoteInCache, sender);
- }
-
- void Handle(TEvBlobCache::TEvReadBlobRangeBatch::TPtr& ev, const TActorContext& ctx) {
- const auto& ranges = ev->Get()->BlobRanges;
- LOG_S_DEBUG("Batch read request: " << JoinStrings(ranges.begin(), ranges.end(), " "));
-
- for (const auto& blobRange : ranges) {
- HandleSingleRangeRead(blobRange, ev->Get()->Cache, ev->Sender, ctx);
- }
-
- MakeReadRequests(ctx);
- }
-
- void Handle(TEvBlobCache::TEvCacheBlobRange::TPtr& ev, const TActorContext& ctx) {
- const auto& blobRange = ev->Get()->BlobRange;
- const auto& data = ev->Get()->Data;
-
- if (blobRange.Size != data.size()) {
- LOG_S_ERROR("Trying to add invalid data for range: " << blobRange << " size: " << data.size());
- return;
- }
-
- Adds->Inc();
-
- if (OutstandingReads.count(blobRange)) {
- // Don't bother if there is already a read request for this range
- return;
- }
-
- LOG_S_DEBUG("Adding range: " << blobRange);
-
- AddBytes->Add(blobRange.Size);
-
- InsertIntoCache(blobRange, data);
-
- Evict(ctx);
- }
-
- void Handle(TEvBlobCache::TEvForgetBlob::TPtr& ev, const TActorContext&) {
- const TUnifiedBlobId& blobId = ev->Get()->BlobId;
-
- LOG_S_DEBUG("Forgetting blob: " << blobId);
-
- Forgets->Inc();
-
- auto blobIdIt = CachedRanges.find(blobId);
- if (blobIdIt == CachedRanges.end()) {
- return;
- }
-
- // Remove all ranges of this blob that are present in cache
- for (const auto& blobRange: blobIdIt->second) {
- auto rangeIt = Cache.FindWithoutPromote(blobRange);
- if (rangeIt == Cache.End()) {
- continue;
- }
-
- Cache.Erase(rangeIt);
- CacheDataSize -= blobRange.Size;
- SizeBytes->Sub(blobRange.Size);
- SizeBlobs->Dec();
- ForgetBytes->Add(blobRange.Size);
- }
-
- CachedRanges.erase(blobIdIt);
- }
-
- void EnqueueRead(const TBlobRange& blobRange, const bool promoteInCache, const TActorId& sender) {
- TReadInfo& blobInfo = OutstandingReads[blobRange];
- blobInfo.Waiting.push_back(sender);
- blobInfo.Cache = promoteInCache;
-
- LOG_S_DEBUG("Enqueue read range: " << blobRange);
-
- ReadQueue.push_back(blobRange);
- ReadsInQueue->Set(ReadQueue.size());
- }
-
- void SendBatchReadRequest(const std::vector<TBlobRange>& blobRanges, const ui64 cookie, const TActorContext& ctx) {
- if (blobRanges.front().BlobId.IsSmallBlob()) {
- SendBatchReadRequestToTablet(blobRanges, cookie, ctx);
- } else {
- SendBatchReadRequestToDS(blobRanges, cookie, ctx);
- }
- }
-
- void SendBatchReadRequestToDS(const std::vector<TBlobRange>& blobRanges,
- const ui64 cookie, const TActorContext& ctx)
- {
- const ui32 dsGroup = blobRanges.front().BlobId.GetDsGroup();
-
- LOG_S_DEBUG("Sending read from DS: group: " << dsGroup
- << " ranges: " << JoinStrings(blobRanges.begin(), blobRanges.end(), " ")
- << " cookie: " << cookie);
-
-        TArrayHolder<TEvBlobStorage::TEvGet::TQuery> queries(new TEvBlobStorage::TEvGet::TQuery[blobRanges.size()]);
- for (size_t i = 0; i < blobRanges.size(); ++i) {
- Y_VERIFY(dsGroup == blobRanges[i].BlobId.GetDsGroup());
-            queries[i].Set(blobRanges[i].BlobId.GetLogoBlobId(), blobRanges[i].Offset, blobRanges[i].Size);
- }
-
- const TInstant deadline = AppData(ctx)->TimeProvider->Now() + DEFAULT_READ_DEADLINE;
- const NKikimrBlobStorage::EGetHandleClass handleClass = NKikimrBlobStorage::FastRead;
- SendToBSProxy(ctx,
- dsGroup,
-            new TEvBlobStorage::TEvGet(queries, blobRanges.size(), deadline, handleClass, false),
- cookie);
-
- ReadRequests->Inc();
- }
-
- // Checks if 2 blobs can be read in a single request (e.g. DS blobs from the same
- // tablet residing on the same DS group, or 2 small blobs from the same tablet)
- inline bool CanBatchReads(const TUnifiedBlobId& a, const TUnifiedBlobId& b) {
- if (a.GetType() != b.GetType()) {
- return false;
- }
-
- if (!a.IsValid()) {
- return false;
- }
-
- // Same tablet and same DS group?
- if (a.IsDsBlob()) {
- return a.GetTabletId() == b.GetTabletId() &&
- a.GetDsGroup() == b.GetDsGroup();
- }
-
- // Small blobs from the same tablet?
- if (a.IsSmallBlob()) {
- return a.GetTabletId() == b.GetTabletId();
- }
-
- return false;
- }
-
- void MakeReadRequests(const TActorContext& ctx) {
- std::vector<TBlobRange> blobRanges;
- ui64 cookie = ++ReadCookie;
- ui64 requestSize = 0;
-
- // NOTE: if queue is not empty, at least 1 in-flight request is allowed
- while (!ReadQueue.empty() && (InFlightDataSize == 0 || InFlightDataSize < MaxInFlightDataSize)) {
- const TBlobRange& blobRange = ReadQueue.front();
-
- // Only group ranges from the same Tablet and same DS group
- if (!blobRanges.empty() && (
- !CanBatchReads(blobRanges.back().BlobId, blobRange.BlobId) ||
- requestSize > MAX_REQUEST_BYTES
- ))
- {
- SendBatchReadRequest(blobRanges, cookie, ctx);
- blobRanges.clear();
- cookie = ++ReadCookie;
- requestSize = 0;
- }
- blobRanges.push_back(blobRange);
- requestSize += blobRange.Size;
- CookieToRange[cookie].push_back(blobRange);
-
- SizeBytesInFlight->Add(blobRange.Size);
- SizeBlobsInFlight->Inc();
- InFlightDataSize += blobRange.Size;
-
-            // We might need to free some space to accommodate the results of new reads
- Evict(ctx);
-
- ReadQueue.pop_front();
- }
- if (!blobRanges.empty()) {
- SendBatchReadRequest(blobRanges, cookie, ctx);
- }
- ReadsInQueue->Set(ReadQueue.size());
- }
-
- void SendResult(const TActorId& to, const TBlobRange& blobRange, NKikimrProto::EReplyStatus status,
- const TString& data, const TActorContext& ctx) {
+ << " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
+ ctx.Send(ev->ForwardOnNondelivery(TEvents::TEvUndelivered::ReasonActorUnknown));
+ break;
+ };
+ }
+
+ void ScheduleWakeup() {
+ Schedule(TDuration::MilliSeconds(100), new TEvents::TEvWakeup());
+ }
+
+ void Handle(TEvents::TEvWakeup::TPtr& ev, const TActorContext& ctx) {
+ Y_UNUSED(ev);
+ Evict(ctx); // Max cache size might have changed
+ ScheduleWakeup();
+ }
+
+ void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx) {
+ Y_UNUSED(ev);
+ Die(ctx);
+ }
+
+ void Handle(TEvBlobCache::TEvReadBlobRange::TPtr& ev, const TActorContext& ctx) {
+ const TBlobRange& blobRange = ev->Get()->BlobRange;
+ const bool promote = ev->Get()->Cache;
+
+ LOG_S_DEBUG("Read request: " << blobRange);
+
+ HandleSingleRangeRead(blobRange, promote, ev->Sender, ctx);
+
+ MakeReadRequests(ctx);
+ }
+
+ void HandleSingleRangeRead(const TBlobRange& blobRange,
+ const bool promoteInCache, const TActorId& sender, const TActorContext& ctx)
+ {
+ // Is in cache?
+ auto it = promoteInCache ? Cache.Find(blobRange) : Cache.FindWithoutPromote(blobRange);
+ if (it != Cache.End()) {
+ Hits->Inc();
+ HitsBytes->Add(blobRange.Size);
+ return SendResult(sender, blobRange, NKikimrProto::OK, it.Value(), ctx);
+ }
+
+ Misses->Inc();
+
+        // Is outstanding?
+ auto readIt = OutstandingReads.find(blobRange);
+ if (readIt != OutstandingReads.end()) {
+ readIt->second.Waiting.push_back(sender);
+ readIt->second.Cache |= promoteInCache;
+ return;
+ }
+
+ EnqueueRead(blobRange, promoteInCache, sender);
+ }
+
+ void Handle(TEvBlobCache::TEvReadBlobRangeBatch::TPtr& ev, const TActorContext& ctx) {
+ const auto& ranges = ev->Get()->BlobRanges;
+ LOG_S_DEBUG("Batch read request: " << JoinStrings(ranges.begin(), ranges.end(), " "));
+
+ for (const auto& blobRange : ranges) {
+ HandleSingleRangeRead(blobRange, ev->Get()->Cache, ev->Sender, ctx);
+ }
+
+ MakeReadRequests(ctx);
+ }
+
+ void Handle(TEvBlobCache::TEvCacheBlobRange::TPtr& ev, const TActorContext& ctx) {
+ const auto& blobRange = ev->Get()->BlobRange;
+ const auto& data = ev->Get()->Data;
+
+ if (blobRange.Size != data.size()) {
+ LOG_S_ERROR("Trying to add invalid data for range: " << blobRange << " size: " << data.size());
+ return;
+ }
+
+ Adds->Inc();
+
+ if (OutstandingReads.count(blobRange)) {
+ // Don't bother if there is already a read request for this range
+ return;
+ }
+
+ LOG_S_DEBUG("Adding range: " << blobRange);
+
+ AddBytes->Add(blobRange.Size);
+
+ InsertIntoCache(blobRange, data);
+
+ Evict(ctx);
+ }
+
+ void Handle(TEvBlobCache::TEvForgetBlob::TPtr& ev, const TActorContext&) {
+ const TUnifiedBlobId& blobId = ev->Get()->BlobId;
+
+ LOG_S_DEBUG("Forgetting blob: " << blobId);
+
+ Forgets->Inc();
+
+ auto blobIdIt = CachedRanges.find(blobId);
+ if (blobIdIt == CachedRanges.end()) {
+ return;
+ }
+
+ // Remove all ranges of this blob that are present in cache
+ for (const auto& blobRange: blobIdIt->second) {
+ auto rangeIt = Cache.FindWithoutPromote(blobRange);
+ if (rangeIt == Cache.End()) {
+ continue;
+ }
+
+ Cache.Erase(rangeIt);
+ CacheDataSize -= blobRange.Size;
+ SizeBytes->Sub(blobRange.Size);
+ SizeBlobs->Dec();
+ ForgetBytes->Add(blobRange.Size);
+ }
+
+ CachedRanges.erase(blobIdIt);
+ }
+
+ void EnqueueRead(const TBlobRange& blobRange, const bool promoteInCache, const TActorId& sender) {
+ TReadInfo& blobInfo = OutstandingReads[blobRange];
+ blobInfo.Waiting.push_back(sender);
+ blobInfo.Cache = promoteInCache;
+
+ LOG_S_DEBUG("Enqueue read range: " << blobRange);
+
+ ReadQueue.push_back(blobRange);
+ ReadsInQueue->Set(ReadQueue.size());
+ }
+
+ void SendBatchReadRequest(const std::vector<TBlobRange>& blobRanges, const ui64 cookie, const TActorContext& ctx) {
+ if (blobRanges.front().BlobId.IsSmallBlob()) {
+ SendBatchReadRequestToTablet(blobRanges, cookie, ctx);
+ } else {
+ SendBatchReadRequestToDS(blobRanges, cookie, ctx);
+ }
+ }
+
+ void SendBatchReadRequestToDS(const std::vector<TBlobRange>& blobRanges,
+ const ui64 cookie, const TActorContext& ctx)
+ {
+ const ui32 dsGroup = blobRanges.front().BlobId.GetDsGroup();
+
+ LOG_S_DEBUG("Sending read from DS: group: " << dsGroup
+ << " ranges: " << JoinStrings(blobRanges.begin(), blobRanges.end(), " ")
+ << " cookie: " << cookie);
+
+        TArrayHolder<TEvBlobStorage::TEvGet::TQuery> queries(new TEvBlobStorage::TEvGet::TQuery[blobRanges.size()]);
+ for (size_t i = 0; i < blobRanges.size(); ++i) {
+ Y_VERIFY(dsGroup == blobRanges[i].BlobId.GetDsGroup());
+            queries[i].Set(blobRanges[i].BlobId.GetLogoBlobId(), blobRanges[i].Offset, blobRanges[i].Size);
+ }
+
+ const TInstant deadline = AppData(ctx)->TimeProvider->Now() + DEFAULT_READ_DEADLINE;
+ const NKikimrBlobStorage::EGetHandleClass handleClass = NKikimrBlobStorage::FastRead;
+ SendToBSProxy(ctx,
+ dsGroup,
+            new TEvBlobStorage::TEvGet(queries, blobRanges.size(), deadline, handleClass, false),
+ cookie);
+
+ ReadRequests->Inc();
+ }
+
+ // Checks if 2 blobs can be read in a single request (e.g. DS blobs from the same
+ // tablet residing on the same DS group, or 2 small blobs from the same tablet)
+ inline bool CanBatchReads(const TUnifiedBlobId& a, const TUnifiedBlobId& b) {
+ if (a.GetType() != b.GetType()) {
+ return false;
+ }
+
+ if (!a.IsValid()) {
+ return false;
+ }
+
+ // Same tablet and same DS group?
+ if (a.IsDsBlob()) {
+ return a.GetTabletId() == b.GetTabletId() &&
+ a.GetDsGroup() == b.GetDsGroup();
+ }
+
+ // Small blobs from the same tablet?
+ if (a.IsSmallBlob()) {
+ return a.GetTabletId() == b.GetTabletId();
+ }
+
+ return false;
+ }
+
+ void MakeReadRequests(const TActorContext& ctx) {
+ std::vector<TBlobRange> blobRanges;
+ ui64 cookie = ++ReadCookie;
+ ui64 requestSize = 0;
+
+ // NOTE: if queue is not empty, at least 1 in-flight request is allowed
+ while (!ReadQueue.empty() && (InFlightDataSize == 0 || InFlightDataSize < MaxInFlightDataSize)) {
+ const TBlobRange& blobRange = ReadQueue.front();
+
+ // Only group ranges from the same Tablet and same DS group
+ if (!blobRanges.empty() && (
+ !CanBatchReads(blobRanges.back().BlobId, blobRange.BlobId) ||
+ requestSize > MAX_REQUEST_BYTES
+ ))
+ {
+ SendBatchReadRequest(blobRanges, cookie, ctx);
+ blobRanges.clear();
+ cookie = ++ReadCookie;
+ requestSize = 0;
+ }
+ blobRanges.push_back(blobRange);
+ requestSize += blobRange.Size;
+ CookieToRange[cookie].push_back(blobRange);
+
+ SizeBytesInFlight->Add(blobRange.Size);
+ SizeBlobsInFlight->Inc();
+ InFlightDataSize += blobRange.Size;
+
+            // We might need to free some space to accommodate the results of new reads
+ Evict(ctx);
+
+ ReadQueue.pop_front();
+ }
+ if (!blobRanges.empty()) {
+ SendBatchReadRequest(blobRanges, cookie, ctx);
+ }
+ ReadsInQueue->Set(ReadQueue.size());
+ }
+
+ void SendResult(const TActorId& to, const TBlobRange& blobRange, NKikimrProto::EReplyStatus status,
+ const TString& data, const TActorContext& ctx) {
LOG_S_DEBUG("Send result: " << blobRange << " to: " << to << " status: " << status);
-
- ctx.Send(to, new TEvBlobCache::TEvReadBlobRangeResult(blobRange, status, data));
- }
-
- void Handle(TEvBlobStorage::TEvGetResult::TPtr& ev, const TActorContext& ctx) {
- const ui64 readCookie = ev->Cookie;
-
- if (ev->Get()->ResponseSz < 1) {
- Y_FAIL("Unexpected reply from blobstorage");
- }
-
- if (ev->Get()->Status != NKikimrProto::EReplyStatus::OK) {
- LOG_S_WARN("Read failed: " << ev->Get()->ToString());
- }
-
- auto cookieIt = CookieToRange.find(readCookie);
- if (cookieIt == CookieToRange.end()) {
- // This shouldn't happen
- LOG_S_CRIT("Unknown read result cookie: " << readCookie);
- return;
- }
-
- std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
- CookieToRange.erase(readCookie);
-
- Y_VERIFY(blobRanges.size() == ev->Get()->ResponseSz, "Mismatched number of results for read request!");
-
- for (size_t i = 0; i < ev->Get()->ResponseSz; ++i) {
- const auto& res = ev->Get()->Responses[i];
- ProcessSingleRangeResult(blobRanges[i], readCookie, res.Status, res.Buffer, ctx);
- }
-
- MakeReadRequests(ctx);
- }
-
- void ProcessSingleRangeResult(const TBlobRange& blobRange, const ui64 readCookie,
- ui32 status, const TString& data, const TActorContext& ctx)
- {
- auto readIt = OutstandingReads.find(blobRange);
- if (readIt == OutstandingReads.end()) {
- // This shouldn't happen
+
+ ctx.Send(to, new TEvBlobCache::TEvReadBlobRangeResult(blobRange, status, data));
+ }
+
+ void Handle(TEvBlobStorage::TEvGetResult::TPtr& ev, const TActorContext& ctx) {
+ const ui64 readCookie = ev->Cookie;
+
+ if (ev->Get()->ResponseSz < 1) {
+ Y_FAIL("Unexpected reply from blobstorage");
+ }
+
+ if (ev->Get()->Status != NKikimrProto::EReplyStatus::OK) {
+ LOG_S_WARN("Read failed: " << ev->Get()->ToString());
+ }
+
+ auto cookieIt = CookieToRange.find(readCookie);
+ if (cookieIt == CookieToRange.end()) {
+ // This shouldn't happen
+ LOG_S_CRIT("Unknown read result cookie: " << readCookie);
+ return;
+ }
+
+ std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
+ CookieToRange.erase(readCookie);
+
+ Y_VERIFY(blobRanges.size() == ev->Get()->ResponseSz, "Mismatched number of results for read request!");
+
+ for (size_t i = 0; i < ev->Get()->ResponseSz; ++i) {
+ const auto& res = ev->Get()->Responses[i];
+ ProcessSingleRangeResult(blobRanges[i], readCookie, res.Status, res.Buffer, ctx);
+ }
+
+ MakeReadRequests(ctx);
+ }
+
+ void ProcessSingleRangeResult(const TBlobRange& blobRange, const ui64 readCookie,
+ ui32 status, const TString& data, const TActorContext& ctx)
+ {
+ auto readIt = OutstandingReads.find(blobRange);
+ if (readIt == OutstandingReads.end()) {
+ // This shouldn't happen
LOG_S_CRIT("Unknown read result key: " << blobRange << " cookie: " << readCookie);
- return;
- }
-
- SizeBytesInFlight->Sub(blobRange.Size);
- SizeBlobsInFlight->Dec();
- InFlightDataSize -= blobRange.Size;
-
- Y_VERIFY(Cache.Find(blobRange) == Cache.End(),
- "Range %s must not be already in cache", blobRange.ToString().c_str());
-
- if (status == NKikimrProto::EReplyStatus::OK) {
- Y_VERIFY(blobRange.Size == data.size(),
- "Read %s, size %" PRISZT, blobRange.ToString().c_str(), data.size());
- ReadBytes->Add(blobRange.Size);
-
- if (readIt->second.Cache) {
- InsertIntoCache(blobRange, data);
- }
- } else {
- LOG_S_WARN("Read failed for range: " << blobRange
- << " status: " << NKikimrProto::EReplyStatus_Name(status));
- }
-
- // Send results to all waiters
- for (const auto& to : readIt->second.Waiting) {
- SendResult(to, blobRange, (NKikimrProto::EReplyStatus)status, data, ctx);
- }
-
- OutstandingReads.erase(readIt);
- }
-
- void SendBatchReadRequestToTablet(const std::vector<TBlobRange>& blobRanges,
- const ui64 cookie, const TActorContext& ctx)
- {
- Y_VERIFY(!blobRanges.empty());
- ui64 tabletId = blobRanges.front().BlobId.GetTabletId();
-
- LOG_S_DEBUG("Sending read from Tablet: " << tabletId
- << " ranges: " << JoinStrings(blobRanges.begin(), blobRanges.end(), " ")
- << " cookie: " << cookie);
-
- if (!ShardPipes.contains(tabletId)) {
- NTabletPipe::TClientConfig clientConfig;
- clientConfig.AllowFollower = false;
- clientConfig.CheckAliveness = true;
- clientConfig.RetryPolicy = {
- .RetryLimitCount = 10,
- .MinRetryTime = TDuration::MilliSeconds(5),
- };
- ShardPipes[tabletId] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, tabletId, clientConfig));
- }
-
- auto ev = std::make_unique<TEvColumnShard::TEvReadBlobRanges>();
- for (const auto& r : blobRanges) {
- auto* range = ev->Record.AddBlobRanges();
- range->SetBlobId(r.BlobId.ToStringNew());
- range->SetOffset(r.Offset);
- range->SetSize(r.Size);
- }
-
- InFlightSmallBlobRequests[tabletId].insert(cookie);
- NTabletPipe::SendData(ctx, ShardPipes[tabletId], ev.release(), cookie);
- }
-
-    // Forgets the pipe to the tablet and fails all in-flight requests to it
- void DestroyPipe(ui64 tabletId, const TActorContext& ctx) {
- ShardPipes.erase(tabletId);
- // Send errors for in-flight requests
- auto cookies = std::move(InFlightSmallBlobRequests[tabletId]);
- InFlightSmallBlobRequests.erase(tabletId);
- for (ui64 readCookie : cookies) {
- auto cookieIt = CookieToRange.find(readCookie);
- if (cookieIt == CookieToRange.end()) {
-                // This might only happen in case of a race between response and pipe close
- LOG_S_INFO("Unknown read result cookie: " << readCookie);
- return;
- }
-
- std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
- CookieToRange.erase(readCookie);
-
- for (size_t i = 0; i < blobRanges.size(); ++i) {
- Y_VERIFY(blobRanges[i].BlobId.IsSmallBlob());
- Y_VERIFY(blobRanges[i].BlobId.GetTabletId() == tabletId);
- ProcessSingleRangeResult(blobRanges[i], readCookie, NKikimrProto::EReplyStatus::NOTREADY, {}, ctx);
- }
- }
-
- MakeReadRequests(ctx);
- }
-
- void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) {
- TEvTabletPipe::TEvClientConnected* msg = ev->Get();
- const ui64 tabletId = msg->TabletId;
- Y_VERIFY(tabletId != 0);
- if (msg->Status == NKikimrProto::OK) {
- LOG_S_DEBUG("Pipe connected to tablet: " << tabletId);
- } else {
- LOG_S_DEBUG("Pipe connection to tablet: " << tabletId << " failed with status: " << msg->Status);
- DestroyPipe(tabletId, ctx);
- }
- }
-
- void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
- const ui64 tabletId = ev->Get()->TabletId;
- Y_VERIFY(tabletId != 0);
-
- LOG_S_DEBUG("Closed pipe connection to tablet: " << tabletId);
- DestroyPipe(tabletId, ctx);
- }
-
- void Handle(TEvColumnShard::TEvReadBlobRangesResult::TPtr& ev, const TActorContext& ctx) {
- ui64 tabletId = ev->Get()->Record.GetTabletId();
- ui64 readCookie = ev->Cookie;
- LOG_S_DEBUG("Got read result from tablet: " << tabletId);
-
- InFlightSmallBlobRequests[tabletId].erase(readCookie);
-
- auto cookieIt = CookieToRange.find(readCookie);
- if (cookieIt == CookieToRange.end()) {
-            // This might only happen in case of a race between response and pipe close
- LOG_S_INFO("Unknown read result cookie: " << readCookie);
- return;
- }
-
- std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
- CookieToRange.erase(readCookie);
-
- const auto& record = ev->Get()->Record;
-
- Y_VERIFY(blobRanges.size() == record.ResultsSize(), "Mismatched number of results for read request!");
-
- for (size_t i = 0; i < record.ResultsSize(); ++i) {
- const auto& res = record.GetResults(i);
-
- Y_VERIFY(blobRanges[i].BlobId.ToStringNew() == res.GetBlobRange().GetBlobId());
- Y_VERIFY(blobRanges[i].Offset == res.GetBlobRange().GetOffset());
- Y_VERIFY(blobRanges[i].Size == res.GetBlobRange().GetSize());
- ProcessSingleRangeResult(blobRanges[i], readCookie, res.GetStatus(), res.GetData(), ctx);
- }
-
- MakeReadRequests(ctx);
- }
-
- void InsertIntoCache(const TBlobRange& blobRange, TString data) {
- CacheDataSize += blobRange.Size;
- SizeBytes->Add(blobRange.Size);
- SizeBlobs->Inc();
-
-        // Shrink the buffer if it has too much extra capacity
- if (data.capacity() > data.size() * 1.1) {
- data = TString(data.begin(), data.end());
- }
-
- Cache.Insert(blobRange, data);
- CachedRanges[blobRange.BlobId].insert(blobRange);
- }
-
+ return;
+ }
+
+ SizeBytesInFlight->Sub(blobRange.Size);
+ SizeBlobsInFlight->Dec();
+ InFlightDataSize -= blobRange.Size;
+
+ Y_VERIFY(Cache.Find(blobRange) == Cache.End(),
+ "Range %s must not be already in cache", blobRange.ToString().c_str());
+
+ if (status == NKikimrProto::EReplyStatus::OK) {
+ Y_VERIFY(blobRange.Size == data.size(),
+ "Read %s, size %" PRISZT, blobRange.ToString().c_str(), data.size());
+ ReadBytes->Add(blobRange.Size);
+
+ if (readIt->second.Cache) {
+ InsertIntoCache(blobRange, data);
+ }
+ } else {
+ LOG_S_WARN("Read failed for range: " << blobRange
+ << " status: " << NKikimrProto::EReplyStatus_Name(status));
+ }
+
+ // Send results to all waiters
+ for (const auto& to : readIt->second.Waiting) {
+ SendResult(to, blobRange, (NKikimrProto::EReplyStatus)status, data, ctx);
+ }
+
+ OutstandingReads.erase(readIt);
+ }
+
+ void SendBatchReadRequestToTablet(const std::vector<TBlobRange>& blobRanges,
+ const ui64 cookie, const TActorContext& ctx)
+ {
+ Y_VERIFY(!blobRanges.empty());
+ ui64 tabletId = blobRanges.front().BlobId.GetTabletId();
+
+ LOG_S_DEBUG("Sending read from Tablet: " << tabletId
+ << " ranges: " << JoinStrings(blobRanges.begin(), blobRanges.end(), " ")
+ << " cookie: " << cookie);
+
+ if (!ShardPipes.contains(tabletId)) {
+ NTabletPipe::TClientConfig clientConfig;
+ clientConfig.AllowFollower = false;
+ clientConfig.CheckAliveness = true;
+ clientConfig.RetryPolicy = {
+ .RetryLimitCount = 10,
+ .MinRetryTime = TDuration::MilliSeconds(5),
+ };
+ ShardPipes[tabletId] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, tabletId, clientConfig));
+ }
+
+ auto ev = std::make_unique<TEvColumnShard::TEvReadBlobRanges>();
+ for (const auto& r : blobRanges) {
+ auto* range = ev->Record.AddBlobRanges();
+ range->SetBlobId(r.BlobId.ToStringNew());
+ range->SetOffset(r.Offset);
+ range->SetSize(r.Size);
+ }
+
+ InFlightSmallBlobRequests[tabletId].insert(cookie);
+ NTabletPipe::SendData(ctx, ShardPipes[tabletId], ev.release(), cookie);
+ }
+
+ // Forgets the pipe to the tablet and fails all in-flight requests to it
+ void DestroyPipe(ui64 tabletId, const TActorContext& ctx) {
+ ShardPipes.erase(tabletId);
+ // Send errors for in-flight requests
+ auto cookies = std::move(InFlightSmallBlobRequests[tabletId]);
+ InFlightSmallBlobRequests.erase(tabletId);
+ for (ui64 readCookie : cookies) {
+ auto cookieIt = CookieToRange.find(readCookie);
+ if (cookieIt == CookieToRange.end()) {
+ // This might only happen in case of a race between response and pipe close
+ LOG_S_INFO("Unknown read result cookie: " << readCookie);
+ return;
+ }
+
+ std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
+ CookieToRange.erase(readCookie);
+
+ for (size_t i = 0; i < blobRanges.size(); ++i) {
+ Y_VERIFY(blobRanges[i].BlobId.IsSmallBlob());
+ Y_VERIFY(blobRanges[i].BlobId.GetTabletId() == tabletId);
+ ProcessSingleRangeResult(blobRanges[i], readCookie, NKikimrProto::EReplyStatus::NOTREADY, {}, ctx);
+ }
+ }
+
+ MakeReadRequests(ctx);
+ }
+
+ void Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const TActorContext& ctx) {
+ TEvTabletPipe::TEvClientConnected* msg = ev->Get();
+ const ui64 tabletId = msg->TabletId;
+ Y_VERIFY(tabletId != 0);
+ if (msg->Status == NKikimrProto::OK) {
+ LOG_S_DEBUG("Pipe connected to tablet: " << tabletId);
+ } else {
+ LOG_S_DEBUG("Pipe connection to tablet: " << tabletId << " failed with status: " << msg->Status);
+ DestroyPipe(tabletId, ctx);
+ }
+ }
+
+ void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
+ const ui64 tabletId = ev->Get()->TabletId;
+ Y_VERIFY(tabletId != 0);
+
+ LOG_S_DEBUG("Closed pipe connection to tablet: " << tabletId);
+ DestroyPipe(tabletId, ctx);
+ }
+
+ void Handle(TEvColumnShard::TEvReadBlobRangesResult::TPtr& ev, const TActorContext& ctx) {
+ ui64 tabletId = ev->Get()->Record.GetTabletId();
+ ui64 readCookie = ev->Cookie;
+ LOG_S_DEBUG("Got read result from tablet: " << tabletId);
+
+ InFlightSmallBlobRequests[tabletId].erase(readCookie);
+
+ auto cookieIt = CookieToRange.find(readCookie);
+ if (cookieIt == CookieToRange.end()) {
+ // This might only happen in case of a race between response and pipe close
+ LOG_S_INFO("Unknown read result cookie: " << readCookie);
+ return;
+ }
+
+ std::vector<TBlobRange> blobRanges = std::move(cookieIt->second);
+ CookieToRange.erase(readCookie);
+
+ const auto& record = ev->Get()->Record;
+
+ Y_VERIFY(blobRanges.size() == record.ResultsSize(), "Mismatched number of results for read request!");
+
+ for (size_t i = 0; i < record.ResultsSize(); ++i) {
+ const auto& res = record.GetResults(i);
+
+ Y_VERIFY(blobRanges[i].BlobId.ToStringNew() == res.GetBlobRange().GetBlobId());
+ Y_VERIFY(blobRanges[i].Offset == res.GetBlobRange().GetOffset());
+ Y_VERIFY(blobRanges[i].Size == res.GetBlobRange().GetSize());
+ ProcessSingleRangeResult(blobRanges[i], readCookie, res.GetStatus(), res.GetData(), ctx);
+ }
+
+ MakeReadRequests(ctx);
+ }
+
+ void InsertIntoCache(const TBlobRange& blobRange, TString data) {
+ CacheDataSize += blobRange.Size;
+ SizeBytes->Add(blobRange.Size);
+ SizeBlobs->Inc();
+
+ // Shrink the buffer if it has too much extra capacity
+ if (data.capacity() > data.size() * 1.1) {
+ data = TString(data.begin(), data.end());
+ }
+
+ Cache.Insert(blobRange, data);
+ CachedRanges[blobRange.BlobId].insert(blobRange);
+ }
+
void Evict(const TActorContext&) {
- while (CacheDataSize + InFlightDataSize > MaxCacheDataSize) {
- auto it = Cache.FindOldest();
- if (it == Cache.End()) {
- break;
- }
-
+ while (CacheDataSize + InFlightDataSize > MaxCacheDataSize) {
+ auto it = Cache.FindOldest();
+ if (it == Cache.End()) {
+ break;
+ }
+
LOG_S_DEBUG("Evict: " << it.Key());
-
- {
- // Remove the range from list of ranges by blob id
- auto blobIdIt = CachedRanges.find(it.Key().BlobId);
- if (blobIdIt != CachedRanges.end()) {
- blobIdIt->second.erase(it.Key());
- if (blobIdIt->second.empty()) {
- CachedRanges.erase(blobIdIt);
- }
- }
- }
-
- Evictions->Inc();
- EvictedBytes->Add(it.Key().Size);
-
- CacheDataSize -= it.Key().Size;
- Cache.Erase(it);
-
- SizeBytes->Set(CacheDataSize);
- SizeBlobs->Set(Cache.Size());
- }
- }
-};
-
-NActors::IActor* CreateBlobCache(ui64 maxBytes, TIntrusivePtr<NMonitoring::TDynamicCounters> counters) {
- return new TBlobCache(maxBytes, counters);
-}
-
-void AddRangeToCache(const TBlobRange& blobRange, const TString& data) {
- TlsActivationContext->Send(
- new IEventHandle(MakeBlobCacheServiceId(), TActorId(), new TEvBlobCache::TEvCacheBlobRange(blobRange, data)));
-}
-
-void ForgetBlob(const TUnifiedBlobId& blobId) {
- TlsActivationContext->Send(
- new IEventHandle(MakeBlobCacheServiceId(), TActorId(), new TEvBlobCache::TEvForgetBlob(blobId)));
-}
-
-}
+
+ {
+ // Remove the range from list of ranges by blob id
+ auto blobIdIt = CachedRanges.find(it.Key().BlobId);
+ if (blobIdIt != CachedRanges.end()) {
+ blobIdIt->second.erase(it.Key());
+ if (blobIdIt->second.empty()) {
+ CachedRanges.erase(blobIdIt);
+ }
+ }
+ }
+
+ Evictions->Inc();
+ EvictedBytes->Add(it.Key().Size);
+
+ CacheDataSize -= it.Key().Size;
+ Cache.Erase(it);
+
+ SizeBytes->Set(CacheDataSize);
+ SizeBlobs->Set(Cache.Size());
+ }
+ }
+};
+
+NActors::IActor* CreateBlobCache(ui64 maxBytes, TIntrusivePtr<NMonitoring::TDynamicCounters> counters) {
+ return new TBlobCache(maxBytes, counters);
+}
+
+void AddRangeToCache(const TBlobRange& blobRange, const TString& data) {
+ TlsActivationContext->Send(
+ new IEventHandle(MakeBlobCacheServiceId(), TActorId(), new TEvBlobCache::TEvCacheBlobRange(blobRange, data)));
+}
+
+void ForgetBlob(const TUnifiedBlobId& blobId) {
+ TlsActivationContext->Send(
+ new IEventHandle(MakeBlobCacheServiceId(), TActorId(), new TEvBlobCache::TEvForgetBlob(blobId)));
+}
+
+}
diff --git a/ydb/core/tx/columnshard/blob_cache.h b/ydb/core/tx/columnshard/blob_cache.h
index 4a00e4d052b..6901eb218be 100644
--- a/ydb/core/tx/columnshard/blob_cache.h
+++ b/ydb/core/tx/columnshard/blob_cache.h
@@ -1,108 +1,108 @@
-#pragma once
-
-#include "blob.h"
-
+#pragma once
+
+#include "blob.h"
+
#include <ydb/core/tx/ctor_logger.h>
#include <ydb/core/base/logoblob.h>
#include <ydb/core/base/events.h>
#include <ydb/core/base/blobstorage.h>
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/actors/core/actorid.h>
-#include <library/cpp/actors/core/event_local.h>
-
-#include <util/generic/vector.h>
-
+
+#include <library/cpp/monlib/dynamic_counters/counters.h>
+#include <library/cpp/actors/core/actorid.h>
+#include <library/cpp/actors/core/event_local.h>
+
+#include <util/generic/vector.h>
+
namespace NKikimr::NBlobCache {
-
-using NOlap::TUnifiedBlobId;
-using NOlap::TBlobRange;
-
+
+using NOlap::TUnifiedBlobId;
+using NOlap::TBlobRange;
+
using TLogThis = TCtorLogger<NKikimrServices::BLOB_CACHE>;
-
-struct TEvBlobCache {
- enum EEv {
- EvReadBlobRange = EventSpaceBegin(TKikimrEvents::ES_BLOB_CACHE),
- EvReadBlobRangeBatch,
- EvReadBlobRangeResult,
- EvCacheBlobRange,
- EvForgetBlob,
-
- EvEnd
- };
-
- static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_BLOB_CACHE), "Unexpected TEvBlobCache event range");
-
- struct TEvReadBlobRange : public NActors::TEventLocal<TEvReadBlobRange, EvReadBlobRange> {
- TBlobRange BlobRange;
- bool Cache;
- // TODO: pass some kind of priority?
- explicit TEvReadBlobRange(const TBlobRange& blobRange, bool cache = true)
- : BlobRange(blobRange)
- , Cache(cache)
- {}
- };
-
- // Read a batch of ranges from the same DS group
- // This is useful to save IOPs when reading multiple columns from the same blob
- struct TEvReadBlobRangeBatch : public NActors::TEventLocal<TEvReadBlobRangeBatch, EvReadBlobRangeBatch> {
- std::vector<TBlobRange> BlobRanges;
- bool Cache;
- // TODO: pass some kind of priority?
- explicit TEvReadBlobRangeBatch(std::vector<TBlobRange>&& blobRanges, bool cache = true)
- : BlobRanges(blobRanges)
- , Cache(cache)
- {}
- };
-
- struct TEvReadBlobRangeResult : public NActors::TEventLocal<TEvReadBlobRangeResult, EvReadBlobRangeResult> {
- TBlobRange BlobRange;
- NKikimrProto::EReplyStatus Status;
- TString Data;
-
- TEvReadBlobRangeResult(const TBlobRange& blobRange, NKikimrProto::EReplyStatus status, const TString& data)
- : BlobRange(blobRange)
- , Status(status)
- , Data(data)
- {}
- };
-
- // Put blob range data into the cache. This helps to reduce the number of reads from disks done by indexing, compactions
- // and queries that read recent data
- struct TEvCacheBlobRange : public NActors::TEventLocal<TEvCacheBlobRange, EvCacheBlobRange> {
- TBlobRange BlobRange;
- TString Data;
-
- TEvCacheBlobRange(const TBlobRange& blobRange, const TString& data)
- : BlobRange(blobRange)
- , Data(data)
- {}
- };
-
- // Notify the cache that this blob will not be requested any more
- // (e.g. when it was deleted after indexing or compaction)
- struct TEvForgetBlob : public NActors::TEventLocal<TEvForgetBlob, EvForgetBlob> {
- TUnifiedBlobId BlobId;
-
- explicit TEvForgetBlob(const TUnifiedBlobId& blobId)
- : BlobId(blobId)
- {}
- };
-};
-
-inline
-NActors::TActorId MakeBlobCacheServiceId() {
- static_assert(TActorId::MaxServiceIDLength == 12, "Unexpected actor id length");
- const char x[12] = "blob_cache";
- return TActorId(0, TStringBuf(x, 12));
-}
-
-NActors::IActor* CreateBlobCache(ui64 maxBytes, TIntrusivePtr<NMonitoring::TDynamicCounters>);
-
-// Explicitly add and remove data from the cache. This is useful for newly written data that is likely to be read by
-// indexing, compaction and user queries and for the data that has been compacted and will not be read again.
-void AddRangeToCache(const TBlobRange& blobRange, const TString& data);
-void ForgetBlob(const TUnifiedBlobId& blobId);
-
-}
+
+struct TEvBlobCache {
+ enum EEv {
+ EvReadBlobRange = EventSpaceBegin(TKikimrEvents::ES_BLOB_CACHE),
+ EvReadBlobRangeBatch,
+ EvReadBlobRangeResult,
+ EvCacheBlobRange,
+ EvForgetBlob,
+
+ EvEnd
+ };
+
+ static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_BLOB_CACHE), "Unexpected TEvBlobCache event range");
+
+ struct TEvReadBlobRange : public NActors::TEventLocal<TEvReadBlobRange, EvReadBlobRange> {
+ TBlobRange BlobRange;
+ bool Cache;
+ // TODO: pass some kind of priority?
+ explicit TEvReadBlobRange(const TBlobRange& blobRange, bool cache = true)
+ : BlobRange(blobRange)
+ , Cache(cache)
+ {}
+ };
+
+ // Read a batch of ranges from the same DS group
+ // This is useful to save IOPs when reading multiple columns from the same blob
+ struct TEvReadBlobRangeBatch : public NActors::TEventLocal<TEvReadBlobRangeBatch, EvReadBlobRangeBatch> {
+ std::vector<TBlobRange> BlobRanges;
+ bool Cache;
+ // TODO: pass some kind of priority?
+ explicit TEvReadBlobRangeBatch(std::vector<TBlobRange>&& blobRanges, bool cache = true)
+ : BlobRanges(blobRanges)
+ , Cache(cache)
+ {}
+ };
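+
+ // Usage sketch (illustrative only; CollectRangesFromSameDsGroup is a hypothetical helper and
+ // TBlobRange construction details live in blob.h). An actor could request a batch read from
+ // the cache service roughly like this:
+ //
+ //   std::vector<TBlobRange> ranges = CollectRangesFromSameDsGroup();
+ //   ctx.Send(MakeBlobCacheServiceId(),
+ //            new TEvBlobCache::TEvReadBlobRangeBatch(std::move(ranges), /*cache=*/ true));
+ //
+ // The requester then receives a TEvReadBlobRangeResult for each range in the batch.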
+
+ struct TEvReadBlobRangeResult : public NActors::TEventLocal<TEvReadBlobRangeResult, EvReadBlobRangeResult> {
+ TBlobRange BlobRange;
+ NKikimrProto::EReplyStatus Status;
+ TString Data;
+
+ TEvReadBlobRangeResult(const TBlobRange& blobRange, NKikimrProto::EReplyStatus status, const TString& data)
+ : BlobRange(blobRange)
+ , Status(status)
+ , Data(data)
+ {}
+ };
+
+ // Put blob range data into the cache. This helps to reduce the number of reads from disks done by indexing, compactions
+ // and queries that read recent data
+ struct TEvCacheBlobRange : public NActors::TEventLocal<TEvCacheBlobRange, EvCacheBlobRange> {
+ TBlobRange BlobRange;
+ TString Data;
+
+ TEvCacheBlobRange(const TBlobRange& blobRange, const TString& data)
+ : BlobRange(blobRange)
+ , Data(data)
+ {}
+ };
+
+ // Notify the cache that this blob will not be requested any more
+ // (e.g. when it was deleted after indexing or compaction)
+ struct TEvForgetBlob : public NActors::TEventLocal<TEvForgetBlob, EvForgetBlob> {
+ TUnifiedBlobId BlobId;
+
+ explicit TEvForgetBlob(const TUnifiedBlobId& blobId)
+ : BlobId(blobId)
+ {}
+ };
+};
+
+inline
+NActors::TActorId MakeBlobCacheServiceId() {
+ static_assert(TActorId::MaxServiceIDLength == 12, "Unexpected actor id length");
+ const char x[12] = "blob_cache";
+ return TActorId(0, TStringBuf(x, 12));
+}
+
+NActors::IActor* CreateBlobCache(ui64 maxBytes, TIntrusivePtr<NMonitoring::TDynamicCounters>);
+
+// Explicitly add and remove data from the cache. This is useful for newly written data that is likely to be read by
+// indexing, compaction and user queries and for the data that has been compacted and will not be read again.
+void AddRangeToCache(const TBlobRange& blobRange, const TString& data);
+void ForgetBlob(const TUnifiedBlobId& blobId);
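+
+// Usage sketch (illustrative only): a writer that expects indexing to read a freshly written
+// blob can pre-populate the cache, then drop the blob once compaction has deleted it. The
+// blobRange/blobId values below are placeholders constructed elsewhere (see blob.h).
+//
+//   NBlobCache::AddRangeToCache(blobRange, data); // blobRange covers the freshly written data
+//   ...
+//   NBlobCache::ForgetBlob(blobId);               // after the blob has been deleted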
+
+}
diff --git a/ydb/core/tx/columnshard/blob_manager.cpp b/ydb/core/tx/columnshard/blob_manager.cpp
index 3dc33c610ce..2d390802d0f 100644
--- a/ydb/core/tx/columnshard/blob_manager.cpp
+++ b/ydb/core/tx/columnshard/blob_manager.cpp
@@ -1,513 +1,513 @@
-#include "defs.h"
-#include "columnshard_impl.h"
-#include "blob_manager.h"
-#include "blob_manager_db.h"
-#include "blob_cache.h"
-
+#include "defs.h"
+#include "columnshard_impl.h"
+#include "blob_manager.h"
+#include "blob_manager_db.h"
+#include "blob_cache.h"
+
#include <ydb/core/base/blobstorage.h>
-
+
namespace NKikimr::NColumnShard {
-
-TLogoBlobID ParseLogoBlobId(TString blobId) {
- TLogoBlobID logoBlobId;
- TString err;
- if (!TLogoBlobID::Parse(logoBlobId, blobId, err)) {
- Y_FAIL("%s", err.c_str());
- }
- return logoBlobId;
-}
-
-struct TBlobBatch::TBatchInfo : TNonCopyable {
- TIntrusivePtr<TTabletStorageInfo> TabletInfo;
+
+TLogoBlobID ParseLogoBlobId(TString blobId) {
+ TLogoBlobID logoBlobId;
+ TString err;
+ if (!TLogoBlobID::Parse(logoBlobId, blobId, err)) {
+ Y_FAIL("%s", err.c_str());
+ }
+ return logoBlobId;
+}
+
+struct TBlobBatch::TBatchInfo : TNonCopyable {
+ TIntrusivePtr<TTabletStorageInfo> TabletInfo;
TAllocatedGenStepConstPtr GenStepRef;
- const ui32 Gen;
- const ui32 Step;
- const ui32 Channel;
-
- TVector<ui32> BlobSizes;
- TVector<bool> InFlight;
- i32 InFlightCount;
- ui64 TotalSizeBytes;
- TVector<TString> SmallBlobs;
-
+ const ui32 Gen;
+ const ui32 Step;
+ const ui32 Channel;
+
+ TVector<ui32> BlobSizes;
+ TVector<bool> InFlight;
+ i32 InFlightCount;
+ ui64 TotalSizeBytes;
+ TVector<TString> SmallBlobs;
+
TBatchInfo(TIntrusivePtr<TTabletStorageInfo> tabletInfo, TAllocatedGenStepConstPtr genStep, ui32 channel)
- : TabletInfo(tabletInfo)
- , GenStepRef(genStep)
- , Gen(std::get<0>(GenStepRef->GenStep))
- , Step(std::get<1>(GenStepRef->GenStep))
- , Channel(channel)
- , InFlightCount(0)
- , TotalSizeBytes(0)
- {}
-
- TUnifiedBlobId NextBlobId(ui32 blobSize) {
- BlobSizes.push_back(blobSize);
- InFlight.push_back(true);
- ++InFlightCount;
- TotalSizeBytes += blobSize;
- return MakeBlobId(BlobSizes.size()-1);
- }
-
- TUnifiedBlobId MakeBlobId(ui32 i) const {
- Y_VERIFY(i < BlobSizes.size());
- const ui32 dsGroup = TabletInfo->GroupFor(Channel, Gen);
- return TUnifiedBlobId(dsGroup, TLogoBlobID(TabletInfo->TabletID, Gen, Step, Channel, BlobSizes[i], i));
- }
-
- TUnifiedBlobId AddSmallBlob(const TString& data) {
- // NOTE: small blobs are not included into TotalSizeBytes
- SmallBlobs.push_back(data);
- return MakeSmallBlobId(SmallBlobs.size()-1);
- }
-
- TUnifiedBlobId MakeSmallBlobId(ui32 i) const {
- Y_VERIFY(i < SmallBlobs.size());
- return TUnifiedBlobId(TabletInfo->TabletID, Gen, Step, i, SmallBlobs[i].size());
- }
-};
-
-TBlobBatch::TBlobBatch(std::unique_ptr<TBatchInfo> batchInfo)
- : BatchInfo(std::move(batchInfo))
-{}
-
-TBlobBatch::TBlobBatch() = default;
-TBlobBatch::TBlobBatch(TBlobBatch&& other) = default;
-TBlobBatch& TBlobBatch::operator =(TBlobBatch&& other) = default;
-TBlobBatch::~TBlobBatch() = default;
-
-void TBlobBatch::SendWriteRequest(const TActorContext& ctx, ui32 groupId, const TLogoBlobID& logoBlobId, const TString& data, ui64 cookie, TInstant deadline) {
+ : TabletInfo(tabletInfo)
+ , GenStepRef(genStep)
+ , Gen(std::get<0>(GenStepRef->GenStep))
+ , Step(std::get<1>(GenStepRef->GenStep))
+ , Channel(channel)
+ , InFlightCount(0)
+ , TotalSizeBytes(0)
+ {}
+
+ TUnifiedBlobId NextBlobId(ui32 blobSize) {
+ BlobSizes.push_back(blobSize);
+ InFlight.push_back(true);
+ ++InFlightCount;
+ TotalSizeBytes += blobSize;
+ return MakeBlobId(BlobSizes.size()-1);
+ }
+
+ TUnifiedBlobId MakeBlobId(ui32 i) const {
+ Y_VERIFY(i < BlobSizes.size());
+ const ui32 dsGroup = TabletInfo->GroupFor(Channel, Gen);
+ return TUnifiedBlobId(dsGroup, TLogoBlobID(TabletInfo->TabletID, Gen, Step, Channel, BlobSizes[i], i));
+ }
+
+ TUnifiedBlobId AddSmallBlob(const TString& data) {
+ // NOTE: small blobs are not included into TotalSizeBytes
+ SmallBlobs.push_back(data);
+ return MakeSmallBlobId(SmallBlobs.size()-1);
+ }
+
+ TUnifiedBlobId MakeSmallBlobId(ui32 i) const {
+ Y_VERIFY(i < SmallBlobs.size());
+ return TUnifiedBlobId(TabletInfo->TabletID, Gen, Step, i, SmallBlobs[i].size());
+ }
+};
+
+TBlobBatch::TBlobBatch(std::unique_ptr<TBatchInfo> batchInfo)
+ : BatchInfo(std::move(batchInfo))
+{}
+
+TBlobBatch::TBlobBatch() = default;
+TBlobBatch::TBlobBatch(TBlobBatch&& other) = default;
+TBlobBatch& TBlobBatch::operator =(TBlobBatch&& other) = default;
+TBlobBatch::~TBlobBatch() = default;
+
+void TBlobBatch::SendWriteRequest(const TActorContext& ctx, ui32 groupId, const TLogoBlobID& logoBlobId, const TString& data, ui64 cookie, TInstant deadline) {
LOG_S_TRACE("EvPut " << data.size() << " bytes to group " << groupId
- << " at tablet " << BatchInfo->TabletInfo->TabletID);
-
- auto handleClass = NKikimrBlobStorage::UserData;
- //auto handleClass = NKikimrBlobStorage::AsyncBlob; // TODO: what's the difference?
- auto tactic = TEvBlobStorage::TEvPut::TacticMaxThroughput;
-
- THolder<TEvBlobStorage::TEvPut> put(
- new TEvBlobStorage::TEvPut(logoBlobId, data, deadline, handleClass, tactic));
- SendPutToGroup(ctx, groupId, BatchInfo->TabletInfo.Get(), std::move(put), cookie);
-}
-
-TUnifiedBlobId TBlobBatch::SendWriteBlobRequest(const TString& blobData, TInstant deadline, const TActorContext& ctx) {
- Y_VERIFY(blobData.size() <= TLimits::MAX_BLOB_SIZE, "Blob %" PRISZT" size exceeds the limit %" PRIu64,
- blobData.size(), TLimits::MAX_BLOB_SIZE);
-
- TUnifiedBlobId blobId = BatchInfo->NextBlobId(blobData.size());
- ui32 groupId = blobId.GetDsGroup();
-
- SendWriteRequest(ctx, groupId, blobId.GetLogoBlobId(), blobData, 0, deadline);
-
- return blobId;
-}
-
-void TBlobBatch::OnBlobWriteResult(TEvBlobStorage::TEvPutResult::TPtr& ev) {
- TLogoBlobID blobId = ev->Get()->Id;
- Y_VERIFY(ev->Get()->Status == NKikimrProto::OK, "The caller must handle unsuccessful status");
+ << " at tablet " << BatchInfo->TabletInfo->TabletID);
+
+ auto handleClass = NKikimrBlobStorage::UserData;
+ //auto handleClass = NKikimrBlobStorage::AsyncBlob; // TODO: what's the difference?
+ auto tactic = TEvBlobStorage::TEvPut::TacticMaxThroughput;
+
+ THolder<TEvBlobStorage::TEvPut> put(
+ new TEvBlobStorage::TEvPut(logoBlobId, data, deadline, handleClass, tactic));
+ SendPutToGroup(ctx, groupId, BatchInfo->TabletInfo.Get(), std::move(put), cookie);
+}
+
+TUnifiedBlobId TBlobBatch::SendWriteBlobRequest(const TString& blobData, TInstant deadline, const TActorContext& ctx) {
+ Y_VERIFY(blobData.size() <= TLimits::MAX_BLOB_SIZE, "Blob %" PRISZT" size exceeds the limit %" PRIu64,
+ blobData.size(), TLimits::MAX_BLOB_SIZE);
+
+ TUnifiedBlobId blobId = BatchInfo->NextBlobId(blobData.size());
+ ui32 groupId = blobId.GetDsGroup();
+
+ SendWriteRequest(ctx, groupId, blobId.GetLogoBlobId(), blobData, 0, deadline);
+
+ return blobId;
+}
+
+void TBlobBatch::OnBlobWriteResult(TEvBlobStorage::TEvPutResult::TPtr& ev) {
+ TLogoBlobID blobId = ev->Get()->Id;
+ Y_VERIFY(ev->Get()->Status == NKikimrProto::OK, "The caller must handle unsuccessful status");
Y_VERIFY(BatchInfo);
- Y_VERIFY(BatchInfo->InFlight[blobId.Cookie()], "Blob %s is already acked!", blobId.ToString().c_str());
- BatchInfo->InFlight[blobId.Cookie()] = false;
- --BatchInfo->InFlightCount;
- Y_VERIFY(BatchInfo->InFlightCount >= 0);
-}
-
-bool TBlobBatch::AllBlobWritesCompleted() const {
+ Y_VERIFY(BatchInfo->InFlight[blobId.Cookie()], "Blob %s is already acked!", blobId.ToString().c_str());
+ BatchInfo->InFlight[blobId.Cookie()] = false;
+ --BatchInfo->InFlightCount;
+ Y_VERIFY(BatchInfo->InFlightCount >= 0);
+}
+
+bool TBlobBatch::AllBlobWritesCompleted() const {
Y_VERIFY(BatchInfo);
- return BatchInfo->InFlightCount == 0;
-}
-
-ui64 TBlobBatch::GetBlobCount() const {
+ return BatchInfo->InFlightCount == 0;
+}
+
+ui64 TBlobBatch::GetBlobCount() const {
if (BatchInfo) {
return BatchInfo->BlobSizes.size();
}
return 0;
-}
-
-ui64 TBlobBatch::GetTotalSize() const {
+}
+
+ui64 TBlobBatch::GetTotalSize() const {
if (BatchInfo) {
return BatchInfo->TotalSizeBytes;
}
return 0;
-}
-
-
-TUnifiedBlobId TBlobBatch::AddSmallBlob(const TString& data) {
- Y_VERIFY(BatchInfo);
- return BatchInfo->AddSmallBlob(data);
-}
-
-TBlobManager::TBlobManager(TIntrusivePtr<TTabletStorageInfo> tabletInfo, ui32 gen)
- : TabletInfo(tabletInfo)
- , CurrentGen(gen)
- , CurrentStep(0)
- , BlobCountToTriggerGC(BLOB_COUNT_TO_TRIGGER_GC_DEFAULT, 0, Max<i64>())
- , GCIntervalSeconds(GC_INTERVAL_SECONDS_DEFAULT, 0, Max<i64>())
-{}
-
-void TBlobManager::RegisterControls(NKikimr::TControlBoard& icb) {
- icb.RegisterSharedControl(BlobCountToTriggerGC, "ColumnShardControls.BlobCountToTriggerGC");
- icb.RegisterSharedControl(GCIntervalSeconds, "ColumnShardControls.GCIntervalSeconds");
-}
-
-bool TBlobManager::LoadState(IBlobManagerDb& db) {
- // Load last collected Generation
- if (!db.LoadLastGcBarrier(LastCollectedGenStep)) {
- return false;
- }
- NewCollectGenStep = LastCollectedGenStep;
-
- // Load the keep and delete queues
- TVector<TUnifiedBlobId> blobsToKeep;
- TVector<TUnifiedBlobId> blobsToDelete;
- TBlobGroupSelector dsGroupSelector(TabletInfo);
- if (!db.LoadLists(blobsToKeep, blobsToDelete, &dsGroupSelector)) {
- return false;
- }
-
- // Build the list of steps that cannot be garbage collected before Keep flag is set on the blobs
- THashSet<TGenStep> genStepsWithBlobsToKeep;
- for (const auto unifiedBlobId : blobsToKeep) {
- Y_VERIFY(unifiedBlobId.IsDsBlob(), "Not a DS blob id in Keep table: %s", unifiedBlobId.ToStringNew().c_str());
-
- TLogoBlobID blobId = unifiedBlobId.GetLogoBlobId();
- TGenStep genStep{blobId.Generation(), blobId.Step()};
-
- Y_VERIFY(genStep > LastCollectedGenStep,
- "Blob %s in keep queue is before last barrier (%" PRIu32 ":%" PRIu32 ")",
- unifiedBlobId.ToStringNew().c_str(), std::get<0>(LastCollectedGenStep), std::get<1>(LastCollectedGenStep));
-
- genStepsWithBlobsToKeep.insert(genStep);
- BlobsToKeep.insert(blobId);
- }
-
- for (const auto unifiedBlobId : blobsToDelete) {
- if (unifiedBlobId.IsSmallBlob()) {
- SmallBlobsToDelete.insert(unifiedBlobId);
- } else if (unifiedBlobId.IsDsBlob()) {
- BlobsToDelete.insert(unifiedBlobId.GetLogoBlobId());
- } else {
- Y_FAIL("Unexpected blob id: %s", unifiedBlobId.ToStringNew().c_str());
- }
- }
-
- AllocatedGenSteps.clear();
- for (const auto& gs : genStepsWithBlobsToKeep) {
- AllocatedGenSteps.push_back(new TAllocatedGenStep(gs));
- }
- AllocatedGenSteps.push_back(new TAllocatedGenStep({CurrentGen, 0}));
-
+}
+
+
+TUnifiedBlobId TBlobBatch::AddSmallBlob(const TString& data) {
+ Y_VERIFY(BatchInfo);
+ return BatchInfo->AddSmallBlob(data);
+}
+
+TBlobManager::TBlobManager(TIntrusivePtr<TTabletStorageInfo> tabletInfo, ui32 gen)
+ : TabletInfo(tabletInfo)
+ , CurrentGen(gen)
+ , CurrentStep(0)
+ , BlobCountToTriggerGC(BLOB_COUNT_TO_TRIGGER_GC_DEFAULT, 0, Max<i64>())
+ , GCIntervalSeconds(GC_INTERVAL_SECONDS_DEFAULT, 0, Max<i64>())
+{}
+
+void TBlobManager::RegisterControls(NKikimr::TControlBoard& icb) {
+ icb.RegisterSharedControl(BlobCountToTriggerGC, "ColumnShardControls.BlobCountToTriggerGC");
+ icb.RegisterSharedControl(GCIntervalSeconds, "ColumnShardControls.GCIntervalSeconds");
+}
+
+bool TBlobManager::LoadState(IBlobManagerDb& db) {
+ // Load last collected Generation
+ if (!db.LoadLastGcBarrier(LastCollectedGenStep)) {
+ return false;
+ }
+ NewCollectGenStep = LastCollectedGenStep;
+
+ // Load the keep and delete queues
+ TVector<TUnifiedBlobId> blobsToKeep;
+ TVector<TUnifiedBlobId> blobsToDelete;
+ TBlobGroupSelector dsGroupSelector(TabletInfo);
+ if (!db.LoadLists(blobsToKeep, blobsToDelete, &dsGroupSelector)) {
+ return false;
+ }
+
+ // Build the list of steps that cannot be garbage collected before Keep flag is set on the blobs
+ THashSet<TGenStep> genStepsWithBlobsToKeep;
+ for (const auto unifiedBlobId : blobsToKeep) {
+ Y_VERIFY(unifiedBlobId.IsDsBlob(), "Not a DS blob id in Keep table: %s", unifiedBlobId.ToStringNew().c_str());
+
+ TLogoBlobID blobId = unifiedBlobId.GetLogoBlobId();
+ TGenStep genStep{blobId.Generation(), blobId.Step()};
+
+ Y_VERIFY(genStep > LastCollectedGenStep,
+ "Blob %s in keep queue is before last barrier (%" PRIu32 ":%" PRIu32 ")",
+ unifiedBlobId.ToStringNew().c_str(), std::get<0>(LastCollectedGenStep), std::get<1>(LastCollectedGenStep));
+
+ genStepsWithBlobsToKeep.insert(genStep);
+ BlobsToKeep.insert(blobId);
+ }
+
+ for (const auto unifiedBlobId : blobsToDelete) {
+ if (unifiedBlobId.IsSmallBlob()) {
+ SmallBlobsToDelete.insert(unifiedBlobId);
+ } else if (unifiedBlobId.IsDsBlob()) {
+ BlobsToDelete.insert(unifiedBlobId.GetLogoBlobId());
+ } else {
+ Y_FAIL("Unexpected blob id: %s", unifiedBlobId.ToStringNew().c_str());
+ }
+ }
+
+ AllocatedGenSteps.clear();
+ for (const auto& gs : genStepsWithBlobsToKeep) {
+ AllocatedGenSteps.push_back(new TAllocatedGenStep(gs));
+ }
+ AllocatedGenSteps.push_back(new TAllocatedGenStep({CurrentGen, 0}));
+
Sort(AllocatedGenSteps.begin(), AllocatedGenSteps.end(), [](const TAllocatedGenStepConstPtr& a, const TAllocatedGenStepConstPtr& b) {
- return a->GenStep < b->GenStep;
- });
-
- return true;
-}
-
-bool TBlobManager::TryMoveGCBarrier() {
- // Check that there is no GC request in flight
- if (!PerGroupGCListsInFlight.empty()) {
- return false;
- }
-
- if (BlobsToKeep.empty() && BlobsToDelete.empty() && LastCollectedGenStep == TGenStep{CurrentGen, CurrentStep}) {
- return false;
- }
-
- // Delay GC if there are too few blobs and the last GC was not long ago
- if ((i64)BlobsToKeep.size() < BlobCountToTriggerGC &&
- (i64)BlobsToDelete.size() < BlobCountToTriggerGC &&
- PreviousGCTime + TDuration::Seconds(GCIntervalSeconds) > AppData()->TimeProvider->Now())
- {
- return false;
- }
-
- // Find the GenStep where GC barrier can be moved
- {
- Y_VERIFY(NewCollectGenStep >= LastCollectedGenStep);
- while (!AllocatedGenSteps.empty()) {
- if (!AllocatedGenSteps.front()->Finished()) {
- break;
- }
- Y_VERIFY(AllocatedGenSteps.front()->GenStep > CollectGenStepInFlight);
- NewCollectGenStep = AllocatedGenSteps.front()->GenStep;
-
- AllocatedGenSteps.pop_front();
- }
- if (AllocatedGenSteps.empty()) {
- NewCollectGenStep = TGenStep{CurrentGen, CurrentStep};
- }
- }
-
- return NewCollectGenStep > LastCollectedGenStep;
-}
-
-THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> TBlobManager::PreparePerGroupGCRequests() {
- if (!TryMoveGCBarrier()) {
- return {};
- }
-
- PreviousGCTime = AppData()->TimeProvider->Now();
-
- CollectGenStepInFlight = NewCollectGenStep;
-
- const ui32 channelIdx = BLOB_CHANNEL;
-
- // Find the list of groups between LastCollectedGenStep and the new GC GenStep
- PerGroupGCListsInFlight.clear();
- {
- const ui32 fromGen = std::get<0>(LastCollectedGenStep);
- const ui32 toGen = std::get<0>(CollectGenStepInFlight);
- const auto& channelHistory = TabletInfo->ChannelInfo(channelIdx)->History;
- auto fnCmpGen = [](ui32 gen, const auto& historyEntry) {
- return gen < historyEntry.FromGeneration;
- };
- // Look for the entry with FromGeneration <= fromGen whose next entry has FromGeneration > fromGen
- auto fromIt = std::upper_bound(channelHistory.begin(), channelHistory.end(), fromGen, fnCmpGen);
- if (fromIt != channelHistory.begin()) {
- --fromIt;
- }
- auto toIt = std::upper_bound(channelHistory.begin(), channelHistory.end(), toGen, fnCmpGen);
- for (auto it = fromIt; it != toIt; ++it) {
- ui32 group = it->GroupID;
- PerGroupGCListsInFlight[group];
- }
- }
-
- // Make per-group Keep/DontKeep lists
- {
- // Add all blobs to keep
- while (!BlobsToKeep.empty()) {
- auto blobIt = BlobsToKeep.begin();
- if (TGenStep{blobIt->Generation(), blobIt->Step()} > CollectGenStepInFlight) {
- break;
- }
- ui32 blobGroup = TabletInfo->GroupFor(blobIt->Channel(), blobIt->Generation());
- PerGroupGCListsInFlight[blobGroup].KeepList.insert(*blobIt);
- BlobsToKeep.erase(blobIt);
- }
-
- // Add all blobs to delete
- while (!BlobsToDelete.empty()) {
- auto blobIt = BlobsToDelete.begin();
- if (TGenStep{blobIt->Generation(), blobIt->Step()} > CollectGenStepInFlight) {
- break;
- }
- ui32 blobGroup = TabletInfo->GroupFor(blobIt->Channel(), blobIt->Generation());
- bool canSkipDontKeep = false;
- if (PerGroupGCListsInFlight[blobGroup].KeepList.count(*blobIt)) {
- // Remove the blob from the keep list if it's also in the delete list
- PerGroupGCListsInFlight[blobGroup].KeepList.erase(*blobIt);
- // Skipped blobs still need to be deleted from BlobsToKeep table
- PerGroupGCListsInFlight[blobGroup].KeepListSkipped.push_back(*blobIt);
-
- if (CurrentGen == blobIt->Generation()) {
- // If this blob was created and deleted in the current generation then
- // we can skip sending both Keep and DontKeep flags.
- // NOTE: it's not safe to do this for older generations because there is
- // a scenario where the Keep flag was sent in an old generation and then the tablet restarted
- // before getting the result and removing the blob from the Keep list.
- canSkipDontKeep = true;
- }
- }
- if (!canSkipDontKeep) {
- PerGroupGCListsInFlight[blobGroup].DontKeepList.insert(*blobIt);
- } else {
- // Skipped blobs still need to be deleted from BlobsToDelete table
- PerGroupGCListsInFlight[blobGroup].DontKeepListSkipped.push_back(*blobIt);
- }
- BlobsToDelete.erase(blobIt);
- }
- }
-
- // Make per group requests
- THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> requests;
- {
- for (const auto& gl : PerGroupGCListsInFlight) {
- ui32 group = gl.first;
- requests[group] = std::make_unique<TEvBlobStorage::TEvCollectGarbage>(
- TabletInfo->TabletID, CurrentGen, PerGenerationCounter,
- channelIdx, true,
- std::get<0>(CollectGenStepInFlight), std::get<1>(CollectGenStepInFlight),
- new TVector<TLogoBlobID>(gl.second.KeepList.begin(), gl.second.KeepList.end()),
- new TVector<TLogoBlobID>(gl.second.DontKeepList.begin(), gl.second.DontKeepList.end()),
- TInstant::Max(), true);
-
- CounterToGroupInFlight[PerGenerationCounter] = group;
-
- PerGenerationCounter += requests[group]->PerGenerationCounterStepSize();
- }
- }
-
- return requests;
-}
-
-void TBlobManager::OnGCResult(TEvBlobStorage::TEvCollectGarbageResult::TPtr ev, IBlobManagerDb& db) {
- Y_VERIFY(ev->Get()->Status == NKikimrProto::OK, "The caller must handle unsuccessful status");
- Y_VERIFY(!CounterToGroupInFlight.empty());
- Y_VERIFY(!PerGroupGCListsInFlight.empty());
-
- // Find the group for this result
- ui64 counterFromRequest = ev->Get()->PerGenerationCounter;
- Y_VERIFY(CounterToGroupInFlight.count(counterFromRequest));
- ui32 group = CounterToGroupInFlight[counterFromRequest];
-
- auto it = PerGroupGCListsInFlight.find(group);
- for (const auto& blobId : it->second.KeepList) {
- db.EraseBlobToKeep(TUnifiedBlobId(group, blobId));
- }
-
- for (const auto& blobId : it->second.DontKeepList) {
- db.EraseBlobToDelete(TUnifiedBlobId(group, blobId));
- }
-
- for (const auto& blobId : it->second.KeepListSkipped) {
- db.EraseBlobToKeep(TUnifiedBlobId(group, blobId));
- }
-
- for (const auto& blobId : it->second.DontKeepListSkipped) {
- db.EraseBlobToDelete(TUnifiedBlobId(group, blobId));
- }
-
- ++CountersUpdate.GcRequestsSent;
- CountersUpdate.BlobKeepEntries += it->second.KeepList.size();
- CountersUpdate.BlobDontKeepEntries += it->second.DontKeepList.size();
- // "SkippedBlobs" counter tracks blobs that where excluded from both Keep and DontKeep lists
- // DontKeepListSkipped contains those blobs; KeepListSkipped contains them too but also some more
- CountersUpdate.BlobSkippedEntries += it->second.DontKeepListSkipped.size();
-
- PerGroupGCListsInFlight.erase(it);
- CounterToGroupInFlight.erase(group);
-
- // All requests done?
- if (PerGroupGCListsInFlight.empty()) {
- LastCollectedGenStep = CollectGenStepInFlight;
- db.SaveLastGcBarrier(LastCollectedGenStep);
- }
-
- PerformDelayedDeletes(db);
-}
-
-TBlobBatch TBlobManager::StartBlobBatch(ui32 channel) {
- ++CountersUpdate.BatchesStarted;
- Y_VERIFY(channel == BLOB_CHANNEL, "Support for multiple blob channels is not implemented yet");
- ++CurrentStep;
+ return a->GenStep < b->GenStep;
+ });
+
+ return true;
+}
+
+bool TBlobManager::TryMoveGCBarrier() {
+ // Check that there is no GC request in flight
+ if (!PerGroupGCListsInFlight.empty()) {
+ return false;
+ }
+
+ if (BlobsToKeep.empty() && BlobsToDelete.empty() && LastCollectedGenStep == TGenStep{CurrentGen, CurrentStep}) {
+ return false;
+ }
+
+ // Delay GC if there are too few blobs and the last GC was not long ago
+ if ((i64)BlobsToKeep.size() < BlobCountToTriggerGC &&
+ (i64)BlobsToDelete.size() < BlobCountToTriggerGC &&
+ PreviousGCTime + TDuration::Seconds(GCIntervalSeconds) > AppData()->TimeProvider->Now())
+ {
+ return false;
+ }
+
+ // Find the GenStep where GC barrier can be moved
+ {
+ Y_VERIFY(NewCollectGenStep >= LastCollectedGenStep);
+ while (!AllocatedGenSteps.empty()) {
+ if (!AllocatedGenSteps.front()->Finished()) {
+ break;
+ }
+ Y_VERIFY(AllocatedGenSteps.front()->GenStep > CollectGenStepInFlight);
+ NewCollectGenStep = AllocatedGenSteps.front()->GenStep;
+
+ AllocatedGenSteps.pop_front();
+ }
+ if (AllocatedGenSteps.empty()) {
+ NewCollectGenStep = TGenStep{CurrentGen, CurrentStep};
+ }
+ }
+
+ return NewCollectGenStep > LastCollectedGenStep;
+}
+
+THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> TBlobManager::PreparePerGroupGCRequests() {
+ if (!TryMoveGCBarrier()) {
+ return {};
+ }
+
+ PreviousGCTime = AppData()->TimeProvider->Now();
+
+ CollectGenStepInFlight = NewCollectGenStep;
+
+ const ui32 channelIdx = BLOB_CHANNEL;
+
+ // Find the list of groups between LastCollectedGenStep and the new GC GenStep
+ PerGroupGCListsInFlight.clear();
+ {
+ const ui32 fromGen = std::get<0>(LastCollectedGenStep);
+ const ui32 toGen = std::get<0>(CollectGenStepInFlight);
+ const auto& channelHistory = TabletInfo->ChannelInfo(channelIdx)->History;
+ auto fnCmpGen = [](ui32 gen, const auto& historyEntry) {
+ return gen < historyEntry.FromGeneration;
+ };
+ // Look for the entry with FromGeneration <= fromGen whose next entry has FromGeneration > fromGen
+ auto fromIt = std::upper_bound(channelHistory.begin(), channelHistory.end(), fromGen, fnCmpGen);
+ if (fromIt != channelHistory.begin()) {
+ --fromIt;
+ }
+ auto toIt = std::upper_bound(channelHistory.begin(), channelHistory.end(), toGen, fnCmpGen);
+ for (auto it = fromIt; it != toIt; ++it) {
+ ui32 group = it->GroupID;
+ PerGroupGCListsInFlight[group];
+ }
+ }
+
+ // Make per-group Keep/DontKeep lists
+ {
+ // Add all blobs to keep
+ while (!BlobsToKeep.empty()) {
+ auto blobIt = BlobsToKeep.begin();
+ if (TGenStep{blobIt->Generation(), blobIt->Step()} > CollectGenStepInFlight) {
+ break;
+ }
+ ui32 blobGroup = TabletInfo->GroupFor(blobIt->Channel(), blobIt->Generation());
+ PerGroupGCListsInFlight[blobGroup].KeepList.insert(*blobIt);
+ BlobsToKeep.erase(blobIt);
+ }
+
+ // Add all blobs to delete
+ while (!BlobsToDelete.empty()) {
+ auto blobIt = BlobsToDelete.begin();
+ if (TGenStep{blobIt->Generation(), blobIt->Step()} > CollectGenStepInFlight) {
+ break;
+ }
+ ui32 blobGroup = TabletInfo->GroupFor(blobIt->Channel(), blobIt->Generation());
+ bool canSkipDontKeep = false;
+ if (PerGroupGCListsInFlight[blobGroup].KeepList.count(*blobIt)) {
+ // Remove the blob from the keep list if it's also in the delete list
+ PerGroupGCListsInFlight[blobGroup].KeepList.erase(*blobIt);
+ // Skipped blobs still need to be deleted from BlobsToKeep table
+ PerGroupGCListsInFlight[blobGroup].KeepListSkipped.push_back(*blobIt);
+
+ if (CurrentGen == blobIt->Generation()) {
+ // If this blob was created and deleted in the current generation then
+ // we can skip sending both Keep and DontKeep flags.
+ // NOTE: it's not safe to do this for older generations because there is
+ // a scenario where the Keep flag was sent in an old generation and then the tablet restarted
+ // before getting the result and removing the blob from the Keep list.
+ canSkipDontKeep = true;
+ }
+ }
+ if (!canSkipDontKeep) {
+ PerGroupGCListsInFlight[blobGroup].DontKeepList.insert(*blobIt);
+ } else {
+ // Skipped blobs still need to be deleted from BlobsToDelete table
+ PerGroupGCListsInFlight[blobGroup].DontKeepListSkipped.push_back(*blobIt);
+ }
+ BlobsToDelete.erase(blobIt);
+ }
+ }
+
+ // Make per group requests
+ THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> requests;
+ {
+ for (const auto& gl : PerGroupGCListsInFlight) {
+ ui32 group = gl.first;
+ requests[group] = std::make_unique<TEvBlobStorage::TEvCollectGarbage>(
+ TabletInfo->TabletID, CurrentGen, PerGenerationCounter,
+ channelIdx, true,
+ std::get<0>(CollectGenStepInFlight), std::get<1>(CollectGenStepInFlight),
+ new TVector<TLogoBlobID>(gl.second.KeepList.begin(), gl.second.KeepList.end()),
+ new TVector<TLogoBlobID>(gl.second.DontKeepList.begin(), gl.second.DontKeepList.end()),
+ TInstant::Max(), true);
+
+ CounterToGroupInFlight[PerGenerationCounter] = group;
+
+ PerGenerationCounter += requests[group]->PerGenerationCounterStepSize();
+ }
+ }
+
+ return requests;
+}
+
+void TBlobManager::OnGCResult(TEvBlobStorage::TEvCollectGarbageResult::TPtr ev, IBlobManagerDb& db) {
+ Y_VERIFY(ev->Get()->Status == NKikimrProto::OK, "The caller must handle unsuccessful status");
+ Y_VERIFY(!CounterToGroupInFlight.empty());
+ Y_VERIFY(!PerGroupGCListsInFlight.empty());
+
+ // Find the group for this result
+ ui64 counterFromRequest = ev->Get()->PerGenerationCounter;
+ Y_VERIFY(CounterToGroupInFlight.count(counterFromRequest));
+ ui32 group = CounterToGroupInFlight[counterFromRequest];
+
+ auto it = PerGroupGCListsInFlight.find(group);
+ for (const auto& blobId : it->second.KeepList) {
+ db.EraseBlobToKeep(TUnifiedBlobId(group, blobId));
+ }
+
+ for (const auto& blobId : it->second.DontKeepList) {
+ db.EraseBlobToDelete(TUnifiedBlobId(group, blobId));
+ }
+
+ for (const auto& blobId : it->second.KeepListSkipped) {
+ db.EraseBlobToKeep(TUnifiedBlobId(group, blobId));
+ }
+
+ for (const auto& blobId : it->second.DontKeepListSkipped) {
+ db.EraseBlobToDelete(TUnifiedBlobId(group, blobId));
+ }
+
+ ++CountersUpdate.GcRequestsSent;
+ CountersUpdate.BlobKeepEntries += it->second.KeepList.size();
+ CountersUpdate.BlobDontKeepEntries += it->second.DontKeepList.size();
+ // "SkippedBlobs" counter tracks blobs that where excluded from both Keep and DontKeep lists
+ // DontKeepListSkipped contains those blobs; KeepListSkipped contains them too but also some more
+ CountersUpdate.BlobSkippedEntries += it->second.DontKeepListSkipped.size();
+
+ PerGroupGCListsInFlight.erase(it);
+ CounterToGroupInFlight.erase(group);
+
+ // All requests done?
+ if (PerGroupGCListsInFlight.empty()) {
+ LastCollectedGenStep = CollectGenStepInFlight;
+ db.SaveLastGcBarrier(LastCollectedGenStep);
+ }
+
+ PerformDelayedDeletes(db);
+}
+
+TBlobBatch TBlobManager::StartBlobBatch(ui32 channel) {
+ ++CountersUpdate.BatchesStarted;
+ Y_VERIFY(channel == BLOB_CHANNEL, "Support for multiple blob channels is not implemented yet");
+ ++CurrentStep;
TAllocatedGenStepConstPtr genStepRef = new TAllocatedGenStep({CurrentGen, CurrentStep});
- AllocatedGenSteps.push_back(genStepRef);
- auto batchInfo = std::make_unique<TBlobBatch::TBatchInfo>(TabletInfo, genStepRef, channel);
- return TBlobBatch(std::move(batchInfo));
-}
-
-void TBlobManager::SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) {
+ AllocatedGenSteps.push_back(genStepRef);
+ auto batchInfo = std::make_unique<TBlobBatch::TBatchInfo>(TabletInfo, genStepRef, channel);
+ return TBlobBatch(std::move(batchInfo));
+}
+
+void TBlobManager::SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) {
Y_VERIFY(blobBatch.BatchInfo);
- ++CountersUpdate.BatchesCommitted;
- CountersUpdate.BlobsWritten += blobBatch.GetBlobCount();
-
+ ++CountersUpdate.BatchesCommitted;
+ CountersUpdate.BlobsWritten += blobBatch.GetBlobCount();
+
LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID
- << " Save Batch GenStep: " << blobBatch.BatchInfo->Gen << ":" << blobBatch.BatchInfo->Step
- << " Blob count: " << blobBatch.BatchInfo->BlobSizes.size());
-
- // Add this batch to KeepQueue
- for (ui32 i = 0; i < blobBatch.BatchInfo->BlobSizes.size(); ++i) {
- const TUnifiedBlobId blobId = blobBatch.BatchInfo->MakeBlobId(i);
- BlobsToKeep.insert(blobId.GetLogoBlobId());
- db.AddBlobToKeep(blobId);
- }
-
- // Save all small blobs
- for (ui32 i = 0; i < blobBatch.BatchInfo->SmallBlobs.size(); ++i) {
- const TUnifiedBlobId blobId = blobBatch.BatchInfo->MakeSmallBlobId(i);
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Save Small Blob " << blobId);
- db.WriteSmallBlob(blobId, blobBatch.BatchInfo->SmallBlobs[i]);
- ++CountersUpdate.SmallBlobsWritten;
- CountersUpdate.SmallBlobsBytesWritten += blobId.BlobSize();
- }
-
- blobBatch.BatchInfo->GenStepRef.Reset();
-}
-
-void TBlobManager::DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) {
- PerformDelayedDeletes(db);
-
- ++CountersUpdate.BlobsDeleted;
-
- if (blobId.IsSmallBlob()) {
- if (BlobsUseCount.count(blobId) == 0) {
- DeleteSmallBlob(blobId, db);
- } else {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delay Delete Small Blob " << blobId);
- db.AddBlobToDelete(blobId);
- SmallBlobsToDeleteDelayed.insert(blobId);
- }
- return;
- }
-
- // Persist deletion intent
- db.AddBlobToDelete(blobId);
-
- // Check if the deletion needs to be delayed until the blob is no longer
- // used by in-flight requests
- if (BlobsUseCount.count(blobId) == 0) {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Blob " << blobId);
- TLogoBlobID logoBlobId = blobId.GetLogoBlobId();
- BlobsToDelete.insert(logoBlobId);
- NBlobCache::ForgetBlob(blobId);
- } else {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delay Delete Blob " << blobId);
- BlobsToDeleteDelayed.insert(blobId.GetLogoBlobId());
- }
-}
-
-void TBlobManager::DeleteSmallBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Small Blob " << blobId);
- db.EraseSmallBlob(blobId);
- NBlobCache::ForgetBlob(blobId);
- ++CountersUpdate.SmallBlobsDeleted;
- CountersUpdate.SmallBlobsBytesDeleted += blobId.BlobSize();
-}
-
-void TBlobManager::PerformDelayedDeletes(IBlobManagerDb& db) {
- for (const auto& blobId : SmallBlobsToDelete) {
- DeleteSmallBlob(blobId, db);
- db.EraseBlobToDelete(blobId);
- }
- SmallBlobsToDelete.clear();
-}
-
-void TBlobManager::SetBlobInUse(const TUnifiedBlobId& blobId, bool inUse) {
- if (inUse) {
- BlobsUseCount[blobId]++;
- return;
- }
-
- auto useIt = BlobsUseCount.find(blobId);
- Y_VERIFY(useIt != BlobsUseCount.end(), "Trying to un-use an unknown blob %s", blobId.ToStringNew().c_str());
- --useIt->second;
-
- if (useIt->second > 0) {
- // Blob is still in use
- return;
- }
-
- BlobsUseCount.erase(useIt);
-
- // Check if the blob is marked for delayed deletion
- if (blobId.IsSmallBlob()) {
- if (SmallBlobsToDeleteDelayed.count(blobId)) {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delayed Small Blob " << blobId
- << " is no longer in use" );
- SmallBlobsToDeleteDelayed.erase(blobId);
- SmallBlobsToDelete.insert(blobId);
- }
- } else {
- TLogoBlobID logoBlobId = blobId.GetLogoBlobId();
- auto delayedIt = BlobsToDeleteDelayed.find(logoBlobId);
- if (delayedIt != BlobsToDeleteDelayed.end()) {
- LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Delayed Blob " << blobId);
- BlobsToDelete.insert(logoBlobId);
- BlobsToDeleteDelayed.erase(delayedIt);
- }
- }
-
- NBlobCache::ForgetBlob(blobId);
-}
-
+ << " Save Batch GenStep: " << blobBatch.BatchInfo->Gen << ":" << blobBatch.BatchInfo->Step
+ << " Blob count: " << blobBatch.BatchInfo->BlobSizes.size());
+
+ // Add this batch to KeepQueue
+ for (ui32 i = 0; i < blobBatch.BatchInfo->BlobSizes.size(); ++i) {
+ const TUnifiedBlobId blobId = blobBatch.BatchInfo->MakeBlobId(i);
+ BlobsToKeep.insert(blobId.GetLogoBlobId());
+ db.AddBlobToKeep(blobId);
+ }
+
+ // Save all small blobs
+ for (ui32 i = 0; i < blobBatch.BatchInfo->SmallBlobs.size(); ++i) {
+ const TUnifiedBlobId blobId = blobBatch.BatchInfo->MakeSmallBlobId(i);
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Save Small Blob " << blobId);
+ db.WriteSmallBlob(blobId, blobBatch.BatchInfo->SmallBlobs[i]);
+ ++CountersUpdate.SmallBlobsWritten;
+ CountersUpdate.SmallBlobsBytesWritten += blobId.BlobSize();
+ }
+
+ blobBatch.BatchInfo->GenStepRef.Reset();
+}
+
+void TBlobManager::DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) {
+ PerformDelayedDeletes(db);
+
+ ++CountersUpdate.BlobsDeleted;
+
+ if (blobId.IsSmallBlob()) {
+ if (BlobsUseCount.count(blobId) == 0) {
+ DeleteSmallBlob(blobId, db);
+ } else {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delay Delete Small Blob " << blobId);
+ db.AddBlobToDelete(blobId);
+ SmallBlobsToDeleteDelayed.insert(blobId);
+ }
+ return;
+ }
+
+ // Persist deletion intent
+ db.AddBlobToDelete(blobId);
+
+ // Check if the deletion needs to be delayed until the blob is no longer
+ // used by in-flight requests
+ if (BlobsUseCount.count(blobId) == 0) {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Blob " << blobId);
+ TLogoBlobID logoBlobId = blobId.GetLogoBlobId();
+ BlobsToDelete.insert(logoBlobId);
+ NBlobCache::ForgetBlob(blobId);
+ } else {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delay Delete Blob " << blobId);
+ BlobsToDeleteDelayed.insert(blobId.GetLogoBlobId());
+ }
+}
+
+void TBlobManager::DeleteSmallBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Small Blob " << blobId);
+ db.EraseSmallBlob(blobId);
+ NBlobCache::ForgetBlob(blobId);
+ ++CountersUpdate.SmallBlobsDeleted;
+ CountersUpdate.SmallBlobsBytesDeleted += blobId.BlobSize();
+}
+
+void TBlobManager::PerformDelayedDeletes(IBlobManagerDb& db) {
+ for (const auto& blobId : SmallBlobsToDelete) {
+ DeleteSmallBlob(blobId, db);
+ db.EraseBlobToDelete(blobId);
+ }
+ SmallBlobsToDelete.clear();
+}
+
+void TBlobManager::SetBlobInUse(const TUnifiedBlobId& blobId, bool inUse) {
+ if (inUse) {
+ BlobsUseCount[blobId]++;
+ return;
+ }
+
+ auto useIt = BlobsUseCount.find(blobId);
+ Y_VERIFY(useIt != BlobsUseCount.end(), "Trying to un-use an unknown blob %s", blobId.ToStringNew().c_str());
+ --useIt->second;
+
+ if (useIt->second > 0) {
+ // Blob is still in use
+ return;
+ }
+
+ BlobsUseCount.erase(useIt);
+
+ // Check if the blob is marked for delayed deletion
+ if (blobId.IsSmallBlob()) {
+ if (SmallBlobsToDeleteDelayed.count(blobId)) {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delayed Small Blob " << blobId
+ << " is no longer in use" );
+ SmallBlobsToDeleteDelayed.erase(blobId);
+ SmallBlobsToDelete.insert(blobId);
+ }
+ } else {
+ TLogoBlobID logoBlobId = blobId.GetLogoBlobId();
+ auto delayedIt = BlobsToDeleteDelayed.find(logoBlobId);
+ if (delayedIt != BlobsToDeleteDelayed.end()) {
+ LOG_S_DEBUG("BlobManager at tablet " << TabletInfo->TabletID << " Delete Delayed Blob " << blobId);
+ BlobsToDelete.insert(logoBlobId);
+ BlobsToDeleteDelayed.erase(delayedIt);
+ }
+ }
+
+ NBlobCache::ForgetBlob(blobId);
+}
+
}
diff --git a/ydb/core/tx/columnshard/blob_manager.h b/ydb/core/tx/columnshard/blob_manager.h
index 2e1dd6c76b7..fb9814ceac7 100644
--- a/ydb/core/tx/columnshard/blob_manager.h
+++ b/ydb/core/tx/columnshard/blob_manager.h
@@ -1,219 +1,219 @@
-#pragma once
-
-#include "blob.h"
-
+#pragma once
+
+#include "blob.h"
+
#include <ydb/core/tx/columnshard/inflight_request_tracker.h>
#include <ydb/core/tablet_flat/flat_executor.h>
#include <ydb/core/blobstorage/dsproxy/blobstorage_backoff.h>
-
-#include <util/generic/string.h>
-
+
+#include <util/generic/string.h>
+
namespace NKikimr::NColumnShard {
-
-using NOlap::TUnifiedBlobId;
-using NOlap::TBlobRange;
-
-
-// A batch of blobs that are written by a single task.
-// The batch is later saved or discarded as a whole.
-class TBlobBatch : public TMoveOnly {
- friend class TBlobManager;
-
- struct TBatchInfo;
-
- std::unique_ptr<TBatchInfo> BatchInfo;
-
-private:
- explicit TBlobBatch(std::unique_ptr<TBatchInfo> batchInfo);
-
- void SendWriteRequest(const TActorContext& ctx, ui32 groupId, const TLogoBlobID& logoBlobId,
- const TString& data, ui64 cookie, TInstant deadline);
-
-public:
- TBlobBatch();
- TBlobBatch(TBlobBatch&& other);
- TBlobBatch& operator = (TBlobBatch&& other);
- ~TBlobBatch();
-
- // Write new blob as a part of this batch
- TUnifiedBlobId SendWriteBlobRequest(const TString& blobData, TInstant deadline, const TActorContext& ctx);
-
- // Called with the result of WriteBlob request
- void OnBlobWriteResult(TEvBlobStorage::TEvPutResult::TPtr& ev);
-
- // Tells if all WriteBlob requests got corresponding results
- bool AllBlobWritesCompleted() const;
-
- // Number of blobs in the batch
- ui64 GetBlobCount() const;
-
- // Size of all blobs in the batch
- ui64 GetTotalSize() const;
-
- // Small blobs will be saved as rows in SmallBlobs local table when the batch gets saved
- TUnifiedBlobId AddSmallBlob(const TString& data);
-};
-
-class IBlobManagerDb;
-
-// An interface for writing and deleting blobs for the ColumnShard index management.
-// All garbage collection related logic is hidden inside the implementation.
-class IBlobManager {
-protected:
- static constexpr ui32 BLOB_CHANNEL = 2;
-
-public:
- virtual ~IBlobManager() = default;
-
- // Allocates a temporary blob batch with the BlobManager. If the tablet crashes or if
- // this object is destroyed without doing SaveBlobBatch then all blobs in this batch
- // will get garbage-collected.
- virtual TBlobBatch StartBlobBatch(ui32 channel = BLOB_CHANNEL) = 0;
-
- // This method is called in the same transaction in which the user saves references to blobs
- // in some LocalDB table. It tells the BlobManager that the blobs are becoming permanently saved.
- // NOTE: At this point all blob writes must already be acknowledged.
- virtual void SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) = 0;
-
- // Deletes the blob that was previously permanently saved
- virtual void DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) = 0;
-};
-
-// Garbage Collection generation and step
-using TGenStep = std::tuple<ui32, ui32>;
-
-// A ref-counted object to keep track of when the GC barrier can be moved to some step.
-// This means that all needed blobs below this step have been KeepFlag-ed and Ack-ed
-struct TAllocatedGenStep : public TThrRefBase {
- const TGenStep GenStep;
-
- explicit TAllocatedGenStep(const TGenStep& genStep)
- : GenStep(genStep)
- {}
-
- bool Finished() const {
- return RefCount() == 1;
- }
-};
-
+
+using NOlap::TUnifiedBlobId;
+using NOlap::TBlobRange;
+
+
+// A batch of blobs that are written by a single task.
+// The batch is later saved or discarded as a whole.
+class TBlobBatch : public TMoveOnly {
+ friend class TBlobManager;
+
+ struct TBatchInfo;
+
+ std::unique_ptr<TBatchInfo> BatchInfo;
+
+private:
+ explicit TBlobBatch(std::unique_ptr<TBatchInfo> batchInfo);
+
+ void SendWriteRequest(const TActorContext& ctx, ui32 groupId, const TLogoBlobID& logoBlobId,
+ const TString& data, ui64 cookie, TInstant deadline);
+
+public:
+ TBlobBatch();
+ TBlobBatch(TBlobBatch&& other);
+ TBlobBatch& operator = (TBlobBatch&& other);
+ ~TBlobBatch();
+
+ // Write new blob as a part of this batch
+ TUnifiedBlobId SendWriteBlobRequest(const TString& blobData, TInstant deadline, const TActorContext& ctx);
+
+ // Called with the result of WriteBlob request
+ void OnBlobWriteResult(TEvBlobStorage::TEvPutResult::TPtr& ev);
+
+ // Tells if all WriteBlob requests got corresponding results
+ bool AllBlobWritesCompleted() const;
+
+ // Number of blobs in the batch
+ ui64 GetBlobCount() const;
+
+ // Size of all blobs in the batch
+ ui64 GetTotalSize() const;
+
+ // Small blobs will be saved as rows in SmallBlobs local table when the batch gets saved
+ TUnifiedBlobId AddSmallBlob(const TString& data);
+};
+
+class IBlobManagerDb;
+
+// An interface for writing and deleting blobs for the ColumnShard index management.
+// All garbage collection related logic is hidden inside the implementation.
+class IBlobManager {
+protected:
+ static constexpr ui32 BLOB_CHANNEL = 2;
+
+public:
+ virtual ~IBlobManager() = default;
+
+ // Allocates a temporary blob batch with the BlobManager. If the tablet crashes or if
+ // this object is destroyed without doing SaveBlobBatch then all blobs in this batch
+ // will get garbage-collected.
+ virtual TBlobBatch StartBlobBatch(ui32 channel = BLOB_CHANNEL) = 0;
+
+ // This method is called in the same transaction in which the user saves references to blobs
+ // in some LocalDB table. It tells the BlobManager that the blobs are becoming permanently saved.
+ // NOTE: At this point all blob writes must already be acknowledged.
+ virtual void SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) = 0;
+
+ // Deletes the blob that was previously permanently saved
+ virtual void DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) = 0;
+};
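+
+// Write-path sketch (illustrative only, error handling and the surrounding local-db
+// transaction elided; data/deadline/ctx/db are placeholders):
+//
+//   TBlobBatch batch = blobManager.StartBlobBatch();
+//   TUnifiedBlobId id = batch.SendWriteBlobRequest(data, deadline, ctx);
+//   // ... feed every TEvPutResult to batch.OnBlobWriteResult(ev) ...
+//   if (batch.AllBlobWritesCompleted()) {
+//       blobManager.SaveBlobBatch(std::move(batch), db); // same tx as saving blob references
+//   }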
+
+// Garbage Collection generation and step
+using TGenStep = std::tuple<ui32, ui32>;
+
+// A ref-counted object to keep track of when the GC barrier can be moved to some step.
+// This means that all needed blobs below this step have been KeepFlag-ed and Ack-ed
+struct TAllocatedGenStep : public TThrRefBase {
+ const TGenStep GenStep;
+
+ explicit TAllocatedGenStep(const TGenStep& genStep)
+ : GenStep(genStep)
+ {}
+
+ bool Finished() const {
+ return RefCount() == 1;
+ }
+};
+
using TAllocatedGenStepConstPtr = TIntrusiveConstPtr<TAllocatedGenStep>;
-struct TBlobManagerCounters {
- ui64 BatchesStarted = 0;
- ui64 BatchesCommitted = 0;
- // TODO: ui64 BatchesDiscarded = 0; // Can we count them?
- ui64 BlobsWritten = 0;
- ui64 BlobsDeleted = 0;
- ui64 BlobKeepEntries = 0;
- ui64 BlobDontKeepEntries = 0;
- ui64 BlobSkippedEntries = 0;
- ui64 GcRequestsSent = 0;
- ui64 SmallBlobsWritten = 0;
- ui64 SmallBlobsBytesWritten = 0;
- ui64 SmallBlobsDeleted = 0;
- ui64 SmallBlobsBytesDeleted = 0;
-};
-
-// The implementation of BlobManager that hides all GC-related details
-class TBlobManager : public IBlobManager, public IBlobInUseTracker {
-private:
- static constexpr size_t BLOB_COUNT_TO_TRIGGER_GC_DEFAULT = 1000;
- static constexpr ui64 GC_INTERVAL_SECONDS_DEFAULT = 60;
-
-private:
- TIntrusivePtr<TTabletStorageInfo> TabletInfo;
- const ui32 CurrentGen;
- ui32 CurrentStep;
- TControlWrapper BlobCountToTriggerGC;
- TControlWrapper GCIntervalSeconds;
-
- // Lists of blobs that need Keep flag to be set
- TSet<TLogoBlobID> BlobsToKeep;
- // Lists of blobs that need DoNotKeep flag to be set
- TSet<TLogoBlobID> BlobsToDelete;
-
- // List of blobs that are marked for deletion but are still used by in-flight requests
- TSet<TLogoBlobID> BlobsToDeleteDelayed;
-
- // List of small blobs that are marked for deletion but are still used by in-flight requests
- THashSet<TUnifiedBlobId> SmallBlobsToDeleteDelayed;
-
- // List of small blobs that were in-use when DeleteBlob was called and are no longer in-use,
- // so they can now be deleted
- THashSet<TUnifiedBlobId> SmallBlobsToDelete;
-
- // List of blobs that are used by in-flight requests
- THashMap<TUnifiedBlobId, i64> BlobsUseCount;
-
- // Sorted queue of GenSteps that have in-flight BlobBatches
+struct TBlobManagerCounters {
+ ui64 BatchesStarted = 0;
+ ui64 BatchesCommitted = 0;
+ // TODO: ui64 BatchesDiscarded = 0; // Can we count them?
+ ui64 BlobsWritten = 0;
+ ui64 BlobsDeleted = 0;
+ ui64 BlobKeepEntries = 0;
+ ui64 BlobDontKeepEntries = 0;
+ ui64 BlobSkippedEntries = 0;
+ ui64 GcRequestsSent = 0;
+ ui64 SmallBlobsWritten = 0;
+ ui64 SmallBlobsBytesWritten = 0;
+ ui64 SmallBlobsDeleted = 0;
+ ui64 SmallBlobsBytesDeleted = 0;
+};
+
+// The implementation of BlobManager that hides all GC-related details
+class TBlobManager : public IBlobManager, public IBlobInUseTracker {
+private:
+ static constexpr size_t BLOB_COUNT_TO_TRIGGER_GC_DEFAULT = 1000;
+ static constexpr ui64 GC_INTERVAL_SECONDS_DEFAULT = 60;
+
+private:
+ TIntrusivePtr<TTabletStorageInfo> TabletInfo;
+ const ui32 CurrentGen;
+ ui32 CurrentStep;
+ TControlWrapper BlobCountToTriggerGC;
+ TControlWrapper GCIntervalSeconds;
+
+ // Lists of blobs that need Keep flag to be set
+ TSet<TLogoBlobID> BlobsToKeep;
+ // Lists of blobs that need DoNotKeep flag to be set
+ TSet<TLogoBlobID> BlobsToDelete;
+
+ // List of blobs that are marked for deletion but are still used by in-flight requests
+ TSet<TLogoBlobID> BlobsToDeleteDelayed;
+
+ // List of small blobs that are marked for deletion but are still used by in-flight requests
+ THashSet<TUnifiedBlobId> SmallBlobsToDeleteDelayed;
+
+ // List of small blobs that were in-use when DeleteBlob was called and are no longer in-use,
+ // so they can now be deleted
+ THashSet<TUnifiedBlobId> SmallBlobsToDelete;
+
+ // List of blobs that are used by in-flight requests
+ THashMap<TUnifiedBlobId, i64> BlobsUseCount;
+
+ // Sorted queue of GenSteps that have in-flight BlobBatches
TDeque<TAllocatedGenStepConstPtr> AllocatedGenSteps;
-
- // The Gen:Step that has been acknowledged by the Distributed Storage
- TGenStep LastCollectedGenStep = {0, 0};
-
- // The Gen:Step where GC barrier can be moved
- TGenStep NewCollectGenStep = {0, 0};
-
- // Distributed Storage requires a monotonically increasing counter for GC requests
- ui64 PerGenerationCounter = 1;
-
- // GC requests that are currently in-flight: they have been
- // sent to Distributed Storage and we are waiting for the results
- struct TGCLists {
- THashSet<TLogoBlobID> KeepList;
- THashSet<TLogoBlobID> DontKeepList;
- TVector<TLogoBlobID> KeepListSkipped; // List of blobs excluded from Keep list for optimization
- TVector<TLogoBlobID> DontKeepListSkipped; // List of blobs excluded from both Keep/DontKeep lists
- // NOTE: skipped blobs still need to be removed from local db after GC request completes
- };
- THashMap<ui32, TGCLists> PerGroupGCListsInFlight;
- // Maps PerGenerationCounter value to the group in PerGroupGCListsInFlight
- THashMap<ui64, ui32> CounterToGroupInFlight;
- // The barrier in the current in-flight GC request(s)
- TGenStep CollectGenStepInFlight = {0, 0};
-
- // Stores counter updates since last call to GetCountersUpdate()
- // Then the counters are reset and start accumulating new delta
- TBlobManagerCounters CountersUpdate;
-
- TInstant PreviousGCTime; // Used for delaying next GC if there are too few blobs to collect
-
-public:
- TBlobManager(TIntrusivePtr<TTabletStorageInfo> tabletInfo, ui32 gen);
-
- void RegisterControls(NKikimr::TControlBoard& icb);
-
- // Loads the state at startup
- bool LoadState(IBlobManagerDb& db);
-
- // Checks if GC barrier can be moved. Updates NewCollectGenStep if possible.
- bool TryMoveGCBarrier();
-
- // Prepares Keep/DontKeep lists and GC barrier
- THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> PreparePerGroupGCRequests();
-
- // Called with GC result received from Distributed Storage
- void OnGCResult(TEvBlobStorage::TEvCollectGarbageResult::TPtr ev, IBlobManagerDb& db);
-
- TBlobManagerCounters GetCountersUpdate() {
- TBlobManagerCounters res = CountersUpdate;
- CountersUpdate = TBlobManagerCounters();
- return res;
- }
-
- // Implementation of IBlobManager interface
- TBlobBatch StartBlobBatch(ui32 channel = BLOB_CHANNEL) override;
- void SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) override;
- void DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) override;
-
- // Implementation of IBlobInUseTracker
- void SetBlobInUse(const TUnifiedBlobId& blobId, bool inUse) override;
-
-private:
- void DeleteSmallBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db);
-
- // Delete small blobs that were previously in use and could not be deleted
- void PerformDelayedDeletes(IBlobManagerDb& db);
-};
-
+
+ // The Gen:Step that has been acknowledged by the Distributed Storage
+ TGenStep LastCollectedGenStep = {0, 0};
+
+ // The Gen:Step where GC barrier can be moved
+ TGenStep NewCollectGenStep = {0, 0};
+
+ // Distributed Storage requires a monotonically increasing counter for GC requests
+ ui64 PerGenerationCounter = 1;
+
+ // GC requests that are currently in-flight: they have been
+ // sent to Distributed Storage and we are waiting for the results
+ struct TGCLists {
+ THashSet<TLogoBlobID> KeepList;
+ THashSet<TLogoBlobID> DontKeepList;
+ TVector<TLogoBlobID> KeepListSkipped; // List of blobs excluded from Keep list for optimization
+ TVector<TLogoBlobID> DontKeepListSkipped; // List of blobs excluded from both Keep/DontKeep lists
+ // NOTE: skipped blobs still need to be removed from local db after GC request completes
+ };
+ THashMap<ui32, TGCLists> PerGroupGCListsInFlight;
+ // Maps PerGenerationCounter value to the group in PerGroupGCListsInFlight
+ THashMap<ui64, ui32> CounterToGroupInFlight;
+ // The barrier in the current in-flight GC request(s)
+ TGenStep CollectGenStepInFlight = {0, 0};
+
+ // Stores counter updates since last call to GetCountersUpdate()
+ // Then the counters are reset and start accumulating new delta
+ TBlobManagerCounters CountersUpdate;
+
+ TInstant PreviousGCTime; // Used for delaying next GC if there are too few blobs to collect
+
+public:
+ TBlobManager(TIntrusivePtr<TTabletStorageInfo> tabletInfo, ui32 gen);
+
+ void RegisterControls(NKikimr::TControlBoard& icb);
+
+ // Loads the state at startup
+ bool LoadState(IBlobManagerDb& db);
+
+ // Checks if GC barrier can be moved. Updates NewCollectGenStep if possible.
+ bool TryMoveGCBarrier();
+
+ // Prepares Keep/DontKeep lists and GC barrier
+ THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> PreparePerGroupGCRequests();
+
+ // Called with GC result received from Distributed Storage
+ void OnGCResult(TEvBlobStorage::TEvCollectGarbageResult::TPtr ev, IBlobManagerDb& db);
+
+ TBlobManagerCounters GetCountersUpdate() {
+ TBlobManagerCounters res = CountersUpdate;
+ CountersUpdate = TBlobManagerCounters();
+ return res;
+ }
+
+ // Implementation of IBlobManager interface
+ TBlobBatch StartBlobBatch(ui32 channel = BLOB_CHANNEL) override;
+ void SaveBlobBatch(TBlobBatch&& blobBatch, IBlobManagerDb& db) override;
+ void DeleteBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db) override;
+
+ // Implementation of IBlobInUseTracker
+ void SetBlobInUse(const TUnifiedBlobId& blobId, bool inUse) override;
+
+private:
+ void DeleteSmallBlob(const TUnifiedBlobId& blobId, IBlobManagerDb& db);
+
+ // Delete small blobs that were previously in use and could not be deleted
+ void PerformDelayedDeletes(IBlobManagerDb& db);
+};
+
}
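
The header above defines a two-phase write protocol: open a batch with StartBlobBatch(), issue the writes, and, once every write has been acknowledged, persist the batch with SaveBlobBatch() inside the same local-db transaction that stores the blob references. A minimal sketch of that flow, using only the declarations shown above; the two helper functions, the deadline value and the omitted error handling are assumptions for illustration:

#include "blob_manager.h"

using namespace NKikimr::NColumnShard;

// Phase 1: open a temporary batch and issue the writes.
// If the batch is destroyed before SaveBlobBatch(), its blobs are garbage-collected.
TBlobBatch StartWrites(IBlobManager& blobManager, const TVector<TString>& payloads,
                       TInstant deadline, const TActorContext& ctx) {
    TBlobBatch batch = blobManager.StartBlobBatch();
    for (const TString& data : payloads) {
        batch.SendWriteBlobRequest(data, deadline, ctx);  // returns the TUnifiedBlobId to reference later
    }
    return batch;
}

// Phase 2: after every TEvPutResult has been passed to batch.OnBlobWriteResult(ev),
// persist the batch in the transaction that also saves the blob references.
void CommitWrites(IBlobManager& blobManager, TBlobBatch&& batch, IBlobManagerDb& db) {
    Y_VERIFY(batch.AllBlobWritesCompleted());
    blobManager.SaveBlobBatch(std::move(batch), db);
}
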
diff --git a/ydb/core/tx/columnshard/blob_manager_db.cpp b/ydb/core/tx/columnshard/blob_manager_db.cpp
index 10f8c0a2568..839c179f543 100644
--- a/ydb/core/tx/columnshard/blob_manager_db.cpp
+++ b/ydb/core/tx/columnshard/blob_manager_db.cpp
@@ -1,109 +1,109 @@
-#include "blob_manager_db.h"
-#include "blob_manager.h"
-#include "columnshard_schema.h"
-
+#include "blob_manager_db.h"
+#include "blob_manager.h"
+#include "columnshard_schema.h"
+
namespace NKikimr::NColumnShard {
-
-bool TBlobManagerDb::LoadLastGcBarrier(TGenStep& lastCollectedGenStep) {
- NIceDb::TNiceDb db(Database);
- ui64 gen = 0;
- ui64 step = 0;
- if (!Schema::GetSpecialValue(db, Schema::EValueIds::LastGcBarrierGen, gen) ||
- !Schema::GetSpecialValue(db, Schema::EValueIds::LastGcBarrierStep, step))
- {
- return false;
- }
- lastCollectedGenStep = {gen, step};
- return true;
-}
-
-void TBlobManagerDb::SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) {
- NIceDb::TNiceDb db(Database);
- Schema::SaveSpecialValue(db, Schema::EValueIds::LastGcBarrierGen, std::get<0>(lastCollectedGenStep));
- Schema::SaveSpecialValue(db, Schema::EValueIds::LastGcBarrierStep, std::get<1>(lastCollectedGenStep));
-}
-
-bool TBlobManagerDb::LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
- const NOlap::IBlobGroupSelector* dsGroupSelector)
-{
- blobsToKeep.clear();
- blobsToDelete.clear();
-
- NIceDb::TNiceDb db(Database);
-
- {
- auto rowset = db.Table<Schema::BlobsToKeep>().Select();
- if (!rowset.IsReady())
- return false;
-
- TString error;
-
- while (!rowset.EndOfSet()) {
- const TString blobIdStr = rowset.GetValue<Schema::BlobsToKeep::BlobId>();
- TUnifiedBlobId unifiedBlobId = TUnifiedBlobId::ParseFromString(blobIdStr, dsGroupSelector, error);
- Y_VERIFY(unifiedBlobId.IsValid(), "%s", error.c_str());
-
- blobsToKeep.push_back(unifiedBlobId);
- if (!rowset.Next())
- return false;
- }
- }
-
- {
- auto rowset = db.Table<Schema::BlobsToDelete>().Select();
- if (!rowset.IsReady())
- return false;
-
- TString error;
-
- while (!rowset.EndOfSet()) {
- const TString blobIdStr = rowset.GetValue<Schema::BlobsToDelete::BlobId>();
- TUnifiedBlobId unifiedBlobId = TUnifiedBlobId::ParseFromString(blobIdStr, dsGroupSelector, error);
- Y_VERIFY(unifiedBlobId.IsValid(), "%s", error.c_str());
- blobsToDelete.push_back(unifiedBlobId);
- if (!rowset.Next())
- return false;
- }
- }
-
- return true;
-}
-
-void TBlobManagerDb::AddBlobToKeep(const TUnifiedBlobId& blobId) {
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringLegacy()).Update();
-}
-
-void TBlobManagerDb::EraseBlobToKeep(const TUnifiedBlobId& blobId) {
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringLegacy()).Delete();
- db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringNew()).Delete();
-}
-
-void TBlobManagerDb::AddBlobToDelete(const TUnifiedBlobId& blobId) {
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringLegacy()).Update();
-}
-
-void TBlobManagerDb::EraseBlobToDelete(const TUnifiedBlobId& blobId) {
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringLegacy()).Delete();
- db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringNew()).Delete();
-}
-
-void TBlobManagerDb::WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) {
- Y_VERIFY(blobId.IsSmallBlob());
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::SmallBlobs>().Key(blobId.ToStringNew()).Update(
- NIceDb::TUpdate<Schema::SmallBlobs::Data>(data)
- );
-}
-
-void TBlobManagerDb::EraseSmallBlob(const TUnifiedBlobId& blobId) {
- Y_VERIFY(blobId.IsSmallBlob());
- NIceDb::TNiceDb db(Database);
- db.Table<Schema::SmallBlobs>().Key(blobId.ToStringLegacy()).Delete();
- db.Table<Schema::SmallBlobs>().Key(blobId.ToStringNew()).Delete();
-}
-
+
+bool TBlobManagerDb::LoadLastGcBarrier(TGenStep& lastCollectedGenStep) {
+ NIceDb::TNiceDb db(Database);
+ ui64 gen = 0;
+ ui64 step = 0;
+ if (!Schema::GetSpecialValue(db, Schema::EValueIds::LastGcBarrierGen, gen) ||
+ !Schema::GetSpecialValue(db, Schema::EValueIds::LastGcBarrierStep, step))
+ {
+ return false;
+ }
+ lastCollectedGenStep = {gen, step};
+ return true;
+}
+
+void TBlobManagerDb::SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) {
+ NIceDb::TNiceDb db(Database);
+ Schema::SaveSpecialValue(db, Schema::EValueIds::LastGcBarrierGen, std::get<0>(lastCollectedGenStep));
+ Schema::SaveSpecialValue(db, Schema::EValueIds::LastGcBarrierStep, std::get<1>(lastCollectedGenStep));
+}
+
+bool TBlobManagerDb::LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
+ const NOlap::IBlobGroupSelector* dsGroupSelector)
+{
+ blobsToKeep.clear();
+ blobsToDelete.clear();
+
+ NIceDb::TNiceDb db(Database);
+
+ {
+ auto rowset = db.Table<Schema::BlobsToKeep>().Select();
+ if (!rowset.IsReady())
+ return false;
+
+ TString error;
+
+ while (!rowset.EndOfSet()) {
+ const TString blobIdStr = rowset.GetValue<Schema::BlobsToKeep::BlobId>();
+ TUnifiedBlobId unifiedBlobId = TUnifiedBlobId::ParseFromString(blobIdStr, dsGroupSelector, error);
+ Y_VERIFY(unifiedBlobId.IsValid(), "%s", error.c_str());
+
+ blobsToKeep.push_back(unifiedBlobId);
+ if (!rowset.Next())
+ return false;
+ }
+ }
+
+ {
+ auto rowset = db.Table<Schema::BlobsToDelete>().Select();
+ if (!rowset.IsReady())
+ return false;
+
+ TString error;
+
+ while (!rowset.EndOfSet()) {
+ const TString blobIdStr = rowset.GetValue<Schema::BlobsToDelete::BlobId>();
+ TUnifiedBlobId unifiedBlobId = TUnifiedBlobId::ParseFromString(blobIdStr, dsGroupSelector, error);
+ Y_VERIFY(unifiedBlobId.IsValid(), "%s", error.c_str());
+ blobsToDelete.push_back(unifiedBlobId);
+ if (!rowset.Next())
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void TBlobManagerDb::AddBlobToKeep(const TUnifiedBlobId& blobId) {
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringLegacy()).Update();
+}
+
+void TBlobManagerDb::EraseBlobToKeep(const TUnifiedBlobId& blobId) {
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringLegacy()).Delete();
+ db.Table<Schema::BlobsToKeep>().Key(blobId.ToStringNew()).Delete();
+}
+
+void TBlobManagerDb::AddBlobToDelete(const TUnifiedBlobId& blobId) {
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringLegacy()).Update();
+}
+
+void TBlobManagerDb::EraseBlobToDelete(const TUnifiedBlobId& blobId) {
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringLegacy()).Delete();
+ db.Table<Schema::BlobsToDelete>().Key(blobId.ToStringNew()).Delete();
+}
+
+void TBlobManagerDb::WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) {
+ Y_VERIFY(blobId.IsSmallBlob());
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::SmallBlobs>().Key(blobId.ToStringNew()).Update(
+ NIceDb::TUpdate<Schema::SmallBlobs::Data>(data)
+ );
+}
+
+void TBlobManagerDb::EraseSmallBlob(const TUnifiedBlobId& blobId) {
+ Y_VERIFY(blobId.IsSmallBlob());
+ NIceDb::TNiceDb db(Database);
+ db.Table<Schema::SmallBlobs>().Key(blobId.ToStringLegacy()).Delete();
+ db.Table<Schema::SmallBlobs>().Key(blobId.ToStringNew()).Delete();
+}
+
}
diff --git a/ydb/core/tx/columnshard/blob_manager_db.h b/ydb/core/tx/columnshard/blob_manager_db.h
index 6bf2e715a22..0ffae5f4724 100644
--- a/ydb/core/tx/columnshard/blob_manager_db.h
+++ b/ydb/core/tx/columnshard/blob_manager_db.h
@@ -1,52 +1,52 @@
-#pragma once
-#include "defs.h"
-
-#include "blob_manager.h"
-
+#pragma once
+#include "defs.h"
+
+#include "blob_manager.h"
+
namespace NKikimr::NTable {
-class TDatabase;
-}
-
+class TDatabase;
+}
+
namespace NKikimr::NColumnShard {
-
-class IBlobManagerDb {
-public:
- virtual ~IBlobManagerDb() = default;
-
- virtual bool LoadLastGcBarrier(TGenStep& lastCollectedGenStep) = 0;
- virtual void SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) = 0;
-
- virtual bool LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
- const NOlap::IBlobGroupSelector* dsGroupSelector) = 0;
- virtual void AddBlobToKeep(const TUnifiedBlobId& blobId) = 0;
- virtual void EraseBlobToKeep(const TUnifiedBlobId& blobId) = 0;
- virtual void AddBlobToDelete(const TUnifiedBlobId& blobId) = 0;
- virtual void EraseBlobToDelete(const TUnifiedBlobId& blobId) = 0;
- virtual void WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) = 0;
- virtual void EraseSmallBlob(const TUnifiedBlobId& blobId) = 0;
-};
-
-
-class TBlobManagerDb : public IBlobManagerDb {
-public:
- explicit TBlobManagerDb(NTable::TDatabase& db)
- : Database(db)
- {}
-
- bool LoadLastGcBarrier(TGenStep& lastCollectedGenStep) override;
- void SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) override;
-
- bool LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
- const NOlap::IBlobGroupSelector* dsGroupSelector) override;
- void AddBlobToKeep(const TUnifiedBlobId& blobId) override;
- void EraseBlobToKeep(const TUnifiedBlobId& blobId) override;
- void AddBlobToDelete(const TUnifiedBlobId& blobId) override;
- void EraseBlobToDelete(const TUnifiedBlobId& blobId) override;
- void WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) override;
- void EraseSmallBlob(const TUnifiedBlobId& blobId) override;
-
-private:
- NTable::TDatabase& Database;
-};
-
+
+class IBlobManagerDb {
+public:
+ virtual ~IBlobManagerDb() = default;
+
+ virtual bool LoadLastGcBarrier(TGenStep& lastCollectedGenStep) = 0;
+ virtual void SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) = 0;
+
+ virtual bool LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
+ const NOlap::IBlobGroupSelector* dsGroupSelector) = 0;
+ virtual void AddBlobToKeep(const TUnifiedBlobId& blobId) = 0;
+ virtual void EraseBlobToKeep(const TUnifiedBlobId& blobId) = 0;
+ virtual void AddBlobToDelete(const TUnifiedBlobId& blobId) = 0;
+ virtual void EraseBlobToDelete(const TUnifiedBlobId& blobId) = 0;
+ virtual void WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) = 0;
+ virtual void EraseSmallBlob(const TUnifiedBlobId& blobId) = 0;
+};
+
+
+class TBlobManagerDb : public IBlobManagerDb {
+public:
+ explicit TBlobManagerDb(NTable::TDatabase& db)
+ : Database(db)
+ {}
+
+ bool LoadLastGcBarrier(TGenStep& lastCollectedGenStep) override;
+ void SaveLastGcBarrier(const TGenStep& lastCollectedGenStep) override;
+
+ bool LoadLists(TVector<TUnifiedBlobId>& blobsToKeep, TVector<TUnifiedBlobId>& blobsToDelete,
+ const NOlap::IBlobGroupSelector* dsGroupSelector) override;
+ void AddBlobToKeep(const TUnifiedBlobId& blobId) override;
+ void EraseBlobToKeep(const TUnifiedBlobId& blobId) override;
+ void AddBlobToDelete(const TUnifiedBlobId& blobId) override;
+ void EraseBlobToDelete(const TUnifiedBlobId& blobId) override;
+ void WriteSmallBlob(const TUnifiedBlobId& blobId, const TString& data) override;
+ void EraseSmallBlob(const TUnifiedBlobId& blobId) override;
+
+private:
+ NTable::TDatabase& Database;
+};
+
}
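
TBlobManagerDb above is a thin adapter over the tablet's local database: each transaction constructs it on its own NTable::TDatabase (txc.DB) and uses it only within that transaction, which is the pattern the ColumnShard transactions in the following files rely on. A condensed sketch, assuming a hypothetical flat-executor transaction body; returning false follows the executor's convention of re-running the transaction once the required pages are loaded:

#include "blob_manager_db.h"

// Hypothetical flat-executor transaction body, showing how TBlobManagerDb is scoped to txc.DB.
bool ExecuteSketch(NTabletFlatExecutor::TTransactionContext& txc) {
    NKikimr::NColumnShard::TBlobManagerDb blobManagerDb(txc.DB);

    NKikimr::NColumnShard::TGenStep lastGcBarrier;
    if (!blobManagerDb.LoadLastGcBarrier(lastGcBarrier)) {
        return false;  // required pages are not in memory yet; the executor re-runs the transaction
    }

    // Mutations go through the same object so that they land in this transaction:
    // blobManagerDb.AddBlobToKeep(blobId);
    // blobManagerDb.AddBlobToDelete(blobId);
    return true;
}
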
diff --git a/ydb/core/tx/columnshard/blob_manager_txs.cpp b/ydb/core/tx/columnshard/blob_manager_txs.cpp
index 33f3dbf93e9..ed9e49a8aa4 100644
--- a/ydb/core/tx/columnshard/blob_manager_txs.cpp
+++ b/ydb/core/tx/columnshard/blob_manager_txs.cpp
@@ -1,82 +1,82 @@
-#include "defs.h"
-#include "columnshard_impl.h"
-#include "blob_manager.h"
-#include "blob_manager_db.h"
-
+#include "defs.h"
+#include "columnshard_impl.h"
+#include "blob_manager.h"
+#include "blob_manager_db.h"
+
#include <ydb/core/base/blobstorage.h>
-
+
namespace NKikimr::NColumnShard {
-
-// Run GC related logic of the BlobManager
-class TTxRunGC : public NTabletFlatExecutor::TTransactionBase<TColumnShard> {
- THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> Requests;
-public:
- TTxRunGC(TColumnShard* self)
- : TBase(self)
- {}
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(txc);
- Y_UNUSED(ctx);
-
- Requests = Self->BlobManager->PreparePerGroupGCRequests();
-
- return true;
- }
-
- void Complete(const TActorContext& ctx) override {
- for (auto& r : Requests) {
- ui32 groupId = r.first;
- auto ev = std::move(r.second);
+
+// Run GC related logic of the BlobManager
+class TTxRunGC : public NTabletFlatExecutor::TTransactionBase<TColumnShard> {
+ THashMap<ui32, std::unique_ptr<TEvBlobStorage::TEvCollectGarbage>> Requests;
+public:
+ TTxRunGC(TColumnShard* self)
+ : TBase(self)
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(txc);
+ Y_UNUSED(ctx);
+
+ Requests = Self->BlobManager->PreparePerGroupGCRequests();
+
+ return true;
+ }
+
+ void Complete(const TActorContext& ctx) override {
+ for (auto& r : Requests) {
+ ui32 groupId = r.first;
+ auto ev = std::move(r.second);
LOG_S_DEBUG("BlobManager at tablet " << Self->TabletID()
- << " Sending GC to group " << groupId << ": " << ev->Print(true));
-
- SendToBSProxy(ctx, groupId, ev.release());
- }
- }
-};
-
-ITransaction* TColumnShard::CreateTxRunGc() {
- return new TTxRunGC(this);
-}
-
-
-// Update the BlobManager with the GC result
-class TTxProcessGCResult : public NTabletFlatExecutor::TTransactionBase<TColumnShard> {
- TEvBlobStorage::TEvCollectGarbageResult::TPtr Ev;
-public:
- TTxProcessGCResult(TColumnShard* self, TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev)
- : TBase(self)
- , Ev(ev)
- {}
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- if (Ev->Get()->Status != NKikimrProto::OK) {
- LOG_S_WARN("BlobManager at tablet " << Self->TabletID()
- << " GC Failed: " << Ev->Get()->Print(true));
- Self->BecomeBroken(ctx);
- return true;
- }
-
+ << " Sending GC to group " << groupId << ": " << ev->Print(true));
+
+ SendToBSProxy(ctx, groupId, ev.release());
+ }
+ }
+};
+
+ITransaction* TColumnShard::CreateTxRunGc() {
+ return new TTxRunGC(this);
+}
+
+
+// Update the BlobManager with the GC result
+class TTxProcessGCResult : public NTabletFlatExecutor::TTransactionBase<TColumnShard> {
+ TEvBlobStorage::TEvCollectGarbageResult::TPtr Ev;
+public:
+ TTxProcessGCResult(TColumnShard* self, TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev)
+ : TBase(self)
+ , Ev(ev)
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ if (Ev->Get()->Status != NKikimrProto::OK) {
+ LOG_S_WARN("BlobManager at tablet " << Self->TabletID()
+ << " GC Failed: " << Ev->Get()->Print(true));
+ Self->BecomeBroken(ctx);
+ return true;
+ }
+
LOG_S_DEBUG("BlobManager at tablet " << Self->TabletID()
- << " GC Result: " << Ev->Get()->Print(true));
-
- // Update Keep/DontKeep lists and last GC barrier
- TBlobManagerDb blobManagerDb(txc.DB);
- Self->BlobManager->OnGCResult(Ev, blobManagerDb);
- return true;
- }
-
- void Complete(const TActorContext& ctx) override {
- // Schedule next GC
- if (Self->BlobManager->TryMoveGCBarrier()) {
- Self->Execute(Self->CreateTxRunGc(), ctx);
- }
- }
-};
-
-void TColumnShard::Handle(TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxProcessGCResult(this, ev), ctx);
-}
-
+ << " GC Result: " << Ev->Get()->Print(true));
+
+ // Update Keep/DontKeep lists and last GC barrier
+ TBlobManagerDb blobManagerDb(txc.DB);
+ Self->BlobManager->OnGCResult(Ev, blobManagerDb);
+ return true;
+ }
+
+ void Complete(const TActorContext& ctx) override {
+ // Schedule next GC
+ if (Self->BlobManager->TryMoveGCBarrier()) {
+ Self->Execute(Self->CreateTxRunGc(), ctx);
+ }
+ }
+};
+
+void TColumnShard::Handle(TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev, const TActorContext& ctx) {
+ Execute(new TTxProcessGCResult(this, ev), ctx);
+}
+
}
diff --git a/ydb/core/tx/columnshard/columnshard.cpp b/ydb/core/tx/columnshard/columnshard.cpp
index 5704616aa49..cc4017b5bc6 100644
--- a/ydb/core/tx/columnshard/columnshard.cpp
+++ b/ydb/core/tx/columnshard/columnshard.cpp
@@ -11,15 +11,15 @@ IActor* CreateColumnShard(const TActorId& tablet, TTabletStorageInfo* info) {
namespace NKikimr::NColumnShard {
-IActor* CreateIndexingActor(ui64 tabletId, const TActorId& parent);
-IActor* CreateCompactionActor(ui64 tabletId, const TActorId& parent);
-IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
- const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
+IActor* CreateIndexingActor(ui64 tabletId, const TActorId& parent);
+IActor* CreateCompactionActor(ui64 tabletId, const TActorId& parent);
+IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
+ const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
TAutoPtr<TEvColumnShard::TEvWrite> ev, const TInstant& deadline = TInstant::Max());
-IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
- const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
+IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
+ const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
TAutoPtr<TEvPrivate::TEvWriteIndex> ev, const TInstant& deadline = TInstant::Max());
-IActor* CreateColumnShardScan(const TActorId& scanComputeActor, ui32 scanId, ui64 txId);
+IActor* CreateColumnShardScan(const TActorId& scanComputeActor, ui32 scanId, ui64 txId);
void TColumnShard::BecomeBroken(const TActorContext& ctx)
{
@@ -32,21 +32,21 @@ void TColumnShard::BecomeBroken(const TActorContext& ctx)
void TColumnShard::SwitchToWork(const TActorContext& ctx) {
Become(&TThis::StateWork);
LOG_S_INFO("Switched to work at " << TabletID() << " actor " << ctx.SelfID);
- IndexingActor = ctx.Register(CreateIndexingActor(TabletID(), ctx.SelfID));
- CompactionActor = ctx.Register(CreateCompactionActor(TabletID(), ctx.SelfID));
+ IndexingActor = ctx.Register(CreateIndexingActor(TabletID(), ctx.SelfID));
+ CompactionActor = ctx.Register(CreateCompactionActor(TabletID(), ctx.SelfID));
SignalTabletActive(ctx);
}
void TColumnShard::OnActivateExecutor(const TActorContext& ctx) {
LOG_S_DEBUG("OnActivateExecutor at " << TabletID() << " actor " << ctx.SelfID);
- Executor()->RegisterExternalTabletCounters(TabletCountersPtr.release());
- BlobManager = std::make_unique<TBlobManager>(Info(), Executor()->Generation());
-
+ Executor()->RegisterExternalTabletCounters(TabletCountersPtr.release());
+ BlobManager = std::make_unique<TBlobManager>(Info(), Executor()->Generation());
+
auto& icb = *AppData(ctx)->Icb;
BlobManager->RegisterControls(icb);
Limits.RegisterControls(icb);
CompactionLimits.RegisterControls(icb);
- Settings.RegisterControls(icb);
+ Settings.RegisterControls(icb);
Execute(CreateTxInitSchema(), ctx);
}
@@ -108,11 +108,11 @@ void TColumnShard::Handle(TEvColumnShard::TEvWrite::TPtr& ev, const TActorContex
auto& data = Proto(ev->Get()).GetData();
const ui64 tableId = ev->Get()->Record.GetTableId();
- bool error = data.empty() || data.size() > TLimits::MAX_BLOB_SIZE || !PrimaryIndex || !IsTableWritable(tableId)
- || ev->Get()->PutStatus == NKikimrProto::ERROR;
+ bool error = data.empty() || data.size() > TLimits::MAX_BLOB_SIZE || !PrimaryIndex || !IsTableWritable(tableId)
+ || ev->Get()->PutStatus == NKikimrProto::ERROR;
if (error) {
- LOG_S_WARN("Write (fail) " << data.size() << " bytes at tablet " << TabletID());
+ LOG_S_WARN("Write (fail) " << data.size() << " bytes at tablet " << TabletID());
ev->Get()->PutStatus = NKikimrProto::ERROR;
Execute(new TTxWrite(this, ev), ctx);
@@ -135,10 +135,10 @@ void TColumnShard::Handle(TEvColumnShard::TEvWrite::TPtr& ev, const TActorContex
} else {
LOG_S_DEBUG("Write (blob) " << data.size() << " bytes at tablet " << TabletID());
- ev->Get()->MaxSmallBlobSize = Settings.MaxSmallBlobSize;
-
+ ev->Get()->MaxSmallBlobSize = Settings.MaxSmallBlobSize;
+
ctx.Register(CreateWriteActor(TabletID(), PrimaryIndex->GetIndexInfo(), ctx.SelfID,
- BlobManager->StartBlobBatch(), Settings.BlobWriteGrouppingEnabled, ev->Release()));
+ BlobManager->StartBlobBatch(), Settings.BlobWriteGrouppingEnabled, ev->Release()));
}
}
}
@@ -177,7 +177,7 @@ void TColumnShard::Handle(TEvPrivate::TEvWriteIndex::TPtr& ev, const TActorConte
Y_VERIFY(!blobs.empty());
ctx.Register(CreateWriteActor(TabletID(), NOlap::TIndexInfo("dummy", 0), ctx.SelfID,
- BlobManager->StartBlobBatch(), Settings.BlobWriteGrouppingEnabled, ev->Release()));
+ BlobManager->StartBlobBatch(), Settings.BlobWriteGrouppingEnabled, ev->Release()));
}
} else {
if (ev->Get()->PutStatus == NKikimrProto::OK) {
@@ -191,7 +191,7 @@ void TColumnShard::Handle(TEvPrivate::TEvWriteIndex::TPtr& ev, const TActorConte
}
}
-void TColumnShard::Handle(TEvColumnShard::TEvScan::TPtr& ev, const TActorContext& ctx) {
+void TColumnShard::Handle(TEvColumnShard::TEvScan::TPtr& ev, const TActorContext& ctx) {
const auto* msg = ev->Get();
ui64 txId = msg->Record.GetTxId();
const auto& snapshot = msg->Record.GetSnapshot();
@@ -207,21 +207,21 @@ void TColumnShard::Handle(TEvColumnShard::TEvScan::TPtr& ev, const TActorContext
ScanTxInFlight.insert({txId, TAppData::TimeProvider->Now()});
SetCounter(COUNTER_SCAN_IN_FLY, ScanTxInFlight.size());
- Execute(new TTxScan(this, ev), ctx);
-}
-
-void TColumnShard::Handle(TEvPrivate::TEvScanStats::TPtr& ev, const TActorContext &ctx) {
- Y_UNUSED(ctx);
-
- IncCounter(COUNTER_SCANNED_ROWS, ev->Get()->Rows);
- IncCounter(COUNTER_SCANNED_BYTES, ev->Get()->Bytes);
-}
-
-void TColumnShard::Handle(TEvPrivate::TEvReadFinished::TPtr& ev, const TActorContext &ctx) {
- Y_UNUSED(ctx);
- ui64 readCookie = ev->Get()->RequestCookie;
- LOG_S_DEBUG("Finished read cookie: " << readCookie << " at tablet " << TabletID());
- InFlightReadsTracker.RemoveInFlightRequest(ev->Get()->RequestCookie, *BlobManager);
+ Execute(new TTxScan(this, ev), ctx);
+}
+
+void TColumnShard::Handle(TEvPrivate::TEvScanStats::TPtr& ev, const TActorContext &ctx) {
+ Y_UNUSED(ctx);
+
+ IncCounter(COUNTER_SCANNED_ROWS, ev->Get()->Rows);
+ IncCounter(COUNTER_SCANNED_BYTES, ev->Get()->Bytes);
+}
+
+void TColumnShard::Handle(TEvPrivate::TEvReadFinished::TPtr& ev, const TActorContext &ctx) {
+ Y_UNUSED(ctx);
+ ui64 readCookie = ev->Get()->RequestCookie;
+ LOG_S_DEBUG("Finished read cookie: " << readCookie << " at tablet " << TabletID());
+ InFlightReadsTracker.RemoveInFlightRequest(ev->Get()->RequestCookie, *BlobManager);
ui64 txId = ev->Get()->TxId;
if (ScanTxInFlight.count(txId)) {
@@ -230,13 +230,13 @@ void TColumnShard::Handle(TEvPrivate::TEvReadFinished::TPtr& ev, const TActorCon
ScanTxInFlight.erase(txId);
SetCounter(COUNTER_SCAN_IN_FLY, ScanTxInFlight.size());
}
-}
-
-void TColumnShard::Handle(TEvColumnShard::TEvReadBlobRanges::TPtr& ev, const TActorContext& ctx) {
- LOG_S_DEBUG("Read blob ranges at tablet " << TabletID() << ev->Get()->Record);
- Execute(new TTxReadBlobRanges(this, ev), ctx);
-}
-
+}
+
+void TColumnShard::Handle(TEvColumnShard::TEvReadBlobRanges::TPtr& ev, const TActorContext& ctx) {
+ LOG_S_DEBUG("Read blob ranges at tablet " << TabletID() << ev->Get()->Record);
+ Execute(new TTxReadBlobRanges(this, ev), ctx);
+}
+
void TColumnShard::Handle(TEvPrivate::TEvPeriodicWakeup::TPtr& ev, const TActorContext& ctx) {
if (ev->Get()->Manual) {
EnqueueBackgroundActivities();
@@ -250,18 +250,18 @@ void TColumnShard::Handle(TEvPrivate::TEvPeriodicWakeup::TPtr& ev, const TActorC
ctx.Schedule(ActivationPeriod, new TEvPrivate::TEvPeriodicWakeup());
}
-void TColumnShard::UpdateBlobMangerCounters() {
- const auto counters = BlobManager->GetCountersUpdate();
- IncCounter(COUNTER_BLOB_MANAGER_GC_REQUESTS, counters.GcRequestsSent);
- IncCounter(COUNTER_BLOB_MANAGER_KEEP_BLOBS, counters.BlobKeepEntries);
- IncCounter(COUNTER_BLOB_MANAGER_DONT_KEEP_BLOBS, counters.BlobDontKeepEntries);
- IncCounter(COUNTER_BLOB_MANAGER_SKIPPED_BLOBS, counters.BlobSkippedEntries);
- IncCounter(COUNTER_SMALL_BLOB_WRITE_COUNT, counters.SmallBlobsWritten);
- IncCounter(COUNTER_SMALL_BLOB_WRITE_BYTES, counters.SmallBlobsBytesWritten);
- IncCounter(COUNTER_SMALL_BLOB_DELETE_COUNT, counters.SmallBlobsDeleted);
- IncCounter(COUNTER_SMALL_BLOB_DELETE_BYTES, counters.SmallBlobsBytesDeleted);
-}
-
+void TColumnShard::UpdateBlobMangerCounters() {
+ const auto counters = BlobManager->GetCountersUpdate();
+ IncCounter(COUNTER_BLOB_MANAGER_GC_REQUESTS, counters.GcRequestsSent);
+ IncCounter(COUNTER_BLOB_MANAGER_KEEP_BLOBS, counters.BlobKeepEntries);
+ IncCounter(COUNTER_BLOB_MANAGER_DONT_KEEP_BLOBS, counters.BlobDontKeepEntries);
+ IncCounter(COUNTER_BLOB_MANAGER_SKIPPED_BLOBS, counters.BlobSkippedEntries);
+ IncCounter(COUNTER_SMALL_BLOB_WRITE_COUNT, counters.SmallBlobsWritten);
+ IncCounter(COUNTER_SMALL_BLOB_WRITE_BYTES, counters.SmallBlobsBytesWritten);
+ IncCounter(COUNTER_SMALL_BLOB_DELETE_COUNT, counters.SmallBlobsDeleted);
+ IncCounter(COUNTER_SMALL_BLOB_DELETE_BYTES, counters.SmallBlobsBytesDeleted);
+}
+
void TColumnShard::UpdateInsertTableCounters() {
NOlap::TInsertTable::TCounters prepared, committed;
InsertTable->GetCounters(prepared, committed);
@@ -281,7 +281,7 @@ void TColumnShard::UpdateIndexCounters() {
return;
}
- auto& stats = PrimaryIndex->GetTotalStats();
+ auto& stats = PrimaryIndex->GetTotalStats();
SetCounter(COUNTER_INDEX_TABLES, stats.Tables);
SetCounter(COUNTER_INDEX_GRANULES, stats.Granules);
SetCounter(COUNTER_INDEX_EMPTY_GRANULES, stats.EmptyGranules);
diff --git a/ydb/core/tx/columnshard/columnshard.h b/ydb/core/tx/columnshard/columnshard.h
index 817b4ba8b09..62486e3c33e 100644
--- a/ydb/core/tx/columnshard/columnshard.h
+++ b/ydb/core/tx/columnshard/columnshard.h
@@ -1,6 +1,6 @@
#pragma once
#include "defs.h"
-#include "blob_manager.h"
+#include "blob_manager.h"
#include <ydb/core/tx/tx.h>
#include <ydb/core/tx/message_seqno.h>
@@ -8,9 +8,9 @@
#include <ydb/core/tx/long_tx_service/public/types.h>
-// TODO: temporarily reuse datashard TEvScan (KIKIMR-11069)
+// TODO: temporarily reuse datashard TEvScan (KIKIMR-11069)
#include <ydb/core/tx/datashard/datashard.h>
-
+
namespace NKikimr {
struct TEvColumnShard {
@@ -20,8 +20,8 @@ struct TEvColumnShard {
EvProposeTransactionResult,
EvNotifyTxCompletion,
EvNotifyTxCompletionResult,
- EvReadBlobRanges,
- EvReadBlobRangesResult,
+ EvReadBlobRanges,
+ EvReadBlobRangesResult,
EvWrite = EvProposeTransaction + 256,
EvRead,
@@ -127,22 +127,22 @@ struct TEvColumnShard {
}
};
- // Read small blobs from the tablet
- struct TEvReadBlobRanges : public TEventPB<TEvReadBlobRanges,
- NKikimrTxColumnShard::TEvReadBlobRanges,
- TEvColumnShard::EvReadBlobRanges>
- {
- };
-
- struct TEvReadBlobRangesResult : public TEventPB<TEvReadBlobRangesResult,
- NKikimrTxColumnShard::TEvReadBlobRangesResult,
- TEvColumnShard::EvReadBlobRangesResult>
- {
- explicit TEvReadBlobRangesResult(ui64 tabletId = 0) {
- Record.SetTabletId(tabletId);
- }
- };
-
+ // Read small blobs from the tablet
+ struct TEvReadBlobRanges : public TEventPB<TEvReadBlobRanges,
+ NKikimrTxColumnShard::TEvReadBlobRanges,
+ TEvColumnShard::EvReadBlobRanges>
+ {
+ };
+
+ struct TEvReadBlobRangesResult : public TEventPB<TEvReadBlobRangesResult,
+ NKikimrTxColumnShard::TEvReadBlobRangesResult,
+ TEvColumnShard::EvReadBlobRangesResult>
+ {
+ explicit TEvReadBlobRangesResult(ui64 tabletId = 0) {
+ Record.SetTabletId(tabletId);
+ }
+ };
+
struct TEvWrite : public TEventPB<TEvWrite, NKikimrTxColumnShard::TEvWrite, TEvColumnShard::EvWrite> {
TEvWrite() = default;
@@ -176,12 +176,12 @@ struct TEvColumnShard {
}
NKikimrProto::EReplyStatus PutStatus = NKikimrProto::UNKNOWN;
- NColumnShard::TUnifiedBlobId BlobId;
- NColumnShard::TBlobBatch BlobBatch;
+ NColumnShard::TUnifiedBlobId BlobId;
+ NColumnShard::TBlobBatch BlobBatch;
NColumnShard::TUsage ResourceUsage;
TVector<ui32> YellowMoveChannels;
TVector<ui32> YellowStopChannels;
- ui64 MaxSmallBlobSize;
+ ui64 MaxSmallBlobSize;
};
struct TEvWriteResult : public TEventPB<TEvWriteResult, NKikimrTxColumnShard::TEvWriteResult,
@@ -234,7 +234,7 @@ struct TEvColumnShard {
Record.CopyFrom(ev.Record);
}
};
-
+
using TEvScan = TEvDataShard::TEvKqpScan;
};
diff --git a/ydb/core/tx/columnshard/columnshard__index_scan.h b/ydb/core/tx/columnshard/columnshard__index_scan.h
index 0f062ab3c0f..44e9b77cc01 100644
--- a/ydb/core/tx/columnshard/columnshard__index_scan.h
+++ b/ydb/core/tx/columnshard/columnshard__index_scan.h
@@ -1,152 +1,152 @@
-#pragma once
-
-#include "columnshard__scan.h"
-#include "columnshard_common.h"
+#pragma once
+
+#include "columnshard__scan.h"
+#include "columnshard_common.h"
#include <ydb/core/tx/columnshard/engines/indexed_read_data.h>
-
+
namespace NKikimr::NColumnShard {
-
-class TIndexColumnResolver : public IColumnResolver {
- const NOlap::TIndexInfo& IndexInfo;
+
+class TIndexColumnResolver : public IColumnResolver {
+ const NOlap::TIndexInfo& IndexInfo;
public:
- explicit TIndexColumnResolver(const NOlap::TIndexInfo& indexInfo)
- : IndexInfo(indexInfo)
- {}
+ explicit TIndexColumnResolver(const NOlap::TIndexInfo& indexInfo)
+ : IndexInfo(indexInfo)
+ {}
- TString GetColumnName(ui32 id, bool required) const override {
- return IndexInfo.GetColumnName(id, required);
+ TString GetColumnName(ui32 id, bool required) const override {
+ return IndexInfo.GetColumnName(id, required);
}
-};
+};
-using NOlap::TUnifiedBlobId;
-using NOlap::TBlobRange;
+using NOlap::TUnifiedBlobId;
+using NOlap::TBlobRange;
class TColumnShardScanIterator : public TScanIteratorBase {
- NOlap::TReadMetadata::TConstPtr ReadMetadata;
- NOlap::TIndexedReadData IndexedData;
- THashMap<TBlobRange, ui64> IndexedBlobs; // blobId -> granule
- THashSet<TBlobRange> WaitIndexed;
- THashMap<ui64, THashSet<TBlobRange>> GranuleBlobs; // granule -> blobs
- THashMap<TUnifiedBlobId, ui32> WaitCommitted;
- TVector<TBlobRange> BlobsToRead;
- ui64 NextBlobIdxToRead = 0;
- TDeque<NOlap::TPartialReadResult> ReadyResults;
- bool IsReadFinished = false;
- ui64 ItemsRead = 0;
- const i64 MaxRowsInBatch = 5000;
-
-public:
- TColumnShardScanIterator(NOlap::TReadMetadata::TConstPtr readMetadata)
- : ReadMetadata(readMetadata)
- , IndexedData(ReadMetadata)
- {
- ui32 batchNo = 0;
- for (size_t i = 0; i < ReadMetadata->CommittedBlobs.size(); ++i, ++batchNo) {
- const TUnifiedBlobId& blobId = ReadMetadata->CommittedBlobs[i];
- WaitCommitted.emplace(blobId, batchNo);
- }
+ NOlap::TReadMetadata::TConstPtr ReadMetadata;
+ NOlap::TIndexedReadData IndexedData;
+ THashMap<TBlobRange, ui64> IndexedBlobs; // blobId -> granule
+ THashSet<TBlobRange> WaitIndexed;
+ THashMap<ui64, THashSet<TBlobRange>> GranuleBlobs; // granule -> blobs
+ THashMap<TUnifiedBlobId, ui32> WaitCommitted;
+ TVector<TBlobRange> BlobsToRead;
+ ui64 NextBlobIdxToRead = 0;
+ TDeque<NOlap::TPartialReadResult> ReadyResults;
+ bool IsReadFinished = false;
+ ui64 ItemsRead = 0;
+ const i64 MaxRowsInBatch = 5000;
+
+public:
+ TColumnShardScanIterator(NOlap::TReadMetadata::TConstPtr readMetadata)
+ : ReadMetadata(readMetadata)
+ , IndexedData(ReadMetadata)
+ {
+ ui32 batchNo = 0;
+ for (size_t i = 0; i < ReadMetadata->CommittedBlobs.size(); ++i, ++batchNo) {
+ const TUnifiedBlobId& blobId = ReadMetadata->CommittedBlobs[i];
+ WaitCommitted.emplace(blobId, batchNo);
+ }
IndexedBlobs = IndexedData.InitRead(batchNo, true);
for (auto& [blobId, granule] : IndexedBlobs) {
WaitIndexed.insert(blobId);
GranuleBlobs[granule].insert(blobId);
}
-
- // Read all committed blobs
- for (const auto& blobId : ReadMetadata->CommittedBlobs) {
- BlobsToRead.push_back(TBlobRange(blobId, 0, blobId.BlobSize()));
- }
-
- Y_VERIFY(ReadMetadata->IsSorted());
+
+ // Read all committed blobs
+ for (const auto& blobId : ReadMetadata->CommittedBlobs) {
+ BlobsToRead.push_back(TBlobRange(blobId, 0, blobId.BlobSize()));
+ }
+
+ Y_VERIFY(ReadMetadata->IsSorted());
// Read all indexed blobs (in correct order)
- auto granulesOrder = ReadMetadata->SelectInfo->GranulesOrder(ReadMetadata->IsDescSorted());
+ auto granulesOrder = ReadMetadata->SelectInfo->GranulesOrder(ReadMetadata->IsDescSorted());
for (ui64 granule : granulesOrder) {
auto& blobs = GranuleBlobs[granule];
- BlobsToRead.insert(BlobsToRead.end(), blobs.begin(), blobs.end());
- }
-
- IsReadFinished = ReadMetadata->Empty();
- }
-
+ BlobsToRead.insert(BlobsToRead.end(), blobs.begin(), blobs.end());
+ }
+
+ IsReadFinished = ReadMetadata->Empty();
+ }
+
void AddData(const TBlobRange& blobRange, TString data) override {
- const auto& blobId = blobRange.BlobId;
- if (IndexedBlobs.count(blobRange)) {
- if (!WaitIndexed.count(blobRange)) {
- return; // ignore duplicate parts
- }
- WaitIndexed.erase(blobRange);
- IndexedData.AddIndexedColumn(blobRange, data);
- } else if (WaitCommitted.count(blobId)) {
- ui32 batchNo = WaitCommitted[blobId];
- WaitCommitted.erase(blobId);
-
- IndexedData.AddNotIndexed(batchNo, data);
- }
- }
-
+ const auto& blobId = blobRange.BlobId;
+ if (IndexedBlobs.count(blobRange)) {
+ if (!WaitIndexed.count(blobRange)) {
+ return; // ignore duplicate parts
+ }
+ WaitIndexed.erase(blobRange);
+ IndexedData.AddIndexedColumn(blobRange, data);
+ } else if (WaitCommitted.count(blobId)) {
+ ui32 batchNo = WaitCommitted[blobId];
+ WaitCommitted.erase(blobId);
+
+ IndexedData.AddNotIndexed(batchNo, data);
+ }
+ }
+
bool Finished() const override {
- return IsReadFinished && ReadyResults.empty();
- }
-
+ return IsReadFinished && ReadyResults.empty();
+ }
+
NOlap::TPartialReadResult GetBatch() override {
- FillReadyResults();
-
- if (ReadyResults.empty()) {
- return {};
- }
-
- auto result(std::move(ReadyResults.front()));
- ReadyResults.pop_front();
-
- return result;
- }
-
+ FillReadyResults();
+
+ if (ReadyResults.empty()) {
+ return {};
+ }
+
+ auto result(std::move(ReadyResults.front()));
+ ReadyResults.pop_front();
+
+ return result;
+ }
+
TBlobRange GetNextBlobToRead() override {
- if (IsReadFinished || NextBlobIdxToRead == BlobsToRead.size()) {
- return TBlobRange();
- }
- const auto& blob = BlobsToRead[NextBlobIdxToRead];
- ++NextBlobIdxToRead;
- return blob;
- }
-
+ if (IsReadFinished || NextBlobIdxToRead == BlobsToRead.size()) {
+ return TBlobRange();
+ }
+ const auto& blob = BlobsToRead[NextBlobIdxToRead];
+ ++NextBlobIdxToRead;
+ return blob;
+ }
+
size_t ReadyResultsCount() const override {
- return ReadyResults.size();
- }
-
-private:
- void FillReadyResults() {
- auto ready = IndexedData.GetReadyResults(MaxRowsInBatch);
- i64 limitLeft = ReadMetadata->Limit == 0 ? INT64_MAX : ReadMetadata->Limit - ItemsRead;
- for (size_t i = 0; i < ready.size() && limitLeft; ++i) {
- if (ready[i].ResultBatch->num_rows() == 0 && !ready[i].LastReadKey) {
- Y_VERIFY(i+1 == ready.size(), "Only last batch can be empty!");
- break;
- }
-
- ReadyResults.emplace_back(std::move(ready[i]));
- auto& batch = ReadyResults.back();
- if (batch.ResultBatch->num_rows() > limitLeft) {
- // Trim the last batch if the total row count exceeds the requested limit
- batch.ResultBatch = batch.ResultBatch->Slice(0, limitLeft);
- ready.clear();
- }
- limitLeft -= batch.ResultBatch->num_rows();
- ItemsRead += batch.ResultBatch->num_rows();
- }
-
- if (limitLeft == 0) {
- WaitCommitted.clear();
- WaitIndexed.clear();
- IsReadFinished = true;
- }
-
- if (WaitCommitted.empty() && WaitIndexed.empty() && NextBlobIdxToRead == BlobsToRead.size()) {
- IsReadFinished = true;
- }
- }
-};
-
+ return ReadyResults.size();
+ }
+
+private:
+ void FillReadyResults() {
+ auto ready = IndexedData.GetReadyResults(MaxRowsInBatch);
+ i64 limitLeft = ReadMetadata->Limit == 0 ? INT64_MAX : ReadMetadata->Limit - ItemsRead;
+ for (size_t i = 0; i < ready.size() && limitLeft; ++i) {
+ if (ready[i].ResultBatch->num_rows() == 0 && !ready[i].LastReadKey) {
+ Y_VERIFY(i+1 == ready.size(), "Only last batch can be empty!");
+ break;
+ }
+
+ ReadyResults.emplace_back(std::move(ready[i]));
+ auto& batch = ReadyResults.back();
+ if (batch.ResultBatch->num_rows() > limitLeft) {
+ // Trim the last batch if the total row count exceeds the requested limit
+ batch.ResultBatch = batch.ResultBatch->Slice(0, limitLeft);
+ ready.clear();
+ }
+ limitLeft -= batch.ResultBatch->num_rows();
+ ItemsRead += batch.ResultBatch->num_rows();
+ }
+
+ if (limitLeft == 0) {
+ WaitCommitted.clear();
+ WaitIndexed.clear();
+ IsReadFinished = true;
+ }
+
+ if (WaitCommitted.empty() && WaitIndexed.empty() && NextBlobIdxToRead == BlobsToRead.size()) {
+ IsReadFinished = true;
+ }
+ }
+};
+
}
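
The scan actor that owns this iterator drives it as a fetch, feed and drain loop: request the next TBlobRange, hand the fetched bytes back through AddData(), and pull ready batches with GetBatch() until Finished() reports completion. A simplified synchronous sketch of that loop, assuming the NKikimr::NColumnShard namespace; FetchBlob is a hypothetical helper standing in for the asynchronous blob read, and the emptiness checks on TBlobRange and TPartialReadResult are assumptions:

// Hypothetical helper: synchronously returns the bytes of a blob range.
TString FetchBlob(const TBlobRange& range);

// Sketch of the consumer loop over a scan iterator such as TColumnShardScanIterator.
void DrainScanIterator(TScanIteratorBase& it) {
    while (!it.Finished()) {
        // A default-constructed range is assumed to mean "nothing to request right now".
        TBlobRange range = it.GetNextBlobToRead();
        if (range.BlobId.IsValid()) {
            it.AddData(range, FetchBlob(range));
        }
        // GetBatch() is assumed to return an empty result while more input is still needed.
        NOlap::TPartialReadResult batch = it.GetBatch();
        if (batch.ResultBatch) {
            // ... forward batch.ResultBatch to the scan's consumer ...
        }
    }
}
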
diff --git a/ydb/core/tx/columnshard/columnshard__init.cpp b/ydb/core/tx/columnshard/columnshard__init.cpp
index 9dcc3e0d7a7..a690aec8119 100644
--- a/ydb/core/tx/columnshard/columnshard__init.cpp
+++ b/ydb/core/tx/columnshard/columnshard__init.cpp
@@ -2,7 +2,7 @@
#include "columnshard_ttl.h"
#include "columnshard_txs.h"
#include "columnshard_schema.h"
-#include "blob_manager_db.h"
+#include "blob_manager_db.h"
#include <ydb/core/tablet/tablet_exception.h>
@@ -16,7 +16,7 @@ void TTxInit::SetDefaults() {
Self->CurrentSchemeShardId = 0;
Self->LastSchemaSeqNo = { };
Self->ProcessingParams.reset();
- Self->LastWriteId = TWriteId{0};
+ Self->LastWriteId = TWriteId{0};
Self->LastPlannedStep = 0;
Self->LastPlannedTxId = 0;
Self->BasicTxInfo.clear();
@@ -34,8 +34,8 @@ void TTxInit::SetDefaults() {
bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
{
// Load InsertTable
- TBlobGroupSelector dsGroupSelector(Self->Info());
- NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Self->Info());
+ NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
if (!Self->InsertTable->Load(dbTable, AppData(ctx)->TimeProvider->Now())) {
return false;
}
@@ -53,8 +53,8 @@ bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
ready = ready & Schema::Precharge<Schema::TableInfo>(db, txc.DB.GetScheme());
ready = ready & Schema::Precharge<Schema::TableVersionInfo>(db, txc.DB.GetScheme());
ready = ready & Schema::Precharge<Schema::LongTxWrites>(db, txc.DB.GetScheme());
- ready = ready & Schema::Precharge<Schema::BlobsToKeep>(db, txc.DB.GetScheme());
- ready = ready & Schema::Precharge<Schema::BlobsToDelete>(db, txc.DB.GetScheme());
+ ready = ready & Schema::Precharge<Schema::BlobsToKeep>(db, txc.DB.GetScheme());
+ ready = ready & Schema::Precharge<Schema::BlobsToDelete>(db, txc.DB.GetScheme());
ready = ready && Schema::GetSpecialValue(db, Schema::EValueIds::CurrentSchemeShardId, Self->CurrentSchemeShardId);
ready = ready && Schema::GetSpecialValue(db, Schema::EValueIds::LastSchemaSeqNoGeneration, Self->LastSchemaSeqNo.Generation);
@@ -103,7 +103,7 @@ bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
TColumnShard::TCommitMeta meta;
meta.MetaShard = body.GetTxInitiator();
for (auto& id : body.GetWriteIds()) {
- meta.AddWriteId(TWriteId{id});
+ meta.AddWriteId(TWriteId{id});
}
Self->CommitsInFlight.emplace(txId, std::move(meta));
@@ -280,7 +280,7 @@ bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
return false;
while (!rowset.EndOfSet()) {
- const TWriteId writeId = TWriteId{rowset.GetValue<Schema::LongTxWrites::WriteId>()};
+ const TWriteId writeId = TWriteId{rowset.GetValue<Schema::LongTxWrites::WriteId>()};
NKikimrLongTxService::TLongTxId proto;
Y_VERIFY(proto.ParseFromString(rowset.GetValue<Schema::LongTxWrites::LongTxId>()));
const auto longTxId = NLongTxService::TLongTxId::FromProto(proto);
@@ -295,10 +295,10 @@ bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
for (const auto& pr : Self->CommitsInFlight) {
ui64 txId = pr.first;
if (pr.second.MetaShard == 0) {
- for (TWriteId writeId : pr.second.WriteIds) {
+ for (TWriteId writeId : pr.second.WriteIds) {
Y_VERIFY(Self->LongTxWrites.contains(writeId),
- "TTxInit at %" PRIu64 " : Commit %" PRIu64 " references local write %" PRIu64 " that doesn't exist",
- Self->TabletID(), txId, writeId);
+ "TTxInit at %" PRIu64 " : Commit %" PRIu64 " references local write %" PRIu64 " that doesn't exist",
+ Self->TabletID(), txId, writeId);
Self->AddLongTxWrite(writeId, txId);
}
}
@@ -306,21 +306,21 @@ bool TTxInit::ReadEverything(TTransactionContext& txc, const TActorContext& ctx)
// Load primary index
if (Self->PrimaryIndex) {
- TBlobGroupSelector dsGroupSelector(Self->Info());
- NOlap::TDbWrapper idxDB(txc.DB, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Self->Info());
+ NOlap::TDbWrapper idxDB(txc.DB, &dsGroupSelector);
if (!Self->PrimaryIndex->Load(idxDB, Self->PathsToDrop)) {
return false;
}
}
- // Initialize the BlobManager
- {
- TBlobManagerDb blobManagerDb(txc.DB);
- if (!Self->BlobManager->LoadState(blobManagerDb)) {
- return false;
- }
- }
-
+ // Initialize the BlobManager
+ {
+ TBlobManagerDb blobManagerDb(txc.DB);
+ if (!Self->BlobManager->LoadState(blobManagerDb)) {
+ return false;
+ }
+ }
+
Self->UpdateInsertTableCounters();
Self->UpdateIndexCounters();
Self->UpdateResourceMetrics({});
@@ -377,35 +377,35 @@ void TTxUpdateSchema::Complete(const TActorContext& ctx) {
bool TTxInitSchema::Execute(TTransactionContext& txc, const TActorContext&) {
LOG_S_DEBUG("TxInitSchema.Execute at tablet " << Self->TabletID());
- bool isCreate = txc.DB.GetScheme().IsEmpty();
+ bool isCreate = txc.DB.GetScheme().IsEmpty();
NIceDb::TNiceDb(txc.DB).Materialize<Schema>();
-
- if (isCreate) {
- txc.DB.Alter().SetExecutorAllowLogBatching(gAllowLogBatchingDefaultValue);
- txc.DB.Alter().SetExecutorLogFlushPeriod(TDuration::MicroSeconds(500));
- txc.DB.Alter().SetExecutorCacheSize(500000);
- }
-
- // Enable compression for the SmallBlobs table
- const auto* smallBlobsDefaultColumnFamily = txc.DB.GetScheme().DefaultFamilyFor(Schema::SmallBlobs::TableId);
- if (!smallBlobsDefaultColumnFamily ||
- smallBlobsDefaultColumnFamily->Codec != NTable::TAlter::ECodec::LZ4)
- {
- txc.DB.Alter().SetFamily(Schema::SmallBlobs::TableId, 0,
- NTable::TAlter::ECache::None, NTable::TAlter::ECodec::LZ4);
- }
-
- // SmallBlobs table has compaction policy suitable for a big table
- const auto* smallBlobsTable = txc.DB.GetScheme().GetTableInfo(Schema::SmallBlobs::TableId);
- NLocalDb::TCompactionPolicyPtr bigTableCompactionPolicy = NLocalDb::CreateDefaultUserTablePolicy();
- bigTableCompactionPolicy->MinDataPageSize = 32 * 1024;
- if (!smallBlobsTable ||
- !smallBlobsTable->CompactionPolicy ||
- smallBlobsTable->CompactionPolicy->Generations.size() != bigTableCompactionPolicy->Generations.size())
- {
- txc.DB.Alter().SetCompactionPolicy(Schema::SmallBlobs::TableId, *bigTableCompactionPolicy);
- }
-
+
+ if (isCreate) {
+ txc.DB.Alter().SetExecutorAllowLogBatching(gAllowLogBatchingDefaultValue);
+ txc.DB.Alter().SetExecutorLogFlushPeriod(TDuration::MicroSeconds(500));
+ txc.DB.Alter().SetExecutorCacheSize(500000);
+ }
+
+ // Enable compression for the SmallBlobs table
+ const auto* smallBlobsDefaultColumnFamily = txc.DB.GetScheme().DefaultFamilyFor(Schema::SmallBlobs::TableId);
+ if (!smallBlobsDefaultColumnFamily ||
+ smallBlobsDefaultColumnFamily->Codec != NTable::TAlter::ECodec::LZ4)
+ {
+ txc.DB.Alter().SetFamily(Schema::SmallBlobs::TableId, 0,
+ NTable::TAlter::ECache::None, NTable::TAlter::ECodec::LZ4);
+ }
+
+ // SmallBlobs table has compaction policy suitable for a big table
+ const auto* smallBlobsTable = txc.DB.GetScheme().GetTableInfo(Schema::SmallBlobs::TableId);
+ NLocalDb::TCompactionPolicyPtr bigTableCompactionPolicy = NLocalDb::CreateDefaultUserTablePolicy();
+ bigTableCompactionPolicy->MinDataPageSize = 32 * 1024;
+ if (!smallBlobsTable ||
+ !smallBlobsTable->CompactionPolicy ||
+ smallBlobsTable->CompactionPolicy->Generations.size() != bigTableCompactionPolicy->Generations.size())
+ {
+ txc.DB.Alter().SetCompactionPolicy(Schema::SmallBlobs::TableId, *bigTableCompactionPolicy);
+ }
+
return true;
}
diff --git a/ydb/core/tx/columnshard/columnshard__progress_tx.cpp b/ydb/core/tx/columnshard/columnshard__progress_tx.cpp
index 989659b1cd0..9caf23e866d 100644
--- a/ydb/core/tx/columnshard/columnshard__progress_tx.cpp
+++ b/ydb/core/tx/columnshard/columnshard__progress_tx.cpp
@@ -41,8 +41,8 @@ public:
break;
}
ui64 txId = it->TxId;
- LOG_S_DEBUG("Removing outdated txId " << txId << " max step " << it->MaxStep << " outdated step "
- << outdatedStep << " at tablet " << Self->TabletID());
+ LOG_S_DEBUG("Removing outdated txId " << txId << " max step " << it->MaxStep << " outdated step "
+ << outdatedStep << " at tablet " << Self->TabletID());
Self->DeadlineQueue.erase(it);
Self->RemoveTx(txc.DB, txId);
++removedCount;
@@ -81,15 +81,15 @@ public:
case NKikimrTxColumnShard::TX_KIND_COMMIT: {
const auto& meta = Self->CommitsInFlight.at(txId);
- TBlobGroupSelector dsGroupSelector(Self->Info());
- NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Self->Info());
+ NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
auto counters = Self->InsertTable->Commit(dbTable, step, txId, meta.MetaShard, meta.WriteIds);
Self->IncCounter(COUNTER_BLOBS_COMMITTED, counters.Rows);
Self->IncCounter(COUNTER_BYTES_COMMITTED, counters.Bytes);
Self->IncCounter(COUNTER_RAW_BYTES_COMMITTED, counters.RawBytes);
if (meta.MetaShard == 0) {
- for (TWriteId writeId : meta.WriteIds) {
+ for (TWriteId writeId : meta.WriteIds) {
Self->RemoveLongTxWrite(db, writeId);
}
}
@@ -129,11 +129,11 @@ public:
for (auto& rec : TxEvents) {
ctx.Send(rec.Target, rec.Event.Release(), 0, rec.Cookie);
}
-
- Self->UpdateBlobMangerCounters();
- if (Self->BlobManager->TryMoveGCBarrier()) {
- Self->Execute(Self->CreateTxRunGc(), ctx);
- }
+
+ Self->UpdateBlobMangerCounters();
+ if (Self->BlobManager->TryMoveGCBarrier()) {
+ Self->Execute(Self->CreateTxRunGc(), ctx);
+ }
if (StartBackgroundActivities) {
Self->EnqueueBackgroundActivities(false, true);
diff --git a/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp b/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp
index 3d72a1b547e..c8d8d450ff5 100644
--- a/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp
+++ b/ydb/core/tx/columnshard/columnshard__propose_transaction.cpp
@@ -113,14 +113,14 @@ bool TTxProposeTransaction::Execute(TTransactionContext& txc, const TActorContex
// Check that all write ids actually exist
bool failed = false;
for (ui64 writeId : body.GetWriteIds()) {
- if (!Self->LongTxWrites.contains(TWriteId{writeId})) {
+ if (!Self->LongTxWrites.contains(TWriteId{writeId})) {
statusMessage = TStringBuilder()
<< "Commit TxId# " << txId << " references WriteId# " << writeId
<< " that no longer exists";
failed = true;
break;
}
- auto& lw = Self->LongTxWrites[TWriteId{writeId}];
+ auto& lw = Self->LongTxWrites[TWriteId{writeId}];
if (lw.PreparedTxId != 0) {
statusMessage = TStringBuilder()
<< "Commit TxId# " << txId << " references WriteId# " << writeId
@@ -132,13 +132,13 @@ bool TTxProposeTransaction::Execute(TTransactionContext& txc, const TActorContex
}
}
- minStep = Self->GetAllowedStep();
- maxStep = minStep + Self->MaxCommitTxDelay.MilliSeconds();
+ minStep = Self->GetAllowedStep();
+ maxStep = minStep + Self->MaxCommitTxDelay.MilliSeconds();
TColumnShard::TCommitMeta meta;
meta.MetaShard = body.GetTxInitiator();
- for (ui64 wId : body.GetWriteIds()) {
- TWriteId writeId{wId};
+ for (ui64 wId : body.GetWriteIds()) {
+ TWriteId writeId{wId};
meta.AddWriteId(writeId);
if (meta.MetaShard == 0) {
Self->AddLongTxWrite(writeId, txId);
diff --git a/ydb/core/tx/columnshard/columnshard__read.cpp b/ydb/core/tx/columnshard/columnshard__read.cpp
index 72417615b26..6dcfc3d703e 100644
--- a/ydb/core/tx/columnshard/columnshard__read.cpp
+++ b/ydb/core/tx/columnshard/columnshard__read.cpp
@@ -1,7 +1,7 @@
#include "columnshard_impl.h"
#include "columnshard_txs.h"
#include "columnshard_schema.h"
-#include "columnshard__index_scan.h"
+#include "columnshard__index_scan.h"
#include <ydb/core/tx/columnshard/engines/column_engine.h>
#include <ydb/core/tx/columnshard/engines/indexed_read_data.h>
@@ -11,37 +11,37 @@ namespace {
template <typename T, typename U>
TVector<T> ProtoToVector(const U& cont) {
- return TVector<T>(cont.begin(), cont.end());
+ return TVector<T>(cont.begin(), cont.end());
}
}
-IActor* CreateReadActor(ui64 tabletId,
+IActor* CreateReadActor(ui64 tabletId,
const TActorId& dstActor,
std::unique_ptr<TEvColumnShard::TEvReadResult>&& event,
- NOlap::TReadMetadata::TConstPtr readMetadata,
- const TInstant& deadline,
- const TActorId& columnShardActorId,
- ui64 requestCookie);
+ NOlap::TReadMetadata::TConstPtr readMetadata,
+ const TInstant& deadline,
+ const TActorId& columnShardActorId,
+ ui64 requestCookie);
using namespace NTabletFlatExecutor;
-NOlap::TReadMetadata::TPtr
+NOlap::TReadMetadata::TPtr
TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescription& read,
const std::unique_ptr<NOlap::TInsertTable>& insertTable,
- const std::unique_ptr<NOlap::IColumnEngine>& index,
- TString& error) const {
- Y_UNUSED(ctx);
-
+ const std::unique_ptr<NOlap::IColumnEngine>& index,
+ TString& error) const {
+ Y_UNUSED(ctx);
+
if (!insertTable || !index) {
return {};
}
-
- if (read.PlanStep < Self->GetMinReadStep()) {
- error = Sprintf("Snapshot %" PRIu64 ":%" PRIu64 " too old", read.PlanStep, read.TxId);
- return {};
- }
-
+
+ if (read.PlanStep < Self->GetMinReadStep()) {
+ error = Sprintf("Snapshot %" PRIu64 ":%" PRIu64 " too old", read.PlanStep, read.TxId);
+ return {};
+ }
+
const NOlap::TIndexInfo& indexInfo = index->GetIndexInfo();
auto spOut = std::make_shared<NOlap::TReadMetadata>(indexInfo);
auto& out = *spOut;
@@ -57,8 +57,8 @@ TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescriptio
} else if (read.ColumnNames.size()) {
out.ResultSchema = indexInfo.ArrowSchema(read.ColumnNames);
} else {
- error = "Empty column list requested";
- return {};
+ error = "Empty column list requested";
+ return {};
}
if (!out.BlobSchema || !out.ResultSchema) {
@@ -67,7 +67,7 @@ TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescriptio
// insert table
- out.CommittedBlobs = insertTable->Read(read.PathId, read.PlanStep, read.TxId);
+ out.CommittedBlobs = insertTable->Read(read.PathId, read.PlanStep, read.TxId);
// index
@@ -80,7 +80,7 @@ TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescriptio
if (!read.ColumnIds.empty()) {
columns = indexInfo.GetColumnNames(read.ColumnIds);
}
- Y_VERIFY(!columns.empty(), "Empty column list");
+ Y_VERIFY(!columns.empty(), "Empty column list");
{ // Add more columns: snapshot, replace, predicate
// Key columns (replace, sort)
@@ -109,8 +109,8 @@ TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescriptio
for (auto& reqCol : requiredColumns) {
columns.push_back(reqCol);
}
- }
-
+ }
+
out.LoadSchema = indexInfo.AddColumns(out.ResultSchema, columns);
if (!out.LoadSchema) {
return {};
@@ -149,15 +149,15 @@ TTxReadBase::PrepareReadMetadata(const TActorContext& ctx, const TReadDescriptio
out.GreaterPredicate, out.LessPredicate);
}
return spOut;
-}
-
+}
+
bool TTxReadBase::ParseProgram(const TActorContext& ctx, NKikimrSchemeOp::EOlapProgramType programType,
TString serializedProgram, TReadDescription& read, const IColumnResolver& columnResolver)
{
if (serializedProgram.empty()) {
return true;
}
-
+
NKikimrSSA::TProgram program;
NKikimrSSA::TOlapProgram olapProgram;
@@ -211,18 +211,18 @@ bool TTxRead::Execute(TTransactionContext& txc, const TActorContext& ctx) {
auto& record = Proto(Ev->Get());
ui64 metaShard = record.GetTxInitiator();
-
- TReadDescription read;
- read.PlanStep = record.GetPlanStep();
- read.TxId = record.GetTxId();
- read.PathId = record.GetTableId();
+
+ TReadDescription read;
+ read.PlanStep = record.GetPlanStep();
+ read.TxId = record.GetTxId();
+ read.PathId = record.GetTableId();
read.ReadNothing = Self->PathsToDrop.count(read.PathId);
- read.ColumnIds = ProtoToVector<ui32>(record.GetColumnIds());
- read.ColumnNames = ProtoToVector<TString>(record.GetColumnNames());
- if (read.ColumnIds.empty() && read.ColumnNames.empty()) {
- auto allColumnNames = indexInfo.ArrowSchema()->field_names();
- read.ColumnNames.assign(allColumnNames.begin(), allColumnNames.end());
- }
+ read.ColumnIds = ProtoToVector<ui32>(record.GetColumnIds());
+ read.ColumnNames = ProtoToVector<TString>(record.GetColumnNames());
+ if (read.ColumnIds.empty() && read.ColumnNames.empty()) {
+ auto allColumnNames = indexInfo.ArrowSchema()->field_names();
+ read.ColumnNames.assign(allColumnNames.begin(), allColumnNames.end());
+ }
if (record.HasGreaterPredicate()) {
auto& proto = record.GetGreaterPredicate();
@@ -236,7 +236,7 @@ bool TTxRead::Execute(TTransactionContext& txc, const TActorContext& ctx) {
read.LessPredicate = std::make_shared<NOlap::TPredicate>(
NArrow::EOperation::Less, proto.GetRow(), schema, proto.GetInclusive());
}
-
+
bool parseResult = ParseProgram(ctx, record.GetOlapProgramType(), record.GetOlapProgram(), read,
TIndexColumnResolver(Self->PrimaryIndex->GetIndexInfo()));
@@ -251,7 +251,7 @@ bool TTxRead::Execute(TTransactionContext& txc, const TActorContext& ctx) {
}
Result = std::make_unique<TEvColumnShard::TEvReadResult>(
- Self->TabletID(), metaShard, read.PlanStep, read.TxId, read.PathId, 0, true, status);
+ Self->TabletID(), metaShard, read.PlanStep, read.TxId, read.PathId, 0, true, status);
if (status == NKikimrTxColumnShard::EResultStatus::SUCCESS) {
Self->IncCounter(COUNTER_READ_SUCCESS);
@@ -269,7 +269,7 @@ void TTxRead::Complete(const TActorContext& ctx) {
bool success = (Proto(Result.get()).GetStatus() == NKikimrTxColumnShard::EResultStatus::SUCCESS);
if (!success) {
- LOG_S_DEBUG("TTxRead.Complete. Error " << ErrorDescription << " while reading at tablet " << Self->TabletID());
+ LOG_S_DEBUG("TTxRead.Complete. Error " << ErrorDescription << " while reading at tablet " << Self->TabletID());
ctx.Send(Ev->Get()->GetSource(), Result.release());
} else if (noData) {
LOG_S_DEBUG("TTxRead.Complete. Empty result at tablet " << Self->TabletID());
@@ -277,19 +277,19 @@ void TTxRead::Complete(const TActorContext& ctx) {
} else {
LOG_S_DEBUG("TTxRead.Complete at tablet " << Self->TabletID() << " Metadata: " << *ReadMetadata);
- const ui64 requestCookie = Self->InFlightReadsTracker.AddInFlightRequest(
- std::static_pointer_cast<const NOlap::TReadMetadataBase>(ReadMetadata), *Self->BlobManager);
- auto statsDelta = Self->InFlightReadsTracker.GetSelectStatsDelta();
+ const ui64 requestCookie = Self->InFlightReadsTracker.AddInFlightRequest(
+ std::static_pointer_cast<const NOlap::TReadMetadataBase>(ReadMetadata), *Self->BlobManager);
+ auto statsDelta = Self->InFlightReadsTracker.GetSelectStatsDelta();
- Self->IncCounter(COUNTER_READ_INDEX_GRANULES, statsDelta.Granules);
- Self->IncCounter(COUNTER_READ_INDEX_PORTIONS, statsDelta.Portions);
- Self->IncCounter(COUNTER_READ_INDEX_BLOBS, statsDelta.Blobs);
- Self->IncCounter(COUNTER_READ_INDEX_ROWS, statsDelta.Rows);
- Self->IncCounter(COUNTER_READ_INDEX_BYTES, statsDelta.Bytes);
+ Self->IncCounter(COUNTER_READ_INDEX_GRANULES, statsDelta.Granules);
+ Self->IncCounter(COUNTER_READ_INDEX_PORTIONS, statsDelta.Portions);
+ Self->IncCounter(COUNTER_READ_INDEX_BLOBS, statsDelta.Blobs);
+ Self->IncCounter(COUNTER_READ_INDEX_ROWS, statsDelta.Rows);
+ Self->IncCounter(COUNTER_READ_INDEX_BYTES, statsDelta.Bytes);
TInstant deadline = TInstant::Max(); // TODO
- ctx.Register(CreateReadActor(Self->TabletID(), Ev->Get()->GetSource(),
- std::move(Result), ReadMetadata, deadline, Self->SelfId(), requestCookie));
+ ctx.Register(CreateReadActor(Self->TabletID(), Ev->Get()->GetSource(),
+ std::move(Result), ReadMetadata, deadline, Self->SelfId(), requestCookie));
}
}
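
The read path above rejects snapshots older than GetMinReadStep() and, when the request names no columns, falls back to every field of the index's Arrow schema. A minimal standalone sketch of just those two validation steps, with simplified stand-in types instead of the actual TReadDescription and NOlap classes:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct TReadRequest {
        uint64_t PlanStep = 0;
        uint64_t TxId = 0;
        std::vector<std::string> ColumnNames; // empty means "all columns"
    };

    struct TShardState {
        uint64_t MinReadStep = 0;
        std::vector<std::string> SchemaColumns; // stand-in for ArrowSchema()->field_names()
    };

    bool PrepareRead(const TShardState& shard, TReadRequest& read, std::string& error) {
        // Staleness guard, as in PrepareReadMetadata: too-old snapshots are refused.
        if (read.PlanStep < shard.MinReadStep) {
            error = "Snapshot " + std::to_string(read.PlanStep) + ":" +
                    std::to_string(read.TxId) + " too old";
            return false;
        }
        // Default, as in TTxRead::Execute: no columns requested means all columns.
        if (read.ColumnNames.empty()) {
            read.ColumnNames = shard.SchemaColumns;
        }
        return true;
    }

    int main() {
        TShardState shard;
        shard.MinReadStep = 100;
        shard.SchemaColumns = {"timestamp", "uid", "message"};

        TReadRequest read;
        read.PlanStep = 150;
        read.TxId = 7;

        std::string error;
        if (PrepareRead(shard, read, error)) {
            std::cout << "reading " << read.ColumnNames.size() << " columns\n";
        } else {
            std::cout << error << "\n";
        }
        return 0;
    }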
diff --git a/ydb/core/tx/columnshard/columnshard__read_blob_ranges.cpp b/ydb/core/tx/columnshard/columnshard__read_blob_ranges.cpp
index 3fe26c3d172..59383f94ed8 100644
--- a/ydb/core/tx/columnshard/columnshard__read_blob_ranges.cpp
+++ b/ydb/core/tx/columnshard/columnshard__read_blob_ranges.cpp
@@ -1,104 +1,104 @@
-#include "columnshard_impl.h"
-#include "columnshard_txs.h"
-#include "columnshard_schema.h"
-#include "blob_manager_db.h"
-
+#include "columnshard_impl.h"
+#include "columnshard_txs.h"
+#include "columnshard_schema.h"
+#include "blob_manager_db.h"
+
namespace NKikimr::NColumnShard {
-
-using namespace NTabletFlatExecutor;
-
-// Returns false in case of page fault
-bool TryReadValue(NIceDb::TNiceDb& db, const TString& key, TString& value, ui32& readStatus) {
- auto rowset = db.Table<Schema::SmallBlobs>().Key(key).Select<Schema::SmallBlobs::Data>();
- if (!rowset.IsReady()) {
- return false;
- }
-
- if (rowset.IsValid()) {
- readStatus = NKikimrProto::EReplyStatus::OK;
- value = rowset.GetValue<Schema::SmallBlobs::Data>();
- } else {
- readStatus = NKikimrProto::EReplyStatus::NODATA;
- value.clear();
- }
- return true;
-}
-
-bool TTxReadBlobRanges::Execute(TTransactionContext& txc, const TActorContext& ctx) {
- Y_VERIFY(Ev);
- auto& record = Ev->Get()->Record;
-    LOG_S_DEBUG("TTxReadBlobRanges.Execute at tablet " << Self->TabletID() << " : " << record);
-
- Result = std::make_unique<TEvColumnShard::TEvReadBlobRangesResult>(Self->TabletID());
-
- NIceDb::TNiceDb db(txc.DB);
-
- ui64 successCount = 0;
- ui64 errorCount = 0;
- ui64 byteCount = 0;
- for (const auto& range : record.GetBlobRanges()) {
- auto blobId = range.GetBlobId();
-
- TString blob;
- ui32 status = NKikimrProto::EReplyStatus::NODATA;
- if (!TryReadValue(db, blobId, blob, status)) {
- return false; // Page fault
- }
-
- if (status == NKikimrProto::EReplyStatus::NODATA) {
-            // If the value wasn't found by the string key, try to parse the key as a small blob id
-            // and look it up again with that id serialized in the new and in the legacy format
- TString error;
- NOlap::TUnifiedBlobId smallBlobId = NOlap::TUnifiedBlobId::ParseFromString(blobId, nullptr, error);
-
- if (smallBlobId.IsValid()) {
- if (!TryReadValue(db, smallBlobId.ToStringNew(), blob, status)) {
- return false; // Page fault
- }
-
- if (status == NKikimrProto::EReplyStatus::NODATA &&
- !TryReadValue(db, smallBlobId.ToStringLegacy(), blob, status))
- {
- return false; // Page fault
- }
- }
- }
-
- auto* res = Result->Record.AddResults();
- res->MutableBlobRange()->CopyFrom(range);
- if (status == NKikimrProto::EReplyStatus::OK) {
- if (range.GetOffset() + range.GetSize() <= blob.size()) {
- res->SetData(blob.substr(range.GetOffset(), range.GetSize()));
- byteCount += range.GetSize();
- } else {
- LOG_S_NOTICE("TTxReadBlobRanges.Execute at tablet " << Self->TabletID()
-                    << " the requested range " << range << " is outside blob data, blob size: " << blob.size());
- status = NKikimrProto::EReplyStatus::ERROR;
- }
- }
- res->SetStatus(status);
- if (status == NKikimrProto::EReplyStatus::OK) {
- ++successCount;
- } else {
- ++errorCount;
- }
- }
-
- // Sending result right away without waiting for Complete()
- // It is ok because the blob ids that were requested can only be known
- // to the caller if they have been already committed.
- ctx.Send(Ev->Sender, Result.release(), 0, Ev->Cookie);
-
- Self->IncCounter(COUNTER_SMALL_BLOB_READ_SUCCESS, successCount);
- Self->IncCounter(COUNTER_SMALL_BLOB_READ_ERROR, errorCount);
- Self->IncCounter(COUNTER_SMALL_BLOB_READ_BYTES, byteCount);
-
- return true;
-}
-
-void TTxReadBlobRanges::Complete(const TActorContext& ctx) {
- Y_UNUSED(ctx);
- LOG_S_DEBUG("TTxReadBlobRanges.Complete at tablet " << Self->TabletID());
-}
-
+
+using namespace NTabletFlatExecutor;
+
+// Returns false in case of page fault
+bool TryReadValue(NIceDb::TNiceDb& db, const TString& key, TString& value, ui32& readStatus) {
+ auto rowset = db.Table<Schema::SmallBlobs>().Key(key).Select<Schema::SmallBlobs::Data>();
+ if (!rowset.IsReady()) {
+ return false;
+ }
+
+ if (rowset.IsValid()) {
+ readStatus = NKikimrProto::EReplyStatus::OK;
+ value = rowset.GetValue<Schema::SmallBlobs::Data>();
+ } else {
+ readStatus = NKikimrProto::EReplyStatus::NODATA;
+ value.clear();
+ }
+ return true;
+}
+
+bool TTxReadBlobRanges::Execute(TTransactionContext& txc, const TActorContext& ctx) {
+ Y_VERIFY(Ev);
+ auto& record = Ev->Get()->Record;
+    LOG_S_DEBUG("TTxReadBlobRanges.Execute at tablet " << Self->TabletID() << " : " << record);
+
+ Result = std::make_unique<TEvColumnShard::TEvReadBlobRangesResult>(Self->TabletID());
+
+ NIceDb::TNiceDb db(txc.DB);
+
+ ui64 successCount = 0;
+ ui64 errorCount = 0;
+ ui64 byteCount = 0;
+ for (const auto& range : record.GetBlobRanges()) {
+ auto blobId = range.GetBlobId();
+
+ TString blob;
+ ui32 status = NKikimrProto::EReplyStatus::NODATA;
+ if (!TryReadValue(db, blobId, blob, status)) {
+ return false; // Page fault
+ }
+
+ if (status == NKikimrProto::EReplyStatus::NODATA) {
+            // If the value wasn't found by the string key, try to parse the key as a small blob id
+            // and look it up again with that id serialized in the new and in the legacy format
+ TString error;
+ NOlap::TUnifiedBlobId smallBlobId = NOlap::TUnifiedBlobId::ParseFromString(blobId, nullptr, error);
+
+ if (smallBlobId.IsValid()) {
+ if (!TryReadValue(db, smallBlobId.ToStringNew(), blob, status)) {
+ return false; // Page fault
+ }
+
+ if (status == NKikimrProto::EReplyStatus::NODATA &&
+ !TryReadValue(db, smallBlobId.ToStringLegacy(), blob, status))
+ {
+ return false; // Page fault
+ }
+ }
+ }
+
+ auto* res = Result->Record.AddResults();
+ res->MutableBlobRange()->CopyFrom(range);
+ if (status == NKikimrProto::EReplyStatus::OK) {
+ if (range.GetOffset() + range.GetSize() <= blob.size()) {
+ res->SetData(blob.substr(range.GetOffset(), range.GetSize()));
+ byteCount += range.GetSize();
+ } else {
+ LOG_S_NOTICE("TTxReadBlobRanges.Execute at tablet " << Self->TabletID()
+                    << " the requested range " << range << " is outside blob data, blob size: " << blob.size());
+ status = NKikimrProto::EReplyStatus::ERROR;
+ }
+ }
+ res->SetStatus(status);
+ if (status == NKikimrProto::EReplyStatus::OK) {
+ ++successCount;
+ } else {
+ ++errorCount;
+ }
+ }
+
+ // Sending result right away without waiting for Complete()
+ // It is ok because the blob ids that were requested can only be known
+ // to the caller if they have been already committed.
+ ctx.Send(Ev->Sender, Result.release(), 0, Ev->Cookie);
+
+ Self->IncCounter(COUNTER_SMALL_BLOB_READ_SUCCESS, successCount);
+ Self->IncCounter(COUNTER_SMALL_BLOB_READ_ERROR, errorCount);
+ Self->IncCounter(COUNTER_SMALL_BLOB_READ_BYTES, byteCount);
+
+ return true;
+}
+
+void TTxReadBlobRanges::Complete(const TActorContext& ctx) {
+ Y_UNUSED(ctx);
+ LOG_S_DEBUG("TTxReadBlobRanges.Complete at tablet " << Self->TabletID());
+}
+
}
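
TTxReadBlobRanges above looks a small blob up first by the raw string key and, on NODATA, retries with the same id re-serialized in the new and then the legacy format, while a not-ready rowset makes Execute() return false so the transaction is re-run after the page fault is resolved. A self-contained sketch of that fallback order only, with hypothetical helpers in place of NIceDb and TUnifiedBlobId:

    #include <initializer_list>
    #include <iostream>
    #include <map>
    #include <string>

    enum class EStatus { Ok, NoData };

    // Hypothetical in-memory store standing in for the SmallBlobs local-db table.
    using TStore = std::map<std::string, std::string>;

    EStatus TryReadValue(const TStore& store, const std::string& key, std::string& value) {
        auto it = store.find(key);
        if (it == store.end()) {
            return EStatus::NoData;
        }
        value = it->second;
        return EStatus::Ok;
    }

    // Illustrative stand-ins for TUnifiedBlobId::ToStringNew() / ToStringLegacy().
    std::string ToStringNew(const std::string& id)    { return "SB[" + id + "]"; }
    std::string ToStringLegacy(const std::string& id) { return "SM[" + id + "]"; }

    EStatus ReadSmallBlob(const TStore& store, const std::string& rawKey, std::string& value) {
        // Try the key exactly as requested first, then the same id serialized
        // in the new format, then in the legacy format.
        for (const std::string& key : {rawKey, ToStringNew(rawKey), ToStringLegacy(rawKey)}) {
            if (TryReadValue(store, key, value) == EStatus::Ok) {
                return EStatus::Ok;
            }
        }
        return EStatus::NoData;
    }

    int main() {
        TStore store{{"SM[42]", "payload"}};
        std::string value;
        std::cout << (ReadSmallBlob(store, "42", value) == EStatus::Ok ? value : "NODATA") << "\n";
        return 0;
    }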
diff --git a/ydb/core/tx/columnshard/columnshard__scan.cpp b/ydb/core/tx/columnshard/columnshard__scan.cpp
index 9922df2982a..e2f7ba5a515 100644
--- a/ydb/core/tx/columnshard/columnshard__scan.cpp
+++ b/ydb/core/tx/columnshard/columnshard__scan.cpp
@@ -1,7 +1,7 @@
-#include "columnshard__scan.h"
-#include "columnshard__index_scan.h"
-#include "columnshard__stats_scan.h"
-
+#include "columnshard__scan.h"
+#include "columnshard__index_scan.h"
+#include "columnshard__stats_scan.h"
+
#include <ydb/core/tx/columnshard/blob_cache.h>
#include <ydb/core/tx/columnshard/columnshard_impl.h>
#include <ydb/core/tx/columnshard/columnshard_txs.h>
@@ -11,304 +11,304 @@
#include <ydb/core/actorlib_impl/long_timer.h>
#include <ydb/library/yql/core/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
+
namespace NKikimr::NColumnShard {
-
-using namespace NKqp;
-using NBlobCache::TBlobRange;
-
-constexpr ui64 INIT_BATCH_ROWS = 1000;
-constexpr i64 DEFAULT_READ_AHEAD_BYTES = 1*1024*1024;
-constexpr TDuration SCAN_HARD_TIMEOUT = TDuration::Minutes(10);
-constexpr TDuration SCAN_HARD_TIMEOUT_GAP = TDuration::Seconds(5);
-
-class TColumnShardScan : public TActorBootstrapped<TColumnShardScan>, NArrow::IRowWriter {
-public:
+
+using namespace NKqp;
+using NBlobCache::TBlobRange;
+
+constexpr ui64 INIT_BATCH_ROWS = 1000;
+constexpr i64 DEFAULT_READ_AHEAD_BYTES = 1*1024*1024;
+constexpr TDuration SCAN_HARD_TIMEOUT = TDuration::Minutes(10);
+constexpr TDuration SCAN_HARD_TIMEOUT_GAP = TDuration::Seconds(5);
+
+class TColumnShardScan : public TActorBootstrapped<TColumnShardScan>, NArrow::IRowWriter {
+public:
static constexpr auto ActorActivityType() {
- return NKikimrServices::TActivity::KQP_OLAP_SCAN;
- }
-
-public:
- TColumnShardScan(const TActorId& columnShardActorId, const TActorId& scanComputeActorId,
- ui32 scanId, ui64 txId, ui32 scanGen, ui64 requestCookie,
+ return NKikimrServices::TActivity::KQP_OLAP_SCAN;
+ }
+
+public:
+ TColumnShardScan(const TActorId& columnShardActorId, const TActorId& scanComputeActorId,
+ ui32 scanId, ui64 txId, ui32 scanGen, ui64 requestCookie,
const TString& table, TDuration timeout, TVector<TTxScan::TReadMetadataPtr>&& readMetadataList,
NKikimrTxDataShard::EScanDataFormat dataFormat)
- : ColumnShardActorId(columnShardActorId)
- , ScanComputeActorId(scanComputeActorId)
+ : ColumnShardActorId(columnShardActorId)
+ , ScanComputeActorId(scanComputeActorId)
, BlobCacheActorId(NBlobCache::MakeBlobCacheServiceId())
- , ScanId(scanId)
- , TxId(txId)
+ , ScanId(scanId)
+ , TxId(txId)
, ScanGen(scanGen)
- , RequestCookie(requestCookie)
+ , RequestCookie(requestCookie)
, DataFormat(dataFormat)
- , TablePath(table)
+ , TablePath(table)
, ReadMetadataRanges(std::move(readMetadataList))
, ReadMetadataIndex(0)
- , Deadline(TInstant::Now() + (timeout ? timeout + SCAN_HARD_TIMEOUT_GAP : SCAN_HARD_TIMEOUT))
- {
- KeyYqlSchema = ReadMetadataRanges[ReadMetadataIndex]->GetKeyYqlSchema();
- }
-
- void Bootstrap(const TActorContext& ctx) {
- ScanActorId = ctx.SelfID;
-
- TimeoutActorId = CreateLongTimer(TlsActivationContext->AsActorContext(), Deadline - TInstant::Now(),
- new IEventHandle(SelfId(), SelfId(), new TEvents::TEvWakeup));
-
+ , Deadline(TInstant::Now() + (timeout ? timeout + SCAN_HARD_TIMEOUT_GAP : SCAN_HARD_TIMEOUT))
+ {
+ KeyYqlSchema = ReadMetadataRanges[ReadMetadataIndex]->GetKeyYqlSchema();
+ }
+
+ void Bootstrap(const TActorContext& ctx) {
+ ScanActorId = ctx.SelfID;
+
+ TimeoutActorId = CreateLongTimer(TlsActivationContext->AsActorContext(), Deadline - TInstant::Now(),
+ new IEventHandle(SelfId(), SelfId(), new TEvents::TEvWakeup));
+
Y_VERIFY(!ScanIterator);
- ScanIterator = ReadMetadataRanges[ReadMetadataIndex]->StartScan();
-
- // propagate self actor id // TODO: FlagSubscribeOnSession ?
- Send(ScanComputeActorId, new TEvKqpCompute::TEvScanInitActor(ScanId, ctx.SelfID, ScanGen), IEventHandle::FlagTrackDelivery);
-
- Become(&TColumnShardScan::StateScan);
- }
-
-private:
- STATEFN(StateScan) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvKqpCompute::TEvScanDataAck, HandleScan);
+ ScanIterator = ReadMetadataRanges[ReadMetadataIndex]->StartScan();
+
+ // propagate self actor id // TODO: FlagSubscribeOnSession ?
+ Send(ScanComputeActorId, new TEvKqpCompute::TEvScanInitActor(ScanId, ctx.SelfID, ScanGen), IEventHandle::FlagTrackDelivery);
+
+ Become(&TColumnShardScan::StateScan);
+ }
+
+private:
+ STATEFN(StateScan) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvKqpCompute::TEvScanDataAck, HandleScan);
hFunc(NBlobCache::TEvBlobCache::TEvReadBlobRangeResult, HandleScan);
- hFunc(TEvKqp::TEvAbortExecution, HandleScan);
- hFunc(TEvents::TEvUndelivered, HandleScan);
- hFunc(TEvents::TEvWakeup, HandleScan);
- default:
- Y_FAIL("TColumnShardScan: unexpected event 0x%08" PRIx32, ev->GetTypeRewrite());
- }
- }
-
- bool ReadNextBlob() {
- auto blobRange = ScanIterator->GetNextBlobToRead();
- if (!blobRange.BlobId.IsValid()) {
- return false;
- }
- Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRange(blobRange));
- ++InFlightReads;
- InFlightReadBytes += blobRange.Size;
- return true;
- }
-
- void HandleScan(TEvKqpCompute::TEvScanDataAck::TPtr& ev) {
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got ScanDataAck"
- << ", at: " << ScanActorId
+ hFunc(TEvKqp::TEvAbortExecution, HandleScan);
+ hFunc(TEvents::TEvUndelivered, HandleScan);
+ hFunc(TEvents::TEvWakeup, HandleScan);
+ default:
+ Y_FAIL("TColumnShardScan: unexpected event 0x%08" PRIx32, ev->GetTypeRewrite());
+ }
+ }
+
+ bool ReadNextBlob() {
+ auto blobRange = ScanIterator->GetNextBlobToRead();
+ if (!blobRange.BlobId.IsValid()) {
+ return false;
+ }
+ Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRange(blobRange));
+ ++InFlightReads;
+ InFlightReadBytes += blobRange.Size;
+ return true;
+ }
+
+ void HandleScan(TEvKqpCompute::TEvScanDataAck::TPtr& ev) {
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got ScanDataAck"
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", freeSpace: " << ev->Get()->FreeSpace << ", prevFreeSpace: " << PeerFreeSpace);
-
- --InFlightScanDataMessages;
-
- if (!ComputeActorId) {
- ComputeActorId = ev->Sender;
- InFlightScanDataMessages = 0;
- }
-
- Y_VERIFY(ev->Get()->Generation == ScanGen);
-
- PeerFreeSpace = ev->Get()->FreeSpace;
-
- ContinueProcessing();
- }
-
+ << ", freeSpace: " << ev->Get()->FreeSpace << ", prevFreeSpace: " << PeerFreeSpace);
+
+ --InFlightScanDataMessages;
+
+ if (!ComputeActorId) {
+ ComputeActorId = ev->Sender;
+ InFlightScanDataMessages = 0;
+ }
+
+ Y_VERIFY(ev->Get()->Generation == ScanGen);
+
+ PeerFreeSpace = ev->Get()->FreeSpace;
+
+ ContinueProcessing();
+ }
+
void HandleScan(NBlobCache::TEvBlobCache::TEvReadBlobRangeResult::TPtr& ev) {
- --InFlightReads;
-
+ --InFlightReads;
+
auto& event = *ev->Get();
- const auto& blobRange = event.BlobRange;
+ const auto& blobRange = event.BlobRange;
- if (event.Status != NKikimrProto::EReplyStatus::OK) {
- LOG_WARN_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got TEvReadBlobRangeResult error"
- << ", at: " << ScanActorId
+ if (event.Status != NKikimrProto::EReplyStatus::OK) {
+ LOG_WARN_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got TEvReadBlobRangeResult error"
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", blob: " << ev->Get()->BlobRange
- << ", status: " << NKikimrProto::EReplyStatus_Name(event.Status));
- SendError(event.Status);
- return Finish();
- }
-
- Y_VERIFY(event.Data.size() == blobRange.Size,
- "Read %s, size %" PRISZT, event.BlobRange.ToString().c_str(), event.Data.size());
-
- InFlightReadBytes -= blobRange.Size;
-
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got TEvReadBlobRangeResult"
- << ", at: " << ScanActorId
+ << ", blob: " << ev->Get()->BlobRange
+ << ", status: " << NKikimrProto::EReplyStatus_Name(event.Status));
+ SendError(event.Status);
+ return Finish();
+ }
+
+ Y_VERIFY(event.Data.size() == blobRange.Size,
+ "Read %s, size %" PRISZT, event.BlobRange.ToString().c_str(), event.Data.size());
+
+ InFlightReadBytes -= blobRange.Size;
+
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got TEvReadBlobRangeResult"
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
<< ", blob: " << ev->Get()->BlobRange
- << ", prevFreeSpace: " << PeerFreeSpace);
-
- ScanIterator->AddData(blobRange, event.Data);
+ << ", prevFreeSpace: " << PeerFreeSpace);
+
+ ScanIterator->AddData(blobRange, event.Data);
- ContinueProcessing();
- }
+ ContinueProcessing();
+ }
+
+    // Returns true if it was able to produce a new batch
+ bool ProduceResults() {
+ Y_VERIFY(!Finished);
-    // Returns true if it was able to produce a new batch
- bool ProduceResults() {
- Y_VERIFY(!Finished);
-
if (ScanIterator->Finished()) {
- return false;
- }
-
- auto result = ScanIterator->GetBatch();
- if (ResultYqlSchema.empty() && DataFormat != NKikimrTxDataShard::EScanDataFormat::ARROW) {
+ return false;
+ }
+
+ auto result = ScanIterator->GetBatch();
+ if (ResultYqlSchema.empty() && DataFormat != NKikimrTxDataShard::EScanDataFormat::ARROW) {
ResultYqlSchema = ReadMetadataRanges[ReadMetadataIndex]->GetResultYqlSchema();
}
- if (!result.ResultBatch) {
- // No data is ready yet
- return false;
- }
-
- auto& batch = result.ResultBatch;
- int numRows = batch->num_rows();
- int numColumns = batch->num_columns();
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got ready result"
- << ", at: " << ScanActorId
+ if (!result.ResultBatch) {
+ // No data is ready yet
+ return false;
+ }
+
+ auto& batch = result.ResultBatch;
+ int numRows = batch->num_rows();
+ int numColumns = batch->num_columns();
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got ready result"
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", blob (" << numColumns << " columns, " << numRows << " rows)");
-
- switch (DataFormat) {
- case NKikimrTxDataShard::EScanDataFormat::UNSPECIFIED:
- case NKikimrTxDataShard::EScanDataFormat::CELLVEC: {
- MakeResult(INIT_BATCH_ROWS);
- NArrow::TArrowToYdbConverter batchConverter(ResultYqlSchema, *this);
+ << ", blob (" << numColumns << " columns, " << numRows << " rows)");
+
+ switch (DataFormat) {
+ case NKikimrTxDataShard::EScanDataFormat::UNSPECIFIED:
+ case NKikimrTxDataShard::EScanDataFormat::CELLVEC: {
+ MakeResult(INIT_BATCH_ROWS);
+ NArrow::TArrowToYdbConverter batchConverter(ResultYqlSchema, *this);
TString errStr;
bool ok = batchConverter.Process(*batch, errStr);
Y_VERIFY(ok, "%s", errStr.c_str());
- break;
- }
- case NKikimrTxDataShard::EScanDataFormat::ARROW: {
- MakeResult(0);
- Result->ArrowBatch = batch;
-
- Rows += batch->num_rows();
- Bytes += NArrow::GetBatchDataSize(batch);
- break;
- }
- } // switch DataFormat
- if (result.LastReadKey) {
- Result->LastKey = ConvertLastKey(result.LastReadKey);
- } else {
- Y_VERIFY(numRows == 0, "Got non-empty result batch without last key");
+ break;
+ }
+ case NKikimrTxDataShard::EScanDataFormat::ARROW: {
+ MakeResult(0);
+ Result->ArrowBatch = batch;
+
+ Rows += batch->num_rows();
+ Bytes += NArrow::GetBatchDataSize(batch);
+ break;
+ }
+ } // switch DataFormat
+ if (result.LastReadKey) {
+ Result->LastKey = ConvertLastKey(result.LastReadKey);
+ } else {
+ Y_VERIFY(numRows == 0, "Got non-empty result batch without last key");
+ }
+ SendResult(false, false);
+ return true;
+ }
+
+ void ContinueProcessingStep() {
+ if (!ScanIterator) {
+ return;
+ }
+
+ if (PeerFreeSpace == 0) {
+ // Throttle down until the compute actor is ready to receive more rows
+
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Waiting for peer free space"
+ << ", at: " << ScanActorId
+ << ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath);
+
+ return;
+ }
+
+ // Send new results if there is available capacity
+ i64 MAX_SCANDATA_MESSAGES_IN_FLIGHT = 2;
+ while (InFlightScanDataMessages < MAX_SCANDATA_MESSAGES_IN_FLIGHT) {
+ if (!ProduceResults()) {
+ break;
+ }
+ }
+
+ // Switch to the next range if the current one is finished
+ if (ScanIterator->Finished() && !InFlightReads) {
+ NextReadMetadata();
+ }
+
+ size_t MIN_READY_RESULTS_IN_QUEUE = 3;
+ if (ScanIterator && ScanIterator->ReadyResultsCount() < MIN_READY_RESULTS_IN_QUEUE) {
+ // Make read-ahead requests for the subsequent blobs
+ while (InFlightReadBytes < MaxReadAheadBytes || !InFlightReads) {
+ if (!ReadNextBlob()) {
+ break;
+ }
+ }
+ }
+ }
+
+ void ContinueProcessing() {
+ const i64 maxSteps = ReadMetadataRanges.size();
+ for (i64 step = 0; step <= maxSteps; ++step) {
+ ContinueProcessingStep();
+
+            // Only exit the loop if either:
+            // * we have finished scanning ALL the ranges
+            // * or there is an in-flight blob read or ScanData message for which
+            // we will get a reply and will be able to proceed further
+ if (!ScanIterator || InFlightScanDataMessages != 0 || InFlightReads != 0) {
+ return;
+ }
}
- SendResult(false, false);
- return true;
- }
-
- void ContinueProcessingStep() {
- if (!ScanIterator) {
- return;
- }
-
- if (PeerFreeSpace == 0) {
- // Throttle down until the compute actor is ready to receive more rows
-
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Waiting for peer free space"
- << ", at: " << ScanActorId
- << ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath);
-
- return;
- }
-
- // Send new results if there is available capacity
- i64 MAX_SCANDATA_MESSAGES_IN_FLIGHT = 2;
- while (InFlightScanDataMessages < MAX_SCANDATA_MESSAGES_IN_FLIGHT) {
- if (!ProduceResults()) {
- break;
- }
- }
-
- // Switch to the next range if the current one is finished
- if (ScanIterator->Finished() && !InFlightReads) {
- NextReadMetadata();
- }
-
- size_t MIN_READY_RESULTS_IN_QUEUE = 3;
- if (ScanIterator && ScanIterator->ReadyResultsCount() < MIN_READY_RESULTS_IN_QUEUE) {
- // Make read-ahead requests for the subsequent blobs
- while (InFlightReadBytes < MaxReadAheadBytes || !InFlightReads) {
- if (!ReadNextBlob()) {
- break;
- }
- }
- }
- }
-
- void ContinueProcessing() {
- const i64 maxSteps = ReadMetadataRanges.size();
- for (i64 step = 0; step <= maxSteps; ++step) {
- ContinueProcessingStep();
-
-            // Only exit the loop if either:
-            // * we have finished scanning ALL the ranges
-            // * or there is an in-flight blob read or ScanData message for which
-            // we will get a reply and will be able to proceed further
- if (!ScanIterator || InFlightScanDataMessages != 0 || InFlightReads != 0) {
- return;
- }
- }
-
- // The loop has finished without any progress!
- LOG_ERROR_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Scan is hanging"
- << ", at: " << ScanActorId
- << ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath);
- }
-
- void HandleScan(TEvKqp::TEvAbortExecution::TPtr& ev) {
- auto& msg = ev->Get()->Record;
-
- auto prio = msg.GetStatusCode() == Ydb::StatusIds::SUCCESS ? NActors::NLog::PRI_DEBUG : NActors::NLog::PRI_WARN;
- LOG_LOG_S(*TlsActivationContext, prio, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got AbortExecution"
- << ", at: " << ScanActorId
+
+ // The loop has finished without any progress!
+ LOG_ERROR_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Scan is hanging"
+ << ", at: " << ScanActorId
+ << ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath);
+ }
+
+ void HandleScan(TEvKqp::TEvAbortExecution::TPtr& ev) {
+ auto& msg = ev->Get()->Record;
+
+ auto prio = msg.GetStatusCode() == Ydb::StatusIds::SUCCESS ? NActors::NLog::PRI_DEBUG : NActors::NLog::PRI_WARN;
+ LOG_LOG_S(*TlsActivationContext, prio, NKikimrServices::TX_COLUMNSHARD_SCAN, "Got AbortExecution"
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", code: " << Ydb::StatusIds_StatusCode_Name(msg.GetStatusCode())
- << ", reason: " << msg.GetMessage());
-
- AbortReason = std::move(msg.GetMessage());
- SendError(NKikimrProto::EReplyStatus::ERROR); // TODO: better status?
- Finish();
- }
-
- void HandleScan(TEvents::TEvUndelivered::TPtr& ev) {
- ui32 eventType = ev->Get()->SourceType;
- switch (eventType) {
- case TEvKqpCompute::TEvScanInitActor::EventType:
- AbortReason = "init failed";
- break;
- case TEvKqpCompute::TEvScanData::EventType:
- AbortReason = "failed to send data batch";
- break;
- }
-
- LOG_WARN_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Undelivered event: " << eventType
- << ", at: " << ScanActorId
+ << ", code: " << Ydb::StatusIds_StatusCode_Name(msg.GetStatusCode())
+ << ", reason: " << msg.GetMessage());
+
+ AbortReason = std::move(msg.GetMessage());
+ SendError(NKikimrProto::EReplyStatus::ERROR); // TODO: better status?
+ Finish();
+ }
+
+ void HandleScan(TEvents::TEvUndelivered::TPtr& ev) {
+ ui32 eventType = ev->Get()->SourceType;
+ switch (eventType) {
+ case TEvKqpCompute::TEvScanInitActor::EventType:
+ AbortReason = "init failed";
+ break;
+ case TEvKqpCompute::TEvScanData::EventType:
+ AbortReason = "failed to send data batch";
+ break;
+ }
+
+ LOG_WARN_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Undelivered event: " << eventType
+ << ", at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", reason: " << ev->Get()->Reason
- << ", description: " << AbortReason);
-
- Finish();
- }
-
- void HandleScan(TEvents::TEvWakeup::TPtr&) {
- LOG_ERROR_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Guard execution timeout at: " << ScanActorId
+ << ", reason: " << ev->Get()->Reason
+ << ", description: " << AbortReason);
+
+ Finish();
+ }
+
+ void HandleScan(TEvents::TEvWakeup::TPtr&) {
+ LOG_ERROR_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Guard execution timeout at: " << ScanActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath);
-
- TimeoutActorId = {};
-
- Finish();
- }
-
-private:
+
+ TimeoutActorId = {};
+
+ Finish();
+ }
+
+private:
void MakeResult(size_t reserveRows = 0) {
- if (!Finished && !Result) {
+ if (!Finished && !Result) {
Result = MakeHolder<TEvKqpCompute::TEvScanData>(ScanId, ScanGen);
if (reserveRows) {
Y_VERIFY(DataFormat != NKikimrTxDataShard::EScanDataFormat::ARROW);
Result->Rows.reserve(reserveRows);
}
- }
- }
-
+ }
+ }
+
void NextReadMetadata() {
- ScanIterator.reset();
+ ScanIterator.reset();
++ReadMetadataIndex;
@@ -319,150 +319,150 @@ private:
return Finish();
}
- ScanIterator = ReadMetadataRanges[ReadMetadataIndex]->StartScan();
+ ScanIterator = ReadMetadataRanges[ReadMetadataIndex]->StartScan();
// Used in TArrowToYdbConverter
- ResultYqlSchema.clear();
- }
-
- void AddRow(const TConstArrayRef<TCell>& row) override {
- Result->Rows.emplace_back(TOwnedCellVec::Make(row));
- ++Rows;
-
- // NOTE: Some per-row overhead to deal with the case when no columns were requested
- Bytes += std::max((ui64)8, (ui64)Result->Rows.back().DataSize());
- }
-
- TOwnedCellVec ConvertLastKey(const std::shared_ptr<arrow::RecordBatch>& lastReadKey) {
- Y_VERIFY(lastReadKey, "last key must be passed");
-
- struct TSingeRowWriter : public IRowWriter {
- TOwnedCellVec Row;
- bool Done = false;
- void AddRow(const TConstArrayRef<TCell>& row) override {
- Y_VERIFY(!Done);
- Row = TOwnedCellVec::Make(row);
- Done = true;
- }
- } singleRowWriter;
- NArrow::TArrowToYdbConverter converter(KeyYqlSchema, singleRowWriter);
+ ResultYqlSchema.clear();
+ }
+
+ void AddRow(const TConstArrayRef<TCell>& row) override {
+ Result->Rows.emplace_back(TOwnedCellVec::Make(row));
+ ++Rows;
+
+ // NOTE: Some per-row overhead to deal with the case when no columns were requested
+ Bytes += std::max((ui64)8, (ui64)Result->Rows.back().DataSize());
+ }
+
+ TOwnedCellVec ConvertLastKey(const std::shared_ptr<arrow::RecordBatch>& lastReadKey) {
+ Y_VERIFY(lastReadKey, "last key must be passed");
+
+ struct TSingeRowWriter : public IRowWriter {
+ TOwnedCellVec Row;
+ bool Done = false;
+ void AddRow(const TConstArrayRef<TCell>& row) override {
+ Y_VERIFY(!Done);
+ Row = TOwnedCellVec::Make(row);
+ Done = true;
+ }
+ } singleRowWriter;
+ NArrow::TArrowToYdbConverter converter(KeyYqlSchema, singleRowWriter);
TString errStr;
bool ok = converter.Process(*lastReadKey, errStr);
Y_VERIFY(ok, "%s", errStr.c_str());
-
- Y_VERIFY(singleRowWriter.Done);
- return singleRowWriter.Row;
- }
-
- bool SendResult(bool pageFault, bool lastBatch){
- if (Finished) {
- return true;
- }
-
- Result->PageFault = pageFault;
- Result->PageFaults = PageFaults;
- Result->Finished = lastBatch;
- TDuration totalElapsedTime = TDuration::Seconds(GetElapsedTicksAsSeconds());
+
+ Y_VERIFY(singleRowWriter.Done);
+ return singleRowWriter.Row;
+ }
+
+ bool SendResult(bool pageFault, bool lastBatch){
+ if (Finished) {
+ return true;
+ }
+
+ Result->PageFault = pageFault;
+ Result->PageFaults = PageFaults;
+ Result->Finished = lastBatch;
+ TDuration totalElapsedTime = TDuration::Seconds(GetElapsedTicksAsSeconds());
// Result->TotalTime = totalElapsedTime - LastReportedElapsedTime;
// TODO: Result->CpuTime = ...
- LastReportedElapsedTime = totalElapsedTime;
-
- PageFaults = 0;
-
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Send ScanData"
- << ", from: " << ScanActorId
- << ", to: " << ComputeActorId
+ LastReportedElapsedTime = totalElapsedTime;
+
+ PageFaults = 0;
+
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_COLUMNSHARD_SCAN, "Send ScanData"
+ << ", from: " << ScanActorId
+ << ", to: " << ComputeActorId
<< ", txId: " << TxId << ", scanId: " << ScanId << ", gen: " << ScanGen << ", table: " << TablePath
- << ", bytes: " << Bytes << ", rows: " << Rows << ", page faults: " << Result->PageFaults
- << ", finished: " << Result->Finished << ", pageFault: " << Result->PageFault);
-
- if (PeerFreeSpace < Bytes) {
- PeerFreeSpace = 0;
- } else {
- PeerFreeSpace -= Bytes;
- }
-
- Finished = Result->Finished;
-
- Send(ComputeActorId, Result.Release(), IEventHandle::FlagTrackDelivery); // TODO: FlagSubscribeOnSession ?
- ++InFlightScanDataMessages;
-
- ReportStats();
-
- return true;
- }
-
- void SendError(NKikimrProto::EReplyStatus status) {
- auto ev = MakeHolder<TEvKqpCompute::TEvScanError>(ScanGen);
-
- ev->Record.SetStatus(Ydb::StatusIds::GENERIC_ERROR);
- auto issue = NYql::YqlIssue({}, NYql::TIssuesIds::KIKIMR_RESULT_UNAVAILABLE, TStringBuilder()
- << "Table " << TablePath << " scan failed, reason: " << NKikimrProto::EReplyStatus_Name(status));
- NYql::IssueToMessage(issue, ev->Record.MutableIssues()->Add());
-
- Send(ComputeActorId, ev.Release());
- }
-
- void Finish() {
- if (TimeoutActorId) {
- Send(TimeoutActorId, new TEvents::TEvPoison);
- }
-
- Send(ColumnShardActorId, new TEvPrivate::TEvReadFinished(RequestCookie, TxId));
- ReportStats();
- PassAway();
- }
-
- void ReportStats() {
- Send(ColumnShardActorId, new TEvPrivate::TEvScanStats(Rows, Bytes));
- Rows = 0;
- Bytes = 0;
- }
-
-private:
- const TActorId ColumnShardActorId;
- const TActorId ScanComputeActorId;
- TActorId ComputeActorId;
- TActorId ScanActorId;
- TActorId BlobCacheActorId;
- const ui32 ScanId;
- const ui64 TxId;
+ << ", bytes: " << Bytes << ", rows: " << Rows << ", page faults: " << Result->PageFaults
+ << ", finished: " << Result->Finished << ", pageFault: " << Result->PageFault);
+
+ if (PeerFreeSpace < Bytes) {
+ PeerFreeSpace = 0;
+ } else {
+ PeerFreeSpace -= Bytes;
+ }
+
+ Finished = Result->Finished;
+
+ Send(ComputeActorId, Result.Release(), IEventHandle::FlagTrackDelivery); // TODO: FlagSubscribeOnSession ?
+ ++InFlightScanDataMessages;
+
+ ReportStats();
+
+ return true;
+ }
+
+ void SendError(NKikimrProto::EReplyStatus status) {
+ auto ev = MakeHolder<TEvKqpCompute::TEvScanError>(ScanGen);
+
+ ev->Record.SetStatus(Ydb::StatusIds::GENERIC_ERROR);
+ auto issue = NYql::YqlIssue({}, NYql::TIssuesIds::KIKIMR_RESULT_UNAVAILABLE, TStringBuilder()
+ << "Table " << TablePath << " scan failed, reason: " << NKikimrProto::EReplyStatus_Name(status));
+ NYql::IssueToMessage(issue, ev->Record.MutableIssues()->Add());
+
+ Send(ComputeActorId, ev.Release());
+ }
+
+ void Finish() {
+ if (TimeoutActorId) {
+ Send(TimeoutActorId, new TEvents::TEvPoison);
+ }
+
+ Send(ColumnShardActorId, new TEvPrivate::TEvReadFinished(RequestCookie, TxId));
+ ReportStats();
+ PassAway();
+ }
+
+ void ReportStats() {
+ Send(ColumnShardActorId, new TEvPrivate::TEvScanStats(Rows, Bytes));
+ Rows = 0;
+ Bytes = 0;
+ }
+
+private:
+ const TActorId ColumnShardActorId;
+ const TActorId ScanComputeActorId;
+ TActorId ComputeActorId;
+ TActorId ScanActorId;
+ TActorId BlobCacheActorId;
+ const ui32 ScanId;
+ const ui64 TxId;
const ui32 ScanGen;
- const ui64 RequestCookie;
- const i64 MaxReadAheadBytes = DEFAULT_READ_AHEAD_BYTES;
+ const ui64 RequestCookie;
+ const i64 MaxReadAheadBytes = DEFAULT_READ_AHEAD_BYTES;
const NKikimrTxDataShard::EScanDataFormat DataFormat;
-
- const TString TablePath;
-
- TVector<NOlap::TReadMetadataBase::TConstPtr> ReadMetadataRanges;
+
+ const TString TablePath;
+
+ TVector<NOlap::TReadMetadataBase::TConstPtr> ReadMetadataRanges;
ui32 ReadMetadataIndex;
std::unique_ptr<TScanIteratorBase> ScanIterator;
-
- TVector<std::pair<TString, NScheme::TTypeId>> ResultYqlSchema;
- TVector<std::pair<TString, NScheme::TTypeId>> KeyYqlSchema;
- const TSerializedTableRange TableRange;
- const TSmallVec<bool> SkipNullKeys;
- const TInstant Deadline;
-
- TActorId TimeoutActorId;
- TMaybe<TString> AbortReason;
-
- ui64 PeerFreeSpace = 0;
- THolder<TEvKqpCompute::TEvScanData> Result;
- i64 InFlightReads = 0;
- i64 InFlightReadBytes = 0;
- i64 InFlightScanDataMessages = 0;
- bool Finished = false;
- ui64 Rows = 0;
- ui64 Bytes = 0;
- ui32 PageFaults = 0;
- TDuration LastReportedElapsedTime;
-};
-
+
+ TVector<std::pair<TString, NScheme::TTypeId>> ResultYqlSchema;
+ TVector<std::pair<TString, NScheme::TTypeId>> KeyYqlSchema;
+ const TSerializedTableRange TableRange;
+ const TSmallVec<bool> SkipNullKeys;
+ const TInstant Deadline;
+
+ TActorId TimeoutActorId;
+ TMaybe<TString> AbortReason;
+
+ ui64 PeerFreeSpace = 0;
+ THolder<TEvKqpCompute::TEvScanData> Result;
+ i64 InFlightReads = 0;
+ i64 InFlightReadBytes = 0;
+ i64 InFlightScanDataMessages = 0;
+ bool Finished = false;
+ ui64 Rows = 0;
+ ui64 Bytes = 0;
+ ui32 PageFaults = 0;
+ TDuration LastReportedElapsedTime;
+};
+
static void FillPredicatesFromRange(TReadDescription& read, const ::NKikimrTx::TKeyRange& keyRange,
const TVector<std::pair<TString, NScheme::TTypeId>>& ydbPk, ui64 tabletId) {
TSerializedTableRange range(keyRange);
-
+
LOG_S_DEBUG("TTxScan.Execute range predicate. From key size: "
<< range.From.GetCells().size() << " To key size: " << range.To.GetCells().size()
<< " at tablet " << tabletId);
@@ -476,87 +476,87 @@ static void FillPredicatesFromRange(TReadDescription& read, const ::NKikimrTx::T
LOG_S_DEBUG("TTxScan.Execute less predicate over columns: " << read.LessPredicate->ToString()
<< " at tablet " << tabletId);
- if (read.GreaterPredicate && read.GreaterPredicate->Empty()) {
+ if (read.GreaterPredicate && read.GreaterPredicate->Empty()) {
read.GreaterPredicate.reset();
}
- if (read.LessPredicate && read.LessPredicate->Empty()) {
+ if (read.LessPredicate && read.LessPredicate->Empty()) {
read.LessPredicate.reset();
}
}
-NOlap::TReadStatsMetadata::TPtr
-PrepareStatsReadMetadata(ui64 tabletId, const TReadDescription& read, const std::unique_ptr<NOlap::IColumnEngine>& index, TString& error) {
- THashSet<ui32> readColumnIds(read.ColumnIds.begin(), read.ColumnIds.end());
- for (auto& [id, name] : read.ProgramSourceColumns) {
- readColumnIds.insert(id);
- }
-
- for (ui32 colId : readColumnIds) {
- if (!PrimaryIndexStatsSchema.Columns.count(colId)) {
-            error = Sprintf("Column id %" PRIu32 " not found", colId);
- return {};
- }
- }
-
- auto out = std::make_shared<NOlap::TReadStatsMetadata>(tabletId);
-
- out->ReadColumnIds.assign(readColumnIds.begin(), readColumnIds.end());
- out->ResultColumnIds = read.ColumnIds;
- out->Program = std::move(read.Program);
-
- if (!index) {
- return out;
- }
-
- ui64 fromPathId = 1;
- ui64 toPathId = Max<ui64>();
-
- if (read.GreaterPredicate && read.GreaterPredicate->Good()) {
+NOlap::TReadStatsMetadata::TPtr
+PrepareStatsReadMetadata(ui64 tabletId, const TReadDescription& read, const std::unique_ptr<NOlap::IColumnEngine>& index, TString& error) {
+ THashSet<ui32> readColumnIds(read.ColumnIds.begin(), read.ColumnIds.end());
+ for (auto& [id, name] : read.ProgramSourceColumns) {
+ readColumnIds.insert(id);
+ }
+
+ for (ui32 colId : readColumnIds) {
+ if (!PrimaryIndexStatsSchema.Columns.count(colId)) {
+            error = Sprintf("Column id %" PRIu32 " not found", colId);
+ return {};
+ }
+ }
+
+ auto out = std::make_shared<NOlap::TReadStatsMetadata>(tabletId);
+
+ out->ReadColumnIds.assign(readColumnIds.begin(), readColumnIds.end());
+ out->ResultColumnIds = read.ColumnIds;
+ out->Program = std::move(read.Program);
+
+ if (!index) {
+ return out;
+ }
+
+ ui64 fromPathId = 1;
+ ui64 toPathId = Max<ui64>();
+
+ if (read.GreaterPredicate && read.GreaterPredicate->Good()) {
auto from = read.GreaterPredicate->Batch->column(0);
- if (from) {
- fromPathId = static_cast<arrow::UInt64Array&>(*from).Value(0);
+ if (from) {
+ fromPathId = static_cast<arrow::UInt64Array&>(*from).Value(0);
}
- out->GreaterPredicate = read.GreaterPredicate;
- }
+ out->GreaterPredicate = read.GreaterPredicate;
+ }
+
+ if (read.LessPredicate && read.LessPredicate->Good()) {
+ auto to = read.LessPredicate->Batch->column(0);
+ if (to) {
+ toPathId = static_cast<arrow::UInt64Array&>(*to).Value(0);
+ }
+ out->LessPredicate = read.LessPredicate;
+ }
- if (read.LessPredicate && read.LessPredicate->Good()) {
- auto to = read.LessPredicate->Batch->column(0);
- if (to) {
- toPathId = static_cast<arrow::UInt64Array&>(*to).Value(0);
+ const auto& stats = index->GetStats();
+ if (read.TableName.EndsWith(NOlap::TIndexInfo::TABLE_INDEX_STATS_TABLE)) {
+ if (fromPathId <= read.PathId && toPathId >= read.PathId && stats.count(read.PathId)) {
+ out->IndexStats[read.PathId] = std::make_shared<NOlap::TColumnEngineStats>(*stats.at(read.PathId));
+ }
+ } else if (read.TableName.EndsWith(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE)) {
+ auto it = stats.lower_bound(fromPathId);
+ auto itEnd = stats.upper_bound(toPathId);
+ for (; it != itEnd; ++it) {
+ out->IndexStats[it->first] = std::make_shared<NOlap::TColumnEngineStats>(*it->second);
}
- out->LessPredicate = read.LessPredicate;
- }
-
- const auto& stats = index->GetStats();
- if (read.TableName.EndsWith(NOlap::TIndexInfo::TABLE_INDEX_STATS_TABLE)) {
- if (fromPathId <= read.PathId && toPathId >= read.PathId && stats.count(read.PathId)) {
- out->IndexStats[read.PathId] = std::make_shared<NOlap::TColumnEngineStats>(*stats.at(read.PathId));
- }
- } else if (read.TableName.EndsWith(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE)) {
- auto it = stats.lower_bound(fromPathId);
- auto itEnd = stats.upper_bound(toPathId);
- for (; it != itEnd; ++it) {
- out->IndexStats[it->first] = std::make_shared<NOlap::TColumnEngineStats>(*it->second);
- }
- }
- return out;
-}
-
-NOlap::TReadMetadataBase::TConstPtr TTxScan::CreateReadMetadata(const TActorContext& ctx, TReadDescription& read,
- bool indexStats, bool isReverse, ui64 itemsLimit)
-{
- NOlap::TReadMetadataBase::TPtr metadata;
- if (indexStats) {
- metadata = PrepareStatsReadMetadata(Self->TabletID(), read, Self->PrimaryIndex, ErrorDescription);
- } else {
- metadata = PrepareReadMetadata(ctx, read, Self->InsertTable, Self->PrimaryIndex, ErrorDescription);
- }
-
- if (!metadata) {
- return {};
- }
-
+ }
+ return out;
+}
+
+NOlap::TReadMetadataBase::TConstPtr TTxScan::CreateReadMetadata(const TActorContext& ctx, TReadDescription& read,
+ bool indexStats, bool isReverse, ui64 itemsLimit)
+{
+ NOlap::TReadMetadataBase::TPtr metadata;
+ if (indexStats) {
+ metadata = PrepareStatsReadMetadata(Self->TabletID(), read, Self->PrimaryIndex, ErrorDescription);
+ } else {
+ metadata = PrepareReadMetadata(ctx, read, Self->InsertTable, Self->PrimaryIndex, ErrorDescription);
+ }
+
+ if (!metadata) {
+ return {};
+ }
+
if (isReverse) {
metadata->SetDescSorting();
}
@@ -569,37 +569,37 @@ NOlap::TReadMetadataBase::TConstPtr TTxScan::CreateReadMetadata(const TActorCont
}
-bool TTxScan::Execute(TTransactionContext& txc, const TActorContext& ctx) {
+bool TTxScan::Execute(TTransactionContext& txc, const TActorContext& ctx) {
Y_UNUSED(txc);
- Y_VERIFY(Ev);
+ Y_VERIFY(Ev);
LOG_S_DEBUG("TTxScan.Execute at tablet " << Self->TabletID());
-
- auto& record = Ev->Get()->Record;
- const auto& snapshot = record.GetSnapshot();
-
+
+ auto& record = Ev->Get()->Record;
+ const auto& snapshot = record.GetSnapshot();
+
ui64 itemsLimit = record.HasItemsLimit() ? record.GetItemsLimit() : 0;
- TReadDescription read;
- read.PlanStep = snapshot.GetStep();
- read.TxId = snapshot.GetTxId();
- read.PathId = record.GetLocalPathId();
+ TReadDescription read;
+ read.PlanStep = snapshot.GetStep();
+ read.TxId = snapshot.GetTxId();
+ read.PathId = record.GetLocalPathId();
read.ReadNothing = Self->PathsToDrop.count(read.PathId);
- read.TableName = record.GetTablePath();
- bool isIndexStats = read.TableName.EndsWith(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE) ||
- read.TableName.EndsWith(NOlap::TIndexInfo::TABLE_INDEX_STATS_TABLE);
- read.ColumnIds.assign(record.GetColumnTags().begin(), record.GetColumnTags().end());
-
- // TODO: move this to CreateReadMetadata?
- if (read.ColumnIds.empty()) {
-        // "SELECT COUNT(*)" requests an empty column list, but PrepareReadMetadata needs a non-empty one,
-        // so we add the first PK column to the request.
- if (!isIndexStats) {
- read.ColumnIds.push_back(Self->PrimaryIndex->GetIndexInfo().GetPKFirstColumnId());
- } else {
- read.ColumnIds.push_back(PrimaryIndexStatsSchema.KeyColumns.front());
- }
- }
-
+ read.TableName = record.GetTablePath();
+ bool isIndexStats = read.TableName.EndsWith(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE) ||
+ read.TableName.EndsWith(NOlap::TIndexInfo::TABLE_INDEX_STATS_TABLE);
+ read.ColumnIds.assign(record.GetColumnTags().begin(), record.GetColumnTags().end());
+
+ // TODO: move this to CreateReadMetadata?
+ if (read.ColumnIds.empty()) {
+        // "SELECT COUNT(*)" requests an empty column list, but PrepareReadMetadata needs a non-empty one,
+        // so we add the first PK column to the request.
+ if (!isIndexStats) {
+ read.ColumnIds.push_back(Self->PrimaryIndex->GetIndexInfo().GetPKFirstColumnId());
+ } else {
+ read.ColumnIds.push_back(PrimaryIndexStatsSchema.KeyColumns.front());
+ }
+ }
+
bool parseResult;
if (!isIndexStats) {
@@ -616,108 +616,108 @@ bool TTxScan::Execute(TTransactionContext& txc, const TActorContext& ctx) {
if (!record.RangesSize()) {
auto range = CreateReadMetadata(ctx, read, isIndexStats, record.GetReverse(), itemsLimit);
- if (range) {
+ if (range) {
ReadMetadataRanges = {range};
- }
+ }
return true;
}
ReadMetadataRanges.reserve(record.RangesSize());
- auto ydbKey = isIndexStats ?
- NOlap::GetColumns(PrimaryIndexStatsSchema, PrimaryIndexStatsSchema.KeyColumns) :
- Self->PrimaryIndex->GetIndexInfo().GetPK();
-
+ auto ydbKey = isIndexStats ?
+ NOlap::GetColumns(PrimaryIndexStatsSchema, PrimaryIndexStatsSchema.KeyColumns) :
+ Self->PrimaryIndex->GetIndexInfo().GetPK();
+
for (auto& range: record.GetRanges()) {
FillPredicatesFromRange(read, range, ydbKey, Self->TabletID());
auto newRange = CreateReadMetadata(ctx, read, isIndexStats, record.GetReverse(), itemsLimit);
- if (!newRange) {
+ if (!newRange) {
ReadMetadataRanges.clear();
- return true;
- }
+ return true;
+ }
ReadMetadataRanges.emplace_back(newRange);
}
- if (record.GetReverse()) {
- std::reverse(ReadMetadataRanges.begin(), ReadMetadataRanges.end());
- }
-
- return true;
-}
-
-template <typename T>
-struct TContainerPrinter {
- const T& Ref;
-
- TContainerPrinter(const T& ref)
- : Ref(ref)
- {}
-
- friend IOutputStream& operator << (IOutputStream& out, const TContainerPrinter& cont) {
- for (auto& ptr : cont.Ref) {
- out << *ptr << " ";
- }
- return out;
- }
-};
-
-void TTxScan::Complete(const TActorContext& ctx) {
- auto& request = Ev->Get()->Record;
- auto scanComputeActor = Ev->Sender;
- const auto& snapshot = request.GetSnapshot();
- const auto scanId = request.GetScanId();
- const ui64 txId = request.GetTxId();
+ if (record.GetReverse()) {
+ std::reverse(ReadMetadataRanges.begin(), ReadMetadataRanges.end());
+ }
+
+ return true;
+}
+
+template <typename T>
+struct TContainerPrinter {
+ const T& Ref;
+
+ TContainerPrinter(const T& ref)
+ : Ref(ref)
+ {}
+
+ friend IOutputStream& operator << (IOutputStream& out, const TContainerPrinter& cont) {
+ for (auto& ptr : cont.Ref) {
+ out << *ptr << " ";
+ }
+ return out;
+ }
+};
+
+void TTxScan::Complete(const TActorContext& ctx) {
+ auto& request = Ev->Get()->Record;
+ auto scanComputeActor = Ev->Sender;
+ const auto& snapshot = request.GetSnapshot();
+ const auto scanId = request.GetScanId();
+ const ui64 txId = request.GetTxId();
const ui32 scanGen = request.GetGeneration();
- TString table = request.GetTablePath();
+ TString table = request.GetTablePath();
auto dataFormat = request.GetDataFormat();
- TDuration timeout = TDuration::MilliSeconds(request.GetTimeoutMs());
-
- if (scanGen > 1) {
- Self->IncCounter(COUNTER_SCAN_RESTARTED);
- }
-
- TStringStream detailedInfo;
- if (IS_LOG_PRIORITY_ENABLED(ctx, NActors::NLog::PRI_TRACE, NKikimrServices::TX_COLUMNSHARD)) {
- detailedInfo
+ TDuration timeout = TDuration::MilliSeconds(request.GetTimeoutMs());
+
+ if (scanGen > 1) {
+ Self->IncCounter(COUNTER_SCAN_RESTARTED);
+ }
+
+ TStringStream detailedInfo;
+ if (IS_LOG_PRIORITY_ENABLED(ctx, NActors::NLog::PRI_TRACE, NKikimrServices::TX_COLUMNSHARD)) {
+ detailedInfo
<< ", read metadata: " << TContainerPrinter(ReadMetadataRanges)
- << ", req: " << request;
- }
-
+ << ", req: " << request;
+ }
+
LOG_S_DEBUG("Starting scan"
<< ", txId: " << txId
<< ", scanId: " << scanId
<< ", gen: " << scanGen
- << ", table: " << table
+ << ", table: " << table
<< ", snapshot: " << snapshot
<< ", shard: " << Self->TabletID()
- << ", timeout: " << timeout
- << detailedInfo.Str()
- );
-
+ << ", timeout: " << timeout
+ << detailedInfo.Str()
+ );
+
if (ReadMetadataRanges.empty()) {
- Y_VERIFY(ErrorDescription);
- auto ev = MakeHolder<TEvKqpCompute::TEvScanError>(scanGen);
-
- ev->Record.SetStatus(Ydb::StatusIds::BAD_REQUEST);
- auto issue = NYql::YqlIssue({}, NYql::TIssuesIds::KIKIMR_BAD_REQUEST, TStringBuilder()
+ Y_VERIFY(ErrorDescription);
+ auto ev = MakeHolder<TEvKqpCompute::TEvScanError>(scanGen);
+
+ ev->Record.SetStatus(Ydb::StatusIds::BAD_REQUEST);
+ auto issue = NYql::YqlIssue({}, NYql::TIssuesIds::KIKIMR_BAD_REQUEST, TStringBuilder()
<< "Table " << table << " scan failed, reason: " << ErrorDescription);
- NYql::IssueToMessage(issue, ev->Record.MutableIssues()->Add());
-
- ctx.Send(scanComputeActor, ev.Release());
- return;
- }
-
- ui64 requestCookie = Self->InFlightReadsTracker.AddInFlightRequest(ReadMetadataRanges, *Self->BlobManager);
- auto statsDelta = Self->InFlightReadsTracker.GetSelectStatsDelta();
-
- Self->IncCounter(COUNTER_READ_INDEX_GRANULES, statsDelta.Granules);
- Self->IncCounter(COUNTER_READ_INDEX_PORTIONS, statsDelta.Portions);
- Self->IncCounter(COUNTER_READ_INDEX_BLOBS, statsDelta.Blobs);
- Self->IncCounter(COUNTER_READ_INDEX_ROWS, statsDelta.Rows);
- Self->IncCounter(COUNTER_READ_INDEX_BYTES, statsDelta.Bytes);
-
- ctx.Register(new TColumnShardScan(Self->SelfId(), scanComputeActor,
+ NYql::IssueToMessage(issue, ev->Record.MutableIssues()->Add());
+
+ ctx.Send(scanComputeActor, ev.Release());
+ return;
+ }
+
+ ui64 requestCookie = Self->InFlightReadsTracker.AddInFlightRequest(ReadMetadataRanges, *Self->BlobManager);
+ auto statsDelta = Self->InFlightReadsTracker.GetSelectStatsDelta();
+
+ Self->IncCounter(COUNTER_READ_INDEX_GRANULES, statsDelta.Granules);
+ Self->IncCounter(COUNTER_READ_INDEX_PORTIONS, statsDelta.Portions);
+ Self->IncCounter(COUNTER_READ_INDEX_BLOBS, statsDelta.Blobs);
+ Self->IncCounter(COUNTER_READ_INDEX_ROWS, statsDelta.Rows);
+ Self->IncCounter(COUNTER_READ_INDEX_BYTES, statsDelta.Bytes);
+
+ ctx.Register(new TColumnShardScan(Self->SelfId(), scanComputeActor,
scanId, txId, scanGen, requestCookie, table, timeout, std::move(ReadMetadataRanges), dataFormat));
-}
-
-}
+}
+
+}
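
ContinueProcessing above combines several kinds of backpressure: it stalls while PeerFreeSpace is zero, keeps at most MAX_SCANDATA_MESSAGES_IN_FLIGHT unacknowledged ScanData messages, reads blobs ahead only while under the byte budget, and bounds the outer loop by the number of ranges so a stuck scan is logged instead of spinning. A compact sketch of that control flow, with illustrative constants and callbacks in place of the actor machinery:

    #include <cstdint>
    #include <functional>
    #include <iostream>

    struct TScanBudget {
        int64_t PeerFreeSpace = 0;            // bytes the compute actor can still accept
        int64_t InFlightScanDataMessages = 0; // unacked ScanData messages
        int64_t InFlightReads = 0;            // outstanding blob-cache reads
        int64_t InFlightReadBytes = 0;
        bool    HasIterator = true;           // false once all ranges are exhausted

        static constexpr int64_t MaxScanDataInFlight = 2;
        static constexpr int64_t MaxReadAheadBytes   = 1 << 20; // 1 MiB read-ahead budget

        // produceResult(): emits one ready batch, returns false if nothing is ready.
        // readNextBlob(): requests the next blob, returns its size in bytes (0 when none left).
        void Step(const std::function<bool()>& produceResult,
                  const std::function<int64_t()>& readNextBlob) {
            if (!HasIterator || PeerFreeSpace == 0) {
                return; // throttle until the peer acks more free space
            }
            while (InFlightScanDataMessages < MaxScanDataInFlight && produceResult()) {
                ++InFlightScanDataMessages;
            }
            while (InFlightReadBytes < MaxReadAheadBytes || InFlightReads == 0) {
                const int64_t bytes = readNextBlob();
                if (bytes == 0) {
                    break;
                }
                ++InFlightReads;
                InFlightReadBytes += bytes;
            }
        }

        void Run(int64_t rangeCount,
                 const std::function<bool()>& produceResult,
                 const std::function<int64_t()>& readNextBlob) {
            for (int64_t step = 0; step <= rangeCount; ++step) {
                Step(produceResult, readNextBlob);
                // Exit if all ranges are done, or if an in-flight read or unacked
                // ScanData message guarantees another wake-up later.
                if (!HasIterator || InFlightScanDataMessages != 0 || InFlightReads != 0) {
                    return;
                }
            }
            std::cerr << "scan made no progress\n"; // the real actor logs "Scan is hanging"
        }
    };

    int main() {
        TScanBudget budget;
        budget.PeerFreeSpace = 8 << 20;
        int batches = 3, blobs = 5;
        budget.Run(1,
                   [&] { return batches-- > 0; },
                   [&]() -> int64_t { return blobs-- > 0 ? 64 * 1024 : 0; });
        std::cout << "inFlightReads=" << budget.InFlightReads
                  << " inFlightScanData=" << budget.InFlightScanDataMessages << "\n";
        return 0;
    }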
diff --git a/ydb/core/tx/columnshard/columnshard__scan.h b/ydb/core/tx/columnshard/columnshard__scan.h
index e075b58aacb..1447d494c18 100644
--- a/ydb/core/tx/columnshard/columnshard__scan.h
+++ b/ydb/core/tx/columnshard/columnshard__scan.h
@@ -1,19 +1,19 @@
-#pragma once
-
-#include "blob_cache.h"
+#pragma once
+
+#include "blob_cache.h"
#include <ydb/core/tx/columnshard/engines/indexed_read_data.h>
-
+
namespace NKikimr::NColumnShard {
-
-class TScanIteratorBase {
-public:
- virtual ~TScanIteratorBase() = default;
-
- virtual void AddData(const NBlobCache::TBlobRange& /*blobRange*/, TString /*data*/) {}
- virtual bool Finished() const = 0;
- virtual NOlap::TPartialReadResult GetBatch() = 0;
- virtual NBlobCache::TBlobRange GetNextBlobToRead() { return NBlobCache::TBlobRange(); }
- virtual size_t ReadyResultsCount() const = 0;
-};
-
-}
+
+class TScanIteratorBase {
+public:
+ virtual ~TScanIteratorBase() = default;
+
+ virtual void AddData(const NBlobCache::TBlobRange& /*blobRange*/, TString /*data*/) {}
+ virtual bool Finished() const = 0;
+ virtual NOlap::TPartialReadResult GetBatch() = 0;
+ virtual NBlobCache::TBlobRange GetNextBlobToRead() { return NBlobCache::TBlobRange(); }
+ virtual size_t ReadyResultsCount() const = 0;
+};
+
+}
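
TScanIteratorBase above is the seam between the scan actor and its data sources: the actor pulls the next blob range to fetch, pushes fetched data back via AddData(), and drains ready batches with GetBatch(). A toy implementation with simplified types (strings as blob ranges, vectors as batches) to show that call protocol; the real iterators work with NBlobCache::TBlobRange and NOlap::TPartialReadResult:

    #include <deque>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct TToyScanIterator {
        std::deque<std::string> BlobsToRead;     // planned reads
        std::deque<std::vector<int>> Ready;      // decoded batches ready to return

        bool Finished() const { return BlobsToRead.empty() && Ready.empty(); }
        size_t ReadyResultsCount() const { return Ready.size(); }

        // An empty string plays the role of an invalid TBlobRange ("nothing more to read").
        std::string GetNextBlobToRead() {
            if (BlobsToRead.empty()) {
                return {};
            }
            std::string blob = BlobsToRead.front();
            BlobsToRead.pop_front();
            return blob;
        }

        // Pretend each fetched blob decodes into one batch of its byte values.
        void AddData(const std::string& /*blobId*/, const std::string& data) {
            Ready.emplace_back(data.begin(), data.end());
        }

        // nullopt means "nothing ready yet", mirroring a null ResultBatch.
        std::optional<std::vector<int>> GetBatch() {
            if (Ready.empty()) {
                return std::nullopt;
            }
            std::vector<int> batch = std::move(Ready.front());
            Ready.pop_front();
            return batch;
        }
    };

    int main() {
        TToyScanIterator it;
        it.BlobsToRead = {"blob-1"};
        // Driver loop in the spirit of TColumnShardScan: request, feed, drain.
        for (std::string blob = it.GetNextBlobToRead(); !blob.empty(); blob = it.GetNextBlobToRead()) {
            it.AddData(blob, "abc"); // stand-in for the blob-cache reply
        }
        while (auto batch = it.GetBatch()) {
            std::cout << "batch of " << batch->size() << " values\n";
        }
        return 0;
    }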
diff --git a/ydb/core/tx/columnshard/columnshard__stats_scan.h b/ydb/core/tx/columnshard/columnshard__stats_scan.h
index 744038f7504..5400d8660dc 100644
--- a/ydb/core/tx/columnshard/columnshard__stats_scan.h
+++ b/ydb/core/tx/columnshard/columnshard__stats_scan.h
@@ -1,193 +1,193 @@
-#pragma once
-
-#include "columnshard__scan.h"
-#include "columnshard_common.h"
+#pragma once
+
+#include "columnshard__scan.h"
+#include "columnshard_common.h"
#include <ydb/core/tablet_flat/flat_cxx_database.h>
#include <ydb/core/sys_view/common/schema.h>
-
+
namespace NKikimr::NColumnShard {
-
-static const NTable::TScheme::TTableSchema PrimaryIndexStatsSchema = []() {
- NTable::TScheme::TTableSchema schema;
- NIceDb::NHelpers::TStaticSchemaFiller<NKikimr::NSysView::Schema::PrimaryIndexStats>::Fill(schema);
- return schema;
-}();
-
-
-class TStatsColumnResolver : public IColumnResolver {
-public:
- TString GetColumnName(ui32 id, bool required) const override {
- auto it = PrimaryIndexStatsSchema.Columns.find(id);
- if (it == PrimaryIndexStatsSchema.Columns.end()) {
- Y_VERIFY(!required, "No column '%" PRIu32 "' in primary_index_stats", id);
- return {};
- }
- return it->second.Name;
- }
-};
-
-
-class TStatsIterator : public TScanIteratorBase {
-public:
- TStatsIterator(const NOlap::TReadStatsMetadata::TConstPtr& readMetadata)
- : ReadMetadata(readMetadata)
- , Reverse(ReadMetadata->IsDescSorted())
- , KeySchema(NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, PrimaryIndexStatsSchema.KeyColumns))
- , ResultSchema(NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, ReadMetadata->ResultColumnIds))
- , IndexStats(ReadMetadata->IndexStats.begin(), ReadMetadata->IndexStats.end())
- {
- }
-
- bool Finished() const override {
- return IndexStats.empty();
- }
-
- NOlap::TPartialReadResult GetBatch() override {
- // Take next raw batch
- auto batch = FillStatsBatch();
-
- // Extract the last row's PK
- auto keyBatch = NArrow::ExtractColumns(batch, KeySchema);
- auto lastKey = keyBatch->Slice(keyBatch->num_rows()-1, 1);
-
- ApplyRangePredicates(batch);
-
- if (!ReadMetadata->Program.empty()) {
- ApplyProgram(batch, ReadMetadata->Program);
- }
-
- // Leave only requested columns
- auto resultBatch = NArrow::ExtractColumns(batch, ResultSchema);
- NOlap::TPartialReadResult out{std::move(resultBatch), std::move(lastKey)};
-
- return out;
- }
-
- size_t ReadyResultsCount() const override {
- return IndexStats.empty() ? 0 : 1;
- }
-
-private:
- NOlap::TReadStatsMetadata::TConstPtr ReadMetadata;
- bool Reverse{false};
- std::shared_ptr<arrow::Schema> KeySchema;
- std::shared_ptr<arrow::Schema> ResultSchema;
-
- TMap<ui64, std::shared_ptr<NOlap::TColumnEngineStats>> IndexStats;
-
- static constexpr const ui64 NUM_KINDS = 4;
- static_assert(NUM_KINDS == NOlap::TPortionMeta::INACTIVE, "NUM_KINDS must match NOlap::TPortionMeta::EProduced enum");
-
- std::shared_ptr<arrow::RecordBatch> FillStatsBatch() {
-
- ui64 numRows = 0;
- numRows += NUM_KINDS * IndexStats.size();
-
- TVector<ui32> allColumnIds;
- for (const auto& c : PrimaryIndexStatsSchema.Columns) {
- allColumnIds.push_back(c.second.Id);
- }
- std::sort(allColumnIds.begin(), allColumnIds.end());
- auto schema = NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, allColumnIds);
- auto builders = NArrow::MakeBuilders(schema, numRows);
-
- while (!IndexStats.empty()) {
- auto it = Reverse ? std::prev(IndexStats.end()) : IndexStats.begin();
- const auto& stats = it->second;
- Y_VERIFY(stats);
- AppendStats(builders, it->first, *stats);
- IndexStats.erase(it);
- }
-
- auto columns = NArrow::Finish(std::move(builders));
- return arrow::RecordBatch::Make(schema, numRows, columns);
- }
-
- void ApplyRangePredicates(std::shared_ptr<arrow::RecordBatch>& batch) {
- std::vector<bool> less;
- if (ReadMetadata->LessPredicate) {
- auto cmpType = ReadMetadata->LessPredicate->Inclusive ?
- NArrow::ECompareType::LESS_OR_EQUAL : NArrow::ECompareType::LESS;
- less = NArrow::MakePredicateFilter(batch, ReadMetadata->LessPredicate->Batch, cmpType);
- }
-
- std::vector<bool> greater;
- if (ReadMetadata->GreaterPredicate) {
- auto cmpType = ReadMetadata->GreaterPredicate->Inclusive ?
- NArrow::ECompareType::GREATER_OR_EQUAL : NArrow::ECompareType::GREATER;
- greater = NArrow::MakePredicateFilter(batch, ReadMetadata->GreaterPredicate->Batch, cmpType);
- }
-
- std::vector<bool> bits = NArrow::CombineFilters(std::move(less), std::move(greater));
- if (bits.size()) {
- auto res = arrow::compute::Filter(batch, NArrow::MakeFilter(bits));
- Y_VERIFY_S(res.ok(), res.status().message());
- Y_VERIFY((*res).kind() == arrow::Datum::RECORD_BATCH);
- batch = (*res).record_batch();
- }
- }
-
- void AppendStats(const std::vector<std::unique_ptr<arrow::ArrayBuilder>>& builders,
- ui64 pathId, const NOlap::TColumnEngineStats& stats) {
- using TUInt64 = arrow::UInt64Type::c_type;
- using TUInt32 = arrow::UInt32Type::c_type;
-
- TUInt64 pathIds[NUM_KINDS] = {pathId, pathId, pathId, pathId};
- /// It's better to keep it in sync with TPortionMeta::EProduced
- TUInt32 kinds[NUM_KINDS] = {1, 2, 3, 4};
- ui64 tabletId = ReadMetadata->TabletId;
- TUInt64 tabletIds[NUM_KINDS] = {tabletId, tabletId, tabletId, tabletId};
- TUInt64 rows[NUM_KINDS] = {
- stats.Inserted.Rows,
- stats.Compacted.Rows,
- stats.SplitCompacted.Rows,
- stats.Inactive.Rows
- };
- TUInt64 bytes[NUM_KINDS] = {
- stats.Inserted.Bytes,
- stats.Compacted.Bytes,
- stats.SplitCompacted.Bytes,
- stats.Inactive.Bytes
- };
- TUInt64 rawBytes[NUM_KINDS] = {
- stats.Inserted.RawBytes,
- stats.Compacted.RawBytes,
- stats.SplitCompacted.RawBytes,
- stats.Inactive.RawBytes
- };
- TUInt64 portions[NUM_KINDS] = {
- stats.Inserted.Portions,
- stats.Compacted.Portions,
- stats.SplitCompacted.Portions,
- stats.Inactive.Portions
- };
- TUInt64 blobs[NUM_KINDS] = {
- stats.Inserted.Blobs,
- stats.Compacted.Blobs,
- stats.SplitCompacted.Blobs,
- stats.Inactive.Blobs
- };
-
- if (Reverse) {
- std::reverse(std::begin(pathIds), std::end(pathIds));
- std::reverse(std::begin(kinds), std::end(kinds));
- std::reverse(std::begin(tabletIds), std::end(tabletIds));
- std::reverse(std::begin(rows), std::end(rows));
- std::reverse(std::begin(bytes), std::end(bytes));
- std::reverse(std::begin(rawBytes), std::end(rawBytes));
- std::reverse(std::begin(portions), std::end(portions));
- std::reverse(std::begin(blobs), std::end(blobs));
- }
-
- NArrow::Append<arrow::UInt64Type>(*builders[0], pathIds, NUM_KINDS);
- NArrow::Append<arrow::UInt32Type>(*builders[1], kinds, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[2], tabletIds, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[3], rows, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[4], bytes, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[5], rawBytes, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[6], portions, NUM_KINDS);
- NArrow::Append<arrow::UInt64Type>(*builders[7], blobs, NUM_KINDS);
- }
-};
-
-}
+
+static const NTable::TScheme::TTableSchema PrimaryIndexStatsSchema = []() {
+ NTable::TScheme::TTableSchema schema;
+ NIceDb::NHelpers::TStaticSchemaFiller<NKikimr::NSysView::Schema::PrimaryIndexStats>::Fill(schema);
+ return schema;
+}();
+
+
+class TStatsColumnResolver : public IColumnResolver {
+public:
+ TString GetColumnName(ui32 id, bool required) const override {
+ auto it = PrimaryIndexStatsSchema.Columns.find(id);
+ if (it == PrimaryIndexStatsSchema.Columns.end()) {
+ Y_VERIFY(!required, "No column '%" PRIu32 "' in primary_index_stats", id);
+ return {};
+ }
+ return it->second.Name;
+ }
+};
+
+
+class TStatsIterator : public TScanIteratorBase {
+public:
+ TStatsIterator(const NOlap::TReadStatsMetadata::TConstPtr& readMetadata)
+ : ReadMetadata(readMetadata)
+ , Reverse(ReadMetadata->IsDescSorted())
+ , KeySchema(NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, PrimaryIndexStatsSchema.KeyColumns))
+ , ResultSchema(NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, ReadMetadata->ResultColumnIds))
+ , IndexStats(ReadMetadata->IndexStats.begin(), ReadMetadata->IndexStats.end())
+ {
+ }
+
+ bool Finished() const override {
+ return IndexStats.empty();
+ }
+
+ NOlap::TPartialReadResult GetBatch() override {
+ // Take next raw batch
+ auto batch = FillStatsBatch();
+
+ // Extract the last row's PK
+ auto keyBatch = NArrow::ExtractColumns(batch, KeySchema);
+ auto lastKey = keyBatch->Slice(keyBatch->num_rows()-1, 1);
+
+ ApplyRangePredicates(batch);
+
+ if (!ReadMetadata->Program.empty()) {
+ ApplyProgram(batch, ReadMetadata->Program);
+ }
+
+ // Leave only requested columns
+ auto resultBatch = NArrow::ExtractColumns(batch, ResultSchema);
+ NOlap::TPartialReadResult out{std::move(resultBatch), std::move(lastKey)};
+
+ return out;
+ }
+
+ size_t ReadyResultsCount() const override {
+ return IndexStats.empty() ? 0 : 1;
+ }
+
+private:
+ NOlap::TReadStatsMetadata::TConstPtr ReadMetadata;
+ bool Reverse{false};
+ std::shared_ptr<arrow::Schema> KeySchema;
+ std::shared_ptr<arrow::Schema> ResultSchema;
+
+ TMap<ui64, std::shared_ptr<NOlap::TColumnEngineStats>> IndexStats;
+
+ static constexpr const ui64 NUM_KINDS = 4;
+ static_assert(NUM_KINDS == NOlap::TPortionMeta::INACTIVE, "NUM_KINDS must match NOlap::TPortionMeta::EProduced enum");
+
+ std::shared_ptr<arrow::RecordBatch> FillStatsBatch() {
+
+ ui64 numRows = 0;
+ numRows += NUM_KINDS * IndexStats.size();
+
+ TVector<ui32> allColumnIds;
+ for (const auto& c : PrimaryIndexStatsSchema.Columns) {
+ allColumnIds.push_back(c.second.Id);
+ }
+ std::sort(allColumnIds.begin(), allColumnIds.end());
+ auto schema = NOlap::MakeArrowSchema(PrimaryIndexStatsSchema.Columns, allColumnIds);
+ auto builders = NArrow::MakeBuilders(schema, numRows);
+
+ while (!IndexStats.empty()) {
+ auto it = Reverse ? std::prev(IndexStats.end()) : IndexStats.begin();
+ const auto& stats = it->second;
+ Y_VERIFY(stats);
+ AppendStats(builders, it->first, *stats);
+ IndexStats.erase(it);
+ }
+
+ auto columns = NArrow::Finish(std::move(builders));
+ return arrow::RecordBatch::Make(schema, numRows, columns);
+ }
+
+ void ApplyRangePredicates(std::shared_ptr<arrow::RecordBatch>& batch) {
+ std::vector<bool> less;
+ if (ReadMetadata->LessPredicate) {
+ auto cmpType = ReadMetadata->LessPredicate->Inclusive ?
+ NArrow::ECompareType::LESS_OR_EQUAL : NArrow::ECompareType::LESS;
+ less = NArrow::MakePredicateFilter(batch, ReadMetadata->LessPredicate->Batch, cmpType);
+ }
+
+ std::vector<bool> greater;
+ if (ReadMetadata->GreaterPredicate) {
+ auto cmpType = ReadMetadata->GreaterPredicate->Inclusive ?
+ NArrow::ECompareType::GREATER_OR_EQUAL : NArrow::ECompareType::GREATER;
+ greater = NArrow::MakePredicateFilter(batch, ReadMetadata->GreaterPredicate->Batch, cmpType);
+ }
+
+ std::vector<bool> bits = NArrow::CombineFilters(std::move(less), std::move(greater));
+ if (bits.size()) {
+ auto res = arrow::compute::Filter(batch, NArrow::MakeFilter(bits));
+ Y_VERIFY_S(res.ok(), res.status().message());
+ Y_VERIFY((*res).kind() == arrow::Datum::RECORD_BATCH);
+ batch = (*res).record_batch();
+ }
+ }
+
+ void AppendStats(const std::vector<std::unique_ptr<arrow::ArrayBuilder>>& builders,
+ ui64 pathId, const NOlap::TColumnEngineStats& stats) {
+ using TUInt64 = arrow::UInt64Type::c_type;
+ using TUInt32 = arrow::UInt32Type::c_type;
+
+ TUInt64 pathIds[NUM_KINDS] = {pathId, pathId, pathId, pathId};
+ /// It's better to keep it in sync with TPortionMeta::EProduced
+ TUInt32 kinds[NUM_KINDS] = {1, 2, 3, 4};
+ ui64 tabletId = ReadMetadata->TabletId;
+ TUInt64 tabletIds[NUM_KINDS] = {tabletId, tabletId, tabletId, tabletId};
+ TUInt64 rows[NUM_KINDS] = {
+ stats.Inserted.Rows,
+ stats.Compacted.Rows,
+ stats.SplitCompacted.Rows,
+ stats.Inactive.Rows
+ };
+ TUInt64 bytes[NUM_KINDS] = {
+ stats.Inserted.Bytes,
+ stats.Compacted.Bytes,
+ stats.SplitCompacted.Bytes,
+ stats.Inactive.Bytes
+ };
+ TUInt64 rawBytes[NUM_KINDS] = {
+ stats.Inserted.RawBytes,
+ stats.Compacted.RawBytes,
+ stats.SplitCompacted.RawBytes,
+ stats.Inactive.RawBytes
+ };
+ TUInt64 portions[NUM_KINDS] = {
+ stats.Inserted.Portions,
+ stats.Compacted.Portions,
+ stats.SplitCompacted.Portions,
+ stats.Inactive.Portions
+ };
+ TUInt64 blobs[NUM_KINDS] = {
+ stats.Inserted.Blobs,
+ stats.Compacted.Blobs,
+ stats.SplitCompacted.Blobs,
+ stats.Inactive.Blobs
+ };
+
+ if (Reverse) {
+ std::reverse(std::begin(pathIds), std::end(pathIds));
+ std::reverse(std::begin(kinds), std::end(kinds));
+ std::reverse(std::begin(tabletIds), std::end(tabletIds));
+ std::reverse(std::begin(rows), std::end(rows));
+ std::reverse(std::begin(bytes), std::end(bytes));
+ std::reverse(std::begin(rawBytes), std::end(rawBytes));
+ std::reverse(std::begin(portions), std::end(portions));
+ std::reverse(std::begin(blobs), std::end(blobs));
+ }
+
+ NArrow::Append<arrow::UInt64Type>(*builders[0], pathIds, NUM_KINDS);
+ NArrow::Append<arrow::UInt32Type>(*builders[1], kinds, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[2], tabletIds, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[3], rows, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[4], bytes, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[5], rawBytes, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[6], portions, NUM_KINDS);
+ NArrow::Append<arrow::UInt64Type>(*builders[7], blobs, NUM_KINDS);
+ }
+};
+
+}
diff --git a/ydb/core/tx/columnshard/columnshard__write.cpp b/ydb/core/tx/columnshard/columnshard__write.cpp
index fbd2c7156eb..76fb8669609 100644
--- a/ydb/core/tx/columnshard/columnshard__write.cpp
+++ b/ydb/core/tx/columnshard/columnshard__write.cpp
@@ -1,8 +1,8 @@
#include "columnshard_impl.h"
#include "columnshard_txs.h"
#include "columnshard_schema.h"
-#include "blob_manager_db.h"
-#include "blob_cache.h"
+#include "blob_manager_db.h"
+#include "blob_cache.h"
namespace NKikimr::NColumnShard {
@@ -39,14 +39,14 @@ bool TTxWrite::Execute(TTransactionContext& txc, const TActorContext&) {
auto longTxId = NLongTxService::TLongTxId::FromProto(record.GetLongTxId());
writeId = (ui64)Self->GetLongTxWrite(db, longTxId);
}
-
+
ui64 writeUnixTime = meta.GetDirtyWriteTimeSeconds();
TInstant time = TInstant::Seconds(writeUnixTime);
// First write wins
- TBlobGroupSelector dsGroupSelector(Self->Info());
- NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
- ok = Self->InsertTable->Insert(dbTable, NOlap::TInsertedData(metaShard, writeId, tableId, dedupId, logoBlobId, metaStr, time));
+ TBlobGroupSelector dsGroupSelector(Self->Info());
+ NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
+ ok = Self->InsertTable->Insert(dbTable, NOlap::TInsertedData(metaShard, writeId, tableId, dedupId, logoBlobId, metaStr, time));
if (ok) {
auto newAborted = Self->InsertTable->AbortOld(dbTable, time);
for (auto& writeId : newAborted) {
@@ -57,15 +57,15 @@ bool TTxWrite::Execute(TTransactionContext& txc, const TActorContext&) {
// It's not optimal but correct.
TBlobManagerDb blobManagerDb(txc.DB);
auto allAborted = Self->InsertTable->GetAborted(); // copy (src is modified in cycle)
- for (auto& [abortedWriteId, abortedData] : allAborted) {
- Self->InsertTable->EraseAborted(dbTable, abortedData);
- Self->BlobManager->DeleteBlob(abortedData.BlobId, blobManagerDb);
+ for (auto& [abortedWriteId, abortedData] : allAborted) {
+ Self->InsertTable->EraseAborted(dbTable, abortedData);
+ Self->BlobManager->DeleteBlob(abortedData.BlobId, blobManagerDb);
}
- // Put new data into cache
- Y_VERIFY(logoBlobId.BlobSize() == data.size());
- NBlobCache::AddRangeToCache(NBlobCache::TBlobRange(logoBlobId, 0, data.size()), data);
-
+ // Put new data into cache
+ Y_VERIFY(logoBlobId.BlobSize() == data.size());
+ NBlobCache::AddRangeToCache(NBlobCache::TBlobRange(logoBlobId, 0, data.size()), data);
+
Self->UpdateInsertTableCounters();
ui64 blobsWritten = Ev->Get()->BlobBatch.GetBlobCount();
diff --git a/ydb/core/tx/columnshard/columnshard__write_index.cpp b/ydb/core/tx/columnshard/columnshard__write_index.cpp
index 5e9c909797e..08f3982efc7 100644
--- a/ydb/core/tx/columnshard/columnshard__write_index.cpp
+++ b/ydb/core/tx/columnshard/columnshard__write_index.cpp
@@ -1,8 +1,8 @@
#include "columnshard_impl.h"
#include "columnshard_txs.h"
#include "columnshard_schema.h"
-#include "blob_manager_db.h"
-#include "blob_cache.h"
+#include "blob_manager_db.h"
+#include "blob_cache.h"
namespace NKikimr::NColumnShard {
@@ -15,9 +15,9 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
txc.DB.NoMoreReadsForTx();
- ui64 blobsWritten = 0;
- ui64 bytesWritten = 0;
-
+ ui64 blobsWritten = 0;
+ ui64 bytesWritten = 0;
+
auto changes = Ev->Get()->IndexChanges;
Y_VERIFY(changes);
@@ -28,8 +28,8 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
snapshot = {Self->LastPlannedStep, Self->LastPlannedTxId};
}
- TBlobGroupSelector dsGroupSelector(Self->Info());
- NOlap::TDbWrapper dbWrap(txc.DB, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Self->Info());
+ NOlap::TDbWrapper dbWrap(txc.DB, &dsGroupSelector);
ok = Self->PrimaryIndex->ApplyChanges(dbWrap, changes, snapshot); // update changes + apply
if (ok) {
LOG_S_DEBUG("TTxWriteIndex (" << changes->TypeString()
@@ -38,16 +38,16 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
TBlobManagerDb blobManagerDb(txc.DB);
for (const auto& cmtd : Ev->Get()->IndexChanges->DataToIndex) {
Self->InsertTable->EraseCommitted(dbWrap, cmtd);
- Self->BlobManager->DeleteBlob(cmtd.BlobId, blobManagerDb);
+ Self->BlobManager->DeleteBlob(cmtd.BlobId, blobManagerDb);
}
const auto& switchedPortions = Ev->Get()->IndexChanges->SwitchedPortions;
Self->IncCounter(COUNTER_PORTIONS_DEACTIVATED, switchedPortions.size());
- THashSet<TUnifiedBlobId> blobsDeactivated;
+ THashSet<TUnifiedBlobId> blobsDeactivated;
for (auto& portionInfo : switchedPortions) {
for (auto& rec : portionInfo.Records) {
- blobsDeactivated.insert(rec.BlobRange.BlobId);
+ blobsDeactivated.insert(rec.BlobRange.BlobId);
}
Self->IncCounter(COUNTER_RAW_BYTES_DEACTIVATED, portionInfo.RawBytesSum());
}
@@ -70,46 +70,46 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
case NOlap::TPortionMeta::SPLIT_COMPACTED:
Self->IncCounter(COUNTER_SPLIT_COMPACTION_PORTIONS_WRITTEN);
break;
- case NOlap::TPortionMeta::INACTIVE:
- Y_FAIL("Unexpected inactive case");
- break;
+ case NOlap::TPortionMeta::INACTIVE:
+ Y_FAIL("Unexpected inactive case");
+ break;
+ }
+
+ // Put newly created blobs into cache
+ if (Ev->Get()->CacheData) {
+ for (const auto& columnRec : portionInfo.Records) {
+ const auto* blob = Ev->Get()->IndexChanges->Blobs.FindPtr(columnRec.BlobRange);
+ Y_VERIFY_DEBUG(blob, "Column data must be passed if CacheData is set");
+ if (blob) {
+ Y_VERIFY(columnRec.BlobRange.Size == blob->Size());
+ NBlobCache::AddRangeToCache(columnRec.BlobRange, *blob);
+ }
+ }
}
-
- // Put newly created blobs into cache
- if (Ev->Get()->CacheData) {
- for (const auto& columnRec : portionInfo.Records) {
- const auto* blob = Ev->Get()->IndexChanges->Blobs.FindPtr(columnRec.BlobRange);
- Y_VERIFY_DEBUG(blob, "Column data must be passed if CacheData is set");
- if (blob) {
- Y_VERIFY(columnRec.BlobRange.Size == blob->Size());
- NBlobCache::AddRangeToCache(columnRec.BlobRange, *blob);
- }
- }
- }
}
const auto& portionsToDrop = Ev->Get()->IndexChanges->PortionsToDrop;
- THashSet<TUnifiedBlobId> blobsToDrop;
+ THashSet<TUnifiedBlobId> blobsToDrop;
Self->IncCounter(COUNTER_PORTIONS_ERASED, portionsToDrop.size());
for (const auto& portionInfo : portionsToDrop) {
for (const auto& rec : portionInfo.Records) {
- blobsToDrop.insert(rec.BlobRange.BlobId);
+ blobsToDrop.insert(rec.BlobRange.BlobId);
}
Self->IncCounter(COUNTER_RAW_BYTES_ERASED, portionInfo.RawBytesSum());
}
Self->IncCounter(COUNTER_BLOBS_ERASED, blobsToDrop.size());
for (const auto& blobId : blobsToDrop) {
- Self->BlobManager->DeleteBlob(blobId, blobManagerDb);
+ Self->BlobManager->DeleteBlob(blobId, blobManagerDb);
Self->IncCounter(COUNTER_BYTES_ERASED, blobId.BlobSize());
}
- blobsWritten = Ev->Get()->BlobBatch.GetBlobCount();
- bytesWritten = Ev->Get()->BlobBatch.GetTotalSize();
+ blobsWritten = Ev->Get()->BlobBatch.GetBlobCount();
+ bytesWritten = Ev->Get()->BlobBatch.GetTotalSize();
if (blobsWritten) {
Self->BlobManager->SaveBlobBatch(std::move(Ev->Get()->BlobBatch), blobManagerDb);
}
-
+
Self->UpdateInsertTableCounters();
Self->UpdateIndexCounters();
} else {
@@ -125,7 +125,7 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
if (changes->IsInsert()) {
Self->ActiveIndexing = false;
-
+
Self->IncCounter(ok ? COUNTER_INDEXING_SUCCESS : COUNTER_INDEXING_FAIL);
Self->IncCounter(COUNTER_INDEXING_BLOBS_WRITTEN, blobsWritten);
Self->IncCounter(COUNTER_INDEXING_BYTES_WRITTEN, bytesWritten);
@@ -144,8 +144,8 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
Self->IncCounter(COUNTER_SPLIT_COMPACTION_BLOBS_WRITTEN, blobsWritten);
Self->IncCounter(COUNTER_SPLIT_COMPACTION_BYTES_WRITTEN, bytesWritten);
}
- } else if (changes->IsCleanup()) {
- Self->ActiveCleanup = false;
+ } else if (changes->IsCleanup()) {
+ Self->ActiveCleanup = false;
Self->IncCounter(ok ? COUNTER_CLEANUP_SUCCESS : COUNTER_CLEANUP_FAIL);
} else if (changes->IsTtl()) {
@@ -153,7 +153,7 @@ bool TTxWriteIndex::Execute(TTransactionContext& txc, const TActorContext&) {
Self->IncCounter(ok ? COUNTER_TTL_SUCCESS : COUNTER_TTL_FAIL);
}
-
+
Self->UpdateResourceMetrics(Ev->Get()->ResourceUsage);
return true;
}
diff --git a/ydb/core/tx/columnshard/columnshard_common.cpp b/ydb/core/tx/columnshard/columnshard_common.cpp
index ee66cb0c251..1e76247da84 100644
--- a/ydb/core/tx/columnshard/columnshard_common.cpp
+++ b/ydb/core/tx/columnshard/columnshard_common.cpp
@@ -28,9 +28,9 @@ TString FromCells(const TConstArrayRef<TCell>& cells, const TVector<std::pair<TS
bool ok = batchBuilder.Start(columns);
Y_VERIFY(ok);
- batchBuilder.AddRow(NKikimr::TDbTupleRef(), NKikimr::TDbTupleRef(types.data(), cells.data(), cells.size()));
+ batchBuilder.AddRow(NKikimr::TDbTupleRef(), NKikimr::TDbTupleRef(types.data(), cells.data(), cells.size()));
- auto batch = batchBuilder.FlushBatch(false);
+ auto batch = batchBuilder.FlushBatch(false);
Y_VERIFY(batch);
Y_VERIFY(batch->num_columns() == (int)cells.size());
Y_VERIFY(batch->num_rows() == 1);
@@ -38,15 +38,15 @@ TString FromCells(const TConstArrayRef<TCell>& cells, const TVector<std::pair<TS
}
struct TContext {
- const IColumnResolver& ColumnResolver;
+ const IColumnResolver& ColumnResolver;
THashMap<ui32, TString> Sources;
- explicit TContext(const IColumnResolver& columnResolver)
- : ColumnResolver(columnResolver)
+ explicit TContext(const IColumnResolver& columnResolver)
+ : ColumnResolver(columnResolver)
{}
std::string GetName(ui32 columnId) {
- TString name = ColumnResolver.GetColumnName(columnId, false);
+ TString name = ColumnResolver.GetColumnName(columnId, false);
if (name.Empty()) {
name = ToString(columnId);
} else {
@@ -260,13 +260,13 @@ std::pair<TPredicate, TPredicate> RangePredicates(const TSerializedTableRange& r
rightCells.reserve(size);
rightColumns.reserve(size);
for (size_t i = 0; i < size; ++i) {
- if (!cells[i].IsNull()) {
- rightCells.push_back(cells[i]);
- rightColumns.push_back(columns[i]);
+ if (!cells[i].IsNull()) {
+ rightCells.push_back(cells[i]);
+ rightColumns.push_back(columns[i]);
rightTrailingNull = false;
} else {
rightTrailingNull = true;
- }
+ }
}
}
@@ -284,7 +284,7 @@ void TReadDescription::AddProgram(const IColumnResolver& columnResolver, const N
{
using TId = NKikimrSSA::TProgram::TCommand;
- TContext info(columnResolver);
+ TContext info(columnResolver);
auto step = std::make_shared<NArrow::TProgramStep>();
for (auto& cmd : program.GetCommand()) {
switch (cmd.GetLineCase()) {
diff --git a/ydb/core/tx/columnshard/columnshard_common.h b/ydb/core/tx/columnshard/columnshard_common.h
index 49d0f339c07..cf11921a839 100644
--- a/ydb/core/tx/columnshard/columnshard_common.h
+++ b/ydb/core/tx/columnshard/columnshard_common.h
@@ -1,4 +1,4 @@
-#pragma once
+#pragma once
#include <ydb/core/formats/arrow_helpers.h>
#include <ydb/core/scheme/scheme_tabledefs.h>
#include <ydb/core/protos/ssa.pb.h>
@@ -10,22 +10,22 @@ namespace NKikimr::NOlap {
namespace NKikimr::NColumnShard {
-using NOlap::TWriteId;
-
+using NOlap::TWriteId;
+
std::pair<NOlap::TPredicate, NOlap::TPredicate>
RangePredicates(const TSerializedTableRange& range, const TVector<std::pair<TString, NScheme::TTypeId>>& columns);
-class IColumnResolver {
-public:
- virtual ~IColumnResolver() = default;
- virtual TString GetColumnName(ui32 id, bool required = true) const = 0;
-};
-
+class IColumnResolver {
+public:
+ virtual ~IColumnResolver() = default;
+ virtual TString GetColumnName(ui32 id, bool required = true) const = 0;
+};
+
// Describes read/scan request
struct TReadDescription {
// Table
ui64 PathId = 0;
- TString TableName;
+ TString TableName;
bool ReadNothing = false;
// Less[OrEqual], Greater[OrEqual] or both
// There's complex logic in NKikimr::TTableRange comparison that could be emulated only with separated compare
@@ -48,7 +48,7 @@ struct TReadDescription {
ui64 PlanStep = 0;
ui64 TxId = 0;
- void AddProgram(const IColumnResolver& columnResolver, const NKikimrSSA::TProgram& program);
+ void AddProgram(const IColumnResolver& columnResolver, const NKikimrSSA::TProgram& program);
};
}
diff --git a/ydb/core/tx/columnshard/columnshard_impl.cpp b/ydb/core/tx/columnshard/columnshard_impl.cpp
index eab07367c8c..10dcea4d864 100644
--- a/ydb/core/tx/columnshard/columnshard_impl.cpp
+++ b/ydb/core/tx/columnshard/columnshard_impl.cpp
@@ -5,10 +5,10 @@
namespace NKikimr::NColumnShard {
-// NOTE: We really want to batch log records by default in columnshards!
-// But in unittests we want to test both scenarios
-bool gAllowLogBatchingDefaultValue = true;
-
+// NOTE: We really want to batch log records by default in columnshards!
+// But in unittests we want to test both scenarios
+bool gAllowLogBatchingDefaultValue = true;
+
namespace
{
@@ -29,13 +29,13 @@ TColumnShard::TColumnShard(TTabletStorageInfo* info, const TActorId& tablet)
, PipeClientCache(NTabletPipe::CreateBoundedClientCache(new NTabletPipe::TBoundedClientCacheConfig(), GetPipeClientConfig()))
, InsertTable(std::make_unique<NOlap::TInsertTable>())
{
- TabletCountersPtr.reset(new TProtobufTabletCounters<
+ TabletCountersPtr.reset(new TProtobufTabletCounters<
ESimpleCounters_descriptor,
ECumulativeCounters_descriptor,
EPercentileCounters_descriptor,
ETxTypes_descriptor
>());
- TabletCounters = TabletCountersPtr.get();
+ TabletCounters = TabletCountersPtr.get();
}
void TColumnShard::OnDetach(const TActorContext& ctx) {
@@ -170,17 +170,17 @@ ui64 TColumnShard::GetOutdatedStep() const {
return step;
}
-ui64 TColumnShard::GetAllowedStep() const {
- return Max(GetOutdatedStep() + 1, TAppData::TimeProvider->Now().MilliSeconds());
-}
-
-ui64 TColumnShard::GetMinReadStep() const {
- ui64 delayMillisec = MaxReadStaleness.MilliSeconds();
- ui64 passedStep = GetOutdatedStep();
- ui64 minReadStep = (passedStep > delayMillisec ? passedStep - delayMillisec : 0);
- return minReadStep;
-}
-
+ui64 TColumnShard::GetAllowedStep() const {
+ return Max(GetOutdatedStep() + 1, TAppData::TimeProvider->Now().MilliSeconds());
+}
+
+ui64 TColumnShard::GetMinReadStep() const {
+ ui64 delayMillisec = MaxReadStaleness.MilliSeconds();
+ ui64 passedStep = GetOutdatedStep();
+ ui64 minReadStep = (passedStep > delayMillisec ? passedStep - delayMillisec : 0);
+ return minReadStep;
+}
+
bool TColumnShard::HaveOutdatedTxs() const {
if (!DeadlineQueue) {
return false;
@@ -247,15 +247,15 @@ bool TColumnShard::RemoveTx(NTable::TDatabase& database, ui64 txId) {
case NKikimrTxColumnShard::TX_KIND_COMMIT: {
if (auto* meta = CommitsInFlight.FindPtr(txId)) {
if (meta->MetaShard == 0) {
- for (TWriteId writeId : meta->WriteIds) {
+ for (TWriteId writeId : meta->WriteIds) {
// TODO: we probably need to have more complex
// logic in the future, when there are multiple
// inflight commits for the same writeId.
RemoveLongTxWrite(db, writeId, txId);
}
}
- TBlobGroupSelector dsGroupSelector(Info());
- NOlap::TDbWrapper dbTable(database, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Info());
+ NOlap::TDbWrapper dbTable(database, &dsGroupSelector);
InsertTable->Abort(dbTable, meta->MetaShard, meta->WriteIds);
CommitsInFlight.erase(txId);
@@ -471,8 +471,8 @@ void TColumnShard::RunDropTable(const NKikimrTxColumnShard::TDropTable& dropProt
Ttl.DropPathTtl(pathId);
// TODO: Allow to read old snapshots after DROP
- TBlobGroupSelector dsGroupSelector(Info());
- NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
+ TBlobGroupSelector dsGroupSelector(Info());
+ NOlap::TDbWrapper dbTable(txc.DB, &dsGroupSelector);
auto abortedWrites = InsertTable->DropPath(dbTable, pathId);
for (auto& writeId : abortedWrites) {
RemoveLongTxWrite(db, writeId);
@@ -609,15 +609,15 @@ std::unique_ptr<TEvPrivate::TEvIndexing> TColumnShard::SetupIndexation() {
ui64 bytesToIndex = 0;
TVector<const NOlap::TInsertedData*> dataToIndex;
dataToIndex.reserve(TLimits::MIN_SMALL_BLOBS_TO_INSERT);
- for (auto& [pathId, committed] : InsertTable->GetCommitted()) {
- for (auto& data : committed) {
+ for (auto& [pathId, committed] : InsertTable->GetCommitted()) {
+ for (auto& data : committed) {
ui32 dataSize = data.BlobSize();
Y_VERIFY(dataSize);
size += dataSize;
if (bytesToIndex && (bytesToIndex + dataSize) > (ui64)Limits.MaxInsertBytes) {
continue;
- }
+ }
if (auto* pMap = PrimaryIndex->GetOverloadedGranules(data.PathId)) {
InsertTable->SetOverloaded(data.PathId, true);
++ignored;
@@ -655,9 +655,9 @@ std::unique_ptr<TEvPrivate::TEvIndexing> TColumnShard::SetupIndexation() {
}
ActiveIndexing = true;
- auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges,
- Settings.CacheDataAfterIndexing);
- return std::make_unique<TEvPrivate::TEvIndexing>(std::move(ev));
+ auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges,
+ Settings.CacheDataAfterIndexing);
+ return std::make_unique<TEvPrivate::TEvIndexing>(std::move(ev));
}
std::unique_ptr<TEvPrivate::TEvCompaction> TColumnShard::SetupCompaction() {
@@ -693,9 +693,9 @@ std::unique_ptr<TEvPrivate::TEvCompaction> TColumnShard::SetupCompaction() {
}
ActiveCompaction = true;
- auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges,
- Settings.CacheDataAfterCompaction);
- return std::make_unique<TEvPrivate::TEvCompaction>(std::move(ev));
+ auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges,
+ Settings.CacheDataAfterCompaction);
+ return std::make_unique<TEvPrivate::TEvCompaction>(std::move(ev));
}
std::unique_ptr<TEvPrivate::TEvWriteIndex> TColumnShard::SetupTtl(const THashMap<ui64, NOlap::TTtlInfo>& pathTtls,
@@ -734,64 +734,64 @@ std::unique_ptr<TEvPrivate::TEvWriteIndex> TColumnShard::SetupTtl(const THashMap
}
ActiveTtl = true;
- auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges, false);
+ auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), indexChanges, false);
ev->PutStatus = NKikimrProto::OK; // No blobs to write, start TTxWriteIndex in event handler
return ev;
}
-std::unique_ptr<TEvPrivate::TEvWriteIndex> TColumnShard::SetupCleanup() {
- if (ActiveCleanup) {
- LOG_S_DEBUG("Cleanup already in progress at tablet " << TabletID());
- return {};
- }
- if (!PrimaryIndex) {
+std::unique_ptr<TEvPrivate::TEvWriteIndex> TColumnShard::SetupCleanup() {
+ if (ActiveCleanup) {
+ LOG_S_DEBUG("Cleanup already in progress at tablet " << TabletID());
+ return {};
+ }
+ if (!PrimaryIndex) {
LOG_S_NOTICE("Cleanup not started. No index for cleanup at tablet " << TabletID());
- return {};
- }
-
- NOlap::TSnapshot cleanupSnapshot{GetMinReadStep(), 0};
-
+ return {};
+ }
+
+ NOlap::TSnapshot cleanupSnapshot{GetMinReadStep(), 0};
+
auto changes = PrimaryIndex->StartCleanup(cleanupSnapshot, PathsToDrop);
if (!changes) {
LOG_S_NOTICE("Cannot prepare cleanup at tablet " << TabletID());
return {};
}
- Y_VERIFY(!changes->CompactionInfo);
- Y_VERIFY(changes->DataToIndex.empty());
- Y_VERIFY(changes->AppendedPortions.empty());
-
+ Y_VERIFY(!changes->CompactionInfo);
+ Y_VERIFY(changes->DataToIndex.empty());
+ Y_VERIFY(changes->AppendedPortions.empty());
+
// TODO: limit PortionsToDrop total size. Delete them in small portions.
- // Filter PortionsToDrop
- TVector<NOlap::TPortionInfo> portionsCanBedropped;
- THashSet<ui64> excludedPortions;
- for (const auto& portionInfo : changes->PortionsToDrop) {
- ui64 portionId = portionInfo.Records.front().Portion;
-        // Exclude portions that are used by in-flight reads/scans
- if (!InFlightReadsTracker.IsPortionUsed(portionId)) {
- portionsCanBedropped.push_back(portionInfo);
- } else {
- excludedPortions.insert(portionId);
- }
- }
- changes->PortionsToDrop.swap(portionsCanBedropped);
-
- LOG_S_DEBUG("Prepare Cleanup snapshot: " << cleanupSnapshot
- << " portions to drop: " << changes->PortionsToDrop.size()
- << " in use by reads: " << excludedPortions.size()
- << " at tablet " << TabletID());
-
- if (changes->PortionsToDrop.empty()) {
- return {};
- }
-
-    auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), changes, false);
- ev->PutStatus = NKikimrProto::OK; // No new blobs to write
-
- ActiveCleanup = true;
- return ev;
-}
-
+ // Filter PortionsToDrop
+ TVector<NOlap::TPortionInfo> portionsCanBedropped;
+ THashSet<ui64> excludedPortions;
+ for (const auto& portionInfo : changes->PortionsToDrop) {
+ ui64 portionId = portionInfo.Records.front().Portion;
+        // Exclude portions that are used by in-flight reads/scans
+ if (!InFlightReadsTracker.IsPortionUsed(portionId)) {
+ portionsCanBedropped.push_back(portionInfo);
+ } else {
+ excludedPortions.insert(portionId);
+ }
+ }
+ changes->PortionsToDrop.swap(portionsCanBedropped);
+
+ LOG_S_DEBUG("Prepare Cleanup snapshot: " << cleanupSnapshot
+ << " portions to drop: " << changes->PortionsToDrop.size()
+ << " in use by reads: " << excludedPortions.size()
+ << " at tablet " << TabletID());
+
+ if (changes->PortionsToDrop.empty()) {
+ return {};
+ }
+
+    auto ev = std::make_unique<TEvPrivate::TEvWriteIndex>(PrimaryIndex->GetIndexInfo(), changes, false);
+ ev->PutStatus = NKikimrProto::OK; // No new blobs to write
+
+ ActiveCleanup = true;
+ return ev;
+}
+
NOlap::TIndexInfo TColumnShard::ConvertSchema(const NKikimrSchemeOp::TColumnTableSchema& schema) {
Y_VERIFY(schema.GetEngine() == NKikimrSchemeOp::COLUMN_ENGINE_REPLACING_TIMESERIES);
diff --git a/ydb/core/tx/columnshard/columnshard_impl.h b/ydb/core/tx/columnshard/columnshard_impl.h
index 517d385cd86..4c3c73fd633 100644
--- a/ydb/core/tx/columnshard/columnshard_impl.h
+++ b/ydb/core/tx/columnshard/columnshard_impl.h
@@ -4,8 +4,8 @@
#include "columnshard_common.h"
#include "columnshard_ttl.h"
#include "columnshard_txs.h"
-#include "blob_manager.h"
-#include "inflight_request_tracker.h"
+#include "blob_manager.h"
+#include "inflight_request_tracker.h"
#include <ydb/core/tx/tx_processing.h>
#include <ydb/core/tx/time_cast/time_cast.h>
@@ -15,30 +15,30 @@
namespace NKikimr::NColumnShard {
-extern bool gAllowLogBatchingDefaultValue;
-
-struct TSettings {
- TControlWrapper BlobWriteGrouppingEnabled;
- TControlWrapper CacheDataAfterIndexing;
- TControlWrapper CacheDataAfterCompaction;
- TControlWrapper MaxSmallBlobSize;
-
- TSettings()
- : BlobWriteGrouppingEnabled(1, 0, 1)
- , CacheDataAfterIndexing(1, 0, 1)
- , CacheDataAfterCompaction(1, 0, 1)
- , MaxSmallBlobSize(0, 0, 8000000)
- {}
-
- void RegisterControls(TControlBoard& icb) {
- icb.RegisterSharedControl(BlobWriteGrouppingEnabled, "ColumnShardControls.BlobWriteGrouppingEnabled");
- icb.RegisterSharedControl(CacheDataAfterIndexing, "ColumnShardControls.CacheDataAfterIndexing");
- icb.RegisterSharedControl(CacheDataAfterCompaction, "ColumnShardControls.CacheDataAfterCompaction");
- icb.RegisterSharedControl(MaxSmallBlobSize, "ColumnShardControls.MaxSmallBlobSize");
- }
-};
-
-
+extern bool gAllowLogBatchingDefaultValue;
+
+struct TSettings {
+ TControlWrapper BlobWriteGrouppingEnabled;
+ TControlWrapper CacheDataAfterIndexing;
+ TControlWrapper CacheDataAfterCompaction;
+ TControlWrapper MaxSmallBlobSize;
+
+ TSettings()
+ : BlobWriteGrouppingEnabled(1, 0, 1)
+ , CacheDataAfterIndexing(1, 0, 1)
+ , CacheDataAfterCompaction(1, 0, 1)
+ , MaxSmallBlobSize(0, 0, 8000000)
+ {}
+
+ void RegisterControls(TControlBoard& icb) {
+ icb.RegisterSharedControl(BlobWriteGrouppingEnabled, "ColumnShardControls.BlobWriteGrouppingEnabled");
+ icb.RegisterSharedControl(CacheDataAfterIndexing, "ColumnShardControls.CacheDataAfterIndexing");
+ icb.RegisterSharedControl(CacheDataAfterCompaction, "ColumnShardControls.CacheDataAfterCompaction");
+ icb.RegisterSharedControl(MaxSmallBlobSize, "ColumnShardControls.MaxSmallBlobSize");
+ }
+};
+
+
class TColumnShard
: public TActor<TColumnShard>
, public NTabletFlatExecutor::TTabletExecutedFlat
@@ -52,13 +52,13 @@ class TColumnShard
friend class TTxNotifyTxCompletion;
friend class TTxPlanStep;
friend class TTxWrite;
- friend class TTxReadBase;
+ friend class TTxReadBase;
friend class TTxRead;
- friend class TTxScan;
+ friend class TTxScan;
friend class TTxWriteIndex;
- friend class TTxRunGC;
- friend class TTxProcessGCResult;
- friend class TTxReadBlobRanges;
+ friend class TTxRunGC;
+ friend class TTxProcessGCResult;
+ friend class TTxReadBlobRanges;
class TTxProgressTx;
class TTxProposeCancel;
@@ -75,19 +75,19 @@ class TColumnShard
void Handle(TEvTxProcessing::TEvPlanStep::TPtr& ev, const TActorContext& ctx);
void Handle(TEvColumnShard::TEvWrite::TPtr& ev, const TActorContext& ctx);
void Handle(TEvColumnShard::TEvRead::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvColumnShard::TEvScan::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvColumnShard::TEvReadBlobRanges::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvColumnShard::TEvScan::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvColumnShard::TEvReadBlobRanges::TPtr& ev, const TActorContext& ctx);
void Handle(TEvMediatorTimecast::TEvRegisterTabletResult::TPtr& ev, const TActorContext& ctx);
void Handle(TEvMediatorTimecast::TEvNotifyPlanStep::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvPrivate::TEvScanStats::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvPrivate::TEvReadFinished::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvPrivate::TEvScanStats::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvPrivate::TEvReadFinished::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPrivate::TEvPeriodicWakeup::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPrivate::TEvWriteIndex::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvBlobStorage::TEvCollectGarbageResult::TPtr& ev, const TActorContext& ctx);
ITransaction* CreateTxInitSchema();
- ITransaction* CreateTxRunGc();
+ ITransaction* CreateTxRunGc();
void OnActivateExecutor(const TActorContext& ctx) override;
void OnDetach(const TActorContext& ctx) override;
@@ -168,17 +168,17 @@ protected:
HFunc(TEvColumnShard::TEvProposeTransaction, Handle);
HFunc(TEvColumnShard::TEvCancelTransactionProposal, Handle);
HFunc(TEvColumnShard::TEvNotifyTxCompletion, Handle);
- HFunc(TEvColumnShard::TEvScan, Handle);
+ HFunc(TEvColumnShard::TEvScan, Handle);
HFunc(TEvTxProcessing::TEvPlanStep, Handle);
HFunc(TEvColumnShard::TEvWrite, Handle);
HFunc(TEvColumnShard::TEvRead, Handle);
- HFunc(TEvColumnShard::TEvReadBlobRanges, Handle);
+ HFunc(TEvColumnShard::TEvReadBlobRanges, Handle);
HFunc(TEvMediatorTimecast::TEvRegisterTabletResult, Handle);
HFunc(TEvMediatorTimecast::TEvNotifyPlanStep, Handle);
- HFunc(TEvBlobStorage::TEvCollectGarbageResult, Handle);
+ HFunc(TEvBlobStorage::TEvCollectGarbageResult, Handle);
HFunc(TEvPrivate::TEvWriteIndex, Handle);
- HFunc(TEvPrivate::TEvScanStats, Handle);
- HFunc(TEvPrivate::TEvReadFinished, Handle);
+ HFunc(TEvPrivate::TEvScanStats, Handle);
+ HFunc(TEvPrivate::TEvReadFinished, Handle);
HFunc(TEvPrivate::TEvPeriodicWakeup, Handle);
default:
if (!HandleDefaultEvents(ev, ctx)) {
@@ -237,9 +237,9 @@ private:
struct TCommitMeta {
ui64 MetaShard{};
- THashSet<TWriteId> WriteIds;
+ THashSet<TWriteId> WriteIds;
- void AddWriteId(TWriteId id) {
+ void AddWriteId(TWriteId id) {
WriteIds.insert(id);
}
};
@@ -291,7 +291,7 @@ private:
ui64 CurrentSchemeShardId = 0;
TMessageSeqNo LastSchemaSeqNo;
std::optional<NKikimrSubDomains::TProcessingParams> ProcessingParams;
- TWriteId LastWriteId = TWriteId{0};
+ TWriteId LastWriteId = TWriteId{0};
ui64 LastPlannedStep = 0;
ui64 LastPlannedTxId = 0;
ui64 LastCompactedGranule = 0;
@@ -299,16 +299,16 @@ private:
TIntrusivePtr<TMediatorTimecastEntry> MediatorTimeCastEntry;
bool MediatorTimeCastRegistered = false;
TSet<ui64> MediatorTimeCastWaitingSteps;
- TDuration MaxReadStaleness = TDuration::Minutes(5); // TODO: Make configurable?
- TDuration MaxCommitTxDelay = TDuration::Seconds(30); // TODO: Make configurable?
+ TDuration MaxReadStaleness = TDuration::Minutes(5); // TODO: Make configurable?
+ TDuration MaxCommitTxDelay = TDuration::Seconds(30); // TODO: Make configurable?
TDuration ActivationPeriod = TDuration::Seconds(60);
TDuration FailActivationDelay = TDuration::Seconds(1);
TInstant LastBackActivation;
TActorId IndexingActor; // It's logically bounded to 1: we move each portion of data to multiple indices.
    TActorId CompactionActor; // It's memory bounded to 1: we have no memory for parallel compaction.
- std::unique_ptr<TTabletCountersBase> TabletCountersPtr;
- TTabletCountersBase* TabletCounters;
+ std::unique_ptr<TTabletCountersBase> TabletCountersPtr;
+ TTabletCountersBase* TabletCounters;
std::unique_ptr<NTabletPipe::IClientCache> PipeClientCache;
std::unique_ptr<NOlap::TInsertTable> InsertTable;
std::unique_ptr<NOlap::IColumnEngine> PrimaryIndex;
@@ -325,18 +325,18 @@ private:
THashMap<ui32, TSchemaPreset> SchemaPresets;
//THashMap<ui32, TTtlSettingsPreset> TtlSettingsPresets;
THashMap<ui64, TTableInfo> Tables;
- THashMap<TWriteId, TLongTxWriteInfo> LongTxWrites;
+ THashMap<TWriteId, TLongTxWriteInfo> LongTxWrites;
THashMap<TULID, TLongTxWriteInfo*> LongTxWritesByUniqueId;
TMultiMap<TRowVersion, TEvColumnShard::TEvRead::TPtr> WaitingReads;
TMultiMap<TRowVersion, TEvColumnShard::TEvScan::TPtr> WaitingScans;
THashSet<ui64> PathsToDrop;
bool ActiveIndexing = false;
bool ActiveCompaction = false;
- bool ActiveCleanup = false;
+ bool ActiveCleanup = false;
bool ActiveTtl = false;
- std::unique_ptr<TBlobManager> BlobManager;
- TInFlightReadsTracker InFlightReadsTracker;
- TSettings Settings;
+ std::unique_ptr<TBlobManager> BlobManager;
+ TInFlightReadsTracker InFlightReadsTracker;
+ TSettings Settings;
TLimits Limits;
TCompactionLimits CompactionLimits;
@@ -347,9 +347,9 @@ private:
void SendWaitPlanStep(ui64 step);
void RescheduleWaitingReads();
TRowVersion GetMaxReadVersion() const;
- ui64 GetMinReadStep() const;
+ ui64 GetMinReadStep() const;
ui64 GetOutdatedStep() const;
- ui64 GetAllowedStep() const;
+ ui64 GetAllowedStep() const;
bool HaveOutdatedTxs() const;
TWriteId GetLongTxWrite(NIceDb::TNiceDb& db, const NLongTxService::TLongTxId& longTxId);
@@ -382,13 +382,13 @@ private:
std::unique_ptr<TEvPrivate::TEvCompaction> SetupCompaction();
std::unique_ptr<TEvPrivate::TEvWriteIndex> SetupTtl(const THashMap<ui64, NOlap::TTtlInfo>& pathTtls = {},
bool force = false);
- std::unique_ptr<TEvPrivate::TEvWriteIndex> SetupCleanup();
+ std::unique_ptr<TEvPrivate::TEvWriteIndex> SetupCleanup();
- void UpdateBlobMangerCounters();
+ void UpdateBlobMangerCounters();
void UpdateInsertTableCounters();
void UpdateIndexCounters();
void UpdateResourceMetrics(const TUsage& usage);
-
+
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TX_COLUMNSHARD_ACTOR;
diff --git a/ydb/core/tx/columnshard/columnshard_schema.h b/ydb/core/tx/columnshard/columnshard_schema.h
index d7c20b47ee0..9c178796aed 100644
--- a/ydb/core/tx/columnshard/columnshard_schema.h
+++ b/ydb/core/tx/columnshard/columnshard_schema.h
@@ -13,18 +13,18 @@
namespace NKikimr::NColumnShard {
-using NOlap::TWriteId;
-using NOlap::IBlobGroupSelector;
-
+using NOlap::TWriteId;
+using NOlap::IBlobGroupSelector;
+
struct Schema : NIceDb::Schema {
- // These settings are persisted on each Init. So we use empty settings in order not to overwrite what
- // was changed by the user
- struct EmptySettings {
- static void Materialize(NIceDb::TToughDb&) {}
- };
-
- using TSettings = SchemaSettings<EmptySettings>;
-
+ // These settings are persisted on each Init. So we use empty settings in order not to overwrite what
+ // was changed by the user
+ struct EmptySettings {
+ static void Materialize(NIceDb::TToughDb&) {}
+ };
+
+ using TSettings = SchemaSettings<EmptySettings>;
+
using TInsertedData = NOlap::TInsertedData;
using TGranuleRecord = NOlap::TGranuleRecord;
using TColumnRecord = NOlap::TColumnRecord;
@@ -44,9 +44,9 @@ struct Schema : NIceDb::Schema {
LastPlannedTxId = 5,
LastSchemaSeqNoGeneration = 6,
LastSchemaSeqNoRound = 7,
-
- LastGcBarrierGen = 8,
- LastGcBarrierStep = 9,
+
+ LastGcBarrierGen = 8,
+ LastGcBarrierStep = 9,
};
enum class EInsertTableIds : ui8 {
@@ -138,36 +138,36 @@ struct Schema : NIceDb::Schema {
using TColumns = TableColumns<PathId, SinceStep, SinceTxId, InfoProto>;
};
- struct LongTxWrites : Table<6> {
- struct WriteId : Column<1, NScheme::NTypeIds::Uint64> {};
- struct LongTxId : Column<2, NScheme::NTypeIds::String> {};
+ struct LongTxWrites : Table<6> {
+ struct WriteId : Column<1, NScheme::NTypeIds::Uint64> {};
+ struct LongTxId : Column<2, NScheme::NTypeIds::String> {};
+
+ using TKey = TableKey<WriteId>;
+ using TColumns = TableColumns<WriteId, LongTxId>;
+ };
- using TKey = TableKey<WriteId>;
- using TColumns = TableColumns<WriteId, LongTxId>;
- };
-
- struct BlobsToKeep : Table<7> {
+ struct BlobsToKeep : Table<7> {
struct BlobId : Column<1, NScheme::NTypeIds::String> {};
using TKey = TableKey<BlobId>;
using TColumns = TableColumns<BlobId>;
};
- struct BlobsToDelete : Table<8> {
- struct BlobId : Column<1, NScheme::NTypeIds::String> {};
+ struct BlobsToDelete : Table<8> {
+ struct BlobId : Column<1, NScheme::NTypeIds::String> {};
- using TKey = TableKey<BlobId>;
- using TColumns = TableColumns<BlobId>;
+ using TKey = TableKey<BlobId>;
+ using TColumns = TableColumns<BlobId>;
+ };
+
+ struct SmallBlobs : Table<12> {
+ struct BlobId : Column<1, NScheme::NTypeIds::String> {};
+ struct Data : Column<2, NScheme::NTypeIds::String> {};
+
+ using TKey = TableKey<BlobId>;
+ using TColumns = TableColumns<BlobId, Data>;
};
- struct SmallBlobs : Table<12> {
- struct BlobId : Column<1, NScheme::NTypeIds::String> {};
- struct Data : Column<2, NScheme::NTypeIds::String> {};
-
- using TKey = TableKey<BlobId>;
- using TColumns = TableColumns<BlobId, Data>;
- };
-
// Index tables
// InsertTable - common for all indices
@@ -235,13 +235,13 @@ struct Schema : NIceDb::Schema {
TableInfo,
TableVersionInfo,
LongTxWrites,
- BlobsToKeep,
- BlobsToDelete,
+ BlobsToKeep,
+ BlobsToDelete,
InsertTable,
IndexGranules,
IndexColumns,
- IndexCounters,
- SmallBlobs
+ IndexCounters,
+ SmallBlobs
>;
//
@@ -258,12 +258,12 @@ struct Schema : NIceDb::Schema {
template <typename T>
static bool GetSpecialValue(NIceDb::TNiceDb& db, EValueIds key, T& value) {
- using TSource = std::conditional_t<std::is_integral_v<T> || std::is_enum_v<T>, Value::Digit, Value::Bytes>;
+ using TSource = std::conditional_t<std::is_integral_v<T> || std::is_enum_v<T>, Value::Digit, Value::Bytes>;
auto rowset = db.Table<Value>().Key((ui32)key).Select<TSource>();
if (rowset.IsReady()) {
if (rowset.IsValid())
- value = T{rowset.template GetValue<TSource>()};
+ value = T{rowset.template GetValue<TSource>()};
return true;
}
return false;
@@ -407,24 +407,24 @@ struct Schema : NIceDb::Schema {
db.Table<TableInfo>().Key(pathId).Delete();
}
- static void SaveLongTxWrite(NIceDb::TNiceDb& db, TWriteId writeId, const NLongTxService::TLongTxId& longTxId) {
+ static void SaveLongTxWrite(NIceDb::TNiceDb& db, TWriteId writeId, const NLongTxService::TLongTxId& longTxId) {
NKikimrLongTxService::TLongTxId proto;
longTxId.ToProto(&proto);
TString serialized;
Y_VERIFY(proto.SerializeToString(&serialized));
- db.Table<LongTxWrites>().Key((ui64)writeId).Update(
+ db.Table<LongTxWrites>().Key((ui64)writeId).Update(
NIceDb::TUpdate<LongTxWrites::LongTxId>(serialized));
}
- static void EraseLongTxWrite(NIceDb::TNiceDb& db, TWriteId writeId) {
- db.Table<LongTxWrites>().Key((ui64)writeId).Delete();
+ static void EraseLongTxWrite(NIceDb::TNiceDb& db, TWriteId writeId) {
+ db.Table<LongTxWrites>().Key((ui64)writeId).Delete();
}
// InsertTable activities
static void InsertTable_Upsert(NIceDb::TNiceDb& db, EInsertTableIds recType, const TInsertedData& data) {
db.Table<InsertTable>().Key((ui8)recType, data.ShardOrPlan, data.WriteTxId, data.PathId, data.DedupId).Update(
- NIceDb::TUpdate<InsertTable::BlobId>(data.BlobId.ToStringLegacy()),
+ NIceDb::TUpdate<InsertTable::BlobId>(data.BlobId.ToStringLegacy()),
NIceDb::TUpdate<InsertTable::Meta>(data.Metadata)
);
}
@@ -458,7 +458,7 @@ struct Schema : NIceDb::Schema {
}
static bool InsertTable_Load(NIceDb::TNiceDb& db,
- const IBlobGroupSelector* dsGroupSelector,
+ const IBlobGroupSelector* dsGroupSelector,
THashMap<TWriteId, TInsertedData>& inserted,
THashMap<ui64, TSet<TInsertedData>>& committed,
THashMap<TWriteId, TInsertedData>& aborted,
@@ -477,8 +477,8 @@ struct Schema : NIceDb::Schema {
TString metaStr = rowset.GetValue<InsertTable::Meta>();
TString error;
- NOlap::TUnifiedBlobId blobId = NOlap::TUnifiedBlobId::ParseFromString(strBlobId, dsGroupSelector, error);
-        Y_VERIFY(blobId.IsValid(), "Failed to parse blob id: %s", error.c_str());
+ NOlap::TUnifiedBlobId blobId = NOlap::TUnifiedBlobId::ParseFromString(strBlobId, dsGroupSelector, error);
+        Y_VERIFY(blobId.IsValid(), "Failed to parse blob id: %s", error.c_str());
TInstant writeTime = loadTime;
NKikimrTxColumnShard::TLogicalMetadata meta;
@@ -486,7 +486,7 @@ struct Schema : NIceDb::Schema {
writeTime = TInstant::Seconds(meta.GetDirtyWriteTimeSeconds());
}
- TInsertedData data(shardOrPlan, writeTxId, tableId, dedupId, blobId, metaStr, writeTime);
+ TInsertedData data(shardOrPlan, writeTxId, tableId, dedupId, blobId, metaStr, writeTime);
switch (recType) {
case EInsertTableIds::Inserted:
@@ -550,8 +550,8 @@ struct Schema : NIceDb::Schema {
NIceDb::TUpdate<IndexColumns::XTxId>(row.XTxId),
NIceDb::TUpdate<IndexColumns::Blob>(row.SerializedBlobId()),
NIceDb::TUpdate<IndexColumns::Metadata>(row.Metadata),
- NIceDb::TUpdate<IndexColumns::Offset>(row.BlobRange.Offset),
- NIceDb::TUpdate<IndexColumns::Size>(row.BlobRange.Size)
+ NIceDb::TUpdate<IndexColumns::Offset>(row.BlobRange.Offset),
+ NIceDb::TUpdate<IndexColumns::Size>(row.BlobRange.Size)
);
}
@@ -559,8 +559,8 @@ struct Schema : NIceDb::Schema {
db.Table<IndexColumns>().Key(index, row.Granule, row.ColumnId, row.PlanStep, row.TxId, row.Portion, row.Chunk).Delete();
}
- static bool IndexColumns_Load(NIceDb::TNiceDb& db, const IBlobGroupSelector* dsGroupSelector, ui32 index,
- std::function<void(TColumnRecord&&)> callback) {
+ static bool IndexColumns_Load(NIceDb::TNiceDb& db, const IBlobGroupSelector* dsGroupSelector, ui32 index,
+ std::function<void(TColumnRecord&&)> callback) {
auto rowset = db.Table<IndexColumns>().Prefix(index).Select();
if (!rowset.IsReady())
return false;
@@ -577,12 +577,12 @@ struct Schema : NIceDb::Schema {
row.XTxId = rowset.GetValue<IndexColumns::XTxId>();
TString strBlobId = rowset.GetValue<IndexColumns::Blob>();
row.Metadata = rowset.GetValue<IndexColumns::Metadata>();
- row.BlobRange.Offset = rowset.GetValue<IndexColumns::Offset>();
- row.BlobRange.Size = rowset.GetValue<IndexColumns::Size>();
+ row.BlobRange.Offset = rowset.GetValue<IndexColumns::Offset>();
+ row.BlobRange.Size = rowset.GetValue<IndexColumns::Size>();
- Y_VERIFY(strBlobId.size() == sizeof(TLogoBlobID), "Size %" PRISZT " doesn't match TLogoBlobID", strBlobId.size());
- TLogoBlobID logoBlobId((const ui64*)strBlobId.data());
- row.BlobRange.BlobId = NOlap::TUnifiedBlobId(dsGroupSelector->GetGroup(logoBlobId), logoBlobId);
+ Y_VERIFY(strBlobId.size() == sizeof(TLogoBlobID), "Size %" PRISZT " doesn't match TLogoBlobID", strBlobId.size());
+ TLogoBlobID logoBlobId((const ui64*)strBlobId.data());
+ row.BlobRange.BlobId = NOlap::TUnifiedBlobId(dsGroupSelector->GetGroup(logoBlobId), logoBlobId);
callback(std::move(row));
diff --git a/ydb/core/tx/columnshard/columnshard_txs.h b/ydb/core/tx/columnshard/columnshard_txs.h
index c065f0c6c1e..b7267dd8ff3 100644
--- a/ydb/core/tx/columnshard/columnshard_txs.h
+++ b/ydb/core/tx/columnshard/columnshard_txs.h
@@ -1,7 +1,7 @@
#pragma once
-#include "blob_manager.h"
-
+#include "blob_manager.h"
+
#include <ydb/core/tx/columnshard/engines/column_engine.h>
#include <ydb/core/tx/columnshard/engines/indexed_read_data.h>
@@ -20,8 +20,8 @@ struct TEvPrivate {
enum EEv {
EvIndexing = EventSpaceBegin(TEvents::ES_PRIVATE),
EvWriteIndex,
- EvScanStats,
- EvReadFinished,
+ EvScanStats,
+ EvReadFinished,
EvPeriodicWakeup,
EvEnd
};
@@ -35,25 +35,25 @@ struct TEvPrivate {
std::shared_ptr<NOlap::TColumnEngineChanges> IndexChanges;
TVector<TString> Blobs;
bool GranuleCompaction{false};
- TBlobBatch BlobBatch;
+ TBlobBatch BlobBatch;
TUsage ResourceUsage;
TVector<ui32> YellowMoveChannels;
TVector<ui32> YellowStopChannels;
- bool CacheData{false};
+ bool CacheData{false};
- TEvWriteIndex(const NOlap::TIndexInfo& indexInfo,
- std::shared_ptr<NOlap::TColumnEngineChanges> indexChanges,
- bool cacheData)
+ TEvWriteIndex(const NOlap::TIndexInfo& indexInfo,
+ std::shared_ptr<NOlap::TColumnEngineChanges> indexChanges,
+ bool cacheData)
: IndexInfo(indexInfo)
, IndexChanges(indexChanges)
- , CacheData(cacheData)
+ , CacheData(cacheData)
{}
};
struct TEvIndexing : public TEventLocal<TEvIndexing, EvIndexing> {
std::unique_ptr<TEvPrivate::TEvWriteIndex> TxEvent;
- explicit TEvIndexing(std::unique_ptr<TEvPrivate::TEvWriteIndex> txEvent)
+ explicit TEvIndexing(std::unique_ptr<TEvPrivate::TEvWriteIndex> txEvent)
: TxEvent(std::move(txEvent))
{}
};
@@ -61,27 +61,27 @@ struct TEvPrivate {
struct TEvCompaction : public TEventLocal<TEvCompaction, EvIndexing> {
std::unique_ptr<TEvPrivate::TEvWriteIndex> TxEvent;
- explicit TEvCompaction(std::unique_ptr<TEvPrivate::TEvWriteIndex> txEvent)
+ explicit TEvCompaction(std::unique_ptr<TEvPrivate::TEvWriteIndex> txEvent)
: TxEvent(std::move(txEvent))
{
TxEvent->GranuleCompaction = true;
}
};
-
- struct TEvScanStats : public TEventLocal<TEvScanStats, EvScanStats> {
- TEvScanStats(ui64 rows, ui64 bytes) : Rows(rows), Bytes(bytes) {}
- ui64 Rows;
- ui64 Bytes;
- };
-
- struct TEvReadFinished : public TEventLocal<TEvReadFinished, EvReadFinished> {
+
+ struct TEvScanStats : public TEventLocal<TEvScanStats, EvScanStats> {
+ TEvScanStats(ui64 rows, ui64 bytes) : Rows(rows), Bytes(bytes) {}
+ ui64 Rows;
+ ui64 Bytes;
+ };
+
+ struct TEvReadFinished : public TEventLocal<TEvReadFinished, EvReadFinished> {
explicit TEvReadFinished(ui64 requestCookie, ui64 txId = 0)
: RequestCookie(requestCookie), TxId(txId)
{}
- ui64 RequestCookie;
+ ui64 RequestCookie;
ui64 TxId;
- };
+ };
struct TEvPeriodicWakeup : public TEventLocal<TEvPeriodicWakeup, EvPeriodicWakeup> {
TEvPeriodicWakeup(bool manual = false)
@@ -191,18 +191,18 @@ private:
};
/// Read portion of data in OLAP transaction
-class TTxReadBase : public TTransactionBase<TColumnShard> {
-protected:
- explicit TTxReadBase(TColumnShard* self)
- : TBase(self)
- {}
-
- NOlap::TReadMetadata::TPtr PrepareReadMetadata(
+class TTxReadBase : public TTransactionBase<TColumnShard> {
+protected:
+ explicit TTxReadBase(TColumnShard* self)
+ : TBase(self)
+ {}
+
+ NOlap::TReadMetadata::TPtr PrepareReadMetadata(
const TActorContext& ctx,
const TReadDescription& readDescription,
const std::unique_ptr<NOlap::TInsertTable>& insertTable,
- const std::unique_ptr<NOlap::IColumnEngine>& index,
- TString& error) const;
+ const std::unique_ptr<NOlap::IColumnEngine>& index,
+ TString& error) const;
protected:
bool ParseProgram(
@@ -215,12 +215,12 @@ protected:
protected:
TString ErrorDescription;
-};
-
-class TTxRead : public TTxReadBase {
+};
+
+class TTxRead : public TTxReadBase {
public:
TTxRead(TColumnShard* self, TEvColumnShard::TEvRead::TPtr& ev)
- : TTxReadBase(self)
+ : TTxReadBase(self)
, Ev(ev)
{}
@@ -231,49 +231,49 @@ public:
private:
TEvColumnShard::TEvRead::TPtr Ev;
std::unique_ptr<TEvColumnShard::TEvReadResult> Result;
- NOlap::TReadMetadata::TConstPtr ReadMetadata;
+ NOlap::TReadMetadata::TConstPtr ReadMetadata;
};
-class TTxScan : public TTxReadBase {
-public:
- using TReadMetadataPtr = NOlap::TReadMetadataBase::TConstPtr;
-
- TTxScan(TColumnShard* self, TEvColumnShard::TEvScan::TPtr& ev)
- : TTxReadBase(self)
- , Ev(ev)
- {}
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override;
- void Complete(const TActorContext& ctx) override;
- TTxType GetTxType() const override { return TXTYPE_START_SCAN; }
-
-private:
- NOlap::TReadMetadataBase::TConstPtr CreateReadMetadata(const TActorContext& ctx, TReadDescription& read,
+class TTxScan : public TTxReadBase {
+public:
+ using TReadMetadataPtr = NOlap::TReadMetadataBase::TConstPtr;
+
+ TTxScan(TColumnShard* self, TEvColumnShard::TEvScan::TPtr& ev)
+ : TTxReadBase(self)
+ , Ev(ev)
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override;
+ void Complete(const TActorContext& ctx) override;
+ TTxType GetTxType() const override { return TXTYPE_START_SCAN; }
+
+private:
+ NOlap::TReadMetadataBase::TConstPtr CreateReadMetadata(const TActorContext& ctx, TReadDescription& read,
bool isIndexStats, bool isReverse, ui64 limit);
private:
- TEvColumnShard::TEvScan::TPtr Ev;
+ TEvColumnShard::TEvScan::TPtr Ev;
TVector<TReadMetadataPtr> ReadMetadataRanges;
-};
-
-
-class TTxReadBlobRanges : public TTransactionBase<TColumnShard> {
-public:
- TTxReadBlobRanges(TColumnShard* self, TEvColumnShard::TEvReadBlobRanges::TPtr& ev)
- : TTransactionBase<TColumnShard>(self)
- , Ev(ev)
- {}
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override;
- void Complete(const TActorContext& ctx) override;
- TTxType GetTxType() const override { return TXTYPE_READ_BLOB_RANGES; }
-
-private:
- TEvColumnShard::TEvReadBlobRanges::TPtr Ev;
- std::unique_ptr<TEvColumnShard::TEvReadBlobRangesResult> Result;
-};
-
-
+};
+
+
+class TTxReadBlobRanges : public TTransactionBase<TColumnShard> {
+public:
+ TTxReadBlobRanges(TColumnShard* self, TEvColumnShard::TEvReadBlobRanges::TPtr& ev)
+ : TTransactionBase<TColumnShard>(self)
+ , Ev(ev)
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override;
+ void Complete(const TActorContext& ctx) override;
+ TTxType GetTxType() const override { return TXTYPE_READ_BLOB_RANGES; }
+
+private:
+ TEvColumnShard::TEvReadBlobRanges::TPtr Ev;
+ std::unique_ptr<TEvColumnShard::TEvReadBlobRangesResult> Result;
+};
+
+
/// Common transaction for WriteIndex and GranuleCompaction.
/// For WriteIndex it writes a new portion from the InsertTable into the index.
/// For GranuleCompaction it writes a new portion of indexed data and marks old data with a "switching" snapshot.
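For orientation, a minimal sketch of the pattern described above: one event type carries a GranuleCompaction flag (set in the compaction constructor shown earlier in this file), and a single transaction body serves both the plain WriteIndex path and the compaction path. All names below (TWriteIndexEvent, ApplyWriteIndex, ApplyCompaction) are illustrative stand-ins, not the actual YDB classes.

    #include <iostream>
    #include <memory>

    // Hypothetical stand-in for TEvPrivate::TEvWriteIndex: the same event is reused
    // for both indexing and granule compaction, distinguished by a flag.
    struct TWriteIndexEvent {
        bool GranuleCompaction = false;   // set by the compaction path only
    };

    void ApplyWriteIndex(const TWriteIndexEvent&) { std::cout << "write new portion from insert table\n"; }
    void ApplyCompaction(const TWriteIndexEvent&) { std::cout << "write compacted portion, mark old data\n"; }

    // One "transaction" body serving both callers, mirroring the idea of TTxWriteIndex.
    void ExecuteWriteIndexTx(const std::unique_ptr<TWriteIndexEvent>& ev) {
        if (ev->GranuleCompaction) {
            ApplyCompaction(*ev);
        } else {
            ApplyWriteIndex(*ev);
        }
    }

    int main() {
        auto ev = std::make_unique<TWriteIndexEvent>();
        ev->GranuleCompaction = true;     // as done in the compaction event constructor above
        ExecuteWriteIndexTx(ev);
    }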
diff --git a/ydb/core/tx/columnshard/columnshard_ut_common.cpp b/ydb/core/tx/columnshard/columnshard_ut_common.cpp
index 0fe6bbeb794..fea0855cee2 100644
--- a/ydb/core/tx/columnshard/columnshard_ut_common.cpp
+++ b/ydb/core/tx/columnshard/columnshard_ut_common.cpp
@@ -1,7 +1,7 @@
#include "columnshard_ut_common.h"
-#include "columnshard__stats_scan.h"
-
+#include "columnshard__stats_scan.h"
+
#include <ydb/core/base/tablet.h>
#include <ydb/core/base/tablet_resolver.h>
#include <library/cpp/testing/unittest/registar.h>
@@ -91,14 +91,14 @@ void ScanIndexStats(TTestBasicRuntime& runtime, TActorId& sender, const TVector<
record.SetTxId(snap.PlanStep);
record.SetScanId(scanId);
// record.SetLocalPathId(0);
- record.SetTablePath(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE);
+ record.SetTablePath(NOlap::TIndexInfo::STORE_INDEX_STATS_TABLE);
// Schema: pathId, kind, rows, bytes, rawBytes. PK: {pathId, kind}
//record.SetSchemaVersion(0);
- auto ydbSchema = PrimaryIndexStatsSchema;
- for (const auto& col : ydbSchema.Columns) {
- record.AddColumnTags(col.second.Id);
- record.AddColumnTypes(col.second.PType);
+ auto ydbSchema = PrimaryIndexStatsSchema;
+ for (const auto& col : ydbSchema.Columns) {
+ record.AddColumnTags(col.second.Id);
+ record.AddColumnTypes(col.second.PType);
}
for (ui64 pathId : pathIds) {
@@ -215,7 +215,7 @@ TString MakeTestBlob(std::pair<ui64, ui64> range, const TVector<std::pair<TStrin
batchBuilder.AddRow(unused, NKikimr::TDbTupleRef(types.data(), cells.data(), types.size()));
}
- auto batch = batchBuilder.FlushBatch(true);
+ auto batch = batchBuilder.FlushBatch(true);
UNIT_ASSERT(batch);
auto status = batch->ValidateFull();
UNIT_ASSERT(status.ok());
diff --git a/ydb/core/tx/columnshard/compaction_actor.cpp b/ydb/core/tx/columnshard/compaction_actor.cpp
index 67b45f9201a..1e61e2b5f10 100644
--- a/ydb/core/tx/columnshard/compaction_actor.cpp
+++ b/ydb/core/tx/columnshard/compaction_actor.cpp
@@ -4,15 +4,15 @@
namespace NKikimr::NColumnShard {
-using NOlap::TBlobRange;
-
+using NOlap::TBlobRange;
+
class TCompactionActor : public TActorBootstrapped<TCompactionActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::TX_COLUMNSHARD_COMPACTION_ACTOR;
+ return NKikimrServices::TActivity::TX_COLUMNSHARD_COMPACTION_ACTOR;
}
- TCompactionActor(ui64 tabletId, const TActorId& parent)
+ TCompactionActor(ui64 tabletId, const TActorId& parent)
: TabletId(tabletId)
, Parent(parent)
, BlobCacheActorId(NBlobCache::MakeBlobCacheServiceId())
@@ -35,18 +35,18 @@ public:
for (auto& portionInfo : switchedPortions) {
Y_VERIFY(!portionInfo.Empty());
- std::vector<NBlobCache::TBlobRange> ranges;
+ std::vector<NBlobCache::TBlobRange> ranges;
for (auto& rec : portionInfo.Records) {
auto& blobRange = rec.BlobRange;
Blobs[blobRange] = {};
- // Group only ranges from the same blob into one request
- if (!ranges.empty() && ranges.back().BlobId != blobRange.BlobId) {
- SendReadRequest(ctx, std::move(ranges));
- ranges = {};
- }
- ranges.push_back(blobRange);
+ // Group only ranges from the same blob into one request
+ if (!ranges.empty() && ranges.back().BlobId != blobRange.BlobId) {
+ SendReadRequest(ctx, std::move(ranges));
+ ranges = {};
+ }
+ ranges.push_back(blobRange);
}
- SendReadRequest(ctx, std::move(ranges));
+ SendReadRequest(ctx, std::move(ranges));
}
}
@@ -55,7 +55,7 @@ public:
<< ") at tablet " << TabletId << " (compaction)");
auto& event = *ev->Get();
- const TBlobRange& blobId = event.BlobRange;
+ const TBlobRange& blobId = event.BlobRange;
Y_VERIFY(Blobs.count(blobId));
if (!Blobs[blobId].empty()) {
return;
@@ -65,7 +65,7 @@ public:
Y_VERIFY(event.Data.size());
TString blobData = event.Data;
- Y_VERIFY(blobData.size() == blobId.Size, "%u vs %u", (ui32)blobData.size(), blobId.Size);
+ Y_VERIFY(blobData.size() == blobId.Size, "%u vs %u", (ui32)blobData.size(), blobId.Size);
Blobs[blobId] = blobData;
} else {
LOG_S_ERROR("TEvReadBlobRangeResult cannot get blob " << blobId.ToString() << " status " << event.Status
@@ -102,7 +102,7 @@ private:
TActorId Parent;
TActorId BlobCacheActorId;
std::unique_ptr<TEvPrivate::TEvWriteIndex> TxEvent;
- THashMap<TBlobRange, TString> Blobs;
+ THashMap<TBlobRange, TString> Blobs;
ui32 NumRead{0};
void Clear() {
@@ -110,11 +110,11 @@ private:
NumRead = 0;
}
- void SendReadRequest(const TActorContext&, std::vector<NBlobCache::TBlobRange>&& ranges) {
- if (ranges.empty())
- return;
-
- Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRangeBatch(std::move(ranges), false));
+ void SendReadRequest(const TActorContext&, std::vector<NBlobCache::TBlobRange>&& ranges) {
+ if (ranges.empty())
+ return;
+
+ Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRangeBatch(std::move(ranges), false));
}
void CompactGranules(const TActorContext& ctx) {
@@ -126,13 +126,13 @@ private:
}
LOG_S_DEBUG("Granules compaction started at tablet " << TabletId);
- {
- TCpuGuard guard(TxEvent->ResourceUsage);
+ {
+ TCpuGuard guard(TxEvent->ResourceUsage);
- TxEvent->IndexChanges->SetBlobs(std::move(Blobs));
+ TxEvent->IndexChanges->SetBlobs(std::move(Blobs));
- TxEvent->Blobs = NOlap::TColumnEngineForLogs::CompactBlobs(TxEvent->IndexInfo, TxEvent->IndexChanges);
- }
+ TxEvent->Blobs = NOlap::TColumnEngineForLogs::CompactBlobs(TxEvent->IndexInfo, TxEvent->IndexChanges);
+ }
ui32 blobsSize = TxEvent->Blobs.size();
ctx.Send(Parent, TxEvent.release());
@@ -141,8 +141,8 @@ private:
}
};
-IActor* CreateCompactionActor(ui64 tabletId, const TActorId& parent) {
- return new TCompactionActor(tabletId, parent);
+IActor* CreateCompactionActor(ui64 tabletId, const TActorId& parent) {
+ return new TCompactionActor(tabletId, parent);
}
}
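The read loop in TCompactionActor above batches consecutive blob ranges that belong to the same blob into a single read request. A compact sketch of that grouping step, using plain std types in place of NBlobCache::TBlobRange; the names and the sendBatch callback are illustrative only:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct TBlobRange {
        std::string BlobId;   // stand-in for TUnifiedBlobId
        uint32_t Offset = 0;
        uint32_t Size = 0;
    };

    // Flush ranges for one blob as a single batched request, as SendReadRequest does above.
    using TSendBatch = std::function<void(std::vector<TBlobRange>&&)>;

    void GroupAndSend(const std::vector<TBlobRange>& records, const TSendBatch& sendBatch) {
        std::vector<TBlobRange> ranges;
        for (const auto& blobRange : records) {
            // Group only ranges from the same blob into one request.
            if (!ranges.empty() && ranges.back().BlobId != blobRange.BlobId) {
                sendBatch(std::move(ranges));
                ranges = {};
            }
            ranges.push_back(blobRange);
        }
        if (!ranges.empty()) {
            sendBatch(std::move(ranges));   // tail batch for the last blob
        }
    }

    int main() {
        std::vector<TBlobRange> recs = {{"blobA", 0, 10}, {"blobA", 10, 20}, {"blobB", 0, 5}};
        GroupAndSend(recs, [](std::vector<TBlobRange>&& batch) {
            std::cout << "request with " << batch.size() << " ranges from " << batch.front().BlobId << "\n";
        });
    }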
diff --git a/ydb/core/tx/columnshard/defs.h b/ydb/core/tx/columnshard/defs.h
index 85329f5a690..b8193e29925 100644
--- a/ydb/core/tx/columnshard/defs.h
+++ b/ydb/core/tx/columnshard/defs.h
@@ -76,11 +76,11 @@ struct TCompactionLimits {
struct TUsage {
ui64 CPUExecTime{};
ui64 Network{};
-
- void Add(const TUsage& other) {
- CPUExecTime += other.CPUExecTime;
- Network += other.Network;
- }
+
+ void Add(const TUsage& other) {
+ CPUExecTime += other.CPUExecTime;
+ Network += other.Network;
+ }
};
class TCpuGuard {
@@ -98,20 +98,20 @@ private:
THPTimer CpuTimer;
};
-
-// A helper to resolve DS groups where a tablet's blob ids are stored
-class TBlobGroupSelector : public NOlap::IBlobGroupSelector {
-private:
- TIntrusiveConstPtr<TTabletStorageInfo> TabletInfo;
-
-public:
- explicit TBlobGroupSelector(TIntrusiveConstPtr<TTabletStorageInfo> tabletInfo)
- : TabletInfo(tabletInfo)
- {}
-
- ui32 GetGroup(const TLogoBlobID& blobId) const override {
- return TabletInfo->GroupFor(blobId.Channel(), blobId.Generation());
- }
-};
-
+
+// A helper to resolve DS groups where a tablet's blob ids are stored
+class TBlobGroupSelector : public NOlap::IBlobGroupSelector {
+private:
+ TIntrusiveConstPtr<TTabletStorageInfo> TabletInfo;
+
+public:
+ explicit TBlobGroupSelector(TIntrusiveConstPtr<TTabletStorageInfo> tabletInfo)
+ : TabletInfo(tabletInfo)
+ {}
+
+ ui32 GetGroup(const TLogoBlobID& blobId) const override {
+ return TabletInfo->GroupFor(blobId.Channel(), blobId.Generation());
+ }
+};
+
}
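TUsage::Add and TCpuGuard above form a small RAII pattern: a guard times a scope and accumulates the result into the usage record carried by the transaction event. A minimal sketch of the same idea; the real TCpuGuard uses THPTimer, so std::chrono and the microsecond field here are stand-ins, not the actual implementation:

    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <thread>

    struct TUsage {
        uint64_t CPUExecTimeUs = 0;   // stand-in for CPUExecTime
        uint64_t Network = 0;

        void Add(const TUsage& other) {
            CPUExecTimeUs += other.CPUExecTimeUs;
            Network += other.Network;
        }
    };

    // RAII guard: on destruction, adds the elapsed time of the scope to the usage record.
    class TCpuGuard {
    public:
        explicit TCpuGuard(TUsage& usage)
            : Usage(usage)
            , Start(std::chrono::steady_clock::now())
        {}

        ~TCpuGuard() {
            auto elapsed = std::chrono::steady_clock::now() - Start;
            Usage.CPUExecTimeUs += std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
        }

    private:
        TUsage& Usage;
        std::chrono::steady_clock::time_point Start;
    };

    int main() {
        TUsage usage;
        {
            TCpuGuard guard(usage);   // mirrors "TCpuGuard guard(TxEvent->ResourceUsage);" in CompactGranules
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
        }
        std::cout << "accumulated: " << usage.CPUExecTimeUs << " us\n";
    }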
diff --git a/ydb/core/tx/columnshard/engines/column_engine.h b/ydb/core/tx/columnshard/engines/column_engine.h
index 2c1c2be3de2..bad39f81cae 100644
--- a/ydb/core/tx/columnshard/engines/column_engine.h
+++ b/ydb/core/tx/columnshard/engines/column_engine.h
@@ -8,7 +8,7 @@
#include "granules_table.h"
#include <ydb/core/tx/columnshard/blob.h>
-
+
namespace NKikimr::NOlap {
struct TPredicate;
@@ -78,7 +78,7 @@ public:
: Type(type)
{}
- void SetBlobs(THashMap<TBlobRange, TString>&& blobs) {
+ void SetBlobs(THashMap<TBlobRange, TString>&& blobs) {
Y_VERIFY(!blobs.empty());
Blobs = std::move(blobs);
}
@@ -93,7 +93,7 @@ public:
TVector<TPortionInfo> AppendedPortions; // New portions after indexing or compaction
TVector<TPortionInfo> PortionsToDrop;
TVector<std::pair<TPortionInfo, ui64>> PortionsToMove; // {portion, new granule}
- THashMap<TBlobRange, TString> Blobs;
+ THashMap<TBlobRange, TString> Blobs;
bool IsInsert() const { return Type == INSERT; }
bool IsCompaction() const { return Type == COMPACTION; }
@@ -146,12 +146,12 @@ public:
}
out << "; ";
}
- if (ui32 dropped = changes.PortionsToDrop.size()) {
- out << "drop " << dropped << " portions";
- for (auto& portionInfo : changes.PortionsToDrop) {
+ if (ui32 dropped = changes.PortionsToDrop.size()) {
+ out << "drop " << dropped << " portions";
+ for (auto& portionInfo : changes.PortionsToDrop) {
out << portionInfo;
- }
- }
+ }
+ }
return out;
}
};
@@ -208,12 +208,12 @@ struct TSelectInfo {
out.Granules = Granules.size();
out.Portions = Portions.size();
- THashSet<TUnifiedBlobId> uniqBlob;
+ THashSet<TUnifiedBlobId> uniqBlob;
for (auto& portionInfo : Portions) {
out.Records += portionInfo.NumRecords();
out.Rows += portionInfo.NumRows();
for (auto& rec : portionInfo.Records) {
- uniqBlob.insert(rec.BlobRange.BlobId);
+ uniqBlob.insert(rec.BlobRange.BlobId);
}
}
out.Blobs += uniqBlob.size();
@@ -293,8 +293,8 @@ public:
virtual void UpdateDefaultSchema(const TSnapshot& snapshot, TIndexInfo&& info) = 0;
//virtual void UpdateTableSchema(ui64 pathId, const TSnapshot& snapshot, TIndexInfo&& info) = 0; // TODO
virtual void UpdateCompactionLimits(const TCompactionLimits& limits) = 0;
- virtual const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& GetStats() const = 0;
- virtual const TColumnEngineStats& GetTotalStats() = 0;
+ virtual const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& GetStats() const = 0;
+ virtual const TColumnEngineStats& GetTotalStats() = 0;
virtual ui64 MemoryUsage() const { return 0; }
};
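The TSelectInfo stats code above deduplicates blob ids across all portion records with a hash set before counting them. A short stand-alone sketch of that counting step, with std::unordered_set in place of THashSet and simplified record types:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    struct TRecord { std::string BlobId; };            // stand-in for TColumnRecord::BlobRange.BlobId
    struct TPortion { std::vector<TRecord> Records; };

    // Count how many distinct blobs back the selected portions, as TSelectInfo::Stats() does.
    std::size_t CountUniqueBlobs(const std::vector<TPortion>& portions) {
        std::unordered_set<std::string> uniqBlob;
        for (const auto& portion : portions) {
            for (const auto& rec : portion.Records) {
                uniqBlob.insert(rec.BlobId);
            }
        }
        return uniqBlob.size();
    }

    int main() {
        TPortion p1;
        p1.Records = {{"b1"}, {"b1"}, {"b2"}};
        TPortion p2;
        p2.Records = {{"b2"}};
        std::cout << CountUniqueBlobs({p1, p2}) << " unique blobs\n";   // prints 2
    }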
diff --git a/ydb/core/tx/columnshard/engines/column_engine_logs.cpp b/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
index 0d16fbad700..6fc16a5e3f1 100644
--- a/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
+++ b/ydb/core/tx/columnshard/engines/column_engine_logs.cpp
@@ -49,11 +49,11 @@ ui64 ExtractTimestamp(const std::shared_ptr<TPredicate>& pkPredicate, const std:
// Although source batches are ordered only by PK (sorting key), the resulting pathBatches are ordered by the extended key.
// They have const snapshot columns that do not break sorting inside batch.
-std::shared_ptr<arrow::RecordBatch> AddSpecials(const TIndexInfo& indexInfo, const TInsertedData& inserted, const TString& data) {
+std::shared_ptr<arrow::RecordBatch> AddSpecials(const TIndexInfo& indexInfo, const TInsertedData& inserted, const TString& data) {
auto schema = indexInfo.ArrowSchema();
- Y_VERIFY(!data.empty(), "Blob data not present");
- auto batch = NArrow::DeserializeBatch(data, schema);
- Y_VERIFY(batch, "Deserialization failed");
+ Y_VERIFY(!data.empty(), "Blob data not present");
+ auto batch = NArrow::DeserializeBatch(data, schema);
+ Y_VERIFY(batch, "Deserialization failed");
i64 numRows = batch->num_rows();
auto columns = indexInfo.MakeSpecialColumns(inserted, numRows);
@@ -331,7 +331,7 @@ TColumnEngineForLogs::TColumnEngineForLogs(TIndexInfo&& info, ui64 tabletId, con
/// * apply PK predicate before REPLACE
IndexInfo.SetAllKeys(IndexInfo.GetPK(), {0});
- ui32 indexId = IndexInfo.GetId();
+ ui32 indexId = IndexInfo.GetId();
GranulesTable = std::make_shared<TGranulesTable>(indexId);
ColumnsTable = std::make_shared<TColumnsTable>(indexId);
CountersTable = std::make_shared<TCountersTable>(indexId);
@@ -349,11 +349,11 @@ ui64 TColumnEngineForLogs::MemoryUsage() const {
Counters.ColumnMetadataBytes;
}
-const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& TColumnEngineForLogs::GetStats() const {
- return PathStats;
-}
+const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& TColumnEngineForLogs::GetStats() const {
+ return PathStats;
+}
-const TColumnEngineStats& TColumnEngineForLogs::GetTotalStats() {
+const TColumnEngineStats& TColumnEngineForLogs::GetTotalStats() {
Counters.Tables = PathGranules.size();
Counters.Granules = Granules.size();
Counters.EmptyGranules = EmptyGranules.size();
@@ -385,10 +385,10 @@ void TColumnEngineForLogs::UpdatePortionStats(TColumnEngineStats& engineStats, c
bool isErase, bool isLoad) const {
ui64 columnRecords = portionInfo.Records.size();
ui64 metadataBytes = 0;
- THashSet<TUnifiedBlobId> blobs;
+ THashSet<TUnifiedBlobId> blobs;
for (auto& rec : portionInfo.Records) {
metadataBytes += rec.Metadata.size();
- blobs.insert(rec.BlobRange.BlobId);
+ blobs.insert(rec.BlobRange.BlobId);
}
ui32 rows = portionInfo.NumRows();
@@ -411,9 +411,9 @@ void TColumnEngineForLogs::UpdatePortionStats(TColumnEngineStats& engineStats, c
case NOlap::TPortionMeta::SPLIT_COMPACTED:
srcStats = &engineStats.SplitCompacted;
break;
- case NOlap::TPortionMeta::INACTIVE:
- srcStats = &engineStats.Inactive;
- break;
+ case NOlap::TPortionMeta::INACTIVE:
+ srcStats = &engineStats.Inactive;
+ break;
}
Y_VERIFY(srcStats);
auto* stats = portionInfo.IsActive() ? srcStats : &engineStats.Inactive;
@@ -581,8 +581,8 @@ std::shared_ptr<TColumnEngineChanges> TColumnEngineForLogs::StartInsert(TVector<
if (PathsGranulesOverloaded.count(pathId)) {
return {};
}
-
- // FIXME: Copying all granules of a huge table might be heavy
+
+ // FIXME: Copying all granules of a huge table might be heavy
changes->PathToGranule[pathId] = PathGranules[pathId];
} else {
++reserveGranules;
@@ -1251,7 +1251,7 @@ std::shared_ptr<TSelectInfo> TColumnEngineForLogs::Select(ui64 pathId, TSnapshot
auto& spg = Granules.find(granule)->second;
Y_VERIFY(spg);
auto& portions = spg->Portions;
- bool granuleHasDataForSnaphsot = false;
+ bool granuleHasDataForSnaphsot = false;
TMap<TSnapshot, TVector<ui64>> orderedPortions = GetOrderedPortions(granule, snapshot);
for (auto& [snap, vec] : orderedPortions) {
@@ -1268,13 +1268,13 @@ std::shared_ptr<TSelectInfo> TColumnEngineForLogs::Select(ui64 pathId, TSnapshot
}
}
out->Portions.emplace_back(std::move(outPortion));
- granuleHasDataForSnaphsot = true;
+ granuleHasDataForSnaphsot = true;
}
}
-
- if (granuleHasDataForSnaphsot) {
- out->Granules.push_back(spg->Record);
- }
+
+ if (granuleHasDataForSnaphsot) {
+ out->Granules.push_back(spg->Record);
+ }
}
return out;
@@ -1361,10 +1361,10 @@ TVector<TString> TColumnEngineForLogs::IndexBlobs(const TIndexInfo& indexInfo,
minSnapshot = insertSnap;
}
- TBlobRange blobRange(inserted.BlobId, 0, inserted.BlobId.BlobSize());
- auto* blobData = changes->Blobs.FindPtr(blobRange);
- Y_VERIFY(blobData, "Data for range %s has not been read", blobRange.ToString().c_str());
- auto batch = AddSpecials(indexInfo, inserted, *blobData);
+ TBlobRange blobRange(inserted.BlobId, 0, inserted.BlobId.BlobSize());
+ auto* blobData = changes->Blobs.FindPtr(blobRange);
+ Y_VERIFY(blobData, "Data for range %s has not been read", blobRange.ToString().c_str());
+ auto batch = AddSpecials(indexInfo, inserted, *blobData);
pathBatches[inserted.PathId].push_back(batch);
Y_VERIFY_DEBUG(NArrow::IsSorted(pathBatches[inserted.PathId].back(), indexInfo.GetReplaceKey()));
}
@@ -1396,7 +1396,7 @@ TVector<TString> TColumnEngineForLogs::IndexBlobs(const TIndexInfo& indexInfo,
static std::shared_ptr<arrow::RecordBatch> CompactInOneGranule(const TIndexInfo& indexInfo, ui64 granule,
const TVector<TPortionInfo>& portions,
- const THashMap<TBlobRange, TString>& blobs) {
+ const THashMap<TBlobRange, TString>& blobs) {
std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
batches.reserve(portions.size());
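UpdatePortionStats earlier in this file routes each portion's counters into a per-kind bucket chosen by a switch on TPortionMeta, and sends inactive portions to the Inactive bucket regardless of kind. A reduced sketch of that bucketing logic; the enum and struct names are simplified stand-ins:

    #include <cstdint>
    #include <iostream>

    // Simplified stand-ins for TPortionMeta::EProduced and TColumnEngineStats.
    enum class EProduced { INSERTED, COMPACTED, SPLIT_COMPACTED, INACTIVE };

    struct TBucket { uint64_t Portions = 0; uint64_t Rows = 0; };

    struct TEngineStats {
        TBucket Inserted;
        TBucket Compacted;
        TBucket SplitCompacted;
        TBucket Inactive;
    };

    // Pick the bucket by the kind that produced the portion; inactive portions are
    // always accounted in Inactive, mirroring UpdatePortionStats above.
    void UpdatePortionStats(TEngineStats& stats, EProduced produced, bool isActive, uint64_t rows) {
        TBucket* bucket = nullptr;
        switch (produced) {
            case EProduced::INSERTED:        bucket = &stats.Inserted;       break;
            case EProduced::COMPACTED:       bucket = &stats.Compacted;      break;
            case EProduced::SPLIT_COMPACTED: bucket = &stats.SplitCompacted; break;
            case EProduced::INACTIVE:        bucket = &stats.Inactive;       break;
        }
        if (!isActive) {
            bucket = &stats.Inactive;
        }
        bucket->Portions += 1;
        bucket->Rows += rows;
    }

    int main() {
        TEngineStats stats;
        UpdatePortionStats(stats, EProduced::COMPACTED, /*isActive=*/true, 100);
        UpdatePortionStats(stats, EProduced::COMPACTED, /*isActive=*/false, 50);
        std::cout << stats.Compacted.Rows << " active rows, " << stats.Inactive.Rows << " inactive rows\n";
    }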
diff --git a/ydb/core/tx/columnshard/engines/column_engine_logs.h b/ydb/core/tx/columnshard/engines/column_engine_logs.h
index 44be418425f..dca2e6afc12 100644
--- a/ydb/core/tx/columnshard/engines/column_engine_logs.h
+++ b/ydb/core/tx/columnshard/engines/column_engine_logs.h
@@ -146,8 +146,8 @@ public:
const TSnapshot& snapshot) override;
void UpdateDefaultSchema(const TSnapshot& snapshot, TIndexInfo&& info) override;
void UpdateCompactionLimits(const TCompactionLimits& limits) override { Limits = limits; }
- const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& GetStats() const override;
- const TColumnEngineStats& GetTotalStats() override;
+ const TMap<ui64, std::shared_ptr<TColumnEngineStats>>& GetStats() const override;
+ const TColumnEngineStats& GetTotalStats() override;
ui64 MemoryUsage() const override;
std::shared_ptr<TSelectInfo> Select(ui64 pathId, TSnapshot snapshot,
@@ -203,7 +203,7 @@ private:
std::shared_ptr<TCountersTable> CountersTable;
THashMap<ui64, std::shared_ptr<TGranuleMeta>> Granules; // granule -> meta
THashMap<ui64, TMap<ui64, ui64>> PathGranules; // path_id -> {timestamp, granule}
- TMap<ui64, std::shared_ptr<TColumnEngineStats>> PathStats; // per path_id stats sorted by path_id
+ TMap<ui64, std::shared_ptr<TColumnEngineStats>> PathStats; // per path_id stats sorted by path_id
THashSet<ui64> GranulesInSplit;
THashSet<ui64> EmptyGranules;
THashMap<ui64, THashSet<ui64>> PathsGranulesOverloaded;
diff --git a/ydb/core/tx/columnshard/engines/columns_table.h b/ydb/core/tx/columnshard/engines/columns_table.h
index aaf428e2f1d..7b27c38bedc 100644
--- a/ydb/core/tx/columnshard/engines/columns_table.h
+++ b/ydb/core/tx/columnshard/engines/columns_table.h
@@ -1,5 +1,5 @@
#pragma once
-#include "defs.h"
+#include "defs.h"
#include "db_wrapper.h"
namespace NKikimr::NOlap {
@@ -13,7 +13,7 @@ struct TColumnRecord {
ui16 Chunk; // Number of blob for column ColumnName in Portion
ui64 XPlanStep{0}; // {XPlanStep, XTxId} is the snapshot at which the blob was removed (i.e. compacted into another one)
ui64 XTxId{0};
- TBlobRange BlobRange;
+ TBlobRange BlobRange;
TString Metadata;
bool operator == (const TColumnRecord& rec) const {
@@ -22,7 +22,7 @@ struct TColumnRecord {
}
TString SerializedBlobId() const {
- return BlobRange.BlobId.SerializeBinary();
+ return BlobRange.BlobId.SerializeBinary();
}
bool Valid() const {
@@ -38,7 +38,7 @@ struct TColumnRecord {
}
bool ValidBlob() const {
- return BlobRange.BlobId.IsValid() && BlobRange.Size;
+ return BlobRange.BlobId.IsValid() && BlobRange.Size;
}
void SetSnapshot(const TSnapshot& snap) {
@@ -77,7 +77,7 @@ struct TColumnRecord {
if (rec.XPlanStep) {
out << '-' << rec.XPlanStep << ':' << (rec.XTxId == Max<ui64>() ? "max" : ToString(rec.XTxId));
}
- out << ',' << rec.BlobRange.ToString();
+ out << ',' << rec.BlobRange.ToString();
out << '}';
return out;
}
diff --git a/ydb/core/tx/columnshard/engines/db_wrapper.cpp b/ydb/core/tx/columnshard/engines/db_wrapper.cpp
index f95007f9559..89352303602 100644
--- a/ydb/core/tx/columnshard/engines/db_wrapper.cpp
+++ b/ydb/core/tx/columnshard/engines/db_wrapper.cpp
@@ -39,7 +39,7 @@ bool TDbWrapper::Load(THashMap<TWriteId, TInsertedData>& inserted,
THashMap<TWriteId, TInsertedData>& aborted,
const TInstant& loadTime) {
NIceDb::TNiceDb db(Database);
- return NColumnShard::Schema::InsertTable_Load(db, DsGroupSelector, inserted, committed, aborted, loadTime);
+ return NColumnShard::Schema::InsertTable_Load(db, DsGroupSelector, inserted, committed, aborted, loadTime);
}
void TDbWrapper::WriteGranule(ui32 index, const TGranuleRecord& row) {
@@ -69,7 +69,7 @@ void TDbWrapper::EraseColumn(ui32 index, const TColumnRecord& row) {
bool TDbWrapper::LoadColumns(ui32 index, std::function<void(TColumnRecord&&)> callback) {
NIceDb::TNiceDb db(Database);
- return NColumnShard::Schema::IndexColumns_Load(db, DsGroupSelector, index, callback);
+ return NColumnShard::Schema::IndexColumns_Load(db, DsGroupSelector, index, callback);
}
void TDbWrapper::WriteCounter(ui32 index, ui32 counterId, ui64 value) {
diff --git a/ydb/core/tx/columnshard/engines/db_wrapper.h b/ydb/core/tx/columnshard/engines/db_wrapper.h
index 715f91b745c..0b117c291b7 100644
--- a/ydb/core/tx/columnshard/engines/db_wrapper.h
+++ b/ydb/core/tx/columnshard/engines/db_wrapper.h
@@ -1,5 +1,5 @@
#pragma once
-#include "defs.h"
+#include "defs.h"
namespace NKikimr::NTable {
class TDatabase;
@@ -13,7 +13,7 @@ struct TGranuleRecord;
class IDbWrapper {
public:
- virtual ~IDbWrapper() = default;
+ virtual ~IDbWrapper() = default;
virtual void Insert(const TInsertedData& data) = 0;
virtual void Commit(const TInsertedData& data) = 0;
@@ -41,9 +41,9 @@ public:
class TDbWrapper : public IDbWrapper {
public:
- TDbWrapper(NTable::TDatabase& db, const IBlobGroupSelector* dsGroupSelector)
+ TDbWrapper(NTable::TDatabase& db, const IBlobGroupSelector* dsGroupSelector)
: Database(db)
- , DsGroupSelector(dsGroupSelector)
+ , DsGroupSelector(dsGroupSelector)
{}
void Insert(const TInsertedData& data) override;
@@ -71,7 +71,7 @@ public:
private:
NTable::TDatabase& Database;
- const IBlobGroupSelector* DsGroupSelector;
+ const IBlobGroupSelector* DsGroupSelector;
};
}
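TDbWrapper above is a thin adapter: it implements the storage-facing IDbWrapper interface over the tablet's local database and carries an injected IBlobGroupSelector so blob ids can be resolved to DS groups during loads. A schematic version of that wiring, with all types reduced to stand-ins:

    #include <iostream>
    #include <string>

    // Stand-ins for the real interfaces; only the shape of the wiring is shown.
    struct IBlobGroupSelector {
        virtual ~IBlobGroupSelector() = default;
        virtual unsigned GetGroup(const std::string& blobId) const = 0;
    };

    struct TLocalDb {                 // stand-in for NTable::TDatabase
        void Write(const std::string& row) { std::cout << "write: " << row << "\n"; }
    };

    struct IDbWrapper {
        virtual ~IDbWrapper() = default;
        virtual void Insert(const std::string& data) = 0;
    };

    // Thin adapter: delegates to the injected database and keeps the group selector
    // around so blob ids can be resolved while loading.
    class TDbWrapper : public IDbWrapper {
    public:
        TDbWrapper(TLocalDb& db, const IBlobGroupSelector* dsGroupSelector)
            : Database(db)
            , DsGroupSelector(dsGroupSelector)
        {}

        void Insert(const std::string& data) override {
            Database.Write(data);
        }

        unsigned ResolveGroup(const std::string& blobId) const {
            return DsGroupSelector->GetGroup(blobId);
        }

    private:
        TLocalDb& Database;
        const IBlobGroupSelector* DsGroupSelector;
    };

    struct TFixedGroupSelector : IBlobGroupSelector {
        unsigned GetGroup(const std::string&) const override { return 42; }
    };

    int main() {
        TLocalDb db;
        TFixedGroupSelector selector;
        TDbWrapper wrapper(db, &selector);
        wrapper.Insert("row-1");
        std::cout << "group: " << wrapper.ResolveGroup("blob-1") << "\n";
    }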
diff --git a/ydb/core/tx/columnshard/engines/defs.h b/ydb/core/tx/columnshard/engines/defs.h
index 52350ffea84..51801405b40 100644
--- a/ydb/core/tx/columnshard/engines/defs.h
+++ b/ydb/core/tx/columnshard/engines/defs.h
@@ -16,10 +16,10 @@ namespace NKikimr::NOlap {
using TLogThis = TCtorLogger<NKikimrServices::TX_COLUMNSHARD>;
-enum class TWriteId : ui64 {};
-
-inline TWriteId operator ++(TWriteId& w) { w = TWriteId{ui64(w) + 1}; return w; }
-
+enum class TWriteId : ui64 {};
+
+inline TWriteId operator ++(TWriteId& w) { w = TWriteId{ui64(w) + 1}; return w; }
+
struct TSnapshot {
ui64 PlanStep{0};
ui64 TxId{0};
@@ -47,10 +47,10 @@ struct TSnapshot {
static TSnapshot Max() {
return TSnapshot{(ui64)-1ll, (ui64)-1ll};
}
-
- friend IOutputStream& operator << (IOutputStream& out, const TSnapshot& s) {
- return out << "{" << s.PlanStep << "," << s.TxId << "}";
- }
+
+ friend IOutputStream& operator << (IOutputStream& out, const TSnapshot& s) {
+ return out << "{" << s.PlanStep << "," << s.TxId << "}";
+ }
};
inline bool snapLess(ui64 planStep1, ui64 txId1, ui64 planStep2, ui64 txId2) {
@@ -61,20 +61,20 @@ inline bool snapLessOrEqual(ui64 planStep1, ui64 txId1, ui64 planStep2, ui64 txI
return TSnapshot{planStep1, txId1} <= TSnapshot{planStep2, txId2};
}
-
-class IBlobGroupSelector {
-protected:
- virtual ~IBlobGroupSelector() = default;
-public:
- virtual ui32 GetGroup(const TLogoBlobID& blobId) const = 0;
-};
-
+
+class IBlobGroupSelector {
+protected:
+ virtual ~IBlobGroupSelector() = default;
+public:
+ virtual ui32 GetGroup(const TLogoBlobID& blobId) const = 0;
+};
+
}
-
-template<>
-struct THash<NKikimr::NOlap::TWriteId> {
- inline size_t operator()(const NKikimr::NOlap::TWriteId x) const noexcept {
- return THash<ui64>()(ui64(x));
- }
-};
-
+
+template<>
+struct THash<NKikimr::NOlap::TWriteId> {
+ inline size_t operator()(const NKikimr::NOlap::TWriteId x) const noexcept {
+ return THash<ui64>()(ui64(x));
+ }
+};
+
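TWriteId above is a strong typedef via enum class: it cannot be silently mixed with other ui64 ids, gets an explicit operator++, and needs a THash specialization to be usable as a hash-map key. The same idea expressed with the standard library (std::hash in place of THash, std::unordered_map in place of THashMap):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Strong id type: implicit conversions to/from uint64_t are disallowed.
    enum class TWriteId : uint64_t {};

    inline TWriteId operator++(TWriteId& w) { w = TWriteId{uint64_t(w) + 1}; return w; }

    // Hash specialization so TWriteId can key an unordered_map (THash<...> in the real code).
    namespace std {
        template <>
        struct hash<TWriteId> {
            size_t operator()(TWriteId x) const noexcept {
                return hash<uint64_t>()(uint64_t(x));
            }
        };
    }

    int main() {
        std::unordered_map<TWriteId, std::string> inserted;
        TWriteId id{};
        inserted[id] = "first";
        ++id;
        inserted[id] = "second";
        std::cout << inserted.size() << " writes tracked\n";
    }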
diff --git a/ydb/core/tx/columnshard/engines/index_info.cpp b/ydb/core/tx/columnshard/engines/index_info.cpp
index 28764621333..bf7bba36edf 100644
--- a/ydb/core/tx/columnshard/engines/index_info.cpp
+++ b/ydb/core/tx/columnshard/engines/index_info.cpp
@@ -7,9 +7,9 @@
namespace NKikimr::NOlap {
-const TString TIndexInfo::STORE_INDEX_STATS_TABLE = TString("/") + NSysView::SysPathName + "/" + NSysView::StorePrimaryIndexStatsName;
-const TString TIndexInfo::TABLE_INDEX_STATS_TABLE = TString("/") + NSysView::SysPathName + "/" + NSysView::TablePrimaryIndexStatsName;
-
+const TString TIndexInfo::STORE_INDEX_STATS_TABLE = TString("/") + NSysView::SysPathName + "/" + NSysView::StorePrimaryIndexStatsName;
+const TString TIndexInfo::TABLE_INDEX_STATS_TABLE = TString("/") + NSysView::SysPathName + "/" + NSysView::TablePrimaryIndexStatsName;
+
void ScalarToConstant(const arrow::Scalar& scalar, NKikimrSSA::TProgram_TConstant& value) {
switch (scalar.type->id()) {
case arrow::Type::BOOL:
diff --git a/ydb/core/tx/columnshard/engines/index_info.h b/ydb/core/tx/columnshard/engines/index_info.h
index 85505e9ff33..63cca9a6b9c 100644
--- a/ydb/core/tx/columnshard/engines/index_info.h
+++ b/ydb/core/tx/columnshard/engines/index_info.h
@@ -20,47 +20,47 @@ void ScalarToConstant(const arrow::Scalar& scalar, NKikimrSSA::TProgram_TConstan
std::shared_ptr<arrow::Scalar> ConstantToScalar(const NKikimrSSA::TProgram_TConstant& value,
const std::shared_ptr<arrow::DataType>& type);
-template <typename T>
-static std::shared_ptr<arrow::Schema> MakeArrowSchema(const NTable::TScheme::TTableSchema::TColumns& columns, const T& ids) {
- std::vector<std::shared_ptr<arrow::Field>> fields;
- fields.reserve(ids.size());
-
- for (ui32 id: ids) {
- auto it = columns.find(id);
- if (it == columns.end()) {
- return {};
- }
-
- const auto& column = it->second;
- std::string colName(column.Name.data(), column.Name.size());
- fields.emplace_back(std::make_shared<arrow::Field>(colName, NArrow::GetArrowType(column.PType)));
- }
-
- return std::make_shared<arrow::Schema>(fields);
-}
-
-inline
-TVector<std::pair<TString, NScheme::TTypeId>>
-GetColumns(const NTable::TScheme::TTableSchema& tableSchema, const TVector<ui32>& ids) {
- TVector<std::pair<TString, NScheme::TTypeId>> out;
- out.reserve(ids.size());
- for (ui32 id : ids) {
- Y_VERIFY(tableSchema.Columns.count(id));
- auto& column = tableSchema.Columns.find(id)->second;
- out.emplace_back(column.Name, column.PType);
- }
- return out;
-}
-
+template <typename T>
+static std::shared_ptr<arrow::Schema> MakeArrowSchema(const NTable::TScheme::TTableSchema::TColumns& columns, const T& ids) {
+ std::vector<std::shared_ptr<arrow::Field>> fields;
+ fields.reserve(ids.size());
+
+ for (ui32 id: ids) {
+ auto it = columns.find(id);
+ if (it == columns.end()) {
+ return {};
+ }
+
+ const auto& column = it->second;
+ std::string colName(column.Name.data(), column.Name.size());
+ fields.emplace_back(std::make_shared<arrow::Field>(colName, NArrow::GetArrowType(column.PType)));
+ }
+
+ return std::make_shared<arrow::Schema>(fields);
+}
+
+inline
+TVector<std::pair<TString, NScheme::TTypeId>>
+GetColumns(const NTable::TScheme::TTableSchema& tableSchema, const TVector<ui32>& ids) {
+ TVector<std::pair<TString, NScheme::TTypeId>> out;
+ out.reserve(ids.size());
+ for (ui32 id : ids) {
+ Y_VERIFY(tableSchema.Columns.count(id));
+ auto& column = tableSchema.Columns.find(id)->second;
+ out.emplace_back(column.Name, column.PType);
+ }
+ return out;
+}
+
struct TInsertedData;
/// Column engine index description in terms of tablet's local table.
/// We have to use YDB types for keys here.
-struct TIndexInfo : public NTable::TScheme::TTableSchema {
+struct TIndexInfo : public NTable::TScheme::TTableSchema {
static constexpr const char * SPEC_COL_PLAN_STEP = "_yql_plan_step";
static constexpr const char * SPEC_COL_TX_ID = "_yql_tx_id";
- static const TString STORE_INDEX_STATS_TABLE;
- static const TString TABLE_INDEX_STATS_TABLE;
+ static const TString STORE_INDEX_STATS_TABLE;
+ static const TString TABLE_INDEX_STATS_TABLE;
enum class ESpecialColumn : ui32 {
PLAN_STEP = 0xffffff00,
@@ -68,15 +68,15 @@ struct TIndexInfo : public NTable::TScheme::TTableSchema {
};
TIndexInfo(const TString& name, ui32 id)
- : NTable::TScheme::TTableSchema()
- , Id(id)
- , Name(name)
+ : NTable::TScheme::TTableSchema()
+ , Id(id)
+ , Name(name)
{}
- ui32 GetId() const {
- return Id;
- }
-
+ ui32 GetId() const {
+ return Id;
+ }
+
ui32 GetColumnId(const TString& name) const {
if (!ColumnNames.count(name)) {
if (name == SPEC_COL_PLAN_STEP) {
@@ -118,7 +118,7 @@ struct TIndexInfo : public NTable::TScheme::TTableSchema {
}
TVector<std::pair<TString, NScheme::TTypeId>> GetColumns(const TVector<ui32>& ids) const {
- return NOlap::GetColumns(*this, ids);
+ return NOlap::GetColumns(*this, ids);
}
// Traditional Primary Key (includes uniqueness, search and sorting logic)
@@ -195,8 +195,8 @@ struct TIndexInfo : public NTable::TScheme::TTableSchema {
void SetDefaultCompressionLevel(const std::optional<int>& level = {}) { DefaultCompressionLevel = level; }
private:
- ui32 Id;
- TString Name;
+ ui32 Id;
+ TString Name;
mutable std::shared_ptr<arrow::Schema> Schema;
mutable std::shared_ptr<arrow::Schema> SchemaWithSpecials;
std::shared_ptr<arrow::Schema> SortingKey;
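MakeArrowSchema and GetColumns above both walk a list of column ids and look each one up in the table schema, preserving the requested order. The lookup pattern in isolation, with std::map and plain types in place of the YDB schema classes; MakeArrowSchema additionally converts each entry to an arrow::Field, which is omitted here:

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct TColumnInfo {
        std::string Name;
        int PType = 0;            // stand-in for NScheme::TTypeId
    };

    // Resolve column ids to (name, type) pairs in the order they were requested.
    std::vector<std::pair<std::string, int>>
    GetColumns(const std::map<uint32_t, TColumnInfo>& columns, const std::vector<uint32_t>& ids) {
        std::vector<std::pair<std::string, int>> out;
        out.reserve(ids.size());
        for (uint32_t id : ids) {
            auto it = columns.find(id);
            assert(it != columns.end());        // the real code uses Y_VERIFY here
            out.emplace_back(it->second.Name, it->second.PType);
        }
        return out;
    }

    int main() {
        std::map<uint32_t, TColumnInfo> columns = {
            {1, {"timestamp", 50}},
            {2, {"uid", 4608}},
        };
        for (const auto& [name, type] : GetColumns(columns, {2, 1})) {
            std::cout << name << " : " << type << "\n";
        }
    }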
diff --git a/ydb/core/tx/columnshard/engines/indexed_read_data.cpp b/ydb/core/tx/columnshard/engines/indexed_read_data.cpp
index 04cabf52ff7..d9c3a4f63eb 100644
--- a/ydb/core/tx/columnshard/engines/indexed_read_data.cpp
+++ b/ydb/core/tx/columnshard/engines/indexed_read_data.cpp
@@ -11,23 +11,23 @@ namespace NKikimr::NOlap {
namespace {
-// Slices a batch into smaller batches and appends them to result vector (which might be non-empty already)
-void SliceBatch(const std::shared_ptr<arrow::RecordBatch>& batch,
- const int64_t maxRowsInBatch,
- std::vector<std::shared_ptr<arrow::RecordBatch>>& result)
-{
- int64_t offset = 0;
- while (offset < batch->num_rows()) {
- int64_t rows = std::min<int64_t>(maxRowsInBatch, batch->num_rows() - offset);
- result.emplace_back(batch->Slice(offset, rows));
- offset += rows;
- }
-};
-
+// Slices a batch into smaller batches and appends them to result vector (which might be non-empty already)
+void SliceBatch(const std::shared_ptr<arrow::RecordBatch>& batch,
+ const int64_t maxRowsInBatch,
+ std::vector<std::shared_ptr<arrow::RecordBatch>>& result)
+{
+ int64_t offset = 0;
+ while (offset < batch->num_rows()) {
+ int64_t rows = std::min<int64_t>(maxRowsInBatch, batch->num_rows() - offset);
+ result.emplace_back(batch->Slice(offset, rows));
+ offset += rows;
+ }
+};
+
std::vector<std::shared_ptr<arrow::RecordBatch>> SpecialMergeSorted(const std::vector<std::shared_ptr<arrow::RecordBatch>>& src,
const TIndexInfo& indexInfo,
- const std::shared_ptr<NArrow::TSortDescription>& description,
- const int64_t maxRowsInBatch) {
+ const std::shared_ptr<NArrow::TSortDescription>& description,
+ const int64_t maxRowsInBatch) {
std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
batches.reserve(src.size());
ui64 size = 0;
@@ -47,9 +47,9 @@ std::vector<std::shared_ptr<arrow::RecordBatch>> SpecialMergeSorted(const std::v
#if 1 // Optimization [remove portion's dups]
if (batches.size() == 1) {
Y_VERIFY_DEBUG(NArrow::IsSortedAndUnique(batches[0], description->ReplaceKey));
- std::vector<std::shared_ptr<arrow::RecordBatch>> out;
- SliceBatch(batches[0], maxRowsInBatch, out);
- return out;
+ std::vector<std::shared_ptr<arrow::RecordBatch>> out;
+ SliceBatch(batches[0], maxRowsInBatch, out);
+ return out;
}
#endif
@@ -99,14 +99,14 @@ std::vector<std::shared_ptr<arrow::RecordBatch>> SpecialMergeSorted(const std::v
// The core of optimization: do not merge slice if it's alone in its key range
if (slices.size() == 1) {
Y_VERIFY_DEBUG(NArrow::IsSortedAndUnique(slices[0], description->ReplaceKey));
- // Split big batch into smaller batches if needed
- SliceBatch(slices[0], maxRowsInBatch, out);
+ // Split big batch into smaller batches if needed
+ SliceBatch(slices[0], maxRowsInBatch, out);
continue;
}
- auto merged = NArrow::MergeSortedBatches(slices, description, maxRowsInBatch);
- Y_VERIFY(merged.size() >= 1);
- out.insert(out.end(), merged.begin(), merged.end());
+ auto merged = NArrow::MergeSortedBatches(slices, description, maxRowsInBatch);
+ Y_VERIFY(merged.size() >= 1);
+ out.insert(out.end(), merged.begin(), merged.end());
}
return out;
@@ -118,28 +118,28 @@ std::vector<std::shared_ptr<arrow::RecordBatch>> SpecialMergeSorted(const std::v
}
-std::unique_ptr<NColumnShard::TScanIteratorBase> TReadMetadata::StartScan() const {
- return std::make_unique<NColumnShard::TColumnShardScanIterator>(this->shared_from_this());
-}
-
-
-TVector<std::pair<TString, NScheme::TTypeId>> TReadStatsMetadata::GetResultYqlSchema() const {
- return NOlap::GetColumns(NColumnShard::PrimaryIndexStatsSchema, ResultColumnIds);
-}
-
-TVector<std::pair<TString, NScheme::TTypeId>> TReadStatsMetadata::GetKeyYqlSchema() const {
- return NOlap::GetColumns(NColumnShard::PrimaryIndexStatsSchema, NColumnShard::PrimaryIndexStatsSchema.KeyColumns);
-}
-
-std::unique_ptr<NColumnShard::TScanIteratorBase> TReadStatsMetadata::StartScan() const {
- return std::make_unique<NColumnShard::TStatsIterator>(this->shared_from_this());
-}
-
-
-THashMap<TBlobRange, ui64> TIndexedReadData::InitRead(ui32 inputBatch, bool inGranulesOrder) {
- Y_VERIFY(ReadMetadata->BlobSchema);
- Y_VERIFY(ReadMetadata->LoadSchema);
- Y_VERIFY(ReadMetadata->ResultSchema);
+std::unique_ptr<NColumnShard::TScanIteratorBase> TReadMetadata::StartScan() const {
+ return std::make_unique<NColumnShard::TColumnShardScanIterator>(this->shared_from_this());
+}
+
+
+TVector<std::pair<TString, NScheme::TTypeId>> TReadStatsMetadata::GetResultYqlSchema() const {
+ return NOlap::GetColumns(NColumnShard::PrimaryIndexStatsSchema, ResultColumnIds);
+}
+
+TVector<std::pair<TString, NScheme::TTypeId>> TReadStatsMetadata::GetKeyYqlSchema() const {
+ return NOlap::GetColumns(NColumnShard::PrimaryIndexStatsSchema, NColumnShard::PrimaryIndexStatsSchema.KeyColumns);
+}
+
+std::unique_ptr<NColumnShard::TScanIteratorBase> TReadStatsMetadata::StartScan() const {
+ return std::make_unique<NColumnShard::TStatsIterator>(this->shared_from_this());
+}
+
+
+THashMap<TBlobRange, ui64> TIndexedReadData::InitRead(ui32 inputBatch, bool inGranulesOrder) {
+ Y_VERIFY(ReadMetadata->BlobSchema);
+ Y_VERIFY(ReadMetadata->LoadSchema);
+ Y_VERIFY(ReadMetadata->ResultSchema);
Y_VERIFY(IndexInfo().GetSortingKey());
SortReplaceDescription = IndexInfo().SortReplaceDescription();
@@ -148,12 +148,12 @@ THashMap<TBlobRange, ui64> TIndexedReadData::InitRead(ui32 inputBatch, bool inGr
FirstIndexedBatch = inputBatch;
ui32 batchNo = inputBatch;
- BatchPortion.resize(inputBatch + ReadMetadata->SelectInfo->Portions.size());
- THashMap<TBlobRange, ui64> out;
+ BatchPortion.resize(inputBatch + ReadMetadata->SelectInfo->Portions.size());
+ THashMap<TBlobRange, ui64> out;
ui64 dataBytes = 0;
- for (auto& portionInfo : ReadMetadata->SelectInfo->Portions) {
- Y_VERIFY_S(portionInfo.Records.size() > 0, "ReadMetadata: " << *ReadMetadata);
+ for (auto& portionInfo : ReadMetadata->SelectInfo->Portions) {
+ Y_VERIFY_S(portionInfo.Records.size() > 0, "ReadMetadata: " << *ReadMetadata);
ui64 portion = portionInfo.Records[0].Portion;
ui64 granule = portionInfo.Records[0].Granule;
@@ -172,25 +172,25 @@ THashMap<TBlobRange, ui64> TIndexedReadData::InitRead(ui32 inputBatch, bool inGr
}
for (const NOlap::TColumnRecord& rec : portionInfo.Records) {
- WaitIndexed[batchNo].insert(rec.BlobRange);
- IndexedBlobs[rec.BlobRange] = batchNo;
- out[rec.BlobRange] = rec.Granule;
+ WaitIndexed[batchNo].insert(rec.BlobRange);
+ IndexedBlobs[rec.BlobRange] = batchNo;
+ out[rec.BlobRange] = rec.Granule;
dataBytes += rec.BlobRange.Size;
- Y_VERIFY_S(rec.Valid(), "ReadMetadata: " << *ReadMetadata);
- Y_VERIFY_S(PortionGranule[rec.Portion] == rec.Granule, "ReadMetadata: " << *ReadMetadata);
+ Y_VERIFY_S(rec.Valid(), "ReadMetadata: " << *ReadMetadata);
+ Y_VERIFY_S(PortionGranule[rec.Portion] == rec.Granule, "ReadMetadata: " << *ReadMetadata);
}
++batchNo;
}
if (inGranulesOrder) {
- for (auto& granuleInfo : ReadMetadata->SelectInfo->Granules) {
+ for (auto& granuleInfo : ReadMetadata->SelectInfo->Granules) {
ui64 granule = granuleInfo.Granule;
- Y_VERIFY_S(GranuleWaits.count(granule), "ReadMetadata: " << *ReadMetadata);
- if (ReadMetadata->IsAscSorted()) {
+ Y_VERIFY_S(GranuleWaits.count(granule), "ReadMetadata: " << *ReadMetadata);
+ if (ReadMetadata->IsAscSorted()) {
GranulesOutOrder.push_back(granule);
- } else if (ReadMetadata->IsDescSorted()) {
+ } else if (ReadMetadata->IsDescSorted()) {
GranulesOutOrder.push_front(granule);
}
}
@@ -208,26 +208,26 @@ THashMap<TBlobRange, ui64> TIndexedReadData::InitRead(ui32 inputBatch, bool inGr
TsGranules.emplace(0, 0);
}
- auto& stats = ReadMetadata->ReadStats;
+ auto& stats = ReadMetadata->ReadStats;
stats->IndexGranules = GranuleWaits.size();
stats->IndexPortions = PortionGranule.size();
- stats->IndexBatches = ReadMetadata->NumIndexedBlobs();
- stats->CommittedBatches = ReadMetadata->CommittedBlobs.size();
- stats->UsedColumns = ReadMetadata->LoadSchema->num_fields();
+ stats->IndexBatches = ReadMetadata->NumIndexedBlobs();
+ stats->CommittedBatches = ReadMetadata->CommittedBlobs.size();
+ stats->UsedColumns = ReadMetadata->LoadSchema->num_fields();
stats->DataBytes = dataBytes;
return out;
}
-void TIndexedReadData::AddIndexedColumn(const TBlobRange& blobRange, const TString& column) {
- Y_VERIFY(IndexedBlobs.count(blobRange));
- ui32 batchNo = IndexedBlobs[blobRange];
+void TIndexedReadData::AddIndexedColumn(const TBlobRange& blobRange, const TString& column) {
+ Y_VERIFY(IndexedBlobs.count(blobRange));
+ ui32 batchNo = IndexedBlobs[blobRange];
if (!WaitIndexed.count(batchNo)) {
return;
}
auto& waitingFor = WaitIndexed[batchNo];
- waitingFor.erase(blobRange);
+ waitingFor.erase(blobRange);
- Data[blobRange] = column;
+ Data[blobRange] = column;
if (waitingFor.empty()) {
WaitIndexed.erase(batchNo);
@@ -239,14 +239,14 @@ void TIndexedReadData::AddIndexedColumn(const TBlobRange& blobRange, const TStri
std::shared_ptr<arrow::RecordBatch> TIndexedReadData::AssembleIndexedBatch(ui32 batchNo) {
auto& portionInfo = Portion(batchNo);
- auto portion = portionInfo.Assemble(ReadMetadata->IndexInfo, ReadMetadata->LoadSchema, Data);
+ auto portion = portionInfo.Assemble(ReadMetadata->IndexInfo, ReadMetadata->LoadSchema, Data);
Y_VERIFY(portion);
- auto batch = NOlap::FilterPortion(portion, *ReadMetadata);
+ auto batch = NOlap::FilterPortion(portion, *ReadMetadata);
Y_VERIFY(batch);
for (auto& rec : portionInfo.Records) {
- auto& blobRange = rec.BlobRange;
- Data.erase(blobRange);
+ auto& blobRange = rec.BlobRange;
+ Data.erase(blobRange);
}
return batch;
@@ -266,26 +266,26 @@ void TIndexedReadData::UpdateGranuleWaits(ui32 batchNo) {
std::shared_ptr<arrow::RecordBatch>
TIndexedReadData::MakeNotIndexedBatch(const TString& blob) const {
- auto batch = NArrow::DeserializeBatch(blob, ReadMetadata->BlobSchema);
+ auto batch = NArrow::DeserializeBatch(blob, ReadMetadata->BlobSchema);
/// @note It adds special columns (planStep, txId) with NULL values
- batch = NArrow::ExtractColumns(batch, ReadMetadata->LoadSchema, true);
+ batch = NArrow::ExtractColumns(batch, ReadMetadata->LoadSchema, true);
Y_VERIFY(batch);
{ // Apply predicate
- // TODO: Extract this into a function
+ // TODO: Extract this into a function
std::vector<bool> less;
- if (ReadMetadata->LessPredicate) {
- auto cmpType = ReadMetadata->LessPredicate->Inclusive ?
+ if (ReadMetadata->LessPredicate) {
+ auto cmpType = ReadMetadata->LessPredicate->Inclusive ?
NArrow::ECompareType::LESS_OR_EQUAL : NArrow::ECompareType::LESS;
- less = NArrow::MakePredicateFilter(batch, ReadMetadata->LessPredicate->Batch, cmpType);
+ less = NArrow::MakePredicateFilter(batch, ReadMetadata->LessPredicate->Batch, cmpType);
}
std::vector<bool> greater;
- if (ReadMetadata->GreaterPredicate) {
- auto cmpType = ReadMetadata->GreaterPredicate->Inclusive ?
+ if (ReadMetadata->GreaterPredicate) {
+ auto cmpType = ReadMetadata->GreaterPredicate->Inclusive ?
NArrow::ECompareType::GREATER_OR_EQUAL : NArrow::ECompareType::GREATER;
- greater = NArrow::MakePredicateFilter(batch, ReadMetadata->GreaterPredicate->Batch, cmpType);
+ greater = NArrow::MakePredicateFilter(batch, ReadMetadata->GreaterPredicate->Batch, cmpType);
}
std::vector<bool> bits = NArrow::CombineFilters(std::move(less), std::move(greater));
@@ -301,7 +301,7 @@ TIndexedReadData::MakeNotIndexedBatch(const TString& blob) const {
return batch;
}
-TVector<TPartialReadResult> TIndexedReadData::GetReadyResults(const int64_t maxRowsInBatch) {
+TVector<TPartialReadResult> TIndexedReadData::GetReadyResults(const int64_t maxRowsInBatch) {
if (NotIndexed.size() != ReadyNotIndexed) {
// Wait till we have all not indexed data so we could replace keys in granules
return {};
@@ -342,9 +342,9 @@ TVector<TPartialReadResult> TIndexedReadData::GetReadyResults(const int64_t maxR
// Extract ready to out granules: ready granules that are not blocked by other (not ready) granules
bool requireResult = !HasIndexRead(); // not indexed or the last indexed read (even if it's empty)
- auto out = MakeResult(ReadyToOut(), maxRowsInBatch);
+ auto out = MakeResult(ReadyToOut(), maxRowsInBatch);
if (requireResult && out.empty()) {
- out.push_back({NArrow::MakeEmptyBatch(ReadMetadata->ResultSchema), nullptr});
+ out.push_back({NArrow::MakeEmptyBatch(ReadMetadata->ResultSchema), nullptr});
}
return out;
}
@@ -378,7 +378,7 @@ TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>> TIndexedReadData::Read
out.reserve(ReadyGranules.size() + 1);
// Prepend not indexed data (less than the first granule) before granules for ASC sorting
- if (ReadMetadata->IsAscSorted() && OutNotIndexed.count(0)) {
+ if (ReadMetadata->IsAscSorted() && OutNotIndexed.count(0)) {
out.push_back({});
out.back().push_back(OutNotIndexed[0]);
OutNotIndexed.erase(0);
@@ -407,14 +407,14 @@ TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>> TIndexedReadData::Read
}
if (inGranule.empty()) {
- inGranule.push_back(NArrow::MakeEmptyBatch(ReadMetadata->ResultSchema));
+ inGranule.push_back(NArrow::MakeEmptyBatch(ReadMetadata->ResultSchema));
}
out.push_back(std::move(inGranule));
ReadyGranules.erase(granule);
}
// Append not indexed data (less than the first granule) after granules for DESC sorting
- if (ReadMetadata->IsDescSorted() && GranulesOutOrder.empty() && OutNotIndexed.count(0)) {
+ if (ReadMetadata->IsDescSorted() && GranulesOutOrder.empty() && OutNotIndexed.count(0)) {
out.push_back({});
out.back().push_back(OutNotIndexed[0]);
OutNotIndexed.erase(0);
@@ -425,7 +425,7 @@ TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>> TIndexedReadData::Read
THashMap<ui64, std::shared_ptr<arrow::RecordBatch>>
TIndexedReadData::SplitByGranules(const std::vector<std::shared_ptr<arrow::RecordBatch>>& batches) const {
- Y_VERIFY(ReadMetadata->IsSorted());
+ Y_VERIFY(ReadMetadata->IsSorted());
Y_VERIFY(IndexInfo().GetSortingKey());
// Reorder source for "last stream's equal wins" in MergingSorted stream.
@@ -452,21 +452,21 @@ TIndexedReadData::SplitByGranules(const std::vector<std::shared_ptr<arrow::Recor
return SliceIntoGranules(merged, TsGranules, indexInfo);
}
-TVector<TPartialReadResult>
-TIndexedReadData::MakeResult(TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>>&& granules, const int64_t maxRowsInBatch) const {
- /// @warning The replace logic is correct only under the assumption that the predicate is applied over a part of ReplaceKey.
+TVector<TPartialReadResult>
+TIndexedReadData::MakeResult(TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>>&& granules, const int64_t maxRowsInBatch) const {
+ /// @warning The replace logic is correct only under the assumption that the predicate is applied over a part of ReplaceKey.
/// It's not OK to apply predicate before replacing key duplicates otherwise.
/// Assumption: dup(A, B) <=> PK(A) = PK(B) => Predicate(A) = Predicate(B) => all or no dups for PK(A) here
- Y_VERIFY(ReadMetadata->IsSorted());
+ Y_VERIFY(ReadMetadata->IsSorted());
Y_VERIFY(SortReplaceDescription);
- TVector<TPartialReadResult> out;
+ TVector<TPartialReadResult> out;
- bool isDesc = ReadMetadata->IsDescSorted();
+ bool isDesc = ReadMetadata->IsDescSorted();
for (auto& vec : granules) {
- auto batches = SpecialMergeSorted(vec, IndexInfo(), SortReplaceDescription, maxRowsInBatch);
+ auto batches = SpecialMergeSorted(vec, IndexInfo(), SortReplaceDescription, maxRowsInBatch);
if (batches.empty()) {
continue;
}
@@ -486,25 +486,25 @@ TIndexedReadData::MakeResult(TVector<std::vector<std::shared_ptr<arrow::RecordBa
for (auto& batch : batches) {
Y_VERIFY_DEBUG(NArrow::IsSortedAndUnique(batch, IndexInfo().GetReplaceKey(), isDesc));
-
- if (batch->num_rows() == 0) {
- Y_VERIFY_DEBUG(false, "Unexpected empty batch");
- continue;
- }
-
- // Extract the last row's PK
- auto keyBatch = NArrow::ExtractColumns(batch, IndexInfo().GetReplaceKey());
- auto lastKey = keyBatch->Slice(keyBatch->num_rows()-1, 1);
-
- // Leave only requested columns
- auto resultBatch = NArrow::ExtractColumns(batch, ReadMetadata->ResultSchema);
- out.emplace_back(TPartialReadResult{std::move(resultBatch), std::move(lastKey)});
+
+ if (batch->num_rows() == 0) {
+ Y_VERIFY_DEBUG(false, "Unexpected empty batch");
+ continue;
+ }
+
+ // Extract the last row's PK
+ auto keyBatch = NArrow::ExtractColumns(batch, IndexInfo().GetReplaceKey());
+ auto lastKey = keyBatch->Slice(keyBatch->num_rows()-1, 1);
+
+ // Leave only requested columns
+ auto resultBatch = NArrow::ExtractColumns(batch, ReadMetadata->ResultSchema);
+ out.emplace_back(TPartialReadResult{std::move(resultBatch), std::move(lastKey)});
}
}
- if (ReadMetadata->HasProgram()) {
+ if (ReadMetadata->HasProgram()) {
for (auto& batch : out) {
- ApplyProgram(batch.ResultBatch, ReadMetadata->Program);
+ ApplyProgram(batch.ResultBatch, ReadMetadata->Program);
}
}
return out;
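SliceBatch earlier in this file cuts one large record batch into pieces of at most maxRowsInBatch rows and appends them to an output vector. The slicing arithmetic on its own, with std::vector<int> standing in for arrow::RecordBatch; the real code slices without copying via RecordBatch::Slice:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Append slices of `batch`, each at most maxRowsInBatch rows, to `result`.
    void SliceBatch(const std::vector<int>& batch, int64_t maxRowsInBatch,
                    std::vector<std::vector<int>>& result) {
        int64_t offset = 0;
        const int64_t numRows = static_cast<int64_t>(batch.size());
        while (offset < numRows) {
            int64_t rows = std::min<int64_t>(maxRowsInBatch, numRows - offset);
            result.emplace_back(batch.begin() + offset, batch.begin() + offset + rows);
            offset += rows;
        }
    }

    int main() {
        std::vector<int> batch(10);
        std::vector<std::vector<int>> out;
        SliceBatch(batch, /*maxRowsInBatch=*/4, out);
        std::cout << out.size() << " slices\n";   // 3 slices: 4 + 4 + 2 rows
    }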
diff --git a/ydb/core/tx/columnshard/engines/indexed_read_data.h b/ydb/core/tx/columnshard/engines/indexed_read_data.h
index a43a97fbbf2..938c141fbc0 100644
--- a/ydb/core/tx/columnshard/engines/indexed_read_data.h
+++ b/ydb/core/tx/columnshard/engines/indexed_read_data.h
@@ -4,11 +4,11 @@
#include "predicate.h"
namespace NKikimr::NColumnShard {
-class TScanIteratorBase;
+class TScanIteratorBase;
}
-
+
namespace NKikimr::NOlap {
-
+
struct TReadStats {
TInstant BeginTimestamp;
ui32 SelectedIndex{0};
@@ -30,18 +30,18 @@ struct TReadStats {
};
// Holds all metadata that is needed to perform read/scan
-struct TReadMetadataBase {
- using TPtr = std::shared_ptr<TReadMetadataBase>;
- using TConstPtr = std::shared_ptr<const TReadMetadataBase>;
-
+struct TReadMetadataBase {
+ using TPtr = std::shared_ptr<TReadMetadataBase>;
+ using TConstPtr = std::shared_ptr<const TReadMetadataBase>;
+
enum class ESorting {
NONE = 0,
ASC,
DESC,
};
- virtual ~TReadMetadataBase() = default;
-
+ virtual ~TReadMetadataBase() = default;
+
std::shared_ptr<NOlap::TPredicate> LessPredicate;
std::shared_ptr<NOlap::TPredicate> GreaterPredicate;
std::shared_ptr<arrow::Schema> BlobSchema;
@@ -51,42 +51,42 @@ struct TReadMetadataBase {
ESorting Sorting{ESorting::ASC}; // Sorting inside returned batches
ui64 Limit{0}; // TODO
- bool IsAscSorted() const { return Sorting == ESorting::ASC; }
- bool IsDescSorted() const { return Sorting == ESorting::DESC; }
- bool IsSorted() const { return IsAscSorted() || IsDescSorted(); }
- void SetDescSorting() { Sorting = ESorting::DESC; }
-
- virtual TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const = 0;
- virtual TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const = 0;
- virtual std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const = 0;
- virtual void Dump(IOutputStream& out) const { Y_UNUSED(out); };
-
- bool HasProgram() const {
- return !Program.empty();
- }
-
- // TODO: can this only be done for base class?
- friend IOutputStream& operator << (IOutputStream& out, const TReadMetadataBase& meta) {
- meta.Dump(out);
- return out;
- }
-};
-
-// Holds all metadata that is needed to perform read/scan
-struct TReadMetadata : public TReadMetadataBase, public std::enable_shared_from_this<TReadMetadata> {
- using TPtr = std::shared_ptr<TReadMetadata>;
- using TConstPtr = std::shared_ptr<const TReadMetadata>;
-
- TIndexInfo IndexInfo;
- ui64 PlanStep = 0;
- ui64 TxId = 0;
- std::shared_ptr<TSelectInfo> SelectInfo;
- TVector<TUnifiedBlobId> CommittedBlobs;
- std::shared_ptr<TReadStats> ReadStats;
-
+ bool IsAscSorted() const { return Sorting == ESorting::ASC; }
+ bool IsDescSorted() const { return Sorting == ESorting::DESC; }
+ bool IsSorted() const { return IsAscSorted() || IsDescSorted(); }
+ void SetDescSorting() { Sorting = ESorting::DESC; }
+
+ virtual TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const = 0;
+ virtual TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const = 0;
+ virtual std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const = 0;
+ virtual void Dump(IOutputStream& out) const { Y_UNUSED(out); };
+
+ bool HasProgram() const {
+ return !Program.empty();
+ }
+
+ // TODO: can this only be done for base class?
+ friend IOutputStream& operator << (IOutputStream& out, const TReadMetadataBase& meta) {
+ meta.Dump(out);
+ return out;
+ }
+};
+
+// Holds all metadata that is needed to perform read/scan
+struct TReadMetadata : public TReadMetadataBase, public std::enable_shared_from_this<TReadMetadata> {
+ using TPtr = std::shared_ptr<TReadMetadata>;
+ using TConstPtr = std::shared_ptr<const TReadMetadata>;
+
+ TIndexInfo IndexInfo;
+ ui64 PlanStep = 0;
+ ui64 TxId = 0;
+ std::shared_ptr<TSelectInfo> SelectInfo;
+ TVector<TUnifiedBlobId> CommittedBlobs;
+ std::shared_ptr<TReadStats> ReadStats;
+
TReadMetadata(const TIndexInfo& info)
: IndexInfo(info)
- , ReadStats(std::make_shared<TReadStats>(info.GetId()))
+ , ReadStats(std::make_shared<TReadStats>(info.GetId()))
{}
bool Empty() const {
@@ -102,7 +102,7 @@ struct TReadMetadata : public TReadMetadataBase, public std::enable_shared_from_
return IndexInfo.GetReplaceKey();
}
- TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const override {
+ TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const override {
TVector<NTable::TTag> columnIds;
columnIds.reserve(ResultSchema->num_fields());
for (const auto& field: ResultSchema->fields()) {
@@ -112,9 +112,9 @@ struct TReadMetadata : public TReadMetadataBase, public std::enable_shared_from_
return IndexInfo.GetColumns(columnIds);
}
- TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const override {
- return IndexInfo.GetPK();
- }
+ TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const override {
+ return IndexInfo.GetPK();
+ }
size_t NumIndexedRecords() const {
Y_VERIFY(SelectInfo);
@@ -126,75 +126,75 @@ struct TReadMetadata : public TReadMetadataBase, public std::enable_shared_from_
return SelectInfo->Stats().Blobs;
}
- std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const override;
-
- void Dump(IOutputStream& out) const override {
- out << "columns: " << (LoadSchema ? LoadSchema->num_fields() : 0)
- << " index records: " << NumIndexedRecords()
- << " index blobs: " << NumIndexedBlobs()
- << " committed blobs: " << CommittedBlobs.size()
- << " with program steps: " << Program.size()
- << (Sorting == ESorting::NONE ? " not" : (Sorting == ESorting::ASC ? " asc" : " desc"))
- << " sorted, at snapshot: " << PlanStep << ":" << TxId;
- if (GreaterPredicate) {
- out << " from{" << *GreaterPredicate << "}";
+ std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const override;
+
+ void Dump(IOutputStream& out) const override {
+ out << "columns: " << (LoadSchema ? LoadSchema->num_fields() : 0)
+ << " index records: " << NumIndexedRecords()
+ << " index blobs: " << NumIndexedBlobs()
+ << " committed blobs: " << CommittedBlobs.size()
+ << " with program steps: " << Program.size()
+ << (Sorting == ESorting::NONE ? " not" : (Sorting == ESorting::ASC ? " asc" : " desc"))
+ << " sorted, at snapshot: " << PlanStep << ":" << TxId;
+ if (GreaterPredicate) {
+ out << " from{" << *GreaterPredicate << "}";
}
- if (LessPredicate) {
- out << " to{" << *LessPredicate << "}";
+ if (LessPredicate) {
+ out << " to{" << *LessPredicate << "}";
}
- if (SelectInfo) {
- out << ", " << *SelectInfo;
+ if (SelectInfo) {
+ out << ", " << *SelectInfo;
}
- }
-
- friend IOutputStream& operator << (IOutputStream& out, const TReadMetadata& meta) {
- meta.Dump(out);
+ }
+
+ friend IOutputStream& operator << (IOutputStream& out, const TReadMetadata& meta) {
+ meta.Dump(out);
return out;
}
};
-struct TReadStatsMetadata : public TReadMetadataBase, public std::enable_shared_from_this<TReadStatsMetadata> {
- using TPtr = std::shared_ptr<TReadStatsMetadata>;
- using TConstPtr = std::shared_ptr<const TReadStatsMetadata>;
-
- const ui64 TabletId;
- TVector<ui32> ReadColumnIds;
- TVector<ui32> ResultColumnIds;
- THashMap<ui64, std::shared_ptr<NOlap::TColumnEngineStats>> IndexStats;
-
- explicit TReadStatsMetadata(ui64 tabletId)
- : TabletId(tabletId)
- {}
-
- TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const override;
-
- TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const override;
-
- std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const override;
-};
-
-// Represents a batch of rows produced by ASC or DESC scan with applied filters and partial aggregation
-struct TPartialReadResult {
- std::shared_ptr<arrow::RecordBatch> ResultBatch;
-
- // This 1-row batch contains the last key that was read while producing the ResultBatch.
- // NOTE: it might be different from the key of the last row in ResultBatch in case of filtering/aggregation/limit
- std::shared_ptr<arrow::RecordBatch> LastReadKey;
-};
-
+struct TReadStatsMetadata : public TReadMetadataBase, public std::enable_shared_from_this<TReadStatsMetadata> {
+ using TPtr = std::shared_ptr<TReadStatsMetadata>;
+ using TConstPtr = std::shared_ptr<const TReadStatsMetadata>;
+
+ const ui64 TabletId;
+ TVector<ui32> ReadColumnIds;
+ TVector<ui32> ResultColumnIds;
+ THashMap<ui64, std::shared_ptr<NOlap::TColumnEngineStats>> IndexStats;
+
+ explicit TReadStatsMetadata(ui64 tabletId)
+ : TabletId(tabletId)
+ {}
+
+ TVector<std::pair<TString, NScheme::TTypeId>> GetResultYqlSchema() const override;
+
+ TVector<std::pair<TString, NScheme::TTypeId>> GetKeyYqlSchema() const override;
+
+ std::unique_ptr<NColumnShard::TScanIteratorBase> StartScan() const override;
+};
+
+// Represents a batch of rows produced by ASC or DESC scan with applied filters and partial aggregation
+struct TPartialReadResult {
+ std::shared_ptr<arrow::RecordBatch> ResultBatch;
+
+ // This 1-row batch contains the last key that was read while producing the ResultBatch.
+ // NOTE: it might be different from the key of the last row in ResultBatch in case of filtering/aggregation/limit
+ std::shared_ptr<arrow::RecordBatch> LastReadKey;
+};
+
class TIndexedReadData {
public:
- TIndexedReadData(NOlap::TReadMetadata::TConstPtr readMetadata)
+ TIndexedReadData(NOlap::TReadMetadata::TConstPtr readMetadata)
: ReadMetadata(readMetadata)
{
- Y_VERIFY(ReadMetadata->SelectInfo);
+ Y_VERIFY(ReadMetadata->SelectInfo);
}
/// @returns blobId -> granule map. Granules could be read independently
- THashMap<TBlobRange, ui64> InitRead(ui32 numNotIndexed, bool inGranulesOrder = false);
+ THashMap<TBlobRange, ui64> InitRead(ui32 numNotIndexed, bool inGranulesOrder = false);
- /// @returns batches and corresponding last keys in correct order (i.e. sorted by PK)
- TVector<TPartialReadResult> GetReadyResults(const int64_t maxRowsInBatch);
+ /// @returns batches and corresponding last keys in correct order (i.e. sorted by PK)
+ TVector<TPartialReadResult> GetReadyResults(const int64_t maxRowsInBatch);
void AddNotIndexed(ui32 batchNo, TString serializedBach) {
Y_VERIFY(batchNo < NotIndexed.size());
@@ -204,18 +204,18 @@ public:
NotIndexed[batchNo] = MakeNotIndexedBatch(serializedBach);
}
- void AddIndexedColumn(const TBlobRange& blobRange, const TString& column);
+ void AddIndexedColumn(const TBlobRange& blobRange, const TString& column);
size_t NumPortions() const { return PortionBatch.size(); }
bool HasIndexRead() const { return WaitIndexed.size() || Indexed.size(); }
private:
- NOlap::TReadMetadata::TConstPtr ReadMetadata;
+ NOlap::TReadMetadata::TConstPtr ReadMetadata;
ui32 FirstIndexedBatch{0};
- THashMap<TBlobRange, TString> Data;
+ THashMap<TBlobRange, TString> Data;
std::vector<std::shared_ptr<arrow::RecordBatch>> NotIndexed;
THashMap<ui32, std::shared_ptr<arrow::RecordBatch>> Indexed;
- THashMap<ui32, THashSet<TBlobRange>> WaitIndexed;
- THashMap<TBlobRange, ui32> IndexedBlobs; // blobId -> batchNo
+ THashMap<ui32, THashSet<TBlobRange>> WaitIndexed;
+ THashMap<TBlobRange, ui32> IndexedBlobs; // blobId -> batchNo
ui32 ReadyNotIndexed{0};
THashMap<ui64, std::shared_ptr<arrow::RecordBatch>> OutNotIndexed; // granule -> not indexed to append
THashMap<ui64, TMap<ui64, std::shared_ptr<arrow::RecordBatch>>> ReadyGranules; // granule -> portions
@@ -229,12 +229,12 @@ private:
std::shared_ptr<NArrow::TSortDescription> SortReplaceDescription;
const TIndexInfo& IndexInfo() const {
- return ReadMetadata->IndexInfo;
+ return ReadMetadata->IndexInfo;
}
const TPortionInfo& Portion(ui32 batchNo) const {
Y_VERIFY(batchNo >= FirstIndexedBatch);
- return ReadMetadata->SelectInfo->Portions[batchNo - FirstIndexedBatch];
+ return ReadMetadata->SelectInfo->Portions[batchNo - FirstIndexedBatch];
}
ui64 BatchGranule(ui32 batchNo) const {
@@ -250,9 +250,9 @@ private:
THashMap<ui64, std::shared_ptr<arrow::RecordBatch>> SplitByGranules(
const std::vector<std::shared_ptr<arrow::RecordBatch>>& batches) const;
TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>> ReadyToOut();
- TVector<TPartialReadResult> MakeResult(
- TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>>&& granules,
- const int64_t maxRowsInBatch) const;
+ TVector<TPartialReadResult> MakeResult(
+ TVector<std::vector<std::shared_ptr<arrow::RecordBatch>>>&& granules,
+ const int64_t maxRowsInBatch) const;
};
}
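
The TIndexedReadData interface above is driven in two phases: InitRead() announces which blob ranges are needed, and GetReadyResults() later yields TPartialReadResult batches sorted by PK. Below is a minimal synchronous sketch of a caller, under stated assumptions: ProcessBatch(), RememberResumeKey() and the blob fetcher are hypothetical stand-ins for the asynchronous machinery in the read actor.

    #include <ydb/core/tx/columnshard/engines/indexed_read_data.h>

    // Hypothetical consumers of the results; declared here only to keep the sketch self-contained.
    void ProcessBatch(const std::shared_ptr<arrow::RecordBatch>& batch);
    void RememberResumeKey(const std::shared_ptr<arrow::RecordBatch>& lastReadKey);

    template <class TBlobFetcher> // fetchBlob(blobRange) -> TString with the raw blob bytes
    void ConsumeIndexedRead(NKikimr::NOlap::TReadMetadata::TConstPtr readMetadata, TBlobFetcher&& fetchBlob) {
        NKikimr::NOlap::TIndexedReadData indexedData(readMetadata);

        // Phase 1: learn which blob ranges are required (no committed, not-yet-indexed batches in this sketch).
        auto blobToGranule = indexedData.InitRead(0);

        // Feed every required blob range; in the real read path this data arrives
        // asynchronously from the blob cache actor.
        for (const auto& [blobRange, granule] : blobToGranule) {
            Y_UNUSED(granule);
            indexedData.AddIndexedColumn(blobRange, fetchBlob(blobRange));
        }

        // Phase 2: batches come back sorted by PK. LastReadKey is the last key actually scanned,
        // which can differ from the key of the last row in ResultBatch when trailing rows were
        // dropped by filtering, aggregation or a limit.
        for (auto& result : indexedData.GetReadyResults(Max<i64>())) {
            ProcessBatch(result.ResultBatch);
            RememberResumeKey(result.LastReadKey); // e.g. as a resume point for paged scans
        }
    }

In the actual TReadActor further down in this patch the same calls are interleaved with TEvReadBlobRangeResult events instead of running in a single loop.
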
diff --git a/ydb/core/tx/columnshard/engines/insert_table.cpp b/ydb/core/tx/columnshard/engines/insert_table.cpp
index 3fdcdbc45df..f0edbdf05e4 100644
--- a/ydb/core/tx/columnshard/engines/insert_table.cpp
+++ b/ydb/core/tx/columnshard/engines/insert_table.cpp
@@ -20,12 +20,12 @@ bool TInsertTable::Insert(IDbWrapper& dbTable, const TInsertedData& data) {
TInsertTable::TCounters TInsertTable::Commit(IDbWrapper& dbTable, ui64 planStep, ui64 txId, ui64 metaShard,
const THashSet<TWriteId>& writeIds) {
Y_VERIFY(!writeIds.empty());
- Y_UNUSED(metaShard);
+ Y_UNUSED(metaShard);
TCounters counters;
- for (auto writeId : writeIds) {
- auto* data = Inserted.FindPtr(writeId);
- Y_VERIFY(data, "Commit %" PRIu64 ":%" PRIu64 " : writeId %" PRIu64 " not found", planStep, txId, (ui64)writeId);
+ for (auto writeId : writeIds) {
+ auto* data = Inserted.FindPtr(writeId);
+ Y_VERIFY(data, "Commit %" PRIu64 ":%" PRIu64 " : writeId %" PRIu64 " not found", planStep, txId, (ui64)writeId);
NKikimrTxColumnShard::TLogicalMetadata meta;
if (meta.ParseFromString(data->Metadata)) {
@@ -40,7 +40,7 @@ TInsertTable::TCounters TInsertTable::Commit(IDbWrapper& dbTable, ui64 planStep,
dbTable.Commit(*data);
CommittedByPathId[data->PathId].emplace(std::move(*data));
- Inserted.erase(writeId);
+ Inserted.erase(writeId);
}
return counters;
@@ -134,7 +134,7 @@ void TInsertTable::EraseCommitted(IDbWrapper& dbTable, const TInsertedData& data
}
dbTable.EraseCommitted(data);
- CommittedByPathId[data.PathId].erase(data);
+ CommittedByPathId[data.PathId].erase(data);
}
void TInsertTable::EraseAborted(IDbWrapper& dbTable, const TInsertedData& data) {
@@ -156,16 +156,16 @@ bool TInsertTable::Load(IDbWrapper& dbTable, const TInstant& loadTime) {
}
/// @note It must be stable
-TVector<TUnifiedBlobId> TInsertTable::Read(ui64 pathId, ui64 plan, ui64 txId) const {
- const auto* committed = CommittedByPathId.FindPtr(pathId);
-
- if (!committed)
- return {};
-
- TVector<TUnifiedBlobId> ret;
- ret.reserve(committed->size() / 2);
-
- for (auto& data : *committed) {
+TVector<TUnifiedBlobId> TInsertTable::Read(ui64 pathId, ui64 plan, ui64 txId) const {
+ const auto* committed = CommittedByPathId.FindPtr(pathId);
+
+ if (!committed)
+ return {};
+
+ TVector<TUnifiedBlobId> ret;
+ ret.reserve(committed->size() / 2);
+
+ for (auto& data : *committed) {
if (snapLessOrEqual(data.ShardOrPlan, data.WriteTxId, plan, txId)) {
ret.push_back(data.BlobId);
}
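
TInsertTable::Read() above filters committed blobs by snapshot via snapLessOrEqual(); for committed rows ShardOrPlan/WriteTxId appear to hold the commit (planStep, txId), so the check is presumably a lexicographic comparison. The helper below is only an illustrative sketch of that presumed semantics; the real snapLessOrEqual() lives elsewhere in the engine.

    #include <util/system/types.h>
    #include <utility>

    // Illustrative only: presumed semantics of the snapshot visibility check used by
    // TInsertTable::Read(). A blob committed at (dataStep, dataTxId) is visible to a read
    // at snapshot (readStep, readTxId) iff its commit snapshot is not newer than the read snapshot.
    inline bool SnapLessOrEqualSketch(ui64 dataStep, ui64 dataTxId, ui64 readStep, ui64 readTxId) {
        return std::make_pair(dataStep, dataTxId) <= std::make_pair(readStep, readTxId);
    }

This matches the unit test later in this patch: reads at snapshot (0, 0) see nothing, while a read at the commit snapshot (100, 42) returns the single committed blob.
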
diff --git a/ydb/core/tx/columnshard/engines/insert_table.h b/ydb/core/tx/columnshard/engines/insert_table.h
index 231ef2cddc0..72955a170aa 100644
--- a/ydb/core/tx/columnshard/engines/insert_table.h
+++ b/ydb/core/tx/columnshard/engines/insert_table.h
@@ -2,23 +2,23 @@
#include <ydb/core/tx/columnshard/blob.h>
#include <util/generic/set.h>
-#include "defs.h"
-
+#include "defs.h"
+
namespace NKikimr::NOlap {
struct TInsertedData {
- ui64 ShardOrPlan = 0;
- ui64 WriteTxId = 0;
- ui64 PathId = 0;
+ ui64 ShardOrPlan = 0;
+ ui64 WriteTxId = 0;
+ ui64 PathId = 0;
TString DedupId;
- TUnifiedBlobId BlobId;
+ TUnifiedBlobId BlobId;
TString Metadata;
TInstant DirtyTime;
- TInsertedData() = default;
-
- TInsertedData(ui64 shardOrPlan, ui64 writeTxId, ui64 pathId, TString dedupId, const TUnifiedBlobId& blobId,
- const TString& meta, const TInstant& writeTime)
+ TInsertedData() = default;
+
+ TInsertedData(ui64 shardOrPlan, ui64 writeTxId, ui64 pathId, TString dedupId, const TUnifiedBlobId& blobId,
+ const TString& meta, const TInstant& writeTime)
: ShardOrPlan(shardOrPlan)
, WriteTxId(writeTxId)
, PathId(pathId)
@@ -109,19 +109,19 @@ public:
void EraseInserted(IDbWrapper& dbTable, const TInsertedData& key);
void EraseCommitted(IDbWrapper& dbTable, const TInsertedData& key);
void EraseAborted(IDbWrapper& dbTable, const TInsertedData& key);
- TVector<TUnifiedBlobId> Read(ui64 pathId, ui64 plan, ui64 txId) const;
+ TVector<TUnifiedBlobId> Read(ui64 pathId, ui64 plan, ui64 txId) const;
bool Load(IDbWrapper& dbTable, const TInstant& loadTime);
void GetCounters(TCounters& prepared, TCounters& committed) const;
size_t InsertedSize() const { return Inserted.size(); }
- const THashMap<ui64, TSet<TInsertedData>>& GetCommitted() const { return CommittedByPathId; }
+ const THashMap<ui64, TSet<TInsertedData>>& GetCommitted() const { return CommittedByPathId; }
const THashMap<TWriteId, TInsertedData>& GetAborted() const { return Aborted; }
void SetOverloaded(ui64 pathId, bool overload);
bool IsOverloaded(ui64 pathId) const { return PathsOverloaded.count(pathId); }
private:
- THashMap<TWriteId, TInsertedData> Inserted;
- THashMap<ui64, TSet<TInsertedData>> CommittedByPathId;
+ THashMap<TWriteId, TInsertedData> Inserted;
+ THashMap<ui64, TSet<TInsertedData>> CommittedByPathId;
THashMap<TWriteId, TInsertedData> Aborted;
THashSet<ui64> PathsOverloaded;
TInstant LastCleanup;
diff --git a/ydb/core/tx/columnshard/engines/portion_info.cpp b/ydb/core/tx/columnshard/engines/portion_info.cpp
index 911f420b8ed..e67ab39605d 100644
--- a/ydb/core/tx/columnshard/engines/portion_info.cpp
+++ b/ydb/core/tx/columnshard/engines/portion_info.cpp
@@ -59,9 +59,9 @@ TString TPortionInfo::AddOneChunkColumn(const std::shared_ptr<arrow::Array>& arr
std::shared_ptr<arrow::Table> TPortionInfo::Assemble(const TIndexInfo& indexInfo,
const std::shared_ptr<arrow::Schema>& schema,
- const THashMap<TBlobRange, TString>& blobsData) const {
+ const THashMap<TBlobRange, TString>& blobsData) const {
// Correct records order
- TMap<int, TMap<ui32, TBlobRange>> columnChunks; // position in schema -> ordered chunks
+ TMap<int, TMap<ui32, TBlobRange>> columnChunks; // position in schema -> ordered chunks
for (auto& rec : Records) {
ui32 columnId = rec.ColumnId;
@@ -72,7 +72,7 @@ std::shared_ptr<arrow::Table> TPortionInfo::Assemble(const TIndexInfo& indexInfo
continue; // no such column in schema - do not need it
}
- columnChunks[pos][rec.Chunk] = rec.BlobRange;
+ columnChunks[pos][rec.Chunk] = rec.BlobRange;
}
// Make chunked arrays for columns
@@ -102,7 +102,7 @@ std::shared_ptr<arrow::Table> TPortionInfo::Assemble(const TIndexInfo& indexInfo
std::shared_ptr<arrow::RecordBatch> TPortionInfo::AssembleInBatch(const TIndexInfo& indexInfo,
const std::shared_ptr<arrow::Schema>& schema,
- const THashMap<TBlobRange, TString>& data) const {
+ const THashMap<TBlobRange, TString>& data) const {
std::shared_ptr<arrow::Table> portion = Assemble(indexInfo, schema, data);
auto res = portion->CombineChunks();
Y_VERIFY(res.ok());
@@ -177,10 +177,10 @@ TString TPortionInfo::GetMetadata(const TColumnRecord& rec) const {
case TPortionMeta::SPLIT_COMPACTED:
meta.MutablePortionMeta()->SetIsSplitCompacted(true);
break;
- case TPortionMeta::INACTIVE:
- Y_FAIL("Unexpected inactive case");
- //meta.MutablePortionMeta()->SetInactive(true);
- break;
+ case TPortionMeta::INACTIVE:
+ Y_FAIL("Unexpected inactive case");
+ //meta.MutablePortionMeta()->SetInactive(true);
+ break;
}
}
diff --git a/ydb/core/tx/columnshard/engines/portion_info.h b/ydb/core/tx/columnshard/engines/portion_info.h
index 2f754ea319d..884610037e7 100644
--- a/ydb/core/tx/columnshard/engines/portion_info.h
+++ b/ydb/core/tx/columnshard/engines/portion_info.h
@@ -6,13 +6,13 @@
namespace NKikimr::NOlap {
struct TPortionMeta {
- // NOTE: These values are persisted in LocalDB so they must be stable
+ // NOTE: These values are persisted in LocalDB so they must be stable
enum EProduced : ui32 {
UNSPECIFIED = 0,
INSERTED = 1,
COMPACTED = 2,
SPLIT_COMPACTED = 3,
- INACTIVE = 4
+ INACTIVE = 4
};
struct TColumnMeta {
@@ -171,10 +171,10 @@ struct TPortionInfo {
std::shared_ptr<arrow::Table> Assemble(const TIndexInfo& indexInfo,
const std::shared_ptr<arrow::Schema>& schema,
- const THashMap<TBlobRange, TString>& data) const;
+ const THashMap<TBlobRange, TString>& data) const;
std::shared_ptr<arrow::RecordBatch> AssembleInBatch(const TIndexInfo& indexInfo,
const std::shared_ptr<arrow::Schema>& schema,
- const THashMap<TBlobRange, TString>& data) const;
+ const THashMap<TBlobRange, TString>& data) const;
TString AddOneChunkColumn(const std::shared_ptr<arrow::Array>& array,
const std::shared_ptr<arrow::Field>& field,
diff --git a/ydb/core/tx/columnshard/engines/predicate.h b/ydb/core/tx/columnshard/engines/predicate.h
index 17ede425a34..6b50e633e85 100644
--- a/ydb/core/tx/columnshard/engines/predicate.h
+++ b/ydb/core/tx/columnshard/engines/predicate.h
@@ -26,7 +26,7 @@ struct TPredicate {
}
std::string ToString() const {
- return Empty() ? "()" : Batch->schema()->ToString();
+ return Empty() ? "()" : Batch->schema()->ToString();
}
TPredicate() = default;
diff --git a/ydb/core/tx/columnshard/engines/ut_insert_table.cpp b/ydb/core/tx/columnshard/engines/ut_insert_table.cpp
index 7a4f5c25ee1..458b2282ec6 100644
--- a/ydb/core/tx/columnshard/engines/ut_insert_table.cpp
+++ b/ydb/core/tx/columnshard/engines/ut_insert_table.cpp
@@ -47,51 +47,51 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestInsertTable) {
ui64 writeId = 0;
ui64 tableId = 0;
TString dedupId = "0";
- TUnifiedBlobId blobId1(2222, 1, 1, 100, 1);
+ TUnifiedBlobId blobId1(2222, 1, 1, 100, 1);
TTestInsertTableDB dbTable;
TInsertTable insertTable;
         // insert, not committed
TInstant time = TInstant::Now();
- bool ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId1, {}, time));
+ bool ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId1, {}, time));
UNIT_ASSERT(ok);
-
- // insert the same blobId1 again
- ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId1, {}, time));
+
+ // insert the same blobId1 again
+ ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId1, {}, time));
+ UNIT_ASSERT(!ok);
+
+        // insert different blobId with the same writeId and dedupId
+ TUnifiedBlobId blobId2(2222, 1, 2, 100, 1);
+ ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId2, {}, time));
UNIT_ASSERT(!ok);
-        // insert different blobId with the same writeId and dedupId
- TUnifiedBlobId blobId2(2222, 1, 2, 100, 1);
- ok = insertTable.Insert(dbTable, TInsertedData(metaShard, writeId, tableId, dedupId, blobId2, {}, time));
- UNIT_ASSERT(!ok);
-
// read nothing
- auto blobs = insertTable.Read(tableId, 0, 0);
+ auto blobs = insertTable.Read(tableId, 0, 0);
+ UNIT_ASSERT_EQUAL(blobs.size(), 0);
+ blobs = insertTable.Read(tableId+1, 0, 0);
UNIT_ASSERT_EQUAL(blobs.size(), 0);
- blobs = insertTable.Read(tableId+1, 0, 0);
- UNIT_ASSERT_EQUAL(blobs.size(), 0);
// commit
ui64 planStep = 100;
ui64 txId = 42;
- insertTable.Commit(dbTable, planStep, txId, metaShard, {TWriteId{writeId}});
+ insertTable.Commit(dbTable, planStep, txId, metaShard, {TWriteId{writeId}});
auto committed = insertTable.GetCommitted();
UNIT_ASSERT_EQUAL(committed.size(), 1);
UNIT_ASSERT_EQUAL(committed.begin()->second.size(), 1);
// read old snapshot
- blobs = insertTable.Read(tableId, 0, 0);
+ blobs = insertTable.Read(tableId, 0, 0);
+ UNIT_ASSERT_EQUAL(blobs.size(), 0);
+ blobs = insertTable.Read(tableId+1, 0, 0);
UNIT_ASSERT_EQUAL(blobs.size(), 0);
- blobs = insertTable.Read(tableId+1, 0, 0);
- UNIT_ASSERT_EQUAL(blobs.size(), 0);
// read new snapshot
- blobs = insertTable.Read(tableId, planStep, txId);
+ blobs = insertTable.Read(tableId, planStep, txId);
UNIT_ASSERT_EQUAL(blobs.size(), 1);
- blobs = insertTable.Read(tableId+1, 0, 0);
- UNIT_ASSERT_EQUAL(blobs.size(), 0);
+ blobs = insertTable.Read(tableId+1, 0, 0);
+ UNIT_ASSERT_EQUAL(blobs.size(), 0);
}
}
diff --git a/ydb/core/tx/columnshard/engines/ut_logs_engine.cpp b/ydb/core/tx/columnshard/engines/ut_logs_engine.cpp
index 9bba49573df..7bdfb5b7bd7 100644
--- a/ydb/core/tx/columnshard/engines/ut_logs_engine.cpp
+++ b/ydb/core/tx/columnshard/engines/ut_logs_engine.cpp
@@ -20,7 +20,7 @@ public:
};
void Insert(const TInsertedData& data) override {
- Inserted[TWriteId{data.WriteTxId}] = data;
+ Inserted[TWriteId{data.WriteTxId}] = data;
}
void Commit(const TInsertedData& data) override {
@@ -32,11 +32,11 @@ public:
}
void EraseInserted(const TInsertedData& data) override {
- Inserted.erase(TWriteId{data.WriteTxId});
+ Inserted.erase(TWriteId{data.WriteTxId});
}
void EraseCommitted(const TInsertedData& data) override {
- Committed[data.PathId].erase(data);
+ Committed[data.PathId].erase(data);
}
void EraseAborted(const TInsertedData& data) override {
@@ -147,8 +147,8 @@ public:
}
private:
- THashMap<TWriteId, TInsertedData> Inserted;
- THashMap<ui64, TSet<TInsertedData>> Committed;
+ THashMap<TWriteId, TInsertedData> Inserted;
+ THashMap<ui64, TSet<TInsertedData>> Committed;
THashMap<TWriteId, TInsertedData> Aborted;
THashMap<ui32, TIndex> Indices;
};
@@ -235,9 +235,9 @@ private:
std::unique_ptr<arrow::RecordBatchBuilder> BatchBuilder;
};
-TBlobRange MakeBlobRange(ui32 step, ui32 blobSize) {
+TBlobRange MakeBlobRange(ui32 step, ui32 blobSize) {
// tabletId, generation, step, channel, blobSize, cookie
- return TBlobRange(TUnifiedBlobId(11111, TLogoBlobID(100500, 42, step, 3, blobSize, 0)), 0, blobSize);
+ return TBlobRange(TUnifiedBlobId(11111, TLogoBlobID(100500, 42, step, 3, blobSize, 0)), 0, blobSize);
}
TString MakeTestBlob(ui64 start = 0, ui64 end = 100) {
@@ -251,13 +251,13 @@ TString MakeTestBlob(ui64 start = 0, ui64 end = 100) {
}
void AddIdsToBlobs(const TVector<TString>& srcBlobs, TVector<TPortionInfo>& portions,
- THashMap<TBlobRange, TString>& blobs, ui32& step) {
+ THashMap<TBlobRange, TString>& blobs, ui32& step) {
ui32 pos = 0;
for (auto& portion : portions) {
for (auto& rec : portion.Records) {
- rec.BlobRange = MakeBlobRange(++step, srcBlobs[pos].size());
+ rec.BlobRange = MakeBlobRange(++step, srcBlobs[pos].size());
//UNIT_ASSERT(rec.Valid());
- blobs[rec.BlobRange] = srcBlobs[pos];
+ blobs[rec.BlobRange] = srcBlobs[pos];
++pos;
}
}
@@ -272,14 +272,14 @@ TCompactionLimits TestLimits() {
}
bool Insert(TColumnEngineForLogs& engine, TTestDbWrapper& db, TSnapshot snap,
- TVector<TInsertedData>&& dataToIndex, THashMap<TBlobRange, TString>& blobs, ui32& step) {
+ TVector<TInsertedData>&& dataToIndex, THashMap<TBlobRange, TString>& blobs, ui32& step) {
std::shared_ptr<TColumnEngineChanges> changes = engine.StartInsert(std::move(dataToIndex));
if (!changes) {
return false;
}
- changes->Blobs.insert(blobs.begin(), blobs.end());
-
+ changes->Blobs.insert(blobs.begin(), blobs.end());
+
TVector<TString> newBlobs = TColumnEngineForLogs::IndexBlobs(engine.GetIndexInfo(), changes);
UNIT_ASSERT_VALUES_EQUAL(changes->AppendedPortions.size(), 1);
@@ -372,23 +372,23 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
TString testBlob = MakeTestBlob();
- TVector<TBlobRange> blobRanges;
- blobRanges.push_back(MakeBlobRange(1, testBlob.size()));
- blobRanges.push_back(MakeBlobRange(2, testBlob.size()));
+ TVector<TBlobRange> blobRanges;
+ blobRanges.push_back(MakeBlobRange(1, testBlob.size()));
+ blobRanges.push_back(MakeBlobRange(2, testBlob.size()));
// PlanStep, TxId, PathId, DedupId, BlobId, Data, [Metadata]
TInstant writeTime = TInstant::Now();
TVector<TInsertedData> dataToIndex = {
- {1, 2, paths[0], "", blobRanges[0].BlobId, "", writeTime},
- {2, 1, paths[0], "", blobRanges[1].BlobId, "", writeTime}
+ {1, 2, paths[0], "", blobRanges[0].BlobId, "", writeTime},
+ {2, 1, paths[0], "", blobRanges[1].BlobId, "", writeTime}
};
// write
ui32 step = 1000;
- THashMap<TBlobRange, TString> blobs;
- blobs[blobRanges[0]] = testBlob;
- blobs[blobRanges[1]] = testBlob;
+ THashMap<TBlobRange, TString> blobs;
+ blobs[blobRanges[0]] = testBlob;
+ blobs[blobRanges[1]] = testBlob;
Insert(db, {1, 2}, std::move(dataToIndex), blobs, step);
// load
@@ -409,7 +409,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
ui64 planStep = 1;
ui64 txId = 0;
auto selectInfo = engine.Select(paths[0], {planStep, txId}, columnIds, {}, {});
- UNIT_ASSERT_VALUES_EQUAL(selectInfo->Granules.size(), 0);
+ UNIT_ASSERT_VALUES_EQUAL(selectInfo->Granules.size(), 0);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 0);
}
@@ -449,18 +449,18 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
// insert
ui64 planStep = 1;
- THashMap<TBlobRange, TString> blobs;
+ THashMap<TBlobRange, TString> blobs;
ui64 numRows = 1000;
ui64 rowPos = 0;
for (ui64 txId = 1; txId <= 20; ++txId, rowPos += numRows) {
TString testBlob = MakeTestBlob(rowPos, rowPos + numRows);
- auto blobRange = MakeBlobRange(++step, testBlob.size());
- blobs[blobRange] = testBlob;
+ auto blobRange = MakeBlobRange(++step, testBlob.size());
+ blobs[blobRange] = testBlob;
// PlanStep, TxId, PathId, DedupId, BlobId, Data, [Metadata]
TVector<TInsertedData> dataToIndex;
dataToIndex.push_back(
- TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
+ TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
bool ok = Insert(db, {planStep, txId}, std::move(dataToIndex), blobs, step);
UNIT_ASSERT(ok);
@@ -536,7 +536,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
// PlanStep, TxId, PathId, DedupId, BlobId, Data, [Metadata]
TVector<TInsertedData> dataToIndex;
dataToIndex.push_back(
- TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
+ TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
bool ok = Insert(engine, db, {planStep, txId}, std::move(dataToIndex), blobs, step);
// first overload returns ok: it's a postcondition
@@ -573,7 +573,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
// PlanStep, TxId, PathId, DedupId, BlobId, Data, [Metadata]
TVector<TInsertedData> dataToIndex;
dataToIndex.push_back(
- TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
+ TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
bool ok = Insert(engine, db, {planStep, txId}, std::move(dataToIndex), blobs, step);
bool overload = engine.GetOverloadedGranules(pathId);
@@ -608,7 +608,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
// PlanStep, TxId, PathId, DedupId, BlobId, Data, [Metadata]
TVector<TInsertedData> dataToIndex;
dataToIndex.push_back(
- TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
+ TInsertedData{planStep, txId, pathId, "", blobRange.BlobId, "", TInstant::Now()});
bool ok = Insert(db, {planStep, txId}, std::move(dataToIndex), blobs, step);
UNIT_ASSERT(ok);
@@ -664,7 +664,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
// load
engine.Load(db);
- UNIT_ASSERT_VALUES_EQUAL(engine.GetTotalStats().EmptyGranules, 1);
+ UNIT_ASSERT_VALUES_EQUAL(engine.GetTotalStats().EmptyGranules, 1);
{ // full scan
ui64 txId = 1;
diff --git a/ydb/core/tx/columnshard/indexing_actor.cpp b/ydb/core/tx/columnshard/indexing_actor.cpp
index 4c7a8b8d8d8..5155051a9a5 100644
--- a/ydb/core/tx/columnshard/indexing_actor.cpp
+++ b/ydb/core/tx/columnshard/indexing_actor.cpp
@@ -7,10 +7,10 @@ namespace NKikimr::NColumnShard {
class TIndexingActor : public TActorBootstrapped<TIndexingActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::TX_COLUMNSHARD_INDEXING_ACTOR;
+ return NKikimrServices::TActivity::TX_COLUMNSHARD_INDEXING_ACTOR;
}
- TIndexingActor(ui64 tabletId, const TActorId& parent)
+ TIndexingActor(ui64 tabletId, const TActorId& parent)
: TabletId(tabletId)
, Parent(parent)
, BlobCacheActorId(NBlobCache::MakeBlobCacheServiceId())
@@ -28,10 +28,10 @@ public:
auto& blobsToIndex = indexChanges->DataToIndex;
for (size_t i = 0; i < blobsToIndex.size(); ++i) {
- auto& blobId = blobsToIndex[i].BlobId;
- auto res = BlobsToRead.emplace(blobId, i);
- Y_VERIFY(res.second, "Duplicate blob in DataToIndex: %s", blobId.ToStringNew().c_str());
- SendReadRequest(ctx, blobId);
+ auto& blobId = blobsToIndex[i].BlobId;
+ auto res = BlobsToRead.emplace(blobId, i);
+ Y_VERIFY(res.second, "Duplicate blob in DataToIndex: %s", blobId.ToStringNew().c_str());
+ SendReadRequest(ctx, blobId);
}
if (BlobsToRead.empty()) {
@@ -44,9 +44,9 @@ public:
<< ") at tablet " << TabletId << " (index)");
auto& event = *ev->Get();
- const TUnifiedBlobId& blobId = event.BlobRange.BlobId;
+ const TUnifiedBlobId& blobId = event.BlobRange.BlobId;
if (event.Status != NKikimrProto::EReplyStatus::OK) {
- LOG_S_ERROR("TEvReadBlobRangeResult cannot get blob " << blobId << " status " << event.Status
+ LOG_S_ERROR("TEvReadBlobRangeResult cannot get blob " << blobId << " status " << event.Status
<< " at tablet " << TabletId << " (index)");
BlobsToRead.erase(blobId);
@@ -71,7 +71,7 @@ public:
auto& indexChanges = TxEvent->IndexChanges;
Y_VERIFY(indexChanges);
Y_VERIFY(indexChanges->DataToIndex[pos].BlobId == blobId);
- indexChanges->Blobs[event.BlobRange] = blobData;
+ indexChanges->Blobs[event.BlobRange] = blobData;
if (BlobsToRead.empty()) {
Index(ctx);
@@ -97,12 +97,12 @@ private:
TActorId Parent;
TActorId BlobCacheActorId;
std::unique_ptr<TEvPrivate::TEvWriteIndex> TxEvent;
- THashMap<TUnifiedBlobId, ui32> BlobsToRead;
+ THashMap<TUnifiedBlobId, ui32> BlobsToRead;
- void SendReadRequest(const TActorContext&, const TUnifiedBlobId& blobId) {
+ void SendReadRequest(const TActorContext&, const TUnifiedBlobId& blobId) {
Y_VERIFY(blobId.BlobSize());
Send(BlobCacheActorId,
- new NBlobCache::TEvBlobCache::TEvReadBlobRange(NBlobCache::TBlobRange(blobId, 0, blobId.BlobSize()), false));
+ new NBlobCache::TEvBlobCache::TEvReadBlobRange(NBlobCache::TBlobRange(blobId, 0, blobId.BlobSize()), false));
}
void Index(const TActorContext& ctx) {
@@ -110,7 +110,7 @@ private:
if (TxEvent->PutStatus == NKikimrProto::UNKNOWN) {
LOG_S_DEBUG("Indexing started at tablet " << TabletId);
- TCpuGuard guard(TxEvent->ResourceUsage);
+ TCpuGuard guard(TxEvent->ResourceUsage);
TxEvent->Blobs = NOlap::TColumnEngineForLogs::IndexBlobs(TxEvent->IndexInfo, TxEvent->IndexChanges);
LOG_S_DEBUG("Indexing finished at tablet " << TabletId);
@@ -123,8 +123,8 @@ private:
}
};
-IActor* CreateIndexingActor(ui64 tabletId, const TActorId& parent) {
- return new TIndexingActor(tabletId, parent);
+IActor* CreateIndexingActor(ui64 tabletId, const TActorId& parent) {
+ return new TIndexingActor(tabletId, parent);
}
}
diff --git a/ydb/core/tx/columnshard/inflight_request_tracker.h b/ydb/core/tx/columnshard/inflight_request_tracker.h
index e63627e9a9d..a0bc1eb0678 100644
--- a/ydb/core/tx/columnshard/inflight_request_tracker.h
+++ b/ydb/core/tx/columnshard/inflight_request_tracker.h
@@ -1,114 +1,114 @@
#pragma once
-#include "blob.h"
+#include "blob.h"
#include <ydb/core/tx/columnshard/engines/indexed_read_data.h>
-
+
namespace NKikimr::NColumnShard {
-class IBlobInUseTracker {
-protected:
- ~IBlobInUseTracker() = default;
+class IBlobInUseTracker {
+protected:
+ ~IBlobInUseTracker() = default;
-public:
- // Marks the blob as "in use (or no longer in use) by an in-flight request", increments (or decrements)
-    // its ref count. This will prevent the blob from being physically deleted when DeleteBlob() is called
- // until all the references are released.
-    // NOTE: these ref counts are in-memory only, so the blobs can be deleted if the tablet restarts
- virtual void SetBlobInUse(const NOlap::TUnifiedBlobId& blobId, bool inUse) = 0;
+public:
+ // Marks the blob as "in use (or no longer in use) by an in-flight request", increments (or decrements)
+    // its ref count. This will prevent the blob from being physically deleted when DeleteBlob() is called
+ // until all the references are released.
+    // NOTE: these ref counts are in-memory only, so the blobs can be deleted if the tablet restarts
+ virtual void SetBlobInUse(const NOlap::TUnifiedBlobId& blobId, bool inUse) = 0;
};
-using NOlap::TReadMetadata;
-
-class TInFlightReadsTracker {
-public:
- // Returns a unique cookie associated with this request
- ui64 AddInFlightRequest(NOlap::TReadMetadataBase::TConstPtr readMeta, IBlobInUseTracker& blobTracker) {
- const ui64 cookie = NextCookie++;
- AddToInFlightRequest(cookie, readMeta, blobTracker);
- return cookie;
- }
-
- // Returns a unique cookie associated with this request
- template <class TReadMetadataList>
- ui64 AddInFlightRequest(const TReadMetadataList& readMetaList, IBlobInUseTracker& blobTracker) {
- const ui64 cookie = NextCookie++;
- for (const auto& readMetaPtr : readMetaList) {
- AddToInFlightRequest(cookie, readMetaPtr, blobTracker);
- }
- return cookie;
- }
-
- // Forget completed request
- void RemoveInFlightRequest(ui64 cookie, IBlobInUseTracker& blobTracker) {
- Y_VERIFY(RequestsMeta.count(cookie), "Unknown request cookie %" PRIu64, cookie);
- const auto& readMetaList = RequestsMeta[cookie];
-
- for (const auto& readMetaBase : readMetaList) {
- NOlap::TReadMetadata::TConstPtr readMeta = std::dynamic_pointer_cast<const NOlap::TReadMetadata>(readMetaBase);
-
- if (!readMeta) {
- continue;
- }
-
- for (const auto& portion : readMeta->SelectInfo->Portions) {
- const ui64 portionId = portion.Records[0].Portion;
- auto it = PortionUseCount.find(portionId);
- Y_VERIFY(it != PortionUseCount.end(), "Portion id %" PRIu64 " not found in request %" PRIu64, portionId, cookie);
- if (it->second == 1) {
- PortionUseCount.erase(it);
- } else {
- it->second--;
- }
- }
-
- for (const auto& committedBlob : readMeta->CommittedBlobs) {
- blobTracker.SetBlobInUse(committedBlob, false);
- }
- }
-
- RequestsMeta.erase(cookie);
- }
-
- // Checks if the portion is in use by any in-flight request
- bool IsPortionUsed(ui64 portionId) const {
- return PortionUseCount.count(portionId);
- }
-
- NOlap::TSelectInfo::TStats GetSelectStatsDelta() {
- auto delta = SelectStatsDelta;
- SelectStatsDelta = NOlap::TSelectInfo::TStats();
- return delta;
+using NOlap::TReadMetadata;
+
+class TInFlightReadsTracker {
+public:
+ // Returns a unique cookie associated with this request
+ ui64 AddInFlightRequest(NOlap::TReadMetadataBase::TConstPtr readMeta, IBlobInUseTracker& blobTracker) {
+ const ui64 cookie = NextCookie++;
+ AddToInFlightRequest(cookie, readMeta, blobTracker);
+ return cookie;
+ }
+
+ // Returns a unique cookie associated with this request
+ template <class TReadMetadataList>
+ ui64 AddInFlightRequest(const TReadMetadataList& readMetaList, IBlobInUseTracker& blobTracker) {
+ const ui64 cookie = NextCookie++;
+ for (const auto& readMetaPtr : readMetaList) {
+ AddToInFlightRequest(cookie, readMetaPtr, blobTracker);
+ }
+ return cookie;
+ }
+
+ // Forget completed request
+ void RemoveInFlightRequest(ui64 cookie, IBlobInUseTracker& blobTracker) {
+ Y_VERIFY(RequestsMeta.count(cookie), "Unknown request cookie %" PRIu64, cookie);
+ const auto& readMetaList = RequestsMeta[cookie];
+
+ for (const auto& readMetaBase : readMetaList) {
+ NOlap::TReadMetadata::TConstPtr readMeta = std::dynamic_pointer_cast<const NOlap::TReadMetadata>(readMetaBase);
+
+ if (!readMeta) {
+ continue;
+ }
+
+ for (const auto& portion : readMeta->SelectInfo->Portions) {
+ const ui64 portionId = portion.Records[0].Portion;
+ auto it = PortionUseCount.find(portionId);
+ Y_VERIFY(it != PortionUseCount.end(), "Portion id %" PRIu64 " not found in request %" PRIu64, portionId, cookie);
+ if (it->second == 1) {
+ PortionUseCount.erase(it);
+ } else {
+ it->second--;
+ }
+ }
+
+ for (const auto& committedBlob : readMeta->CommittedBlobs) {
+ blobTracker.SetBlobInUse(committedBlob, false);
+ }
+ }
+
+ RequestsMeta.erase(cookie);
+ }
+
+ // Checks if the portion is in use by any in-flight request
+ bool IsPortionUsed(ui64 portionId) const {
+ return PortionUseCount.count(portionId);
+ }
+
+ NOlap::TSelectInfo::TStats GetSelectStatsDelta() {
+ auto delta = SelectStatsDelta;
+ SelectStatsDelta = NOlap::TSelectInfo::TStats();
+ return delta;
+ }
+
+private:
+ void AddToInFlightRequest(const ui64 cookie, NOlap::TReadMetadataBase::TConstPtr readMetaBase, IBlobInUseTracker& blobTracker) {
+ RequestsMeta[cookie].push_back(readMetaBase);
+
+ NOlap::TReadMetadata::TConstPtr readMeta = std::dynamic_pointer_cast<const NOlap::TReadMetadata>(readMetaBase);
+
+ if (!readMeta) {
+ return;
+ }
+
+ auto selectInfo = readMeta->SelectInfo;
+ Y_VERIFY(selectInfo);
+ SelectStatsDelta += selectInfo->Stats();
+
+ for (const auto& portion : readMeta->SelectInfo->Portions) {
+ const ui64 portionId = portion.Records[0].Portion;
+ PortionUseCount[portionId]++;
+ }
+
+ for (const auto& committedBlob : readMeta->CommittedBlobs) {
+ blobTracker.SetBlobInUse(committedBlob, true);
+ }
}
-private:
- void AddToInFlightRequest(const ui64 cookie, NOlap::TReadMetadataBase::TConstPtr readMetaBase, IBlobInUseTracker& blobTracker) {
- RequestsMeta[cookie].push_back(readMetaBase);
-
- NOlap::TReadMetadata::TConstPtr readMeta = std::dynamic_pointer_cast<const NOlap::TReadMetadata>(readMetaBase);
-
- if (!readMeta) {
- return;
- }
-
- auto selectInfo = readMeta->SelectInfo;
- Y_VERIFY(selectInfo);
- SelectStatsDelta += selectInfo->Stats();
-
- for (const auto& portion : readMeta->SelectInfo->Portions) {
- const ui64 portionId = portion.Records[0].Portion;
- PortionUseCount[portionId]++;
- }
-
- for (const auto& committedBlob : readMeta->CommittedBlobs) {
- blobTracker.SetBlobInUse(committedBlob, true);
- }
- }
-
-private:
- ui64 NextCookie{1};
- THashMap<ui64, TList<NOlap::TReadMetadataBase::TConstPtr>> RequestsMeta;
- THashMap<ui64, ui64> PortionUseCount;
- NOlap::TSelectInfo::TStats SelectStatsDelta;
-};
-
+private:
+ ui64 NextCookie{1};
+ THashMap<ui64, TList<NOlap::TReadMetadataBase::TConstPtr>> RequestsMeta;
+ THashMap<ui64, ui64> PortionUseCount;
+ NOlap::TSelectInfo::TStats SelectStatsDelta;
+};
+
}
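
TInFlightReadsTracker above pins the portions and committed blobs referenced by a read for as long as the request's cookie is outstanding. The following sketch shows the intended call pattern under stated assumptions: StartReadActorSketch() is a hypothetical placeholder for spawning the actual read actor (cf. CreateReadActor in read_actor.cpp below).

    #include <ydb/core/tx/columnshard/inflight_request_tracker.h>

    // Hypothetical helper: stands in for creating the read actor and handing it the cookie.
    void StartReadActorSketch(NKikimr::NOlap::TReadMetadata::TConstPtr readMeta, ui64 cookie);

    void ServeReadSketch(NKikimr::NColumnShard::TInFlightReadsTracker& tracker,
                         NKikimr::NColumnShard::IBlobInUseTracker& blobManager,
                         NKikimr::NOlap::TReadMetadata::TConstPtr readMeta) {
        // Pins every portion in readMeta->SelectInfo and marks its committed blobs as in use.
        const ui64 cookie = tracker.AddInFlightRequest(readMeta, blobManager);

        StartReadActorSketch(readMeta, cookie); // the read itself runs asynchronously

        // ... later, when TEvPrivate::TEvReadFinished arrives with this cookie:
        tracker.RemoveInFlightRequest(cookie, blobManager); // drops the ref counts again
    }

While the cookie is outstanding, IsPortionUsed() returns true for the pinned portions, which is how the cleanup path can avoid physically deleting data still needed by an active read; the TestCompactionGC test at the end of this patch exercises exactly that behavior.
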
diff --git a/ydb/core/tx/columnshard/read_actor.cpp b/ydb/core/tx/columnshard/read_actor.cpp
index d2b1c7f9319..aafb2592d06 100644
--- a/ydb/core/tx/columnshard/read_actor.cpp
+++ b/ydb/core/tx/columnshard/read_actor.cpp
@@ -8,25 +8,25 @@ namespace NKikimr::NColumnShard {
class TReadActor : public TActorBootstrapped<TReadActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::TX_COLUMNSHARD_READ_ACTOR;
+ return NKikimrServices::TActivity::TX_COLUMNSHARD_READ_ACTOR;
}
TReadActor(ui64 tabletId,
const TActorId& dstActor,
std::unique_ptr<TEvColumnShard::TEvReadResult>&& event,
- NOlap::TReadMetadata::TConstPtr readMetadata,
- const TInstant& deadline,
- const TActorId& columnShardActorId,
- ui64 requestCookie)
+ NOlap::TReadMetadata::TConstPtr readMetadata,
+ const TInstant& deadline,
+ const TActorId& columnShardActorId,
+ ui64 requestCookie)
: TabletId(tabletId)
, DstActor(dstActor)
, BlobCacheActorId(NBlobCache::MakeBlobCacheServiceId())
, Result(std::move(event))
, ReadMetadata(readMetadata)
- , IndexedData(ReadMetadata)
+ , IndexedData(ReadMetadata)
, Deadline(deadline)
- , ColumnShardActorId(columnShardActorId)
- , RequestCookie(requestCookie)
+ , ColumnShardActorId(columnShardActorId)
+ , RequestCookie(requestCookie)
, ReturnedBatchNo(0)
{}
@@ -34,15 +34,15 @@ public:
LOG_S_TRACE("TEvReadBlobRangeResult at tablet " << TabletId << " (read)");
auto& event = *ev->Get();
- const TUnifiedBlobId& blobId = event.BlobRange.BlobId;
- Y_VERIFY(event.Data.size() == event.BlobRange.Size);
+ const TUnifiedBlobId& blobId = event.BlobRange.BlobId;
+ Y_VERIFY(event.Data.size() == event.BlobRange.Size);
- if (IndexedBlobs.count(event.BlobRange)) {
- if (!WaitIndexed.count(event.BlobRange)) {
+ if (IndexedBlobs.count(event.BlobRange)) {
+ if (!WaitIndexed.count(event.BlobRange)) {
return; // ignore duplicate parts
}
- WaitIndexed.erase(event.BlobRange);
- IndexedData.AddIndexedColumn(event.BlobRange, event.Data);
+ WaitIndexed.erase(event.BlobRange);
+ IndexedData.AddIndexedColumn(event.BlobRange, event.Data);
} else if (CommittedBlobs.count(blobId)) {
if (!WaitCommitted.count(blobId)) {
return; // ignore duplicate parts
@@ -56,7 +56,7 @@ public:
return;
}
- auto ready = IndexedData.GetReadyResults(Max<i64>());
+ auto ready = IndexedData.GetReadyResults(Max<i64>());
size_t next = 1;
for (auto it = ready.begin(); it != ready.end(); ++it, ++next) {
TString data = NArrow::SerializeBatchNoCompression(it->ResultBatch);
@@ -129,7 +129,7 @@ public:
void DieFinished(const TActorContext& ctx) {
if (Finished()) {
LOG_S_DEBUG("Finished read (with " << ReturnedBatchNo << " batches sent) at tablet " << TabletId);
- Send(ColumnShardActorId, new TEvPrivate::TEvReadFinished(RequestCookie));
+ Send(ColumnShardActorId, new TEvPrivate::TEvReadFinished(RequestCookie));
Die(ctx);
}
}
@@ -137,14 +137,14 @@ public:
void Bootstrap(const TActorContext& ctx) {
ui32 notIndexed = 0;
for (size_t i = 0; i < ReadMetadata->CommittedBlobs.size(); ++i, ++notIndexed) {
- const TUnifiedBlobId& blobId = ReadMetadata->CommittedBlobs[i];
+ const TUnifiedBlobId& blobId = ReadMetadata->CommittedBlobs[i];
CommittedBlobs.emplace(blobId);
WaitCommitted.emplace(blobId, notIndexed);
}
IndexedBlobs = IndexedData.InitRead(notIndexed);
- for (auto& [blobRange, granule] : IndexedBlobs) {
- WaitIndexed.insert(blobRange);
+ for (auto& [blobRange, granule] : IndexedBlobs) {
+ WaitIndexed.insert(blobRange);
}
LOG_S_DEBUG("Starting read (" << WaitIndexed.size() << " indexed, " << WaitCommitted.size()
@@ -167,10 +167,10 @@ public:
} else {
// TODO: Keep inflight
for (auto& [blobId, batchNo] : WaitCommitted) {
- SendReadRequest(ctx, NBlobCache::TBlobRange(blobId, 0, blobId.BlobSize()));
+ SendReadRequest(ctx, NBlobCache::TBlobRange(blobId, 0, blobId.BlobSize()));
}
- for (auto& [blobRange, granule] : IndexedBlobs) {
- SendReadRequest(ctx, blobRange);
+ for (auto& [blobRange, granule] : IndexedBlobs) {
+ SendReadRequest(ctx, blobRange);
}
}
@@ -184,10 +184,10 @@ public:
IndexedBlobs.clear();
}
- void SendReadRequest(const TActorContext& ctx, const NBlobCache::TBlobRange& blobRange) {
+ void SendReadRequest(const TActorContext& ctx, const NBlobCache::TBlobRange& blobRange) {
Y_UNUSED(ctx);
- Y_VERIFY(blobRange.Size);
- Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRange(blobRange));
+ Y_VERIFY(blobRange.Size);
+ Send(BlobCacheActorId, new NBlobCache::TEvBlobCache::TEvReadBlobRange(blobRange));
}
STFUNC(StateWait) {
@@ -204,15 +204,15 @@ private:
TActorId DstActor;
TActorId BlobCacheActorId;
std::unique_ptr<TEvColumnShard::TEvReadResult> Result;
- NOlap::TReadMetadata::TConstPtr ReadMetadata;
+ NOlap::TReadMetadata::TConstPtr ReadMetadata;
NOlap::TIndexedReadData IndexedData;
TInstant Deadline;
- TActorId ColumnShardActorId;
- const ui64 RequestCookie;
- THashMap<NBlobCache::TBlobRange, ui64> IndexedBlobs;
- THashSet<TUnifiedBlobId> CommittedBlobs;
- THashSet<NBlobCache::TBlobRange> WaitIndexed;
- THashMap<TUnifiedBlobId, ui32> WaitCommitted;
+ TActorId ColumnShardActorId;
+ const ui64 RequestCookie;
+ THashMap<NBlobCache::TBlobRange, ui64> IndexedBlobs;
+ THashSet<TUnifiedBlobId> CommittedBlobs;
+ THashSet<NBlobCache::TBlobRange> WaitIndexed;
+ THashMap<TUnifiedBlobId, ui32> WaitCommitted;
ui32 ReturnedBatchNo;
mutable TString SerializedSchema;
@@ -225,16 +225,16 @@ private:
}
};
-IActor* CreateReadActor(ui64 tabletId,
+IActor* CreateReadActor(ui64 tabletId,
const TActorId& dstActor,
std::unique_ptr<TEvColumnShard::TEvReadResult>&& event,
- NOlap::TReadMetadata::TConstPtr readMetadata,
- const TInstant& deadline,
- const TActorId& columnShardActorId,
- ui64 requestCookie)
-{
- return new TReadActor(tabletId, dstActor, std::move(event), readMetadata,
- deadline, columnShardActorId, requestCookie);
+ NOlap::TReadMetadata::TConstPtr readMetadata,
+ const TInstant& deadline,
+ const TActorId& columnShardActorId,
+ ui64 requestCookie)
+{
+ return new TReadActor(tabletId, dstActor, std::move(event), readMetadata,
+ deadline, columnShardActorId, requestCookie);
}
}
diff --git a/ydb/core/tx/columnshard/ut_columnshard_read_write.cpp b/ydb/core/tx/columnshard/ut_columnshard_read_write.cpp
index 2450382db37..00e3143fc61 100644
--- a/ydb/core/tx/columnshard/ut_columnshard_read_write.cpp
+++ b/ydb/core/tx/columnshard/ut_columnshard_read_write.cpp
@@ -118,8 +118,8 @@ bool CheckColumns(const TString& blob, const NKikimrTxColumnShard::TMetadata& me
}
}
- UNIT_ASSERT_VALUES_EQUAL((ui64)batch->num_columns(), colNames.size());
- UNIT_ASSERT_VALUES_EQUAL((ui64)batch->num_rows(), rowsCount);
+ UNIT_ASSERT_VALUES_EQUAL((ui64)batch->num_columns(), colNames.size());
+ UNIT_ASSERT_VALUES_EQUAL((ui64)batch->num_rows(), rowsCount);
UNIT_ASSERT(batch->ValidateFull().ok());
return true;
}
@@ -223,8 +223,8 @@ void TestWriteReadImpl(bool reboots, const TVector<std::pair<TString, TTypeId>>&
TTestBasicRuntime runtime;
TTester::Setup(runtime);
- runtime.SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
-
+ runtime.SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
+
TActorId sender = runtime.AllocateEdgeActor();
CreateTestBootstrapper(runtime, CreateTestTabletInfo(TTestTxConfig::TxTablet0, TTabletTypes::COLUMNSHARD), &CreateColumnShard);
@@ -1118,15 +1118,15 @@ Y_UNIT_TEST_SUITE(TColumnShardTestReadWrite) {
auto scan = runtime.GrabEdgeEvent<NKqp::TEvKqpCompute::TEvScanData>(handle);
auto batchStats = scan->ArrowBatch;
UNIT_ASSERT(batchStats);
- // Cerr << batchStats->ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(batchStats->num_rows(), 4);
+ // Cerr << batchStats->ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(batchStats->num_rows(), 4);
for (ui32 i = 0; i < batchStats->num_rows(); ++i) {
- auto paths = batchStats->GetColumnByName("PathId");
- auto kinds = batchStats->GetColumnByName("Kind");
- auto rows = batchStats->GetColumnByName("Rows");
- auto bytes = batchStats->GetColumnByName("Bytes");
- auto rawBytes = batchStats->GetColumnByName("RawBytes");
+ auto paths = batchStats->GetColumnByName("PathId");
+ auto kinds = batchStats->GetColumnByName("Kind");
+ auto rows = batchStats->GetColumnByName("Rows");
+ auto bytes = batchStats->GetColumnByName("Bytes");
+ auto rawBytes = batchStats->GetColumnByName("RawBytes");
ui64 pathId = static_cast<arrow::UInt64Array&>(*paths).Value(i);
ui32 kind = static_cast<arrow::UInt32Array&>(*kinds).Value(i);
@@ -1150,359 +1150,359 @@ Y_UNIT_TEST_SUITE(TColumnShardTestReadWrite) {
}
}
}
-
- Y_UNIT_TEST(ReadStale) {
- TTestBasicRuntime runtime;
- TTester::Setup(runtime);
-
- TActorId sender = runtime.AllocateEdgeActor();
+
+ Y_UNIT_TEST(ReadStale) {
+ TTestBasicRuntime runtime;
+ TTester::Setup(runtime);
+
+ TActorId sender = runtime.AllocateEdgeActor();
CreateTestBootstrapper(runtime, CreateTestTabletInfo(TTestTxConfig::TxTablet0, TTabletTypes::COLUMNSHARD), &CreateColumnShard);
-
- TDispatchOptions options;
- options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot));
- runtime.DispatchEvents(options);
-
+
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot));
+ runtime.DispatchEvents(options);
+
ui64 metaShard = TTestTxConfig::TxTablet1;
- ui64 writeId = 0;
- ui64 tableId = 1;
- ui64 planStep = 1000000;
- ui64 txId = 100;
-
+ ui64 writeId = 0;
+ ui64 tableId = 1;
+ ui64 planStep = 1000000;
+ ui64 txId = 100;
+
SetupSchema(runtime, sender, tableId);
- TAutoPtr<IEventHandle> handle;
-
-        // Write some test data to advance the time
- {
- std::pair<ui64, ui64> triggerPortion = {1, 1000};
- TString triggerData = MakeTestBlob(triggerPortion, testYdbSchema);
-
- UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
-
+ TAutoPtr<IEventHandle> handle;
+
+        // Write some test data to advance the time
+ {
+ std::pair<ui64, ui64> triggerPortion = {1, 1000};
+ TString triggerData = MakeTestBlob(triggerPortion, testYdbSchema);
+
+ UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
+
ProposeCommit(runtime, sender, metaShard, txId, {writeId});
- PlanCommit(runtime, sender, planStep, txId);
- }
-
- TDuration staleness = TDuration::Minutes(6);
-
- // Try to read snapshot that is too old
- {
- {
- auto request = std::make_unique<TEvColumnShard::TEvRead>(sender, metaShard, planStep - staleness.MilliSeconds(), Max<ui64>(), tableId);
- request->Record.AddColumnNames("timestamp");
- request->Record.AddColumnNames("message");
-
+ PlanCommit(runtime, sender, planStep, txId);
+ }
+
+ TDuration staleness = TDuration::Minutes(6);
+
+ // Try to read snapshot that is too old
+ {
+ {
+ auto request = std::make_unique<TEvColumnShard::TEvRead>(sender, metaShard, planStep - staleness.MilliSeconds(), Max<ui64>(), tableId);
+ request->Record.AddColumnNames("timestamp");
+ request->Record.AddColumnNames("message");
+
ForwardToTablet(runtime, TTestTxConfig::TxTablet0, sender, request.release());
- }
-
- auto event = runtime.GrabEdgeEvent<TEvColumnShard::TEvReadResult>(handle);
- UNIT_ASSERT(event);
-
- auto& response = event->Record;
+ }
+
+ auto event = runtime.GrabEdgeEvent<TEvColumnShard::TEvReadResult>(handle);
+ UNIT_ASSERT(event);
+
+ auto& response = event->Record;
UNIT_ASSERT_VALUES_EQUAL(response.GetOrigin(), TTestTxConfig::TxTablet0);
- UNIT_ASSERT_VALUES_EQUAL(response.GetTxInitiator(), metaShard);
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), (ui32)NKikimrTxColumnShard::EResultStatus::ERROR);
- }
-
- // Try to scan snapshot that is too old
- {
- {
- auto request = std::make_unique<TEvColumnShard::TEvScan>();
- request->Record.SetTxId(1000);
- request->Record.SetScanId(1);
- request->Record.SetLocalPathId(tableId);
- request->Record.SetTablePath("test_olap_table");
- request->Record.MutableSnapshot()->SetStep(planStep - staleness.MilliSeconds());
- request->Record.MutableSnapshot()->SetTxId(Max<ui64>());
-
+ UNIT_ASSERT_VALUES_EQUAL(response.GetTxInitiator(), metaShard);
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), (ui32)NKikimrTxColumnShard::EResultStatus::ERROR);
+ }
+
+ // Try to scan snapshot that is too old
+ {
+ {
+ auto request = std::make_unique<TEvColumnShard::TEvScan>();
+ request->Record.SetTxId(1000);
+ request->Record.SetScanId(1);
+ request->Record.SetLocalPathId(tableId);
+ request->Record.SetTablePath("test_olap_table");
+ request->Record.MutableSnapshot()->SetStep(planStep - staleness.MilliSeconds());
+ request->Record.MutableSnapshot()->SetTxId(Max<ui64>());
+
ForwardToTablet(runtime, TTestTxConfig::TxTablet0, sender, request.release());
- }
-
- auto event = runtime.GrabEdgeEvent<NKqp::TEvKqpCompute::TEvScanError>(handle);
- UNIT_ASSERT(event);
-
- auto& response = event->Record;
- // Cerr << response << Endl;
- UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), Ydb::StatusIds::BAD_REQUEST);
- UNIT_ASSERT_VALUES_EQUAL(response.IssuesSize(), 1);
- UNIT_ASSERT_STRING_CONTAINS(response.GetIssues(0).message(), "Snapshot 640000:18446744073709551615 too old");
- }
- }
-
- // Private events of different actors reuse the same ES_PRIVATE range
- // So in order to capture the right private event we need to check its type via dynamic_cast
- template <class TPrivateEvent>
- TPrivateEvent* TryGetPrivateEvent(TAutoPtr<IEventHandle> &ev) {
- if (ev->GetTypeRewrite() != TPrivateEvent::EventType) {
- return nullptr;
- }
- return dynamic_cast<TPrivateEvent*>(ev->GetBase());
- }
-
- void TestCompactionGC(bool enableSmallBlobs) {
- TTestBasicRuntime runtime;
- TTester::Setup(runtime);
-
- runtime.SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
-
- TActorId sender = runtime.AllocateEdgeActor();
+ }
+
+ auto event = runtime.GrabEdgeEvent<NKqp::TEvKqpCompute::TEvScanError>(handle);
+ UNIT_ASSERT(event);
+
+ auto& response = event->Record;
+ // Cerr << response << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(response.GetStatus(), Ydb::StatusIds::BAD_REQUEST);
+ UNIT_ASSERT_VALUES_EQUAL(response.IssuesSize(), 1);
+ UNIT_ASSERT_STRING_CONTAINS(response.GetIssues(0).message(), "Snapshot 640000:18446744073709551615 too old");
+ }
+ }
+
+ // Private events of different actors reuse the same ES_PRIVATE range
+ // So in order to capture the right private event we need to check its type via dynamic_cast
+ template <class TPrivateEvent>
+ TPrivateEvent* TryGetPrivateEvent(TAutoPtr<IEventHandle> &ev) {
+ if (ev->GetTypeRewrite() != TPrivateEvent::EventType) {
+ return nullptr;
+ }
+ return dynamic_cast<TPrivateEvent*>(ev->GetBase());
+ }
+
+ void TestCompactionGC(bool enableSmallBlobs) {
+ TTestBasicRuntime runtime;
+ TTester::Setup(runtime);
+
+ runtime.SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
+
+ TActorId sender = runtime.AllocateEdgeActor();
CreateTestBootstrapper(runtime, CreateTestTabletInfo(TTestTxConfig::TxTablet0, TTabletTypes::COLUMNSHARD), &CreateColumnShard);
-
- TDispatchOptions options;
- options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot));
- runtime.DispatchEvents(options);
-
+
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvTablet::EvBoot));
+ runtime.DispatchEvents(options);
+
ui64 metaShard = TTestTxConfig::TxTablet1;
- ui64 writeId = 0;
- ui64 tableId = 1;
-
+ ui64 writeId = 0;
+ ui64 tableId = 1;
+
SetupSchema(runtime, sender, tableId);
- TAutoPtr<IEventHandle> handle;
-
- bool blockReadFinished = true;
- THashSet<ui64> inFlightReads;
- ui64 addedPortions = 0;
- THashSet<ui64> oldPortions;
- THashSet<ui64> deletedPortions;
- THashSet<TString> deletedBlobs;
- THashSet<TString> delayedBlobs;
- ui64 compactionsHappened = 0;
- ui64 cleanupsHappened = 0;
-
- auto captureEvents = [&](TTestActorRuntimeBase&, TAutoPtr<IEventHandle> &ev) {
- if (auto* msg = TryGetPrivateEvent<NColumnShard::TEvPrivate::TEvReadFinished>(ev)) {
- Cerr << "EvReadFinished " << msg->RequestCookie << Endl;
- inFlightReads.insert(msg->RequestCookie);
- if (blockReadFinished) {
- return true;
- }
- } else if (auto* msg = TryGetPrivateEvent<NColumnShard::TEvPrivate::TEvWriteIndex>(ev)) {
- // Cerr << "EvWriteIndex" << Endl << *msg->IndexChanges << Endl;
-
- if (!msg->IndexChanges->AppendedPortions.empty()) {
- Cerr << "Added portions:";
- for (const auto& portion : msg->IndexChanges->AppendedPortions) {
- ++addedPortions;
- ui64 portionId = addedPortions;
- Cerr << " " << portionId << "(" << portion.Records[0].Portion << ")";
- }
- Cerr << Endl;
- }
- if (msg->IndexChanges->CompactionInfo) {
- ++compactionsHappened;
-                    Cerr << "Compaction at snapshot " << msg->IndexChanges->ApplySnapshot
- << " old portions:";
- ui64 srcGranule{0};
+ TAutoPtr<IEventHandle> handle;
+
+ bool blockReadFinished = true;
+ THashSet<ui64> inFlightReads;
+ ui64 addedPortions = 0;
+ THashSet<ui64> oldPortions;
+ THashSet<ui64> deletedPortions;
+ THashSet<TString> deletedBlobs;
+ THashSet<TString> delayedBlobs;
+ ui64 compactionsHappened = 0;
+ ui64 cleanupsHappened = 0;
+
+ auto captureEvents = [&](TTestActorRuntimeBase&, TAutoPtr<IEventHandle> &ev) {
+ if (auto* msg = TryGetPrivateEvent<NColumnShard::TEvPrivate::TEvReadFinished>(ev)) {
+ Cerr << "EvReadFinished " << msg->RequestCookie << Endl;
+ inFlightReads.insert(msg->RequestCookie);
+ if (blockReadFinished) {
+ return true;
+ }
+ } else if (auto* msg = TryGetPrivateEvent<NColumnShard::TEvPrivate::TEvWriteIndex>(ev)) {
+ // Cerr << "EvWriteIndex" << Endl << *msg->IndexChanges << Endl;
+
+ if (!msg->IndexChanges->AppendedPortions.empty()) {
+ Cerr << "Added portions:";
+ for (const auto& portion : msg->IndexChanges->AppendedPortions) {
+ ++addedPortions;
+ ui64 portionId = addedPortions;
+ Cerr << " " << portionId << "(" << portion.Records[0].Portion << ")";
+ }
+ Cerr << Endl;
+ }
+ if (msg->IndexChanges->CompactionInfo) {
+ ++compactionsHappened;
+                    Cerr << "Compaction at snapshot " << msg->IndexChanges->ApplySnapshot
+ << " old portions:";
+ ui64 srcGranule{0};
for (const auto& portionInfo : msg->IndexChanges->SwitchedPortions) {
ui64 granule = portionInfo.Granule();
- Y_VERIFY(!srcGranule || srcGranule == granule);
- srcGranule = granule;
+ Y_VERIFY(!srcGranule || srcGranule == granule);
+ srcGranule = granule;
ui64 portionId = portionInfo.Portion();
Cerr << " " << portionId;
oldPortions.insert(portionId);
- }
- Cerr << Endl;
- }
- if (!msg->IndexChanges->PortionsToDrop.empty()) {
- ++cleanupsHappened;
-                    Cerr << "Cleanup older than snapshot " << msg->IndexChanges->InitSnapshot
- << " old portions:";
- for (const auto& portion : msg->IndexChanges->PortionsToDrop) {
- ui64 portionId = portion.Records[0].Portion;
- Cerr << " " << portionId;
- deletedPortions.insert(portionId);
- }
- Cerr << Endl;
- }
- } else if (auto* msg = TryGetPrivateEvent<NActors::NLog::TEvLog>(ev)) {
- bool matchedEvent = false;
- {
- TString prefixes[2] = {"Delay Delete Blob ", "Delay Delete Small Blob "};
- for (TString prefix : prefixes) {
- size_t pos = msg->Line.find(prefix);
- if (pos != TString::npos) {
- TString blobId = msg->Line.substr(pos + prefix.size());
- Cerr << "Delayed delete: " << blobId << Endl;
- delayedBlobs.insert(blobId);
- matchedEvent = true;
- break;
- }
- }
- }
- if (!matchedEvent){
- TString prefix = "Delete Small Blob ";
- size_t pos = msg->Line.find(prefix);
- if (pos != TString::npos) {
- TString blobId = msg->Line.substr(pos + prefix.size());
- Cerr << "Delete small blob: " << blobId << Endl;
- deletedBlobs.insert(blobId);
- delayedBlobs.erase(blobId);
- matchedEvent = true;
- }
- }
- } else if (auto* msg = TryGetPrivateEvent<NKikimr::TEvBlobStorage::TEvCollectGarbage>(ev)) {
- // Extract and save all DoNotKeep blobIds
- Cerr << "GC for channel " << msg->Channel;
- if (msg->DoNotKeep) {
- Cerr << " deletes blobs: " << JoinStrings(msg->DoNotKeep->begin(), msg->DoNotKeep->end(), " ");
- for (const auto& blobId : *msg->DoNotKeep) {
- deletedBlobs.insert(blobId.ToString());
- delayedBlobs.erase(TUnifiedBlobId(0, blobId).ToStringNew());
- }
- }
- Cerr << Endl;
- }
- return false;
- };
- runtime.SetEventFilter(captureEvents);
-
- // Enable/Disable small blobs
- {
- TAtomic unused;
- TAtomic maxSmallBlobSize = enableSmallBlobs ? 1000000 : 0;
- runtime.GetAppData().Icb->SetValue("ColumnShardControls.MaxSmallBlobSize",maxSmallBlobSize, unused);
- }
-
- // Disable GC batching so that deleted blobs get collected without a delay
- {
- TAtomic unusedPrev;
- runtime.GetAppData().Icb->SetValue("ColumnShardControls.BlobCountToTriggerGC", 1, unusedPrev);
- }
-
- // Write different keys: grow on compaction
-
+ }
+ Cerr << Endl;
+ }
+ if (!msg->IndexChanges->PortionsToDrop.empty()) {
+ ++cleanupsHappened;
+                    Cerr << "Cleanup older than snapshot " << msg->IndexChanges->InitSnapshot
+ << " old portions:";
+ for (const auto& portion : msg->IndexChanges->PortionsToDrop) {
+ ui64 portionId = portion.Records[0].Portion;
+ Cerr << " " << portionId;
+ deletedPortions.insert(portionId);
+ }
+ Cerr << Endl;
+ }
+ } else if (auto* msg = TryGetPrivateEvent<NActors::NLog::TEvLog>(ev)) {
+ bool matchedEvent = false;
+ {
+ TString prefixes[2] = {"Delay Delete Blob ", "Delay Delete Small Blob "};
+ for (TString prefix : prefixes) {
+ size_t pos = msg->Line.find(prefix);
+ if (pos != TString::npos) {
+ TString blobId = msg->Line.substr(pos + prefix.size());
+ Cerr << "Delayed delete: " << blobId << Endl;
+ delayedBlobs.insert(blobId);
+ matchedEvent = true;
+ break;
+ }
+ }
+ }
+ if (!matchedEvent){
+ TString prefix = "Delete Small Blob ";
+ size_t pos = msg->Line.find(prefix);
+ if (pos != TString::npos) {
+ TString blobId = msg->Line.substr(pos + prefix.size());
+ Cerr << "Delete small blob: " << blobId << Endl;
+ deletedBlobs.insert(blobId);
+ delayedBlobs.erase(blobId);
+ matchedEvent = true;
+ }
+ }
+ } else if (auto* msg = TryGetPrivateEvent<NKikimr::TEvBlobStorage::TEvCollectGarbage>(ev)) {
+ // Extract and save all DoNotKeep blobIds
+ Cerr << "GC for channel " << msg->Channel;
+ if (msg->DoNotKeep) {
+ Cerr << " deletes blobs: " << JoinStrings(msg->DoNotKeep->begin(), msg->DoNotKeep->end(), " ");
+ for (const auto& blobId : *msg->DoNotKeep) {
+ deletedBlobs.insert(blobId.ToString());
+ delayedBlobs.erase(TUnifiedBlobId(0, blobId).ToStringNew());
+ }
+ }
+ Cerr << Endl;
+ }
+ return false;
+ };
+ runtime.SetEventFilter(captureEvents);
+
+ // Enable/Disable small blobs
+ {
+ TAtomic unused;
+ TAtomic maxSmallBlobSize = enableSmallBlobs ? 1000000 : 0;
+ runtime.GetAppData().Icb->SetValue("ColumnShardControls.MaxSmallBlobSize",maxSmallBlobSize, unused);
+ }
+
+ // Disable GC batching so that deleted blobs get collected without a delay
+ {
+ TAtomic unusedPrev;
+ runtime.GetAppData().Icb->SetValue("ColumnShardControls.BlobCountToTriggerGC", 1, unusedPrev);
+ }
+
+ // Write different keys: grow on compaction
+
static const ui32 triggerPortionSize = 75 * 1000;
- std::pair<ui64, ui64> triggerPortion = {0, triggerPortionSize};
- TString triggerData = MakeTestBlob(triggerPortion, testYdbSchema);
+ std::pair<ui64, ui64> triggerPortion = {0, triggerPortionSize};
+ TString triggerData = MakeTestBlob(triggerPortion, testYdbSchema);
UNIT_ASSERT(triggerData.size() > NColumnShard::TLimits::MIN_BYTES_TO_INSERT);
UNIT_ASSERT(triggerData.size() < NColumnShard::TLimits::MAX_BLOB_SIZE);
-
- ui64 planStep = 5000000;
- ui64 txId = 1000;
-
-        // Overwrite the same data multiple times to produce multiple portions at different timestamps
+
+ ui64 planStep = 5000000;
+ ui64 txId = 1000;
+
+        // Overwrite the same data multiple times to produce multiple portions at different timestamps
ui32 numWrites = 14; // trigger split granule compaction by GranuleBlobSplitSize
- for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
- UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
-
+ for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
+ UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
+
ProposeCommit(runtime, sender, metaShard, txId, {writeId});
- PlanCommit(runtime, sender, planStep, txId);
- }
-
- // Do a small write that is not indexed so that we will get a committed blob in read request
- {
- TString smallData = MakeTestBlob({0, 2}, testYdbSchema);
- UNIT_ASSERT(smallData.size() < 100 * 1024);
- UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, smallData));
-
+ PlanCommit(runtime, sender, planStep, txId);
+ }
+
+ // Do a small write that is not indexed so that we will get a committed blob in read request
+ {
+ TString smallData = MakeTestBlob({0, 2}, testYdbSchema);
+ UNIT_ASSERT(smallData.size() < 100 * 1024);
+ UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, smallData));
+
ProposeCommit(runtime, sender, metaShard, txId, {writeId});
- PlanCommit(runtime, sender, planStep, txId);
- ++writeId;
- ++planStep;
- ++txId;
- }
-
- --planStep;
- --txId;
-
+ PlanCommit(runtime, sender, planStep, txId);
+ ++writeId;
+ ++planStep;
+ ++txId;
+ }
+
+ --planStep;
+ --txId;
+
UNIT_ASSERT_VALUES_EQUAL(compactionsHappened, 2); // we catch it twice per action
- ui64 previousCompactionsHappened = compactionsHappened;
- ui64 previousCleanupsHappened = cleanupsHappened;
-
- // Send a request that reads the latest version
- // This request is expected to read at least 1 committed blob and several index portions
-        // The committed blob and portions must not be deleted by the BlobManager until the read request finishes
- auto read = std::make_unique<TEvColumnShard::TEvRead>(sender, metaShard, planStep, txId, tableId);
- Proto(read.get()).AddColumnNames("timestamp");
- Proto(read.get()).AddColumnNames("message");
-
+ ui64 previousCompactionsHappened = compactionsHappened;
+ ui64 previousCleanupsHappened = cleanupsHappened;
+
+ // Send a request that reads the latest version
+ // This request is expected to read at least 1 committed blob and several index portions
+        // The committed blob and portions must not be deleted by the BlobManager until the read request finishes
+ auto read = std::make_unique<TEvColumnShard::TEvRead>(sender, metaShard, planStep, txId, tableId);
+ Proto(read.get()).AddColumnNames("timestamp");
+ Proto(read.get()).AddColumnNames("message");
+
ForwardToTablet(runtime, TTestTxConfig::TxTablet0, sender, read.release());
-
+
ui32 expected = 0;
ui32 num = 0;
while (!expected || num < expected) {
- auto event = runtime.GrabEdgeEvent<TEvColumnShard::TEvReadResult>(handle);
- UNIT_ASSERT(event);
-
- auto& resRead = Proto(event);
+ auto event = runtime.GrabEdgeEvent<TEvColumnShard::TEvReadResult>(handle);
+ UNIT_ASSERT(event);
+
+ auto& resRead = Proto(event);
UNIT_ASSERT_EQUAL(resRead.GetOrigin(), TTestTxConfig::TxTablet0);
- UNIT_ASSERT_EQUAL(resRead.GetTxInitiator(), metaShard);
- UNIT_ASSERT_EQUAL(resRead.GetStatus(), NKikimrTxColumnShard::EResultStatus::SUCCESS);
+ UNIT_ASSERT_EQUAL(resRead.GetTxInitiator(), metaShard);
+ UNIT_ASSERT_EQUAL(resRead.GetStatus(), NKikimrTxColumnShard::EResultStatus::SUCCESS);
if (resRead.GetFinished()) {
expected = resRead.GetBatch() + 1;
UNIT_ASSERT(resRead.HasMeta());
}
- UNIT_ASSERT(resRead.GetData().size() > 0);
+ UNIT_ASSERT(resRead.GetData().size() > 0);
++num;
UNIT_ASSERT(num < 10);
- }
-
- // We captured the EvReadFinished event and dropped it, so the columnshard still thinks that
- // read request is in progress and keeps the portions
-
- // Advance the time in order to trigger GC
- TDuration delay = TDuration::Minutes(6);
- planStep += delay.MilliSeconds();
+ }
+
+ // We captured the EvReadFinished event and dropped it, so the columnshard still thinks that
+ // read request is in progress and keeps the portions
+
+ // Advance the time in order to trigger GC
+ TDuration delay = TDuration::Minutes(6);
+ planStep += delay.MilliSeconds();
numWrites = 10; // trigger in granule compaction by size
- for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
- UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
-
+ for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
+ UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
+
ProposeCommit(runtime, sender, metaShard, txId, {writeId});
- PlanCommit(runtime, sender, planStep, txId);
- }
-
- Cerr << "Compactions happened: " << compactionsHappened << Endl;
- Cerr << "Cleanups happened: " << cleanupsHappened << Endl;
- Cerr << "Old portions: " << JoinStrings(oldPortions.begin(), oldPortions.end(), " ") << Endl;
- Cerr << "Cleaned up portions: " << JoinStrings(deletedPortions.begin(), deletedPortions.end(), " ") << Endl;
-
- // Check that GC happened but it didn't collect some old portions
- UNIT_ASSERT_GT(compactionsHappened, previousCompactionsHappened);
- UNIT_ASSERT_GT(cleanupsHappened, previousCleanupsHappened);
- UNIT_ASSERT_GT_C(oldPortions.size(), deletedPortions.size(), "Some old portions must not be deleted because they are in use by a read");
- UNIT_ASSERT_GT_C(delayedBlobs.size(), 0, "Read request is expected to have at least one committed blob, whose deletion must be delayed");
- previousCompactionsHappened = compactionsHappened;
- previousCleanupsHappened = cleanupsHappened;
-
- // Send EvReadFinished to release kept portions
- blockReadFinished = false;
- UNIT_ASSERT_VALUES_EQUAL(inFlightReads.size(), 1);
- {
- auto read = std::make_unique<NColumnShard::TEvPrivate::TEvReadFinished>(*inFlightReads.begin());
+ PlanCommit(runtime, sender, planStep, txId);
+ }
+
+ Cerr << "Compactions happened: " << compactionsHappened << Endl;
+ Cerr << "Cleanups happened: " << cleanupsHappened << Endl;
+ Cerr << "Old portions: " << JoinStrings(oldPortions.begin(), oldPortions.end(), " ") << Endl;
+ Cerr << "Cleaned up portions: " << JoinStrings(deletedPortions.begin(), deletedPortions.end(), " ") << Endl;
+
+ // Check that GC happened but it didn't collect some old portions
+ UNIT_ASSERT_GT(compactionsHappened, previousCompactionsHappened);
+ UNIT_ASSERT_GT(cleanupsHappened, previousCleanupsHappened);
+ UNIT_ASSERT_GT_C(oldPortions.size(), deletedPortions.size(), "Some old portions must not be deleted because they are in use by a read");
+ UNIT_ASSERT_GT_C(delayedBlobs.size(), 0, "Read request is expected to have at least one committed blob, whose deletion must be delayed");
+ previousCompactionsHappened = compactionsHappened;
+ previousCleanupsHappened = cleanupsHappened;
+
+ // Send EvReadFinished to release kept portions
+ blockReadFinished = false;
+ UNIT_ASSERT_VALUES_EQUAL(inFlightReads.size(), 1);
+ {
+ auto read = std::make_unique<NColumnShard::TEvPrivate::TEvReadFinished>(*inFlightReads.begin());
ForwardToTablet(runtime, TTestTxConfig::TxTablet0, sender, read.release());
- }
-
- // Advance the time and trigger some more compactions and cleanups
- planStep += 2*delay.MilliSeconds();
+ }
+
+ // Advance the time and trigger some more compactions and cleanups
+ planStep += 2*delay.MilliSeconds();
numWrites = 10;
- for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
- UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
-
+ for (ui32 i = 0; i < numWrites; ++i, ++writeId, ++planStep, ++txId) {
+ UNIT_ASSERT(WriteData(runtime, sender, metaShard, writeId, tableId, triggerData));
+
ProposeCommit(runtime, sender, metaShard, txId, {writeId});
- PlanCommit(runtime, sender, planStep, txId);
- }
-
- Cerr << "Compactions happened: " << compactionsHappened << Endl;
- Cerr << "Cleanups happened: " << cleanupsHappened << Endl;
- Cerr << "Old portions: " << JoinStrings(oldPortions.begin(), oldPortions.end(), " ") << Endl;
- Cerr << "Cleaned up portions: " << JoinStrings(deletedPortions.begin(), deletedPortions.end(), " ") << Endl;
-
- // Check that previously kept portions are collected
- UNIT_ASSERT_GE(compactionsHappened, previousCompactionsHappened);
- UNIT_ASSERT_GT(cleanupsHappened, previousCleanupsHappened);
- UNIT_ASSERT_VALUES_EQUAL_C(oldPortions.size(), deletedPortions.size(), "All old portions must be deleted after read has finished");
- UNIT_ASSERT_VALUES_EQUAL_C(delayedBlobs.size(), 0, "All previously delayed deletions must now happen");
- }
-
- Y_UNIT_TEST(CompactionGC) {
- TestCompactionGC(false);
- }
-
- Y_UNIT_TEST(CompactionGCWithSmallBlobs) {
- TestCompactionGC(true);
- }
+ PlanCommit(runtime, sender, planStep, txId);
+ }
+
+ Cerr << "Compactions happened: " << compactionsHappened << Endl;
+ Cerr << "Cleanups happened: " << cleanupsHappened << Endl;
+ Cerr << "Old portions: " << JoinStrings(oldPortions.begin(), oldPortions.end(), " ") << Endl;
+ Cerr << "Cleaned up portions: " << JoinStrings(deletedPortions.begin(), deletedPortions.end(), " ") << Endl;
+
+ // Check that previously kept portions are collected
+ UNIT_ASSERT_GE(compactionsHappened, previousCompactionsHappened);
+ UNIT_ASSERT_GT(cleanupsHappened, previousCleanupsHappened);
+ UNIT_ASSERT_VALUES_EQUAL_C(oldPortions.size(), deletedPortions.size(), "All old portions must be deleted after read has finished");
+ UNIT_ASSERT_VALUES_EQUAL_C(delayedBlobs.size(), 0, "All previously delayed deletions must now happen");
+ }
+
+ Y_UNIT_TEST(CompactionGC) {
+ TestCompactionGC(false);
+ }
+
+ Y_UNIT_TEST(CompactionGCWithSmallBlobs) {
+ TestCompactionGC(true);
+ }
}
}
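The test above asserts an invariant that is never spelled out in prose: blobs and index portions referenced by an in-flight read must survive compaction and GC until the shard sees EvReadFinished, after which the delayed deletions go through. The following standalone C++ sketch models that bookkeeping with plain standard containers; TToyBlobKeeper and all of its members are hypothetical illustrations, not ydb types.

#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>

// Hypothetical model: which reads still reference which blobs, and which
// deletions had to be postponed because of that.
struct TToyBlobKeeper {
    std::map<std::string, std::set<uint64_t>> BlobUsers;  // blob id -> in-flight read ids
    std::vector<std::string> DelayedDeletes;              // deletions postponed by active reads

    void StartRead(uint64_t readId, const std::vector<std::string>& blobs) {
        for (const auto& blob : blobs) {
            BlobUsers[blob].insert(readId);
        }
    }

    // GC asks to drop a blob: keep it while any read still uses it.
    bool TryDelete(const std::string& blob) {
        auto it = BlobUsers.find(blob);
        if (it != BlobUsers.end() && !it->second.empty()) {
            DelayedDeletes.push_back(blob);  // plays the role of the delayedBlobs the test inspects
            return false;
        }
        return true;
    }

    // EvReadFinished analogue: release the references so delayed deletions can proceed.
    void FinishRead(uint64_t readId) {
        for (auto& [blob, users] : BlobUsers) {
            users.erase(readId);
        }
    }
};

After FinishRead the next GC pass can collect everything, which is what the final assertions of the test check once the blocked EvReadFinished is re-sent.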
diff --git a/ydb/core/tx/columnshard/ut_columnshard_schema.cpp b/ydb/core/tx/columnshard/ut_columnshard_schema.cpp
index 714c8ecb302..d2bb656e920 100644
--- a/ydb/core/tx/columnshard/ut_columnshard_schema.cpp
+++ b/ydb/core/tx/columnshard/ut_columnshard_schema.cpp
@@ -331,9 +331,9 @@ void TestDrop(bool reboots) {
}
-namespace NColumnShard {
-extern bool gAllowLogBatchingDefaultValue;
-}
+namespace NColumnShard {
+extern bool gAllowLogBatchingDefaultValue;
+}
Y_UNIT_TEST_SUITE(TColumnShardTestSchema) {
Y_UNIT_TEST(ExternalTTL) {
@@ -341,7 +341,7 @@ Y_UNIT_TEST_SUITE(TColumnShardTestSchema) {
}
Y_UNIT_TEST(RebootExternalTTL) {
- NColumnShard::gAllowLogBatchingDefaultValue = false;
+ NColumnShard::gAllowLogBatchingDefaultValue = false;
TestTtl(true, false);
}
diff --git a/ydb/core/tx/columnshard/write_actor.cpp b/ydb/core/tx/columnshard/write_actor.cpp
index 740e7da7f1f..a02d84e73f4 100644
--- a/ydb/core/tx/columnshard/write_actor.cpp
+++ b/ydb/core/tx/columnshard/write_actor.cpp
@@ -8,22 +8,22 @@ namespace NKikimr::NColumnShard {
class TWriteActor : public TActorBootstrapped<TWriteActor> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::TX_COLUMNSHARD_WRITE_ACTOR;
+ return NKikimrServices::TActivity::TX_COLUMNSHARD_WRITE_ACTOR;
}
- TWriteActor(ui64 tabletId,
+ TWriteActor(ui64 tabletId,
const NOlap::TIndexInfo& indexInfo,
const TActorId& dstActor,
- TBlobBatch&& blobBatch,
- bool blobGrouppingEnabled,
+ TBlobBatch&& blobBatch,
+ bool blobGrouppingEnabled,
TAutoPtr<TEvColumnShard::TEvWrite> writeEv,
TAutoPtr<TEvPrivate::TEvWriteIndex> writeIndexEv,
const TInstant& deadline)
- : TabletId(tabletId)
+ : TabletId(tabletId)
, IndexInfo(indexInfo)
, DstActor(dstActor)
- , BlobBatch(std::move(blobBatch))
- , BlobGrouppingEnabled(blobGrouppingEnabled)
+ , BlobBatch(std::move(blobBatch))
+ , BlobGrouppingEnabled(blobGrouppingEnabled)
, WriteEv(writeEv)
, WriteIndexEv(writeIndexEv)
, Deadline(deadline)
@@ -46,25 +46,25 @@ public:
}
- if (status != NKikimrProto::OK) {
+ if (status != NKikimrProto::OK) {
LOG_S_WARN("Unsuccessful TEvPutResult for blob " << msg->Id.ToString()
<< " status: " << status << " reason: " << msg->ErrorReason);
SendResultAndDie(ctx, status);
- return;
- }
+ return;
+ }
LOG_S_TRACE("TEvPutResult for blob " << msg->Id.ToString());
-
- BlobBatch.OnBlobWriteResult(ev);
- if (BlobBatch.AllBlobWritesCompleted()) {
- SendResultAndDie(ctx, NKikimrProto::OK);
+ BlobBatch.OnBlobWriteResult(ev);
+
+ if (BlobBatch.AllBlobWritesCompleted()) {
+ SendResultAndDie(ctx, NKikimrProto::OK);
}
}
void Handle(TEvents::TEvWakeup::TPtr& ev, const TActorContext& ctx) {
Y_UNUSED(ev);
- LOG_S_WARN("TEvWakeup: write timeout at tablet " << TabletId << " (write)");
+ LOG_S_WARN("TEvWakeup: write timeout at tablet " << TabletId << " (write)");
SendResultAndDie(ctx, NKikimrProto::TIMEOUT);
return;
@@ -120,28 +120,28 @@ public:
// Heavy operations inside. We cannot run them in tablet event handler.
TString strError;
- std::shared_ptr<arrow::RecordBatch> batch;
- {
- TCpuGuard guard(ResourceUsage);
- batch = IndexInfo.PrepareForInsert(srcData, meta, strError);
- }
+ std::shared_ptr<arrow::RecordBatch> batch;
+ {
+ TCpuGuard guard(ResourceUsage);
+ batch = IndexInfo.PrepareForInsert(srcData, meta, strError);
+ }
if (!batch) {
LOG_S_DEBUG("Bad data to write (" << strError << ") at tablet " << TabletId);
SendResultAndDie(ctx, NKikimrProto::ERROR);
return;
}
- TString data;
- {
- TCpuGuard guard(ResourceUsage);
- data = NArrow::SerializeBatchNoCompression(batch);
- }
- if (data.size() > TLimits::MAX_BLOB_SIZE) {
+ TString data;
+ {
+ TCpuGuard guard(ResourceUsage);
+ data = NArrow::SerializeBatchNoCompression(batch);
+ }
+ if (data.size() > TLimits::MAX_BLOB_SIZE) {
LOG_S_DEBUG("Extracted data (" << data.size() << " bytes) is bigger than source ("
- << srcData.size() << " bytes) and limit at tablet " << TabletId);
+ << srcData.size() << " bytes) and limit at tablet " << TabletId);
- SendResultAndDie(ctx, NKikimrProto::ERROR);
- return;
+ SendResultAndDie(ctx, NKikimrProto::ERROR);
+ return;
}
record.SetData(data); // modify for TxWrite
@@ -155,22 +155,22 @@ public:
Y_PROTOBUF_SUPPRESS_NODISCARD outMeta.SerializeToString(&meta);
}
record.MutableMeta()->SetLogicalMeta(meta);
-
- if (data.size() > WriteEv->MaxSmallBlobSize) {
- WriteEv->BlobId = DoSendWriteBlobRequest(data, ctx);
- } else {
- TUnifiedBlobId smallBlobId = BlobBatch.AddSmallBlob(data);
- Y_VERIFY(smallBlobId.IsSmallBlob());
- WriteEv->BlobId = smallBlobId;
- }
-
- Y_VERIFY(WriteEv->BlobId.BlobSize() == data.size());
-
- LOG_S_DEBUG("Write Blob " << WriteEv->BlobId.ToStringNew());
-
- if (BlobBatch.AllBlobWritesCompleted()) {
- SendResultAndDie(ctx, NKikimrProto::OK);
- }
+
+ if (data.size() > WriteEv->MaxSmallBlobSize) {
+ WriteEv->BlobId = DoSendWriteBlobRequest(data, ctx);
+ } else {
+ TUnifiedBlobId smallBlobId = BlobBatch.AddSmallBlob(data);
+ Y_VERIFY(smallBlobId.IsSmallBlob());
+ WriteEv->BlobId = smallBlobId;
+ }
+
+ Y_VERIFY(WriteEv->BlobId.BlobSize() == data.size());
+
+ LOG_S_DEBUG("Write Blob " << WriteEv->BlobId.ToStringNew());
+
+ if (BlobBatch.AllBlobWritesCompleted()) {
+ SendResultAndDie(ctx, NKikimrProto::OK);
+ }
}
void SendMultiWriteRequest(const TActorContext& ctx) {
@@ -183,70 +183,70 @@ public:
const TVector<TString>& blobs = WriteIndexEv->Blobs;
Y_VERIFY(blobs.size() > 0);
size_t blobsPos = 0;
-
- // Send accumulated data and update records with the blob Id
- auto fnFlushAcummultedBlob = [this, &ctx] (TString& accumulatedBlob, NOlap::TPortionInfo& portionInfo,
- TVector<std::pair<size_t, TString>>& recordsInBlob)
- {
- Y_VERIFY(accumulatedBlob.size() > 0);
- Y_VERIFY(recordsInBlob.size() > 0);
- auto blobId = DoSendWriteBlobRequest(accumulatedBlob, ctx);
- LOG_S_TRACE("Write Index Blob " << blobId << " with " << recordsInBlob.size() << " records");
- for (const auto& rec : recordsInBlob) {
- size_t i = rec.first;
- const TString& recData = rec.second;
- auto& blobRange = portionInfo.Records[i].BlobRange;
- blobRange.BlobId = blobId;
- Y_VERIFY(blobRange.Offset + blobRange.Size <= accumulatedBlob.size());
- Y_VERIFY(blobRange.Size == recData.size());
-
- if (WriteIndexEv->CacheData) {
- // Save original (non-accumulated) blobs with the corresponding TBlobRanges in order to
- // put them into cache at commit time
- WriteIndexEv->IndexChanges->Blobs[blobRange] = recData;
- }
- }
- accumulatedBlob.clear();
- recordsInBlob.clear();
- };
-
- TString accumulatedBlob;
- TVector<std::pair<size_t, TString>> recordsInBlob;
-
+
+ // Send accumulated data and update records with the blob Id
+ auto fnFlushAcummultedBlob = [this, &ctx] (TString& accumulatedBlob, NOlap::TPortionInfo& portionInfo,
+ TVector<std::pair<size_t, TString>>& recordsInBlob)
+ {
+ Y_VERIFY(accumulatedBlob.size() > 0);
+ Y_VERIFY(recordsInBlob.size() > 0);
+ auto blobId = DoSendWriteBlobRequest(accumulatedBlob, ctx);
+ LOG_S_TRACE("Write Index Blob " << blobId << " with " << recordsInBlob.size() << " records");
+ for (const auto& rec : recordsInBlob) {
+ size_t i = rec.first;
+ const TString& recData = rec.second;
+ auto& blobRange = portionInfo.Records[i].BlobRange;
+ blobRange.BlobId = blobId;
+ Y_VERIFY(blobRange.Offset + blobRange.Size <= accumulatedBlob.size());
+ Y_VERIFY(blobRange.Size == recData.size());
+
+ if (WriteIndexEv->CacheData) {
+ // Save original (non-accumulated) blobs with the corresponding TBlobRanges in order to
+ // put them into cache at commit time
+ WriteIndexEv->IndexChanges->Blobs[blobRange] = recData;
+ }
+ }
+ accumulatedBlob.clear();
+ recordsInBlob.clear();
+ };
+
+ TString accumulatedBlob;
+ TVector<std::pair<size_t, TString>> recordsInBlob;
+
for (auto& portionInfo : indexChanges->AppendedPortions) {
- auto& records = portionInfo.Records;
-
- accumulatedBlob.clear();
- recordsInBlob.clear();
-
+ auto& records = portionInfo.Records;
+
+ accumulatedBlob.clear();
+ recordsInBlob.clear();
+
for (size_t i = 0; i < records.size(); ++i, ++blobsPos) {
- const TString& currentBlob = blobs[blobsPos];
- Y_VERIFY(currentBlob.size());
-
- if ((accumulatedBlob.size() + currentBlob.size() > TLimits::MAX_BLOB_SIZE) ||
- (accumulatedBlob.size() && !BlobGrouppingEnabled))
- {
- fnFlushAcummultedBlob(accumulatedBlob, portionInfo, recordsInBlob);
- }
-
- // Accumulate data chunks into a single blob and save record indices of these chunks
- records[i].BlobRange.Offset = accumulatedBlob.size();
- records[i].BlobRange.Size = currentBlob.size();
- accumulatedBlob.append(currentBlob);
- recordsInBlob.emplace_back(i, currentBlob);
+ const TString& currentBlob = blobs[blobsPos];
+ Y_VERIFY(currentBlob.size());
+
+ if ((accumulatedBlob.size() + currentBlob.size() > TLimits::MAX_BLOB_SIZE) ||
+ (accumulatedBlob.size() && !BlobGrouppingEnabled))
+ {
+ fnFlushAcummultedBlob(accumulatedBlob, portionInfo, recordsInBlob);
+ }
+
+ // Accumulate data chunks into a single blob and save record indices of these chunks
+ records[i].BlobRange.Offset = accumulatedBlob.size();
+ records[i].BlobRange.Size = currentBlob.size();
+ accumulatedBlob.append(currentBlob);
+ recordsInBlob.emplace_back(i, currentBlob);
+ }
+ if (accumulatedBlob.size() != 0) {
+ fnFlushAcummultedBlob(accumulatedBlob, portionInfo, recordsInBlob);
}
- if (accumulatedBlob.size() != 0) {
- fnFlushAcummultedBlob(accumulatedBlob, portionInfo, recordsInBlob);
- }
}
Y_VERIFY(blobsPos == blobs.size());
}
- TUnifiedBlobId DoSendWriteBlobRequest(const TString& data, const TActorContext& ctx) {
- ResourceUsage.Network += data.size();
- return BlobBatch.SendWriteBlobRequest(data, Deadline, ctx);
- }
-
+ TUnifiedBlobId DoSendWriteBlobRequest(const TString& data, const TActorContext& ctx) {
+ ResourceUsage.Network += data.size();
+ return BlobBatch.SendWriteBlobRequest(data, Deadline, ctx);
+ }
+
STFUNC(StateWait) {
switch (ev->GetTypeRewrite()) {
HFunc(TEvBlobStorage::TEvPutResult, Handle);
@@ -257,39 +257,39 @@ public:
}
private:
- ui64 TabletId;
+ ui64 TabletId;
NOlap::TIndexInfo IndexInfo;
TActorId DstActor;
- TBlobBatch BlobBatch;
- bool BlobGrouppingEnabled;
+ TBlobBatch BlobBatch;
+ bool BlobGrouppingEnabled;
TAutoPtr<TEvColumnShard::TEvWrite> WriteEv;
TAutoPtr<TEvPrivate::TEvWriteIndex> WriteIndexEv;
TInstant Deadline;
THashSet<ui32> YellowMoveChannels;
THashSet<ui32> YellowStopChannels;
- TUsage ResourceUsage;
+ TUsage ResourceUsage;
- void SaveResourceUsage() {
+ void SaveResourceUsage() {
if (WriteEv) {
- WriteEv->ResourceUsage.Add(ResourceUsage);
+ WriteEv->ResourceUsage.Add(ResourceUsage);
} else {
- WriteIndexEv->ResourceUsage.Add(ResourceUsage);
+ WriteIndexEv->ResourceUsage.Add(ResourceUsage);
}
- ResourceUsage = TUsage();
+ ResourceUsage = TUsage();
}
void SendResult(const TActorContext& ctx, NKikimrProto::EReplyStatus status) {
- SaveResourceUsage();
+ SaveResourceUsage();
if (WriteEv) {
- LOG_S_DEBUG("Write Blob " << WriteEv->BlobId.ToStringNew() << " Status: " << status);
+ LOG_S_DEBUG("Write Blob " << WriteEv->BlobId.ToStringNew() << " Status: " << status);
WriteEv->PutStatus = status;
- WriteEv->BlobBatch = std::move(BlobBatch);
+ WriteEv->BlobBatch = std::move(BlobBatch);
WriteEv->YellowMoveChannels = TVector<ui32>(YellowMoveChannels.begin(), YellowMoveChannels.end());
WriteEv->YellowStopChannels = TVector<ui32>(YellowStopChannels.begin(), YellowStopChannels.end());
ctx.Send(DstActor, WriteEv.Release());
} else {
WriteIndexEv->PutStatus = status;
- WriteIndexEv->BlobBatch = std::move(BlobBatch);
+ WriteIndexEv->BlobBatch = std::move(BlobBatch);
WriteIndexEv->YellowMoveChannels = TVector<ui32>(YellowMoveChannels.begin(), YellowMoveChannels.end());
WriteIndexEv->YellowStopChannels = TVector<ui32>(YellowStopChannels.begin(), YellowStopChannels.end());
ctx.Send(DstActor, WriteIndexEv.Release());
@@ -297,16 +297,16 @@ private:
}
};
-IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
- const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
- TAutoPtr<TEvColumnShard::TEvWrite> ev, const TInstant& deadline) {
- return new TWriteActor(tabletId, indexTable, dstActor, std::move(blobBatch), blobGrouppingEnabled, ev, {}, deadline);
+IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
+ const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
+ TAutoPtr<TEvColumnShard::TEvWrite> ev, const TInstant& deadline) {
+ return new TWriteActor(tabletId, indexTable, dstActor, std::move(blobBatch), blobGrouppingEnabled, ev, {}, deadline);
}
-IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
- const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
- TAutoPtr<TEvPrivate::TEvWriteIndex> ev, const TInstant& deadline) {
- return new TWriteActor(tabletId, indexTable, dstActor, std::move(blobBatch), blobGrouppingEnabled, {}, ev, deadline);
+IActor* CreateWriteActor(ui64 tabletId, const NOlap::TIndexInfo& indexTable,
+ const TActorId& dstActor, TBlobBatch&& blobBatch, bool blobGrouppingEnabled,
+ TAutoPtr<TEvPrivate::TEvWriteIndex> ev, const TInstant& deadline) {
+ return new TWriteActor(tabletId, indexTable, dstActor, std::move(blobBatch), blobGrouppingEnabled, {}, ev, deadline);
}
}
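SendMultiWriteRequest in the hunk above packs serialized record chunks into blobs that stay under TLimits::MAX_BLOB_SIZE, flushing early when grouping is disabled or the next chunk would overflow the limit, and remembers for each record which blob and offset it landed at. Below is a standalone sketch of the same greedy accumulation; the 8 MB constant, function name, and output format are placeholders, not the real TLimits or TBlobBatch API.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

constexpr size_t kMaxBlobSize = 8 * 1024 * 1024;  // stand-in for TLimits::MAX_BLOB_SIZE

// Greedily packs chunks into blobs no larger than kMaxBlobSize and records,
// for every chunk, which blob it ended up in and at what offset.
std::vector<std::string> GroupChunks(const std::vector<std::string>& chunks,
                                     std::vector<std::pair<size_t, size_t>>& placement) {
    std::vector<std::string> blobs;
    std::string current;
    for (const auto& chunk : chunks) {
        if (!current.empty() && current.size() + chunk.size() > kMaxBlobSize) {
            blobs.push_back(std::move(current));  // flush, like fnFlushAcummultedBlob above
            current.clear();
        }
        placement.emplace_back(blobs.size(), current.size());  // (blob index, offset)
        current.append(chunk);
    }
    if (!current.empty()) {
        blobs.push_back(std::move(current));
    }
    return blobs;
}

Each placement entry plays the role of a TBlobRange here: the blob a record's data was grouped into and its offset inside that blob.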
diff --git a/ydb/core/tx/columnshard/ya.make b/ydb/core/tx/columnshard/ya.make
index f815cde211f..c4c857fe7d1 100644
--- a/ydb/core/tx/columnshard/ya.make
+++ b/ydb/core/tx/columnshard/ya.make
@@ -3,11 +3,11 @@ LIBRARY()
OWNER(g:kikimr)
SRCS(
- blob.cpp
- blob_cache.cpp
- blob_manager.cpp
- blob_manager_db.cpp
- blob_manager_txs.cpp
+ blob.cpp
+ blob_cache.cpp
+ blob_manager.cpp
+ blob_manager_db.cpp
+ blob_manager_txs.cpp
columnshard__init.cpp
columnshard__notify_tx_completion.cpp
columnshard__plan_step.cpp
@@ -15,8 +15,8 @@ SRCS(
columnshard__propose_cancel.cpp
columnshard__propose_transaction.cpp
columnshard__read.cpp
- columnshard__read_blob_ranges.cpp
- columnshard__scan.cpp
+ columnshard__read_blob_ranges.cpp
+ columnshard__scan.cpp
columnshard__write.cpp
columnshard__write_index.cpp
columnshard.cpp
diff --git a/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp b/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp
index 7d5ffbfbb33..28a644503d9 100644
--- a/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp
+++ b/ydb/core/tx/coordinator/coordinator__mediators_confirmations.cpp
@@ -25,17 +25,17 @@ struct TTxCoordinator::TTxMediatorConfirmations : public TTransactionBase<TTxCoo
CompleteTransactions = 0;
NIceDb::TNiceDb db(txc.DB);
- ui64 internalTxGen = txc.Generation;
- ui64 internalTxStep = txc.Step;
-
+ ui64 internalTxGen = txc.Generation;
+ ui64 internalTxStep = txc.Step;
+
for (const auto &txidsx : Confirmations->Acks) {
const TTxId txid = txidsx.first;
auto txit = Self->Transactions.find(txid);
if (txit == Self->Transactions.end()) {
- FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
+ FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
"at tablet# " << Self->TabletID()
<< " gen:step " << internalTxGen << ":" << internalTxStep
- << " Mediator " << mediatorId << " confirmed finish of transaction " << txid << " but transaction wasn't found");
+ << " Mediator " << mediatorId << " confirmed finish of transaction " << txid << " but transaction wasn't found");
for (const TTabletId affected : txidsx.second) {
db.Table<Schema::AffectedSet>().Key(mediatorId, txid, affected).Delete();
}
@@ -46,25 +46,25 @@ struct TTxCoordinator::TTxMediatorConfirmations : public TTransactionBase<TTxCoo
for (const TTabletId affected : txidsx.second) {
THashSet<TTabletId>::size_type result = mediatorAffectedSet.erase(affected);
db.Table<Schema::AffectedSet>().Key(mediatorId, txid, affected).Delete();
- FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
+ FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
"at tablet# " << Self->TabletID()
<< " gen:step " << internalTxGen << ":" << internalTxStep
- << " Confirmed transaction " << txid << " for mediator " << mediatorId << " tablet " << affected << " result=" << result);
+ << " Confirmed transaction " << txid << " for mediator " << mediatorId << " tablet " << affected << " result=" << result);
}
if (mediatorAffectedSet.empty()) {
- FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
+ FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
"at tablet# " << Self->TabletID()
<< " gen:step " << internalTxGen << ":" << internalTxStep
- << " Mediator " << mediatorId << " confirmed finish of transaction " << txid);
+ << " Mediator " << mediatorId << " confirmed finish of transaction " << txid);
txit->second.UnconfirmedAffectedSet.erase(mediatorId);
}
if (txit->second.UnconfirmedAffectedSet.empty()) { // transaction finished
- FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
+ FLOG_DEBUG_S(ctx, NKikimrServices::TX_COORDINATOR,
"at tablet# " << Self->TabletID()
<< " gen:step " << internalTxGen << ":" << internalTxStep
- << " Transaction " << txid << " has been completed");
+ << " Transaction " << txid << " has been completed");
db.Table<Schema::Transaction>().Key(txid).Delete();
Self->Transactions.erase(txit);
++CompleteTransactions;
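For orientation, the bookkeeping in the hunk above boils down to a per-transaction map of mediators to still-unconfirmed tablets: a mediator is done once its set drains, and the transaction completes once no mediators remain. The sketch below uses plain std containers; TToyCoordinator and its members are hypothetical and not the coordinator's real schema or types.

#include <cstdint>
#include <map>
#include <set>

using TTxId = uint64_t;
using TMediatorId = uint64_t;
using TTabletId = uint64_t;

struct TToyCoordinator {
    // txId -> mediatorId -> tablets that have not confirmed yet
    std::map<TTxId, std::map<TMediatorId, std::set<TTabletId>>> Unconfirmed;

    // Returns true when the transaction has no unconfirmed work left and is erased.
    bool Confirm(TTxId tx, TMediatorId mediator, TTabletId tablet) {
        auto txIt = Unconfirmed.find(tx);
        if (txIt == Unconfirmed.end()) {
            return false;  // unknown tx, like the "but transaction wasn't found" branch above
        }
        auto medIt = txIt->second.find(mediator);
        if (medIt != txIt->second.end()) {
            medIt->second.erase(tablet);
            if (medIt->second.empty()) {
                txIt->second.erase(medIt);  // mediator confirmed finish of this transaction
            }
        }
        if (txIt->second.empty()) {
            Unconfirmed.erase(txIt);        // transaction has been completed
            return true;
        }
        return false;
    }
};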
diff --git a/ydb/core/tx/coordinator/coordinator__restart_mediator.cpp b/ydb/core/tx/coordinator/coordinator__restart_mediator.cpp
index 99698688f33..673115d22b0 100644
--- a/ydb/core/tx/coordinator/coordinator__restart_mediator.cpp
+++ b/ydb/core/tx/coordinator/coordinator__restart_mediator.cpp
@@ -31,12 +31,12 @@ struct TTxCoordinator::TTxRestartMediatorQueue : public TTransactionBase<TTxCoor
for (const auto& it : pushToAffectedBuffer) {
TTransaction& transaction = Self->Transactions[it.first];
THashSet<TTabletId>& unconfirmedAffectedSet = transaction.UnconfirmedAffectedSet[MediatorId];
- Y_VERIFY(unconfirmedAffectedSet.size() == it.second.size(),
- "Inconsistent affected set in mem in DB for txId %" PRIu64, it.first);
+ Y_VERIFY(unconfirmedAffectedSet.size() == it.second.size(),
+ "Inconsistent affected set in mem in DB for txId %" PRIu64, it.first);
for (const TTabletId affectedTabletId : it.second) {
Y_VERIFY(unconfirmedAffectedSet.contains(affectedTabletId),
- "Inconsistent affected set in mem in DB for txId %" PRIu64 " missing tabletId %" PRIu64,
- it.first, affectedTabletId);
+ "Inconsistent affected set in mem in DB for txId %" PRIu64 " missing tabletId %" PRIu64,
+ it.first, affectedTabletId);
}
}
@@ -74,7 +74,7 @@ bool TTxCoordinator::RestoreMediatorInfo(TTabletId mediatorId, TVector<TAutoPtr<
if (!rowset.IsReady())
return false;
- // Later we will need this to be sorted by stepId
+ // Later we will need this to be sorted by stepId
TMap<TStepId, TAutoPtr<TMediatorStep>> mediatorSteps;
while (!rowset.EndOfSet()) {
diff --git a/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp b/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp
index c66967ca646..26a916bdff9 100644
--- a/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp
+++ b/ydb/core/tx/coordinator/coordinator__restore_transaction.cpp
@@ -3,8 +3,8 @@
#include <ydb/core/base/appdata.h>
#include <ydb/core/tablet/tablet_exception.h>
-#include <util/stream/file.h>
-
+#include <util/stream/file.h>
+
namespace NKikimr {
namespace NFlatTxCoordinator {
@@ -59,13 +59,13 @@ struct TTxCoordinator::TTxRestoreTransactions : public TTransactionBase<TTxCoord
}
if (errors > 0) {
- // DB is corrupt. Make a dump and stop
- const NScheme::TTypeRegistry& tr = *AppData(ctx)->TypeRegistry;
+ // DB is corrupt. Make a dump and stop
+ const NScheme::TTypeRegistry& tr = *AppData(ctx)->TypeRegistry;
TString dbDumpFile = Sprintf("/tmp/coordinator_db_dump_%" PRIu64 ".%" PRIi32, Self->TabletID(), getpid());
TFixedBufferFileOutput out(dbDumpFile);
- txc.DB.DebugDump(out, tr);
- out.Finish();
- Cerr << "Coordinator DB dumped to " << dbDumpFile;
+ txc.DB.DebugDump(out, tr);
+ out.Finish();
+ Cerr << "Coordinator DB dumped to " << dbDumpFile;
Sleep(TDuration::Seconds(10));
Y_FAIL("Transaction(s) not found!");
}
diff --git a/ydb/core/tx/coordinator/coordinator_impl.cpp b/ydb/core/tx/coordinator/coordinator_impl.cpp
index 952cc15ca82..b4eeb184e29 100644
--- a/ydb/core/tx/coordinator/coordinator_impl.cpp
+++ b/ydb/core/tx/coordinator/coordinator_impl.cpp
@@ -50,16 +50,16 @@ const ui32 TTxCoordinator::Schema::CurrentVersion = 1;
TTxCoordinator::TTxCoordinator(TTabletStorageInfo *info, const TActorId &tablet)
: TActor(&TThis::StateInit)
, TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory)
-#ifdef COORDINATOR_LOG_TO_FILE
- , DebugName(Sprintf("/tmp/coordinator_db_log_%" PRIu64 ".%" PRIi32 ".%" PRIu64 ".gz", TabletID(), getpid(), tablet.LocalId()))
- , DebugLogFile(DebugName)
- , DebugLog(&DebugLogFile, ZLib::GZip, 1)
-#endif
+#ifdef COORDINATOR_LOG_TO_FILE
+ , DebugName(Sprintf("/tmp/coordinator_db_log_%" PRIu64 ".%" PRIi32 ".%" PRIu64 ".gz", TabletID(), getpid(), tablet.LocalId()))
+ , DebugLogFile(DebugName)
+ , DebugLog(&DebugLogFile, ZLib::GZip, 1)
+#endif
{
-#ifdef COORDINATOR_LOG_TO_FILE
+#ifdef COORDINATOR_LOG_TO_FILE
// HACK
Cerr << "Coordinator LOG will be dumped to " << DebugName << Endl;
-#endif
+#endif
Config.PlanAhead = 50;
Config.Resolution = 1250;
diff --git a/ydb/core/tx/coordinator/coordinator_impl.h b/ydb/core/tx/coordinator/coordinator_impl.h
index 90e6033f489..39018aff1e7 100644
--- a/ydb/core/tx/coordinator/coordinator_impl.h
+++ b/ydb/core/tx/coordinator/coordinator_impl.h
@@ -20,7 +20,7 @@
#include <util/generic/hash_set.h>
#include <util/stream/file.h>
-#include <util/stream/zlib.h>
+#include <util/stream/zlib.h>
#include <algorithm>
@@ -142,9 +142,9 @@ using NTabletFlatExecutor::ITransaction;
using NTabletFlatExecutor::TTransactionBase;
using NTabletFlatExecutor::TTransactionContext;
-//#define COORDINATOR_LOG_TO_FILE
-
-#ifdef COORDINATOR_LOG_TO_FILE
+//#define COORDINATOR_LOG_TO_FILE
+
+#ifdef COORDINATOR_LOG_TO_FILE
#define FLOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream) \
do { \
::NActors::NLog::TSettings *mSettings = (::NActors::NLog::TSettings*)((actorCtxOrSystem).LoggerSettings()); \
@@ -157,10 +157,10 @@ do { \
} \
} while(0) \
/**/
-#else
-#define FLOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream) \
- LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream)
-#endif
+#else
+#define FLOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream) \
+ LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream)
+#endif
#define FLOG_LOG_S(actorCtxOrSystem, priority, component, stream) FLOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, 0ull, stream)
#define FLOG_DEBUG_S(actorCtxOrSystem, component, stream) FLOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, component, stream)
@@ -418,12 +418,12 @@ private:
bool Stopping = false;
-#ifdef COORDINATOR_LOG_TO_FILE
+#ifdef COORDINATOR_LOG_TO_FILE
// HACK
TString DebugName;
TFixedBufferFileOutput DebugLogFile;
- TZLibCompress DebugLog;
-#endif
+ TZLibCompress DebugLog;
+#endif
void Die(const TActorContext &ctx) override {
for (TMediatorsIndex::iterator it = Mediators.begin(), end = Mediators.end(); it != end; ++it) {
diff --git a/ydb/core/tx/datashard/check_data_tx_unit.cpp b/ydb/core/tx/datashard/check_data_tx_unit.cpp
index 07a9144af71..deb9f1f903a 100644
--- a/ydb/core/tx/datashard/check_data_tx_unit.cpp
+++ b/ydb/core/tx/datashard/check_data_tx_unit.cpp
@@ -156,10 +156,10 @@ EExecutionStatus TCheckDataTxUnit::Execute(TOperation::TPtr op,
for (const auto& cell : key.Key->Range.From) {
keySize += cell.Size();
}
- if (keySize > NLimits::MaxWriteKeySize) {
+ if (keySize > NLimits::MaxWriteKeySize) {
TString err = TStringBuilder()
<< "Operation " << *op << " writes key of " << keySize
- << " bytes which exceeds limit " << NLimits::MaxWriteKeySize
+ << " bytes which exceeds limit " << NLimits::MaxWriteKeySize
<< " bytes at " << DataShard.TabletID();
BuildResult(op, NKikimrTxDataShard::TEvProposeTransactionResult::BAD_REQUEST)
@@ -174,7 +174,7 @@ EExecutionStatus TCheckDataTxUnit::Execute(TOperation::TPtr op,
if (col.Operation == TKeyDesc::EColumnOperation::Set ||
col.Operation == TKeyDesc::EColumnOperation::InplaceUpdate)
{
- if (col.ImmediateUpdateSize > NLimits::MaxWriteValueSize) {
+ if (col.ImmediateUpdateSize > NLimits::MaxWriteValueSize) {
TString err = TStringBuilder()
<< "Transaction write value of " << col.ImmediateUpdateSize
<< " bytes is larger than the allowed threshold";
diff --git a/ydb/core/tx/datashard/complete_data_tx_unit.cpp b/ydb/core/tx/datashard/complete_data_tx_unit.cpp
index bab64233e02..c086fa70119 100644
--- a/ydb/core/tx/datashard/complete_data_tx_unit.cpp
+++ b/ydb/core/tx/datashard/complete_data_tx_unit.cpp
@@ -94,8 +94,8 @@ void TCompleteOperationUnit::CompleteOperation(TOperation::TPtr op,
if (result) {
result->Record.SetProposeLatency(duration.MilliSeconds());
- DataShard.FillExecutionStats(op->GetExecutionProfile(), *result);
-
+ DataShard.FillExecutionStats(op->GetExecutionProfile(), *result);
+
if (!gSkipRepliesFailPoint.Check(DataShard.TabletID(), op->GetTxId()))
DataShard.SendResult(ctx, result, op->GetTarget(), op->GetStep(), op->GetTxId());
}
diff --git a/ydb/core/tx/datashard/const.h b/ydb/core/tx/datashard/const.h
index 05574bd7bfb..d49d3d6fa55 100644
--- a/ydb/core/tx/datashard/const.h
+++ b/ydb/core/tx/datashard/const.h
@@ -11,10 +11,10 @@ constexpr ui64 MEMORY_REQUEST_FACTOR = 8;
// TODO: make configurable
constexpr ui64 MAX_REORDER_TX_KEYS = 100;
-namespace NLimits {
- static constexpr ui64 MaxWriteKeySize = 1024 * 1024 + 1024; // 1MB + small delta (for old ugc tests)
+namespace NLimits {
+ static constexpr ui64 MaxWriteKeySize = 1024 * 1024 + 1024; // 1MB + small delta (for old ugc tests)
static constexpr ui64 MaxWriteValueSize = 16 * 1024 * 1024; // 16MB
-}
-
+}
+
} // namespace NDataShard
} // namespace NKikimr
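The check_data_tx_unit.cpp hunk earlier in this patch sums the sizes of the key cells and rejects the proposal when the total exceeds the NLimits::MaxWriteKeySize constant defined here. A minimal sketch of that check, with a stand-in cell type rather than the real TCell:

#include <cstdint>
#include <vector>

struct TCellStub {
    uint64_t Bytes = 0;
    uint64_t Size() const { return Bytes; }
};

constexpr uint64_t kMaxWriteKeySize = 1024 * 1024 + 1024;  // mirrors NLimits::MaxWriteKeySize

// Returns false when the composite key exceeds the write-key limit,
// which in the real unit leads to a BAD_REQUEST proposal result.
bool KeyFitsLimit(const std::vector<TCellStub>& keyCells) {
    uint64_t keySize = 0;
    for (const auto& cell : keyCells) {
        keySize += cell.Size();
    }
    return keySize <= kMaxWriteKeySize;
}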
diff --git a/ydb/core/tx/datashard/datashard.cpp b/ydb/core/tx/datashard/datashard.cpp
index 2ec289713c6..d86568b4a15 100644
--- a/ydb/core/tx/datashard/datashard.cpp
+++ b/ydb/core/tx/datashard/datashard.cpp
@@ -80,17 +80,17 @@ private:
};
-class TDatashardKeySampler : public NMiniKQL::IKeyAccessSampler {
+class TDatashardKeySampler : public NMiniKQL::IKeyAccessSampler {
TDataShard& Self;
-public:
+public:
TDatashardKeySampler(TDataShard& self) : Self(self)
- {}
- void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) override {
- Self.SampleKeyAccess(tableId, key);
- }
-};
-
-
+ {}
+ void AddSample(const TTableId& tableId, const TArrayRef<const TCell>& key) override {
+ Self.SampleKeyAccess(tableId, key);
+ }
+};
+
+
TDataShard::TDataShard(const TActorId &tablet, TTabletStorageInfo *info)
: TActor(&TThis::StateInit)
, TTabletExecutedFlat(info, tablet, new TDataShardMiniKQLFactory(this))
@@ -115,9 +115,9 @@ TDataShard::TDataShard(const TActorId &tablet, TTabletStorageInfo *info)
, LastChangeRecordGroup(1)
, TxReadSizeLimit(0)
, StatisticsDisabled(0)
- , DisabledKeySampler(new NMiniKQL::TNoopKeySampler())
- , EnabledKeySampler(new TDatashardKeySampler(*this))
- , CurrentKeySampler(DisabledKeySampler)
+ , DisabledKeySampler(new NMiniKQL::TNoopKeySampler())
+ , EnabledKeySampler(new TDatashardKeySampler(*this))
+ , CurrentKeySampler(DisabledKeySampler)
, TransQueue(this)
, OutReadSets(this)
, Pipeline(this)
@@ -129,15 +129,15 @@ TDataShard::TDataShard(const TActorId &tablet, TTabletStorageInfo *info)
, MaxTxLagMilliseconds(5*60*1000, 0, 30*24*3600*1000ll)
, CanCancelROWithReadSets(0, 0, 1)
, PerShardReadSizeLimit(5368709120, 0, 107374182400)
- , CpuUsageReportThreshlodPercent(60, -1, 146)
- , CpuUsageReportIntervalSeconds(60, 0, 365*86400)
- , HighDataSizeReportThreshlodBytes(10ull<<30, -1, Max<i64>())
- , HighDataSizeReportIntervalSeconds(60, 0, 365*86400)
+ , CpuUsageReportThreshlodPercent(60, -1, 146)
+ , CpuUsageReportIntervalSeconds(60, 0, 365*86400)
+ , HighDataSizeReportThreshlodBytes(10ull<<30, -1, Max<i64>())
+ , HighDataSizeReportIntervalSeconds(60, 0, 365*86400)
, DataTxProfileLogThresholdMs(0, 0, 86400000)
, DataTxProfileBufferThresholdMs(0, 0, 86400000)
, DataTxProfileBufferSize(0, 1000, 100)
- , ReadColumnsScanEnabled(1, 0, 1)
- , ReadColumnsScanInUserPool(0, 0, 1)
+ , ReadColumnsScanEnabled(1, 0, 1)
+ , ReadColumnsScanInUserPool(0, 0, 1)
, BackupReadAheadLo(0, 0, 64*1024*1024)
, BackupReadAheadHi(0, 0, 128*1024*1024)
, DataShardSysTables(InitDataShardSysTables(this))
@@ -290,14 +290,14 @@ void TDataShard::OnActivateExecutor(const TActorContext& ctx) {
AppData(ctx)->Icb->RegisterSharedControl(CanCancelROWithReadSets, "DataShardControls.CanCancelROWithReadSets");
AppData(ctx)->Icb->RegisterSharedControl(PerShardReadSizeLimit, "TxLimitControls.PerShardReadSizeLimit");
- AppData(ctx)->Icb->RegisterSharedControl(CpuUsageReportThreshlodPercent, "DataShardControls.CpuUsageReportThreshlodPercent");
- AppData(ctx)->Icb->RegisterSharedControl(CpuUsageReportIntervalSeconds, "DataShardControls.CpuUsageReportIntervalSeconds");
- AppData(ctx)->Icb->RegisterSharedControl(HighDataSizeReportThreshlodBytes, "DataShardControls.HighDataSizeReportThreshlodBytes");
- AppData(ctx)->Icb->RegisterSharedControl(HighDataSizeReportIntervalSeconds, "DataShardControls.HighDataSizeReportIntervalSeconds");
-
- AppData(ctx)->Icb->RegisterSharedControl(ReadColumnsScanEnabled, "DataShardControls.ReadColumnsScanEnabled");
- AppData(ctx)->Icb->RegisterSharedControl(ReadColumnsScanInUserPool, "DataShardControls.ReadColumnsScanInUserPool");
-
+ AppData(ctx)->Icb->RegisterSharedControl(CpuUsageReportThreshlodPercent, "DataShardControls.CpuUsageReportThreshlodPercent");
+ AppData(ctx)->Icb->RegisterSharedControl(CpuUsageReportIntervalSeconds, "DataShardControls.CpuUsageReportIntervalSeconds");
+ AppData(ctx)->Icb->RegisterSharedControl(HighDataSizeReportThreshlodBytes, "DataShardControls.HighDataSizeReportThreshlodBytes");
+ AppData(ctx)->Icb->RegisterSharedControl(HighDataSizeReportIntervalSeconds, "DataShardControls.HighDataSizeReportIntervalSeconds");
+
+ AppData(ctx)->Icb->RegisterSharedControl(ReadColumnsScanEnabled, "DataShardControls.ReadColumnsScanEnabled");
+ AppData(ctx)->Icb->RegisterSharedControl(ReadColumnsScanInUserPool, "DataShardControls.ReadColumnsScanInUserPool");
+
AppData(ctx)->Icb->RegisterSharedControl(BackupReadAheadLo, "DataShardControls.BackupReadAheadLo");
AppData(ctx)->Icb->RegisterSharedControl(BackupReadAheadHi, "DataShardControls.BackupReadAheadHi");
@@ -428,17 +428,17 @@ void TDataShard::SendResult(const TActorContext &ctx,
}
void TDataShard::FillExecutionStats(const TExecutionProfile& execProfile, TEvDataShard::TEvProposeTransactionResult& result) const {
- TDuration totalCpuTime;
- for (const auto& unit : execProfile.UnitProfiles) {
- totalCpuTime += unit.second.ExecuteTime;
- totalCpuTime += unit.second.CompleteTime;
- }
- result.Record.MutableTxStats()->MutablePerShardStats()->Clear();
- auto& stats = *result.Record.MutableTxStats()->AddPerShardStats();
- stats.SetShardId(TabletID());
- stats.SetCpuTimeUsec(totalCpuTime.MicroSeconds());
-}
-
+ TDuration totalCpuTime;
+ for (const auto& unit : execProfile.UnitProfiles) {
+ totalCpuTime += unit.second.ExecuteTime;
+ totalCpuTime += unit.second.CompleteTime;
+ }
+ result.Record.MutableTxStats()->MutablePerShardStats()->Clear();
+ auto& stats = *result.Record.MutableTxStats()->AddPerShardStats();
+ stats.SetShardId(TabletID());
+ stats.SetCpuTimeUsec(totalCpuTime.MicroSeconds());
+}
+
ui64 TDataShard::AllocateChangeRecordOrder(NIceDb::TNiceDb& db) {
const ui64 result = NextChangeRecordOrder++;
PersistSys(db, Schema::Sys_NextChangeRecordOrder, NextChangeRecordOrder);
@@ -1106,34 +1106,34 @@ void TDataShard::SetTableUpdateTime(const TTableId& tableId, TInstant ts) {
}
void TDataShard::SampleKeyAccess(const TTableId& tableId, const TArrayRef<const TCell>& row) {
- Y_VERIFY(!TSysTables::IsSystemTable(tableId));
-
- auto iter = TableInfos.find(tableId.PathId.LocalPathId);
- Y_VERIFY(iter != TableInfos.end());
-
- const ui64 samplingKeyPrefixSize = row.size();
- TArrayRef<const TCell> key(row.data(), samplingKeyPrefixSize);
- iter->second->Stats.AccessStats.Add(key);
-}
-
+ Y_VERIFY(!TSysTables::IsSystemTable(tableId));
+
+ auto iter = TableInfos.find(tableId.PathId.LocalPathId);
+ Y_VERIFY(iter != TableInfos.end());
+
+ const ui64 samplingKeyPrefixSize = row.size();
+ TArrayRef<const TCell> key(row.data(), samplingKeyPrefixSize);
+ iter->second->Stats.AccessStats.Add(key);
+}
+
NMiniKQL::IKeyAccessSampler::TPtr TDataShard::GetKeyAccessSampler() {
- return CurrentKeySampler;
-}
-
+ return CurrentKeySampler;
+}
+
void TDataShard::EnableKeyAccessSampling(const TActorContext &ctx, TInstant until) {
- if (CurrentKeySampler == DisabledKeySampler) {
- for (auto& table : TableInfos) {
- table.second->Stats.AccessStats.Clear();
- }
- CurrentKeySampler = EnabledKeySampler;
- StartedKeyAccessSamplingAt = AppData(ctx)->TimeProvider->Now();
- LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Started key access sampling at datashard: " << TabletID());
+ if (CurrentKeySampler == DisabledKeySampler) {
+ for (auto& table : TableInfos) {
+ table.second->Stats.AccessStats.Clear();
+ }
+ CurrentKeySampler = EnabledKeySampler;
+ StartedKeyAccessSamplingAt = AppData(ctx)->TimeProvider->Now();
+ LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Started key access sampling at datashard: " << TabletID());
} else {
LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Extended key access sampling at datashard: " << TabletID());
- }
+ }
StopKeyAccessSamplingAt = until;
-}
-
+}
+
bool TDataShard::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) {
if (!Executor() || !Executor()->GetStats().IsActive)
return false;
@@ -1157,13 +1157,13 @@ bool TDataShard::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TAc
return true;
}
- if (action == "key-access-sample") {
- TDuration duration = TDuration::Seconds(120);
- EnableKeyAccessSampling(ctx, ctx.Now() + duration);
- ctx.Send(ev->Sender, new NMon::TEvRemoteHttpInfoRes("Enabled key access sampling for " + duration.ToString()));
- return true;
- }
-
+ if (action == "key-access-sample") {
+ TDuration duration = TDuration::Seconds(120);
+ EnableKeyAccessSampling(ctx, ctx.Now() + duration);
+ ctx.Send(ev->Sender, new NMon::TEvRemoteHttpInfoRes("Enabled key access sampling for " + duration.ToString()));
+ return true;
+ }
+
ctx.Send(ev->Sender, new NMon::TEvRemoteBinaryInfoRes(NMonitoring::HTTPNOTFOUND));
return true;
}
@@ -1348,15 +1348,15 @@ void TDataShard::Handle(TEvDataShard::TEvSchemaChangedResult::TPtr& ev, const TA
}
void TDataShard::Handle(TEvDataShard::TEvStateChangedResult::TPtr& ev, const TActorContext& ctx) {
- Y_UNUSED(ev);
+ Y_UNUSED(ev);
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
"Handle TEvStateChangedResult "
<< " datashard " << TabletID()
<< " state " << DatashardStateName(State));
- // TODO: implement
+ // TODO: implement
NTabletPipe::CloseAndForgetClient(SelfId(), StateReportPipe);
-}
-
+}
+
bool TDataShard::CheckDataTxReject(const TString& opDescr,
const TActorContext &ctx,
NKikimrTxDataShard::TEvProposeTransactionResult::EStatus &rejectStatus,
@@ -1365,7 +1365,7 @@ bool TDataShard::CheckDataTxReject(const TString& opDescr,
bool reject = false;
rejectStatus = NKikimrTxDataShard::TEvProposeTransactionResult::OVERLOADED;
TVector<TString> rejectReasons;
-
+
// In v0.5 reject all transactions on split Src after receiving EvSplit
if (State == TShardState::SplitSrcWaitForNoTxInFlight ||
State == TShardState::SplitSrcMakeSnapshot ||
@@ -1391,13 +1391,13 @@ bool TDataShard::CheckDataTxReject(const TString& opDescr,
<< "is in process of mvcc state change"
<< " state " << DatashardStateName(State));
}
-
+
if (Pipeline.HasDrop()) {
reject = true;
rejectReasons.push_back("is in process of drop");
rejectStatus = NKikimrTxDataShard::TEvProposeTransactionResult::ERROR;
}
-
+
ui64 txInfly = TxInFly();
TDuration lag = GetDataTxCompleteLag();
if (txInfly > 1 && lag > TDuration::MilliSeconds(MaxTxLagMilliseconds)) {
@@ -1406,7 +1406,7 @@ bool TDataShard::CheckDataTxReject(const TString& opDescr,
<< "lags behind, lag: " << lag
<< " in-flight tx count: " << txInfly);
}
-
+
const float rejectProbabilty = Executor()->GetRejectProbability();
if (!reject && rejectProbabilty > 0) {
float rnd = AppData(ctx)->RandomProvider->GenRandReal2();
@@ -1414,13 +1414,13 @@ bool TDataShard::CheckDataTxReject(const TString& opDescr,
if (reject)
rejectReasons.push_back("decided to reject due to given RejectProbability");
}
-
+
size_t totalInFly = (TxInFly() + ImmediateInFly() + ProposeQueue.Size() + TxWaiting());
if (totalInFly > GetMaxTxInFly()) {
reject = true;
rejectReasons.push_back("MaxTxInFly was exceeded");
}
-
+
if (!reject && Stopping) {
reject = true;
rejectReasons.push_back("is restarting");
@@ -1445,8 +1445,8 @@ bool TDataShard::CheckDataTxReject(const TString& opDescr,
}
return reject;
-}
-
+}
+
bool TDataShard::CheckDataTxRejectAndReply(TEvDataShard::TEvProposeTransaction* msg, const TActorContext& ctx)
{
switch (msg->GetTxKind()) {
@@ -1459,20 +1459,20 @@ bool TDataShard::CheckDataTxRejectAndReply(TEvDataShard::TEvProposeTransaction*
default:
return false;
}
-
+
TString txDescr = TStringBuilder() << "data TxId " << msg->GetTxId();
-
+
NKikimrTxDataShard::TEvProposeTransactionResult::EStatus rejectStatus;
TString rejectReason;
bool reject = CheckDataTxReject(txDescr, ctx, rejectStatus, rejectReason);
-
+
if (reject) {
THolder<TEvDataShard::TEvProposeTransactionResult> result =
THolder(new TEvDataShard::TEvProposeTransactionResult(msg->GetTxKind(),
TabletID(),
msg->GetTxId(),
rejectStatus));
-
+
result->AddError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE, rejectReason);
LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, rejectReason);
@@ -1480,8 +1480,8 @@ bool TDataShard::CheckDataTxRejectAndReply(TEvDataShard::TEvProposeTransaction*
IncCounter(COUNTER_PREPARE_OVERLOADED);
IncCounter(COUNTER_PREPARE_COMPLETE);
return true;
- }
-
+ }
+
return false;
}
@@ -1562,33 +1562,33 @@ void TDataShard::Handle(TEvDataShard::TEvProposeTransactionAttach::TPtr &ev, con
void TDataShard::HandleAsFollower(TEvDataShard::TEvProposeTransaction::TPtr &ev, const TActorContext &ctx) {
IncCounter(COUNTER_PREPARE_REQUEST);
-
+
if (TxInFly() > GetMaxTxInFly()) {
- THolder<TEvDataShard::TEvProposeTransactionResult> result =
+ THolder<TEvDataShard::TEvProposeTransactionResult> result =
THolder(new TEvDataShard::TEvProposeTransactionResult(ev->Get()->GetTxKind(), TabletID(),
ev->Get()->GetTxId(), NKikimrTxDataShard::TEvProposeTransactionResult::OVERLOADED));
- ctx.Send(ev->Get()->GetSource(), result.Release());
+ ctx.Send(ev->Get()->GetSource(), result.Release());
IncCounter(COUNTER_PREPARE_OVERLOADED);
IncCounter(COUNTER_PREPARE_COMPLETE);
- return;
- }
-
- if (ev->Get()->GetTxKind() == NKikimrTxDataShard::TX_KIND_DATA) {
+ return;
+ }
+
+ if (ev->Get()->GetTxKind() == NKikimrTxDataShard::TX_KIND_DATA) {
ProposeTransaction(std::move(ev), ctx);
- return;
- }
-
+ return;
+ }
+
THolder<TEvDataShard::TEvProposeTransactionResult> result
= THolder(new TEvDataShard::TEvProposeTransactionResult(ev->Get()->GetTxKind(),
TabletID(),
ev->Get()->GetTxId(),
NKikimrTxDataShard::TEvProposeTransactionResult::ERROR));
- result->AddError(NKikimrTxDataShard::TError::BAD_TX_KIND, "Unsupported transaction kind");
- ctx.Send(ev->Get()->GetSource(), result.Release());
+ result->AddError(NKikimrTxDataShard::TError::BAD_TX_KIND, "Unsupported transaction kind");
+ ctx.Send(ev->Get()->GetSource(), result.Release());
IncCounter(COUNTER_PREPARE_ERROR);
IncCounter(COUNTER_PREPARE_COMPLETE);
-}
-
+}
+
void TDataShard::CheckDelayedProposeQueue(const TActorContext &ctx) {
if (DelayedProposeQueue && !Pipeline.HasProposeDelayers()) {
for (auto& ev : DelayedProposeQueue) {
@@ -1638,16 +1638,16 @@ void TDataShard::Handle(TEvTxProcessing::TEvReadSet::TPtr &ev, const TActorConte
ui64 sender = ev->Get()->Record.GetTabletSource();
ui64 dest = ev->Get()->Record.GetTabletDest();
ui64 producer = ev->Get()->Record.GetTabletProducer();
- ui64 txId = ev->Get()->Record.GetTxId();
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Receive RS at %" PRIu64 " source %" PRIu64 " dest %" PRIu64 " producer %" PRIu64 " txId %" PRIu64,
- TabletID(), sender, dest, producer, txId);
+ ui64 txId = ev->Get()->Record.GetTxId();
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Receive RS at %" PRIu64 " source %" PRIu64 " dest %" PRIu64 " producer %" PRIu64 " txId %" PRIu64,
+ TabletID(), sender, dest, producer, txId);
IncCounter(COUNTER_READSET_RECEIVED_COUNT);
IncCounter(COUNTER_READSET_RECEIVED_SIZE, ev->Get()->Record.GetReadSet().size());
Execute(new TTxReadSet(this, ev), ctx);
}
void TDataShard::Handle(TEvTxProcessing::TEvReadSetAck::TPtr &ev, const TActorContext &ctx) {
- OutReadSets.SaveAck(ctx, ev->Release());
+ OutReadSets.SaveAck(ctx, ev->Release());
// progress one more Tx to force delayed schema operations
if (Pipeline.HasSchemaOperation() && OutReadSets.Empty()) {
@@ -1725,12 +1725,12 @@ void TDataShard::Handle(TEvPrivate::TEvRegisterScanActor::TPtr &ev, const TActor
}
void TDataShard::Handle(TEvPrivate::TEvScanStats::TPtr& ev, const TActorContext &ctx) {
- Y_UNUSED(ctx);
-
- TabletCounters->Cumulative()[COUNTER_SCANNED_ROWS].Increment(ev->Get()->Rows);
- TabletCounters->Cumulative()[COUNTER_SCANNED_BYTES].Increment(ev->Get()->Bytes);
-}
-
+ Y_UNUSED(ctx);
+
+ TabletCounters->Cumulative()[COUNTER_SCANNED_ROWS].Increment(ev->Get()->Rows);
+ TabletCounters->Cumulative()[COUNTER_SCANNED_BYTES].Increment(ev->Get()->Bytes);
+}
+
void TDataShard::Handle(TEvPrivate::TEvPersistScanState::TPtr& ev, const TActorContext &ctx) {
TabletCounters->Cumulative()[COUNTER_SCANNED_ROWS].Increment(ev->Get()->Rows);
TabletCounters->Cumulative()[COUNTER_SCANNED_BYTES].Increment(ev->Get()->Bytes);
@@ -1739,7 +1739,7 @@ void TDataShard::Handle(TEvPrivate::TEvPersistScanState::TPtr& ev, const TActorC
void TDataShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActorContext &ctx) {
Y_VERIFY(ev->Get()->Leader, "Unexpectedly connected to follower of tablet %" PRIu64, ev->Get()->TabletId);
-
+
if (ev->Get()->ClientId == SchemeShardPipe) {
if (!TransQueue.HasNotAckedSchemaTx()) {
LOG_ERROR(ctx, NKikimrServices::TX_DATASHARD,
@@ -1753,46 +1753,46 @@ void TDataShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActo
return;
}
- if (ev->Get()->Status != NKikimrProto::OK) {
- if (ev->Get()->ClientId == StateReportPipe) {
+ if (ev->Get()->Status != NKikimrProto::OK) {
+ if (ev->Get()->ClientId == StateReportPipe) {
StateReportPipe = TActorId();
- ReportState(ctx, State);
- return;
- }
-
- if (ev->Get()->ClientId == DbStatsReportPipe) {
+ ReportState(ctx, State);
+ return;
+ }
+
+ if (ev->Get()->ClientId == DbStatsReportPipe) {
DbStatsReportPipe = TActorId();
- return;
- }
+ return;
+ }
if (ev->Get()->ClientId == TableResolvePipe) {
TableResolvePipe = TActorId();
ResolveTablePath(ctx);
return;
}
- }
-
- if (LoanReturnTracker.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
- if (ev->Get()->Status != NKikimrProto::OK) {
- if (!ev->Get()->Dead) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- "Resending loan returns from " << TabletID() << " to " << ev->Get()->TabletId);
- LoanReturnTracker.ResendLoans(ev->Get()->TabletId, ctx);
- } else {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- "Auto-Acking loan returns to dead " << ev->Get()->TabletId << " from " << TabletID());
- LoanReturnTracker.AutoAckLoans(ev->Get()->TabletId, ctx);
- }
- }
- }
-
- // Resend split-related messages if needed
- if (SplitSrcSnapshotSender.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
- if (ev->Get()->Status != NKikimrProto::OK) {
- SplitSrcSnapshotSender.DoSend(ev->Get()->TabletId, ctx);
- }
- }
-
+ }
+
+ if (LoanReturnTracker.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
+ if (ev->Get()->Status != NKikimrProto::OK) {
+ if (!ev->Get()->Dead) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ "Resending loan returns from " << TabletID() << " to " << ev->Get()->TabletId);
+ LoanReturnTracker.ResendLoans(ev->Get()->TabletId, ctx);
+ } else {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ "Auto-Acking loan returns to dead " << ev->Get()->TabletId << " from " << TabletID());
+ LoanReturnTracker.AutoAckLoans(ev->Get()->TabletId, ctx);
+ }
+ }
+ }
+
+ // Resend split-related messages if needed
+ if (SplitSrcSnapshotSender.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
+ if (ev->Get()->Status != NKikimrProto::OK) {
+ SplitSrcSnapshotSender.DoSend(ev->Get()->TabletId, ctx);
+ }
+ }
+
if (ChangeSenderActivator.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
if (ev->Get()->Status != NKikimrProto::OK) {
if (!ev->Get()->Dead) {
@@ -1804,12 +1804,12 @@ void TDataShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActo
}
if (!PipeClientCache->OnConnect(ev)) {
- if (ev->Get()->Dead) {
- AckRSToDeletedTablet(ev->Get()->TabletId, ctx);
- } else {
- LOG_NOTICE(ctx, NKikimrServices::TX_DATASHARD, "Failed to connect to tablet %" PRIu64 " from tablet %" PRIu64, ev->Get()->TabletId, TabletID());
- RestartPipeRS(ev->Get()->TabletId, ctx);
- }
+ if (ev->Get()->Dead) {
+ AckRSToDeletedTablet(ev->Get()->TabletId, ctx);
+ } else {
+ LOG_NOTICE(ctx, NKikimrServices::TX_DATASHARD, "Failed to connect to tablet %" PRIu64 " from tablet %" PRIu64, ev->Get()->TabletId, TabletID());
+ RestartPipeRS(ev->Get()->TabletId, ctx);
+ }
} else {
LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Connected to tablet %" PRIu64 " from tablet %" PRIu64, ev->Get()->TabletId, TabletID());
}
@@ -1826,29 +1826,29 @@ void TDataShard::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TActo
return;
}
- if (ev->Get()->ClientId == StateReportPipe) {
+ if (ev->Get()->ClientId == StateReportPipe) {
StateReportPipe = TActorId();
- ReportState(ctx, State);
- return;
- }
-
- if (ev->Get()->ClientId == DbStatsReportPipe) {
+ ReportState(ctx, State);
+ return;
+ }
+
+ if (ev->Get()->ClientId == DbStatsReportPipe) {
DbStatsReportPipe = TActorId();
- return;
- }
-
- // Resend loan-related messages if needed
- if (LoanReturnTracker.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- "Resending loan returns from " << TabletID() << " to " << ev->Get()->TabletId);
- LoanReturnTracker.ResendLoans(ev->Get()->TabletId, ctx);
- }
-
- // Resend split-related messages if needed
- if (SplitSrcSnapshotSender.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
- SplitSrcSnapshotSender.DoSend(ev->Get()->TabletId, ctx);
- }
-
+ return;
+ }
+
+ // Resend loan-related messages if needed
+ if (LoanReturnTracker.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ "Resending loan returns from " << TabletID() << " to " << ev->Get()->TabletId);
+ LoanReturnTracker.ResendLoans(ev->Get()->TabletId, ctx);
+ }
+
+ // Resend split-related messages if needed
+ if (SplitSrcSnapshotSender.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
+ SplitSrcSnapshotSender.DoSend(ev->Get()->TabletId, ctx);
+ }
+
if (ChangeSenderActivator.Has(ev->Get()->TabletId, ev->Get()->ClientId)) {
ChangeSenderActivator.DoSend(ev->Get()->TabletId, ctx);
}
@@ -1868,11 +1868,11 @@ void TDataShard::RestartPipeRS(ui64 tabletId, const TActorContext& ctx) {
}
void TDataShard::AckRSToDeletedTablet(ui64 tabletId, const TActorContext& ctx) {
- for (auto seqno : ResendReadSetPipeTracker.FindTx(tabletId)) {
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Pipe reset to dead tablet %" PRIu64 " caused ack of readset %" PRIu64
- " at tablet %" PRIu64, tabletId, seqno, TabletID());
-
- OutReadSets.AckForDeletedDestination(tabletId, seqno, ctx);
+ for (auto seqno : ResendReadSetPipeTracker.FindTx(tabletId)) {
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Pipe reset to dead tablet %" PRIu64 " caused ack of readset %" PRIu64
+ " at tablet %" PRIu64, tabletId, seqno, TabletID());
+
+ OutReadSets.AckForDeletedDestination(tabletId, seqno, ctx);
// progress one more Tx to force delayed schema operations
if (Pipeline.HasSchemaOperation() && OutReadSets.Empty()) {
@@ -1880,13 +1880,13 @@ void TDataShard::AckRSToDeletedTablet(ui64 tabletId, const TActorContext& ctx) {
Pipeline.AddCandidateUnit(EExecutionUnitKind::PlanQueue);
PlanQueue.Progress(ctx);
}
- }
+ }
CheckStateChange(ctx);
-}
-
+}
+
void TDataShard::Handle(TEvTabletPipe::TEvServerConnected::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev); Y_UNUSED(ctx);
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Server connected at tablet %s %" PRIu64 ,
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Server connected at tablet %s %" PRIu64 ,
Executor()->GetStats().IsFollower ? "follower" : "leader", ev->Get()->TabletId);
}
@@ -1966,31 +1966,31 @@ bool TDataShard::CheckChangesQueueOverflow() const {
}
void TDataShard::Handle(TEvDataShard::TEvCancelTransactionProposal::TPtr &ev, const TActorContext &ctx) {
- ui64 txId = ev->Get()->Record.GetTxId();
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Got TEvDataShard::TEvCancelTransactionProposal " << TabletID()
- << " txId " << txId);
+ ui64 txId = ev->Get()->Record.GetTxId();
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Got TEvDataShard::TEvCancelTransactionProposal " << TabletID()
+ << " txId " << txId);
// Mark any queued proposals as cancelled
ProposeQueue.Cancel(txId);
// Cancel transactions that have already been proposed
- Execute(new TTxCancelTransactionProposal(this, txId), ctx);
-}
-
+ Execute(new TTxCancelTransactionProposal(this, txId), ctx);
+}
+
void TDataShard::DoPeriodicTasks(const TActorContext &ctx) {
- UpdateLagCounters(ctx);
- UpdateTableStats(ctx);
- SendPeriodicTableStats(ctx);
- CollectCpuUsage(ctx);
-
- if (CurrentKeySampler == EnabledKeySampler && ctx.Now() > StopKeyAccessSamplingAt) {
- CurrentKeySampler = DisabledKeySampler;
- LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Stopped key access sampling at datashard: " << TabletID());
- }
-
- ctx.Schedule(TDuration::Seconds(5), new TEvPrivate::TEvPeriodicWakeup());
-}
-
+ UpdateLagCounters(ctx);
+ UpdateTableStats(ctx);
+ SendPeriodicTableStats(ctx);
+ CollectCpuUsage(ctx);
+
+ if (CurrentKeySampler == EnabledKeySampler && ctx.Now() > StopKeyAccessSamplingAt) {
+ CurrentKeySampler = DisabledKeySampler;
+ LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Stopped key access sampling at datashard: " << TabletID());
+ }
+
+ ctx.Schedule(TDuration::Seconds(5), new TEvPrivate::TEvPeriodicWakeup());
+}
+
void TDataShard::UpdateLagCounters(const TActorContext &ctx) {
TDuration dataTxCompleteLag = GetDataTxCompleteLag();
TabletCounters->Simple()[COUNTER_TX_COMPLETE_LAG].Set(dataTxCompleteLag.MilliSeconds());
@@ -1998,17 +1998,17 @@ void TDataShard::UpdateLagCounters(const TActorContext &ctx) {
LOG_WARN_S(ctx, NKikimrServices::TX_DATASHARD,
"Tx completion lag (" << dataTxCompleteLag << ") is > 5 min on tablet "
<< TabletID());
- }
-
+ }
+
TDuration scanTxCompleteLag = GetScanTxCompleteLag();
TabletCounters->Simple()[COUNTER_SCAN_TX_COMPLETE_LAG].Set(scanTxCompleteLag.MilliSeconds());
if (scanTxCompleteLag > TDuration::Hours(1)) {
LOG_WARN_S(ctx, NKikimrServices::TX_DATASHARD,
"Scan completion lag (" << scanTxCompleteLag << ") is > 1 hour on tablet "
<< TabletID());
- }
-}
-
+ }
+}
+
void TDataShard::FillSplitTrajectory(ui64 origin, NKikimrTx::TBalanceTrackList& tracks) {
Y_UNUSED(origin);
Y_UNUSED(tracks);
@@ -2035,7 +2035,7 @@ void TDataShard::SendReadSet(const TActorContext& ctx, ui64 step,
IncCounter(COUNTER_READSET_SENT_COUNT);
IncCounter(COUNTER_READSET_SENT_SIZE, body.size());
-
+
PipeClientCache->Send(ctx, target, ev.Release());
}
@@ -2173,32 +2173,32 @@ void TDataShard::SerializeHistogram(const TUserTable &tinfo,
}
void TDataShard::SerializeKeySample(const TUserTable &tinfo,
- const NTable::TKeyAccessSample &keySample,
- const NScheme::TTypeRegistry &typeRegistry,
- NKikimrTxDataShard::TEvGetDataHistogramResponse::THistogram &hist)
-{
- THashMap<TString, ui64> accessCounts;
-
- for (auto &key : keySample.GetSample()) {
- accessCounts[key.first]++;
- // TODO: count access kinds separately
- }
-
- for (auto &item : accessCounts) {
- auto &rec = *hist.AddItems();
- rec.SetValue(item.second);
-
- TSerializedCellVec key(item.first);
- for (ui32 ki = 0; ki < tinfo.KeyColumnIds.size() && ki < key.GetCells().size(); ++ki) {
- NScheme::ITypeSP t = typeRegistry.GetType(tinfo.KeyColumnTypes[ki]);
- DbgPrintValue(*rec.AddKeyValues(), key.GetCells()[ki], t.GetTypeId());
- }
- }
- Sort(hist.MutableItems()->begin(), hist.MutableItems()->end(),
- [] (const auto& a, const auto& b) { return a.GetValue() > b.GetValue(); });
-}
-
-
+ const NTable::TKeyAccessSample &keySample,
+ const NScheme::TTypeRegistry &typeRegistry,
+ NKikimrTxDataShard::TEvGetDataHistogramResponse::THistogram &hist)
+{
+ THashMap<TString, ui64> accessCounts;
+
+ for (auto &key : keySample.GetSample()) {
+ accessCounts[key.first]++;
+ // TODO: count access kinds separately
+ }
+
+ for (auto &item : accessCounts) {
+ auto &rec = *hist.AddItems();
+ rec.SetValue(item.second);
+
+ TSerializedCellVec key(item.first);
+ for (ui32 ki = 0; ki < tinfo.KeyColumnIds.size() && ki < key.GetCells().size(); ++ki) {
+ NScheme::ITypeSP t = typeRegistry.GetType(tinfo.KeyColumnTypes[ki]);
+ DbgPrintValue(*rec.AddKeyValues(), key.GetCells()[ki], t.GetTypeId());
+ }
+ }
+ Sort(hist.MutableItems()->begin(), hist.MutableItems()->end(),
+ [] (const auto& a, const auto& b) { return a.GetValue() > b.GetValue(); });
+}
+
+
void TDataShard::Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult::TPtr ev,
const TActorContext &ctx)
{
@@ -2274,7 +2274,7 @@ void TDataShard::Handle(TEvDataShard::TEvGetDataHistogramRequest::TPtr &ev,
hist.AddKeyNames(tinfo.Columns.FindPtr(ki)->Name);
SerializeHistogram(tinfo, stats.DataSizeHistogram, reg, *hist.MutableSizeHistogram());
SerializeHistogram(tinfo, stats.RowCountHistogram, reg, *hist.MutableCountHistogram());
- SerializeKeySample(tinfo, tinfo.Stats.AccessStats, reg, *hist.MutableKeyAccessSample());
+ SerializeKeySample(tinfo, tinfo.Stats.AccessStats, reg, *hist.MutableKeyAccessSample());
}
ctx.Send(ev->Sender, response);
@@ -2514,37 +2514,37 @@ void TDataShard::ScanComplete(NTable::EAbort,
ui64 cookie,
const TActorContext &ctx)
{
- if (auto* noTxScan = dynamic_cast<INoTxScan*>(prod.Get())) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Non-transactinal scan complete at "
- << TabletID());
-
- noTxScan->OnFinished(this);
- prod.Destroy();
- } else {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- "FullScan complete at " << TabletID());
-
- auto op = Pipeline.FindOp(cookie);
- if (op) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Found op"
+ if (auto* noTxScan = dynamic_cast<INoTxScan*>(prod.Get())) {
+        LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Non-transactional scan complete at "
+ << TabletID());
+
+ noTxScan->OnFinished(this);
+ prod.Destroy();
+ } else {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ "FullScan complete at " << TabletID());
+
+ auto op = Pipeline.FindOp(cookie);
+ if (op) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Found op"
<< ": cookie: " << cookie
<< ", at: "<< TabletID());
- if (op->IsWaitingForScan()) {
- op->SetScanResult(prod);
- Pipeline.AddCandidateOp(op);
- }
- } else {
- if (InFlightCondErase && InFlightCondErase.TxId == cookie) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Conditional erase complete"
- << ": cookie: " << cookie
- << ", at: "<< TabletID());
-
- InFlightCondErase.Clear();
- } else if (!Pipeline.FinishStreamingTx(cookie)) {
- LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD,
- "Scan complete at " << TabletID() << " for unknown tx " << cookie);
- }
+ if (op->IsWaitingForScan()) {
+ op->SetScanResult(prod);
+ Pipeline.AddCandidateOp(op);
+ }
+ } else {
+ if (InFlightCondErase && InFlightCondErase.TxId == cookie) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Conditional erase complete"
+ << ": cookie: " << cookie
+ << ", at: "<< TabletID());
+
+ InFlightCondErase.Clear();
+ } else if (!Pipeline.FinishStreamingTx(cookie)) {
+ LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD,
+ "Scan complete at " << TabletID() << " for unknown tx " << cookie);
+ }
}
}
diff --git a/ydb/core/tx/datashard/datashard.h b/ydb/core/tx/datashard/datashard.h
index ba5e04d4318..5a93c035026 100644
--- a/ydb/core/tx/datashard/datashard.h
+++ b/ydb/core/tx/datashard/datashard.h
@@ -23,7 +23,7 @@ class RecordBatch;
namespace NKikimr {
namespace NDataShard {
- using TShardState = NKikimrTxDataShard::EDatashardState;
+ using TShardState = NKikimrTxDataShard::EDatashardState;
struct TTxFlags {
enum Flags : ui64 {
@@ -125,39 +125,39 @@ namespace NDataShard {
| BlockingImmediateOps | BlockingImmediateWrites,
};
};
-
+
// Old datashard uses Uint32 column type for flags in local database.
static_assert(TTxFlags::PreservedPrivateFlagsMask <= Max<ui64>());
static_assert(TTxFlags::PublicFlagsMask <= Max<ui32>());
- // NOTE: this switch should be modified only in tests !!!
- extern bool gAllowLogBatchingDefaultValue;
- extern TDuration gDbStatsReportInterval;
- extern ui64 gDbStatsDataSizeResolution;
- extern ui64 gDbStatsRowCountResolution;
-
- // This SeqNo is used to discard outdated schema Tx requests on datashards.
- // In case of tablet restart on network disconnects SS can resend same Propose for the same schema Tx.
- // Because of this a DS might receive this Propose multiple times. In particular it might get Propose
- // for a Tx that has already been completed and erased from the queue. So the duplicate Proposal might be
- // treated as new. In order to avoid this the SS includes this SeqNo in each Proposal.
- // The SeqNo consists of SS tablet Generation (it's incremented on each SS restart) and Round within this
- // generation. The logic on DS is the following. When sending a Propose SS assings Round to it and increments
- // it's in-mem Round counter. If SS retires sending the same Propose it uses the previously assigned Round value.
- // This assigned Round value is not persisted on SS so in case of SS restart retry will be done with incremented
- // Generation and newly assigned Round. DS has LastSeenSeqNo persisted in it's local DB.
- // If it receives Propose with Generation < LastSeen.Generation it means that SS has restarted and it's
- // going to resend the Propose. If Generation == LastSeen.Generation && Round < LastSeen.Round then this is
- // an old Tx that should have already been acked (becuase SS never start new schema Tx before the previous one was
- // finised)
- struct TSchemeOpSeqNo : public TMessageSeqNo {
- explicit TSchemeOpSeqNo(ui64 gen = 0, ui64 round = 0)
- : TMessageSeqNo(gen, round)
- {}
-
- explicit TSchemeOpSeqNo(const NKikimrTxDataShard::TSchemeOpSeqNo& pb)
- : TMessageSeqNo(pb.GetGeneration(), pb.GetRound())
- {}
+ // NOTE: this switch should be modified only in tests !!!
+ extern bool gAllowLogBatchingDefaultValue;
+ extern TDuration gDbStatsReportInterval;
+ extern ui64 gDbStatsDataSizeResolution;
+ extern ui64 gDbStatsRowCountResolution;
+
+    // This SeqNo is used to discard outdated schema Tx requests on datashards.
+    // In case of a tablet restart or network disconnect SS can resend the same Propose for the same schema Tx.
+    // Because of this a DS might receive this Propose multiple times. In particular it might get a Propose
+    // for a Tx that has already been completed and erased from the queue. So the duplicate Proposal might be
+    // treated as new. In order to avoid this the SS includes this SeqNo in each Proposal.
+    // The SeqNo consists of SS tablet Generation (it's incremented on each SS restart) and Round within this
+    // generation. The logic on DS is the following. When sending a Propose SS assigns a Round to it and increments
+    // its in-mem Round counter. If SS retries sending the same Propose it uses the previously assigned Round value.
+    // This assigned Round value is not persisted on SS, so in case of SS restart the retry will be done with an
+    // incremented Generation and a newly assigned Round. DS has LastSeenSeqNo persisted in its local DB.
+    // If it receives a Propose with Generation < LastSeen.Generation it means that SS has restarted and it's
+    // going to resend the Propose. If Generation == LastSeen.Generation && Round < LastSeen.Round then this is
+    // an old Tx that should have already been acked (because SS never starts a new schema Tx before the previous
+    // one was finished).
+ struct TSchemeOpSeqNo : public TMessageSeqNo {
+ explicit TSchemeOpSeqNo(ui64 gen = 0, ui64 round = 0)
+ : TMessageSeqNo(gen, round)
+ {}
+
+ explicit TSchemeOpSeqNo(const NKikimrTxDataShard::TSchemeOpSeqNo& pb)
+ : TMessageSeqNo(pb.GetGeneration(), pb.GetRound())
+ {}
TSchemeOpSeqNo& operator++() {
if (0 == ++Round) {
@@ -165,8 +165,8 @@ namespace NDataShard {
}
return *this;
}
- };
-
+ };
+
}
// legacy
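
The rule described in the comment above reduces to an ordered comparison of (Generation, Round) pairs. A standalone sketch of that staleness check, with TSeqNo as a simplified stand-in for TSchemeOpSeqNo rather than the real type:

#include <cstdint>

struct TSeqNo {
    uint64_t Generation = 0;
    uint64_t Round = 0;
};

// Returns true when an incoming Propose carries a SeqNo older than the one the
// datashard has already persisted, so it can be discarded (or simply re-acked)
// instead of being treated as a new schema transaction.
bool IsOutdatedProposal(const TSeqNo& incoming, const TSeqNo& lastSeen) {
    if (incoming.Generation < lastSeen.Generation) {
        return true;  // SS has restarted since; a retry with a newer SeqNo follows
    }
    return incoming.Generation == lastSeen.Generation
        && incoming.Round < lastSeen.Round;  // old Tx that was already acked
}
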
@@ -196,7 +196,7 @@ struct TEvDataShard {
EvReadOperationHistogram,
EvUpdateConfig,
EvSchemaChanged,
- EvStateChanged,
+ EvStateChanged,
EvCancelBackup,
EvMigrateSchemeShardRequest,
EvMigrateSchemeShardResponse,
@@ -207,33 +207,33 @@ struct TEvDataShard {
EvReadOperationHistogramResult,
EvUpdateConfigResult,
EvSchemaChangedResult,
- EvStateChangedResult,
-
- EvReturnBorrowedPart = 6 * 512,
- EvReturnBorrowedPartAck,
-
- EvInitSplitMergeDestination = EvProposeTransaction + 7*512,
- EvInitSplitMergeDestinationAck,
- EvSplit,
- EvSplitAck,
- EvSplitTransferSnapshot,
- EvSplitTransferSnapshotAck,
- EvSplitPartitioningChanged,
- EvSplitPartitioningChangedAck,
-
- EvGetTableStats,
- EvGetTableStatsResult,
- EvPeriodicTableStats,
-
- EvS3ListingRequest,
- EvS3ListingResponse,
-
- EvUploadRowsRequest,
- EvUploadRowsResponse,
-
- EvReadColumnsRequest,
- EvReadColumnsResponse,
-
+ EvStateChangedResult,
+
+ EvReturnBorrowedPart = 6 * 512,
+ EvReturnBorrowedPartAck,
+
+ EvInitSplitMergeDestination = EvProposeTransaction + 7*512,
+ EvInitSplitMergeDestinationAck,
+ EvSplit,
+ EvSplitAck,
+ EvSplitTransferSnapshot,
+ EvSplitTransferSnapshotAck,
+ EvSplitPartitioningChanged,
+ EvSplitPartitioningChangedAck,
+
+ EvGetTableStats,
+ EvGetTableStatsResult,
+ EvPeriodicTableStats,
+
+ EvS3ListingRequest,
+ EvS3ListingResponse,
+
+ EvUploadRowsRequest,
+ EvUploadRowsResponse,
+
+ EvReadColumnsRequest,
+ EvReadColumnsResponse,
+
EvGetInfoRequest,
EvGetInfoResponse,
EvListOperationsRequest,
@@ -372,33 +372,33 @@ struct TEvDataShard {
}
};
- struct TEvStateChanged : public TEventPB<TEvStateChanged, NKikimrTxDataShard::TEvStateChanged,
- TEvDataShard::EvStateChanged> {
- TEvStateChanged()
- {}
-
+ struct TEvStateChanged : public TEventPB<TEvStateChanged, NKikimrTxDataShard::TEvStateChanged,
+ TEvDataShard::EvStateChanged> {
+ TEvStateChanged()
+ {}
+
TEvStateChanged(const TActorId& source, ui64 tabletId, ui32 state) {
ActorIdToProto(source, Record.MutableSource());
- Record.SetTabletId(tabletId);
- Record.SetState(state);
- }
-
+ Record.SetTabletId(tabletId);
+ Record.SetState(state);
+ }
+
TActorId GetSource() const {
return ActorIdFromProto(Record.GetSource());
- }
- };
-
- struct TEvStateChangedResult : public TEventPB<TEvStateChangedResult, NKikimrTxDataShard::TEvStateChangedResult,
- TEvDataShard::EvStateChangedResult> {
- TEvStateChangedResult()
- {}
-
- TEvStateChangedResult(ui64 tabletId, ui32 state) {
- Record.SetTabletId(tabletId);
- Record.SetState(state);
- }
- };
-
+ }
+ };
+
+ struct TEvStateChangedResult : public TEventPB<TEvStateChangedResult, NKikimrTxDataShard::TEvStateChangedResult,
+ TEvDataShard::EvStateChangedResult> {
+ TEvStateChangedResult()
+ {}
+
+ TEvStateChangedResult(ui64 tabletId, ui32 state) {
+ Record.SetTabletId(tabletId);
+ Record.SetState(state);
+ }
+ };
+
struct TEvProposeTransaction : public TEventPB<TEvProposeTransaction, NKikimrTxDataShard::TEvProposeTransaction,
TEvDataShard::EvProposeTransaction> {
TEvProposeTransaction()
@@ -638,7 +638,7 @@ struct TEvDataShard {
bool ForceOnline = false;
bool ForceDirty = false;
};
-
+
struct TEvProposeTransactionRestart : public TEventPB<TEvProposeTransactionRestart, NKikimrTxDataShard::TEvProposeTransactionRestart, TEvDataShard::EvProposeTransactionRestart> {
TEvProposeTransactionRestart() = default;
TEvProposeTransactionRestart(ui64 tabletId, ui64 txId) {
@@ -664,103 +664,103 @@ struct TEvDataShard {
}
};
- struct TEvReturnBorrowedPart : public TEventPB<TEvReturnBorrowedPart, NKikimrTxDataShard::TEvReturnBorrowedPart, TEvDataShard::EvReturnBorrowedPart> {
- TEvReturnBorrowedPart() = default;
+ struct TEvReturnBorrowedPart : public TEventPB<TEvReturnBorrowedPart, NKikimrTxDataShard::TEvReturnBorrowedPart, TEvDataShard::EvReturnBorrowedPart> {
+ TEvReturnBorrowedPart() = default;
TEvReturnBorrowedPart(ui64 tabletId, const TVector<TLogoBlobID>& partMetaVec) {
- Record.SetFromTabletId(tabletId);
- for (const auto& partMeta : partMetaVec) {
- LogoBlobIDFromLogoBlobID(partMeta, Record.AddPartMetadata());
- }
- }
- };
-
- struct TEvReturnBorrowedPartAck : public TEventPB<TEvReturnBorrowedPartAck, NKikimrTxDataShard::TEvReturnBorrowedPartAck, TEvDataShard::EvReturnBorrowedPartAck> {
- TEvReturnBorrowedPartAck() = default;
+ Record.SetFromTabletId(tabletId);
+ for (const auto& partMeta : partMetaVec) {
+ LogoBlobIDFromLogoBlobID(partMeta, Record.AddPartMetadata());
+ }
+ }
+ };
+
+ struct TEvReturnBorrowedPartAck : public TEventPB<TEvReturnBorrowedPartAck, NKikimrTxDataShard::TEvReturnBorrowedPartAck, TEvDataShard::EvReturnBorrowedPartAck> {
+ TEvReturnBorrowedPartAck() = default;
explicit TEvReturnBorrowedPartAck(const TVector<TLogoBlobID>& partMetaVec) {
- for (const auto& partMeta : partMetaVec) {
- LogoBlobIDFromLogoBlobID(partMeta, Record.AddPartMetadata());
- }
- }
- };
-
-
- struct TEvInitSplitMergeDestination : public TEventPB<TEvInitSplitMergeDestination,
- NKikimrTxDataShard::TEvInitSplitMergeDestination,
- TEvDataShard::EvInitSplitMergeDestination> {
- TEvInitSplitMergeDestination() = default;
+ for (const auto& partMeta : partMetaVec) {
+ LogoBlobIDFromLogoBlobID(partMeta, Record.AddPartMetadata());
+ }
+ }
+ };
+
+
+ struct TEvInitSplitMergeDestination : public TEventPB<TEvInitSplitMergeDestination,
+ NKikimrTxDataShard::TEvInitSplitMergeDestination,
+ TEvDataShard::EvInitSplitMergeDestination> {
+ TEvInitSplitMergeDestination() = default;
TEvInitSplitMergeDestination(ui64 opId, ui64 schemeshardTabletId, ui64 subDomainPathId,
const NKikimrTxDataShard::TSplitMergeDescription &splitDesc,
const NKikimrSubDomains::TProcessingParams &processingParams) {
- Record.SetOperationCookie(opId);
- Record.SetSchemeshardTabletId(schemeshardTabletId);
+ Record.SetOperationCookie(opId);
+ Record.SetSchemeshardTabletId(schemeshardTabletId);
Record.SetSubDomainPathId(subDomainPathId);
Record.MutableSplitDescription()->CopyFrom(splitDesc);
Record.MutableProcessingParams()->CopyFrom(processingParams);
- }
- };
-
- struct TEvInitSplitMergeDestinationAck : public TEventPB<TEvInitSplitMergeDestinationAck,
- NKikimrTxDataShard::TEvInitSplitMergeDestinationAck,
- TEvDataShard::EvInitSplitMergeDestinationAck> {
- TEvInitSplitMergeDestinationAck() = default;
- explicit TEvInitSplitMergeDestinationAck(ui64 opId, ui64 tabletId) {
- Record.SetOperationCookie(opId);
- Record.SetTabletId(tabletId);
- }
- };
-
- struct TEvSplit : public TEventPB<TEvSplit, NKikimrTxDataShard::TEvSplit, TEvDataShard::EvSplit> {
- TEvSplit() = default;
- explicit TEvSplit(ui64 opId) {
- Record.SetOperationCookie(opId);
- }
- };
-
- struct TEvSplitAck : public TEventPB<TEvSplitAck, NKikimrTxDataShard::TEvSplitAck, TEvDataShard::EvSplitAck> {
- TEvSplitAck() = default;
- explicit TEvSplitAck(ui64 opId, ui64 tabletId) {
- Record.SetOperationCookie(opId);
- Record.SetTabletId(tabletId);
- }
- };
-
- struct TEvSplitTransferSnapshot : public TEventPB<TEvSplitTransferSnapshot,
- NKikimrTxDataShard::TEvSplitTransferSnapshot,
- TEvDataShard::EvSplitTransferSnapshot> {
- TEvSplitTransferSnapshot() = default;
- explicit TEvSplitTransferSnapshot(ui64 opId) {
- Record.SetOperationCookie(opId);
- }
- };
-
- struct TEvSplitTransferSnapshotAck : public TEventPB<TEvSplitTransferSnapshotAck,
- NKikimrTxDataShard::TEvSplitTransferSnapshotAck,
- TEvDataShard::EvSplitTransferSnapshotAck> {
- TEvSplitTransferSnapshotAck() = default;
- explicit TEvSplitTransferSnapshotAck(ui64 opId, ui64 tabletId) {
- Record.SetOperationCookie(opId);
- Record.SetTabletId(tabletId);
- }
- };
-
- struct TEvSplitPartitioningChanged : public TEventPB<TEvSplitPartitioningChanged,
- NKikimrTxDataShard::TEvSplitPartitioningChanged,
- TEvDataShard::EvSplitPartitioningChanged> {
- TEvSplitPartitioningChanged() = default;
- explicit TEvSplitPartitioningChanged(ui64 opId) {
- Record.SetOperationCookie(opId);
- }
- };
-
- struct TEvSplitPartitioningChangedAck : public TEventPB<TEvSplitPartitioningChangedAck,
- NKikimrTxDataShard::TEvSplitPartitioningChangedAck,
- TEvDataShard::EvSplitPartitioningChangedAck> {
- TEvSplitPartitioningChangedAck() = default;
- explicit TEvSplitPartitioningChangedAck(ui64 opId, ui64 tabletId) {
- Record.SetOperationCookie(opId);
- Record.SetTabletId(tabletId);
- }
- };
+ }
+ };
+
+ struct TEvInitSplitMergeDestinationAck : public TEventPB<TEvInitSplitMergeDestinationAck,
+ NKikimrTxDataShard::TEvInitSplitMergeDestinationAck,
+ TEvDataShard::EvInitSplitMergeDestinationAck> {
+ TEvInitSplitMergeDestinationAck() = default;
+ explicit TEvInitSplitMergeDestinationAck(ui64 opId, ui64 tabletId) {
+ Record.SetOperationCookie(opId);
+ Record.SetTabletId(tabletId);
+ }
+ };
+
+ struct TEvSplit : public TEventPB<TEvSplit, NKikimrTxDataShard::TEvSplit, TEvDataShard::EvSplit> {
+ TEvSplit() = default;
+ explicit TEvSplit(ui64 opId) {
+ Record.SetOperationCookie(opId);
+ }
+ };
+
+ struct TEvSplitAck : public TEventPB<TEvSplitAck, NKikimrTxDataShard::TEvSplitAck, TEvDataShard::EvSplitAck> {
+ TEvSplitAck() = default;
+ explicit TEvSplitAck(ui64 opId, ui64 tabletId) {
+ Record.SetOperationCookie(opId);
+ Record.SetTabletId(tabletId);
+ }
+ };
+
+ struct TEvSplitTransferSnapshot : public TEventPB<TEvSplitTransferSnapshot,
+ NKikimrTxDataShard::TEvSplitTransferSnapshot,
+ TEvDataShard::EvSplitTransferSnapshot> {
+ TEvSplitTransferSnapshot() = default;
+ explicit TEvSplitTransferSnapshot(ui64 opId) {
+ Record.SetOperationCookie(opId);
+ }
+ };
+
+ struct TEvSplitTransferSnapshotAck : public TEventPB<TEvSplitTransferSnapshotAck,
+ NKikimrTxDataShard::TEvSplitTransferSnapshotAck,
+ TEvDataShard::EvSplitTransferSnapshotAck> {
+ TEvSplitTransferSnapshotAck() = default;
+ explicit TEvSplitTransferSnapshotAck(ui64 opId, ui64 tabletId) {
+ Record.SetOperationCookie(opId);
+ Record.SetTabletId(tabletId);
+ }
+ };
+
+ struct TEvSplitPartitioningChanged : public TEventPB<TEvSplitPartitioningChanged,
+ NKikimrTxDataShard::TEvSplitPartitioningChanged,
+ TEvDataShard::EvSplitPartitioningChanged> {
+ TEvSplitPartitioningChanged() = default;
+ explicit TEvSplitPartitioningChanged(ui64 opId) {
+ Record.SetOperationCookie(opId);
+ }
+ };
+
+ struct TEvSplitPartitioningChangedAck : public TEventPB<TEvSplitPartitioningChangedAck,
+ NKikimrTxDataShard::TEvSplitPartitioningChangedAck,
+ TEvDataShard::EvSplitPartitioningChangedAck> {
+ TEvSplitPartitioningChangedAck() = default;
+ explicit TEvSplitPartitioningChangedAck(ui64 opId, ui64 tabletId) {
+ Record.SetOperationCookie(opId);
+ Record.SetTabletId(tabletId);
+ }
+ };
struct TEvCancelBackup
: public TEventPB<TEvCancelBackup,
@@ -773,7 +773,7 @@ struct TEvDataShard {
Record.SetBackupTxId(txid);
Record.SetTableId(tableId);
}
- };
+ };
struct TEvCancelRestore
: public TEventPB<TEvCancelRestore,
@@ -788,87 +788,87 @@ struct TEvDataShard {
}
};
- struct TEvGetTableStats : public TEventPB<TEvGetTableStats,
- NKikimrTxDataShard::TEvGetTableStats,
- TEvDataShard::EvGetTableStats> {
- TEvGetTableStats() = default;
- explicit TEvGetTableStats(ui64 tableId, ui64 dataSizeResolution = 0, ui64 rowCountResolution = 0, bool collectKeySample = false) {
- Record.SetTableId(tableId);
- Record.SetDataSizeResolution(dataSizeResolution);
- Record.SetRowCountResolution(rowCountResolution);
- Record.SetCollectKeySample(collectKeySample);
- }
- };
+ struct TEvGetTableStats : public TEventPB<TEvGetTableStats,
+ NKikimrTxDataShard::TEvGetTableStats,
+ TEvDataShard::EvGetTableStats> {
+ TEvGetTableStats() = default;
+ explicit TEvGetTableStats(ui64 tableId, ui64 dataSizeResolution = 0, ui64 rowCountResolution = 0, bool collectKeySample = false) {
+ Record.SetTableId(tableId);
+ Record.SetDataSizeResolution(dataSizeResolution);
+ Record.SetRowCountResolution(rowCountResolution);
+ Record.SetCollectKeySample(collectKeySample);
+ }
+ };
- struct TEvGetTableStatsResult : public TEventPB<TEvGetTableStatsResult,
- NKikimrTxDataShard::TEvGetTableStatsResult,
- TEvDataShard::EvGetTableStatsResult> {
- TEvGetTableStatsResult() = default;
+ struct TEvGetTableStatsResult : public TEventPB<TEvGetTableStatsResult,
+ NKikimrTxDataShard::TEvGetTableStatsResult,
+ TEvDataShard::EvGetTableStatsResult> {
+ TEvGetTableStatsResult() = default;
TEvGetTableStatsResult(ui64 datashardId, ui64 tableOwnerId, ui64 tableLocalId) {
- Record.SetDatashardId(datashardId);
+ Record.SetDatashardId(datashardId);
Record.SetTableOwnerId(tableOwnerId);
Record.SetTableLocalId(tableLocalId);
- }
+ }
};
-
- struct TEvPeriodicTableStats : public TEventPB<TEvPeriodicTableStats,
- NKikimrTxDataShard::TEvPeriodicTableStats,
- TEvDataShard::EvPeriodicTableStats> {
- TEvPeriodicTableStats() = default;
+
+ struct TEvPeriodicTableStats : public TEventPB<TEvPeriodicTableStats,
+ NKikimrTxDataShard::TEvPeriodicTableStats,
+ TEvDataShard::EvPeriodicTableStats> {
+ TEvPeriodicTableStats() = default;
TEvPeriodicTableStats(ui64 datashardId, ui64 tableOwnerId, ui64 tableLocalId) {
- Record.SetDatashardId(datashardId);
+ Record.SetDatashardId(datashardId);
Record.SetTableOwnerId(tableOwnerId);
Record.SetTableLocalId(tableLocalId);
- }
- };
-
- struct TEvS3ListingRequest : public TEventPB<TEvS3ListingRequest,
- NKikimrTxDataShard::TEvS3ListingRequest,
- TEvDataShard::EvS3ListingRequest> {
- TEvS3ListingRequest() = default;
- };
-
- struct TEvS3ListingResponse : public TEventPB<TEvS3ListingResponse,
- NKikimrTxDataShard::TEvS3ListingResponse,
- TEvDataShard::EvS3ListingResponse> {
- TEvS3ListingResponse() = default;
-
- explicit TEvS3ListingResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
- Record.SetTabletID(tabletId);
- Record.SetStatus(status);
- }
- };
-
- struct TEvUploadRowsRequest : public TEventPBWithArena<TEvUploadRowsRequest,
- NKikimrTxDataShard::TEvUploadRowsRequest,
- TEvDataShard::EvUploadRowsRequest,
- 16200, 32500> {
- TEvUploadRowsRequest() = default;
- };
-
- struct TEvUploadRowsResponse : public TEventPB<TEvUploadRowsResponse,
- NKikimrTxDataShard::TEvUploadRowsResponse,
- TEvDataShard::EvUploadRowsResponse> {
- TEvUploadRowsResponse() = default;
-
- explicit TEvUploadRowsResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
- Record.SetTabletID(tabletId);
- Record.SetStatus(status);
- }
- };
-
+ }
+ };
+
+ struct TEvS3ListingRequest : public TEventPB<TEvS3ListingRequest,
+ NKikimrTxDataShard::TEvS3ListingRequest,
+ TEvDataShard::EvS3ListingRequest> {
+ TEvS3ListingRequest() = default;
+ };
+
+ struct TEvS3ListingResponse : public TEventPB<TEvS3ListingResponse,
+ NKikimrTxDataShard::TEvS3ListingResponse,
+ TEvDataShard::EvS3ListingResponse> {
+ TEvS3ListingResponse() = default;
+
+ explicit TEvS3ListingResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
+ Record.SetTabletID(tabletId);
+ Record.SetStatus(status);
+ }
+ };
+
+ struct TEvUploadRowsRequest : public TEventPBWithArena<TEvUploadRowsRequest,
+ NKikimrTxDataShard::TEvUploadRowsRequest,
+ TEvDataShard::EvUploadRowsRequest,
+ 16200, 32500> {
+ TEvUploadRowsRequest() = default;
+ };
+
+ struct TEvUploadRowsResponse : public TEventPB<TEvUploadRowsResponse,
+ NKikimrTxDataShard::TEvUploadRowsResponse,
+ TEvDataShard::EvUploadRowsResponse> {
+ TEvUploadRowsResponse() = default;
+
+ explicit TEvUploadRowsResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
+ Record.SetTabletID(tabletId);
+ Record.SetStatus(status);
+ }
+ };
+
struct TEvUnsafeUploadRowsRequest : public TEventPBWithArena<TEvUnsafeUploadRowsRequest,
NKikimrTxDataShard::TEvUploadRowsRequest,
TEvDataShard::EvUnsafeUploadRowsRequest,
16200, 32500> {
TEvUnsafeUploadRowsRequest() = default;
};
-
+
struct TEvUnsafeUploadRowsResponse : public TEventPB<TEvUnsafeUploadRowsResponse,
NKikimrTxDataShard::TEvUploadRowsResponse,
TEvDataShard::EvUnsafeUploadRowsResponse> {
TEvUnsafeUploadRowsResponse() = default;
-
+
explicit TEvUnsafeUploadRowsResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
Record.SetTabletID(tabletId);
Record.SetStatus(status);
@@ -994,23 +994,23 @@ struct TEvDataShard {
TEvReadCancel() = default;
};
- struct TEvReadColumnsRequest : public TEventPB<TEvReadColumnsRequest,
+ struct TEvReadColumnsRequest : public TEventPB<TEvReadColumnsRequest,
NKikimrTxDataShard::TEvReadColumnsRequest,
TEvDataShard::EvReadColumnsRequest> {
- TEvReadColumnsRequest() = default;
- };
-
- struct TEvReadColumnsResponse : public TEventPB<TEvReadColumnsResponse,
+ TEvReadColumnsRequest() = default;
+ };
+
+ struct TEvReadColumnsResponse : public TEventPB<TEvReadColumnsResponse,
NKikimrTxDataShard::TEvReadColumnsResponse,
TEvDataShard::EvReadColumnsResponse> {
- TEvReadColumnsResponse() = default;
-
- explicit TEvReadColumnsResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
- Record.SetTabletID(tabletId);
- Record.SetStatus(status);
- }
- };
-
+ TEvReadColumnsResponse() = default;
+
+ explicit TEvReadColumnsResponse(ui64 tabletId, ui32 status = NKikimrTxDataShard::TError::OK) {
+ Record.SetTabletID(tabletId);
+ Record.SetStatus(status);
+ }
+ };
+
struct TEvGetInfoRequest : public TEventPB<TEvGetInfoRequest,
NKikimrTxDataShard::TEvGetInfoRequest,
TEvDataShard::EvGetInfoRequest> {
@@ -1545,7 +1545,7 @@ IActor* CreateDataShard(const TActorId &tablet, TTabletStorageInfo *info);
}
-inline TString DatashardStateName(ui32 state) {
- NKikimrTxDataShard::EDatashardState s = (NKikimrTxDataShard::EDatashardState)state;
- return NKikimrTxDataShard::EDatashardState_Name(s);
-}
+inline TString DatashardStateName(ui32 state) {
+ NKikimrTxDataShard::EDatashardState s = (NKikimrTxDataShard::EDatashardState)state;
+ return NKikimrTxDataShard::EDatashardState_Name(s);
+}
diff --git a/ydb/core/tx/datashard/datashard__engine_host.cpp b/ydb/core/tx/datashard/datashard__engine_host.cpp
index 65f2d063ea4..876c3370933 100644
--- a/ydb/core/tx/datashard/datashard__engine_host.cpp
+++ b/ydb/core/tx/datashard/datashard__engine_host.cpp
@@ -281,15 +281,15 @@ TIntrusivePtr<TThrRefBase> InitDataShardSysTables(TDataShard* self) {
class TDataShardEngineHost : public TEngineHost {
public:
TDataShardEngineHost(TDataShard* self, NTable::TDatabase& db, TEngineHostCounters& counters, ui64& lockTxId, TInstant now)
- : TEngineHost(db, counters,
- TEngineHostSettings(self->TabletID(),
+ : TEngineHost(db, counters,
+ TEngineHostSettings(self->TabletID(),
(self->State == TShardState::Readonly || self->State == TShardState::Frozen),
- self->ByKeyFilterDisabled(),
- self->GetKeyAccessSampler()))
+ self->ByKeyFilterDisabled(),
+ self->GetKeyAccessSampler()))
, Self(self)
, DB(db)
, LockTxId(lockTxId)
- , Now(now)
+ , Now(now)
{}
void SetWriteVersion(TRowVersion writeVersion) {
@@ -368,7 +368,7 @@ public:
Self->SysLocksTable().SetLock(tableId, row, LockTxId);
- Self->SetTableAccessTime(tableId, Now);
+ Self->SetTableAccessTime(tableId, Now);
return TEngineHost::SelectRow(tableId, row, columnIds, returnType, readTarget, holderFactory);
}
@@ -381,7 +381,7 @@ public:
Self->SysLocksTable().SetLock(tableId, range, LockTxId);
- Self->SetTableAccessTime(tableId, Now);
+ Self->SetTableAccessTime(tableId, Now);
return TEngineHost::SelectRange(tableId, range, columnIds, skipNullKeys, returnType, readTarget,
itemsLimit, bytesLimit, reverse, forbidNullArgs, holderFactory);
}
@@ -446,7 +446,7 @@ public:
Self->SysLocksTable().BreakLock(tableId, row);
- Self->SetTableUpdateTime(tableId, Now);
+ Self->SetTableUpdateTime(tableId, Now);
TEngineHost::EraseRow(tableId, row);
}
@@ -454,13 +454,13 @@ public:
bool IsMyKey(const TTableId& tableId, const TArrayRef<const TCell>& row) const override {
if (TSysTables::IsSystemTable(tableId))
return DataShardSysTable(tableId).IsMyKey(row);
-
+
auto iter = Self->TableInfos.find(tableId.PathId.LocalPathId);
- if (iter == Self->TableInfos.end()) {
- // TODO: can this happen?
- return false;
- }
-
+ if (iter == Self->TableInfos.end()) {
+ // TODO: can this happen?
+ return false;
+ }
+
// Check row against range
const TUserTable& info = *iter->second;
return (ComparePointAndRange(row, info.GetTableRange(), info.KeyColumnTypes, info.KeyColumnTypes) == 0);
@@ -500,7 +500,7 @@ private:
NTable::TDatabase& DB;
const ui64& LockTxId;
bool IsImmediateTx = false;
- TInstant Now;
+ TInstant Now;
TRowVersion WriteVersion = TRowVersion::Max();
TRowVersion ReadVersion = TRowVersion::Min();
mutable THashMap<TTableId, THolder<IChangeCollector>> ChangeCollectors;
@@ -517,11 +517,11 @@ TEngineBay::TEngineBay(TDataShard * self, TTransactionContext& txc, const TActor
EngineHost = MakeHolder<TDataShardEngineHost>(self, txc.DB, EngineHostCounters, LockTxId, now);
EngineSettings = MakeHolder<TEngineFlatSettings>(IEngineFlat::EProtocol::V1, AppData(ctx)->FunctionRegistry,
- *TAppData::RandomProvider, *TAppData::TimeProvider, EngineHost.Get(), self->AllocCounters);
+ *TAppData::RandomProvider, *TAppData::TimeProvider, EngineHost.Get(), self->AllocCounters);
ui64 tabletId = self->TabletID();
TraceMessage = Sprintf("Shard %" PRIu64 ", txid %" PRIu64, tabletId, stepTxId.second);
- const TActorSystem* actorSystem = ctx.ExecutorThread.ActorSystem;
+ const TActorSystem* actorSystem = ctx.ExecutorThread.ActorSystem;
EngineSettings->LogErrorWriter = [actorSystem, this](const TString& message) {
LOG_ERROR_S(*actorSystem, NKikimrServices::MINIKQL_ENGINE, TraceMessage
<< ", engine error: " << message);
diff --git a/ydb/core/tx/datashard/datashard__engine_host.h b/ydb/core/tx/datashard/datashard__engine_host.h
index 6c618a9c1e6..8e3064c83d8 100644
--- a/ydb/core/tx/datashard/datashard__engine_host.h
+++ b/ydb/core/tx/datashard/datashard__engine_host.h
@@ -28,7 +28,7 @@ public:
using TValidationInfo = NMiniKQL::IEngineFlat::TValidationInfo;
using TValidatedKey = NMiniKQL::IEngineFlat::TValidatedKey;
using EResult = NMiniKQL::IEngineFlat::EResult;
- using TEngineHostCounters = NMiniKQL::TEngineHostCounters;
+ using TEngineHostCounters = NMiniKQL::TEngineHostCounters;
struct TSizes {
ui64 ReadSize = 0;
diff --git a/ydb/core/tx/datashard/datashard__init.cpp b/ydb/core/tx/datashard/datashard__init.cpp
index 6198f7e003f..dc12c49959b 100644
--- a/ydb/core/tx/datashard/datashard__init.cpp
+++ b/ydb/core/tx/datashard/datashard__init.cpp
@@ -4,7 +4,7 @@
#include <ydb/core/tablet/tablet_exception.h>
#include <ydb/core/util/pb.h>
-
+
namespace NKikimr {
namespace NDataShard {
@@ -18,29 +18,29 @@ bool TDataShard::TTxInit::Execute(TTransactionContext& txc, const TActorContext&
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "TDataShard::TTxInit::Execute");
try {
- Self->State = TShardState::Unknown;
- Self->LastLocalTid = Schema::MinLocalTid;
- Self->LastSeqno = 1;
+ Self->State = TShardState::Unknown;
+ Self->LastLocalTid = Schema::MinLocalTid;
+ Self->LastSeqno = 1;
Self->NextChangeRecordOrder = 1;
Self->LastChangeRecordGroup = 1;
- Self->TransQueue.Reset();
+ Self->TransQueue.Reset();
Self->SnapshotManager.Reset();
Self->SchemaSnapshotManager.Reset();
Self->S3Uploads.Reset();
Self->S3Downloads.Reset();
-
+
Self->KillChangeSender(ctx);
Self->ChangesQueue.clear();
ChangeRecords.clear();
- bool done = ReadEverything(txc);
+ bool done = ReadEverything(txc);
if (done && Self->State != TShardState::Offline) {
Self->SnapshotManager.Fix_KIKIMR_12289(txc.DB);
Self->SnapshotManager.Fix_KIKIMR_14259(txc.DB);
}
- return done;
+ return done;
} catch (const TNotReadyTabletException &) {
return false;
} catch (const TSchemeErrorTabletException &ex) {
@@ -54,21 +54,21 @@ bool TDataShard::TTxInit::Execute(TTransactionContext& txc, const TActorContext&
void TDataShard::TTxInit::Complete(const TActorContext &ctx) {
LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "TDataShard::TTxInit::Complete");
- // Start MakeSnapshot() if we started in SplitSrcMakeSnapshot state
- if (Self->State == TShardState::SplitSrcMakeSnapshot) {
- Self->Execute(Self->CreateTxStartSplit(), ctx);
- } else if (Self->State == TShardState::SplitSrcSendingSnapshot) {
- if (!Self->SplitSrcSnapshotSender.AllAcked()) {
- Self->SplitSrcSnapshotSender.DoSend(ctx);
- }
- } else if (Self->State == TShardState::Offline) {
- // Remind the schemeshard that this shard is in Offline state and can be deleted
- Self->ReportState(ctx, Self->State);
+ // Start MakeSnapshot() if we started in SplitSrcMakeSnapshot state
+ if (Self->State == TShardState::SplitSrcMakeSnapshot) {
+ Self->Execute(Self->CreateTxStartSplit(), ctx);
+ } else if (Self->State == TShardState::SplitSrcSendingSnapshot) {
+ if (!Self->SplitSrcSnapshotSender.AllAcked()) {
+ Self->SplitSrcSnapshotSender.DoSend(ctx);
+ }
+ } else if (Self->State == TShardState::Offline) {
+ // Remind the schemeshard that this shard is in Offline state and can be deleted
+ Self->ReportState(ctx, Self->State);
} else if (Self->State == TShardState::Ready) {
// Make sure schema defaults are updated when changed
Self->Execute(Self->CreateTxInitSchemaDefaults(), ctx);
- }
-
+ }
+
Self->SwitchToWork(ctx);
Self->SendRegistrationRequestTimeCast(ctx);
@@ -132,26 +132,26 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
NIceDb::TNiceDb db(txc.DB);
- {
- bool ready = true;
-
-#define PRECHARGE_SYS_TABLE(table) \
- { \
- if (txc.DB.GetScheme().GetTableInfo(table::TableId)) { \
- auto rowset = db.Table<table>().Range().Select(); \
- ready &= rowset.IsReady(); \
- } \
- }
-
- PRECHARGE_SYS_TABLE(Schema::Sys);
- PRECHARGE_SYS_TABLE(Schema::UserTables);
- PRECHARGE_SYS_TABLE(Schema::TxMain);
- PRECHARGE_SYS_TABLE(Schema::OutReadSets);
- PRECHARGE_SYS_TABLE(Schema::PlanQueue);
- PRECHARGE_SYS_TABLE(Schema::DeadlineQueue);
- PRECHARGE_SYS_TABLE(Schema::SchemaOperations);
- PRECHARGE_SYS_TABLE(Schema::SplitSrcSnapshots);
- PRECHARGE_SYS_TABLE(Schema::SplitDstReceivedSnapshots);
+ {
+ bool ready = true;
+
+#define PRECHARGE_SYS_TABLE(table) \
+ { \
+ if (txc.DB.GetScheme().GetTableInfo(table::TableId)) { \
+ auto rowset = db.Table<table>().Range().Select(); \
+ ready &= rowset.IsReady(); \
+ } \
+ }
+
+ PRECHARGE_SYS_TABLE(Schema::Sys);
+ PRECHARGE_SYS_TABLE(Schema::UserTables);
+ PRECHARGE_SYS_TABLE(Schema::TxMain);
+ PRECHARGE_SYS_TABLE(Schema::OutReadSets);
+ PRECHARGE_SYS_TABLE(Schema::PlanQueue);
+ PRECHARGE_SYS_TABLE(Schema::DeadlineQueue);
+ PRECHARGE_SYS_TABLE(Schema::SchemaOperations);
+ PRECHARGE_SYS_TABLE(Schema::SplitSrcSnapshots);
+ PRECHARGE_SYS_TABLE(Schema::SplitDstReceivedSnapshots);
PRECHARGE_SYS_TABLE(Schema::Snapshots);
PRECHARGE_SYS_TABLE(Schema::S3Uploads);
PRECHARGE_SYS_TABLE(Schema::S3UploadedParts);
@@ -164,13 +164,13 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
PRECHARGE_SYS_TABLE(Schema::DstReplicationSourceOffsetsReceived);
PRECHARGE_SYS_TABLE(Schema::UserTablesStats);
PRECHARGE_SYS_TABLE(Schema::SchemaSnapshots);
-
- if (!ready)
- return false;
-
-#undef PRECHARGE_SYS_TABLE
- }
-
+
+ if (!ready)
+ return false;
+
+#undef PRECHARGE_SYS_TABLE
+ }
+
// Reads from Sys table
LOAD_SYS_UI64(db, Schema::Sys_State, Self->State);
LOAD_SYS_UI64(db, Schema::Sys_LastLocalTid, Self->LastLocalTid);
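
For a single table, the PRECHARGE_SYS_TABLE macro above expands to the block below. Issuing every Range().Select() before `ready` is checked means that when some pages are not yet in memory the transaction can return false and be re-run once the requested data has been loaded, rather than page-faulting table by table:

{
    if (txc.DB.GetScheme().GetTableInfo(Schema::Sys::TableId)) {
        auto rowset = db.Table<Schema::Sys>().Range().Select();
        ready &= rowset.IsReady();
    }
}
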
@@ -180,8 +180,8 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
LOAD_SYS_UI64(db, Schema::Sys_TxReadSizeLimit, Self->TxReadSizeLimit);
LOAD_SYS_UI64(db, Schema::Sys_PathOwnerId, Self->PathOwnerId);
LOAD_SYS_UI64(db, Schema::Sys_CurrentSchemeShardId, Self->CurrentSchemeShardId);
- LOAD_SYS_UI64(db, Schema::Sys_LastSchemeShardGeneration, Self->LastSchemeOpSeqNo.Generation);
- LOAD_SYS_UI64(db, Schema::Sys_LastSchemeShardRound, Self->LastSchemeOpSeqNo.Round);
+ LOAD_SYS_UI64(db, Schema::Sys_LastSchemeShardGeneration, Self->LastSchemeOpSeqNo.Generation);
+ LOAD_SYS_UI64(db, Schema::Sys_LastSchemeShardRound, Self->LastSchemeOpSeqNo.Round);
LOAD_SYS_UI64(db, Schema::Sys_StatisticsDisabled, Self->StatisticsDisabled);
ui64 subDomainOwnerId = 0;
@@ -209,7 +209,7 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
return false;
{ // Reads user tables metadata
- Self->TableInfos.clear(); // For idempotency
+ Self->TableInfos.clear(); // For idempotency
auto rowset = db.Table<Schema::UserTables>().GreaterOrEqual(0).Select(); // TODO[serxa]: this should be Range() but it is not working right now
if (!rowset.IsReady())
return false;
@@ -219,8 +219,8 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
ui32 shadowTid = rowset.GetValueOrDefault<Schema::UserTables::ShadowTid>();
TString schema = rowset.GetValue<Schema::UserTables::Schema>();
NKikimrSchemeOp::TTableDescription descr;
- bool parseOk = ParseFromStringNoSizeLimit(descr, schema);
- Y_VERIFY(parseOk);
+ bool parseOk = ParseFromStringNoSizeLimit(descr, schema);
+ Y_VERIFY(parseOk);
Self->AddUserTable(TPathId(Self->GetPathOwnerId(), tableId), new TUserTable(localTid, descr, shadowTid));
if (!rowset.Next())
return false;
@@ -246,27 +246,27 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
}
}
- { // Read split snapshots on src tablet
- auto rowset = db.Table<Schema::SplitSrcSnapshots>().GreaterOrEqual(0).Select();
- if (!rowset.IsReady())
- return false;
+ { // Read split snapshots on src tablet
+ auto rowset = db.Table<Schema::SplitSrcSnapshots>().GreaterOrEqual(0).Select();
+ if (!rowset.IsReady())
+ return false;
- while (!rowset.EndOfSet()) {
- ui64 dstTablet = rowset.GetValue<Schema::SplitSrcSnapshots::DstTabletId>();
+ while (!rowset.EndOfSet()) {
+ ui64 dstTablet = rowset.GetValue<Schema::SplitSrcSnapshots::DstTabletId>();
TString snapBody = rowset.GetValue<Schema::SplitSrcSnapshots::SnapshotMeta>();
-
- TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot = new NKikimrTxDataShard::TEvSplitTransferSnapshot;
- bool parseOk = ParseFromStringNoSizeLimit(*snapshot, snapBody);
- Y_VERIFY(parseOk);
- Self->SplitSrcSnapshotSender.AddDst(dstTablet);
- Self->SplitSrcSnapshotSender.SaveSnapshotForSending(dstTablet, snapshot);
-
- if (!rowset.Next())
- return false;
- }
- Self->SplitSnapshotStarted = false;
- }
-
+
+ TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot = new NKikimrTxDataShard::TEvSplitTransferSnapshot;
+ bool parseOk = ParseFromStringNoSizeLimit(*snapshot, snapBody);
+ Y_VERIFY(parseOk);
+ Self->SplitSrcSnapshotSender.AddDst(dstTablet);
+ Self->SplitSrcSnapshotSender.SaveSnapshotForSending(dstTablet, snapshot);
+
+ if (!rowset.Next())
+ return false;
+ }
+ Self->SplitSnapshotStarted = false;
+ }
+
if (db.HaveTable<Schema::SrcChangeSenderActivations>()) {
// Read change sender activations on src tablet
auto rowset = db.Table<Schema::SrcChangeSenderActivations>().GreaterOrEqual(0).Select();
@@ -282,40 +282,40 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
}
}
- // Split/Merge description on DST
- LOAD_SYS_UI64(db, Schema::Sys_DstSplitOpId, Self->DstSplitOpId);
- {
+ // Split/Merge description on DST
+ LOAD_SYS_UI64(db, Schema::Sys_DstSplitOpId, Self->DstSplitOpId);
+ {
TString splitDescr;
- LOAD_SYS_BYTES(db, Schema::Sys_DstSplitDescription, splitDescr);
- if (!splitDescr.empty()) {
+ LOAD_SYS_BYTES(db, Schema::Sys_DstSplitDescription, splitDescr);
+ if (!splitDescr.empty()) {
Self->DstSplitDescription = std::make_shared<NKikimrTxDataShard::TSplitMergeDescription>();
- bool parseOk = ParseFromStringNoSizeLimit(*Self->DstSplitDescription, splitDescr);
- Y_VERIFY(parseOk);
- }
-
+ bool parseOk = ParseFromStringNoSizeLimit(*Self->DstSplitDescription, splitDescr);
+ Y_VERIFY(parseOk);
+ }
+
LOAD_SYS_BOOL(db, Schema::Sys_DstSplitSchemaInitialized, Self->DstSplitSchemaInitialized);
- // Add all SRC datashards to the list
- Self->ReceiveSnapshotsFrom.clear();
+ // Add all SRC datashards to the list
+ Self->ReceiveSnapshotsFrom.clear();
Self->ReceiveActivationsFrom.clear();
- if (Self->DstSplitDescription) {
- for (ui32 i = 0; i < Self->DstSplitDescription->SourceRangesSize(); ++i) {
- ui64 srcTabletId = Self->DstSplitDescription->GetSourceRanges(i).GetTabletID();
- Self->ReceiveSnapshotsFrom.insert(srcTabletId);
- }
- }
-
+ if (Self->DstSplitDescription) {
+ for (ui32 i = 0; i < Self->DstSplitDescription->SourceRangesSize(); ++i) {
+ ui64 srcTabletId = Self->DstSplitDescription->GetSourceRanges(i).GetTabletID();
+ Self->ReceiveSnapshotsFrom.insert(srcTabletId);
+ }
+ }
+
{
auto rowset = db.Table<Schema::SplitDstReceivedSnapshots>().GreaterOrEqual(0).Select();
if (!rowset.IsReady())
return false;
-
+
// Exclude SRC datashards from which the snapshots have already been received
while (!rowset.EndOfSet()) {
ui64 srcTabletId = rowset.GetValue<Schema::SplitDstReceivedSnapshots::SrcTabletId>();
Self->ReceiveSnapshotsFrom.erase(srcTabletId);
-
+
if (!rowset.Next())
return false;
}
@@ -324,7 +324,7 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
if (db.HaveTable<Schema::DstChangeSenderActivations>()) {
auto rowset = db.Table<Schema::DstChangeSenderActivations>().GreaterOrEqual(0).Select();
if (!rowset.IsReady())
- return false;
+ return false;
while (!rowset.EndOfSet()) {
ui64 srcTabletId = rowset.GetValue<Schema::DstChangeSenderActivations::SrcTabletId>();
@@ -333,44 +333,44 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
if (!rowset.Next())
return false;
}
- }
- }
-
- // Split/Merge description on SRC
- LOAD_SYS_UI64(db, Schema::Sys_SrcSplitOpId, Self->SrcSplitOpId);
- {
+ }
+ }
+
+ // Split/Merge description on SRC
+ LOAD_SYS_UI64(db, Schema::Sys_SrcSplitOpId, Self->SrcSplitOpId);
+ {
TString splitDescr;
- LOAD_SYS_BYTES(db, Schema::Sys_SrcSplitDescription, splitDescr);
- if (!splitDescr.empty()) {
+ LOAD_SYS_BYTES(db, Schema::Sys_SrcSplitDescription, splitDescr);
+ if (!splitDescr.empty()) {
Self->SrcSplitDescription = std::make_shared<NKikimrTxDataShard::TSplitMergeDescription>();
- bool parseOk = ParseFromStringNoSizeLimit(*Self->SrcSplitDescription, splitDescr);
- Y_VERIFY(parseOk);
+ bool parseOk = ParseFromStringNoSizeLimit(*Self->SrcSplitDescription, splitDescr);
+ Y_VERIFY(parseOk);
for (ui32 i = 0; i < Self->SrcSplitDescription->DestinationRangesSize(); ++i) {
ui64 dstTablet = Self->SrcSplitDescription->GetDestinationRanges(i).GetTabletID();
Self->ChangeExchangeSplitter.AddDst(dstTablet);
}
- }
- }
-
- Y_VERIFY(Self->State != TShardState::Unknown);
-
- Y_VERIFY(Self->SplitSrcSnapshotSender.AllAcked() || Self->State == TShardState::SplitSrcSendingSnapshot,
- "Unexpected state %s while having unsent split snapshots at datashard %" PRIu64,
+ }
+ }
+
+ Y_VERIFY(Self->State != TShardState::Unknown);
+
+ Y_VERIFY(Self->SplitSrcSnapshotSender.AllAcked() || Self->State == TShardState::SplitSrcSendingSnapshot,
+ "Unexpected state %s while having unsent split snapshots at datashard %" PRIu64,
DatashardStateName(Self->State).data(), Self->TabletID());
-
- Y_VERIFY(Self->ReceiveSnapshotsFrom.empty() || Self->State == TShardState::SplitDstReceivingSnapshot,
- "Unexpected state %s while having non-received split snapshots at datashard %" PRIu64,
+
+ Y_VERIFY(Self->ReceiveSnapshotsFrom.empty() || Self->State == TShardState::SplitDstReceivingSnapshot,
+ "Unexpected state %s while having non-received split snapshots at datashard %" PRIu64,
DatashardStateName(Self->State).data(), Self->TabletID());
-
+
// Load unsent ReadSets
if (!Self->OutReadSets.LoadReadSets(db))
return false;
- // TODO: properly check shard state
+ // TODO: properly check shard state
if (Self->State != TShardState::Offline && txc.DB.GetScheme().GetTableInfo(Schema::TxMain::TableId)) {
- if (!Self->TransQueue.Load(db))
- return false;
+ if (!Self->TransQueue.Load(db))
+ return false;
for (auto &pr : Self->TransQueue.GetTxsInFly()) {
pr.second->BuildExecutionPlan(true);
@@ -381,7 +381,7 @@ bool TDataShard::TTxInit::ReadEverything(TTransactionContext &txc) {
if (Self->TransQueue.GetPlan().size())
Self->Pipeline.AddCandidateUnit(EExecutionUnitKind::PlanQueue);
// TODO: add propose blockers to blockers list
- }
+ }
if (Self->State != TShardState::Offline && txc.DB.GetScheme().GetTableInfo(Schema::Snapshots::TableId)) {
if (!Self->SnapshotManager.Reload(db))
@@ -489,21 +489,21 @@ public:
: TBase(self)
{}
- TTxType GetTxType() const override { return TXTYPE_INIT_SCHEMA; }
-
+ TTxType GetTxType() const override { return TXTYPE_INIT_SCHEMA; }
+
bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
Y_UNUSED(txc);
LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "TxInitSchema.Execute");
- NIceDb::TNiceDb db(txc.DB);
-
+ NIceDb::TNiceDb db(txc.DB);
+
bool isCreate = txc.DB.GetScheme().IsEmpty();
- if (isCreate) {
- Self->State = TShardState::WaitScheme;
- } else {
- LOAD_SYS_UI64(db, Schema::Sys_State, Self->State);
- }
+ if (isCreate) {
+ Self->State = TShardState::WaitScheme;
+ } else {
+ LOAD_SYS_UI64(db, Schema::Sys_State, Self->State);
+ }
// Skip full schema migration (and dropped system table recreation)
// if the datashard is in the process of drop.
@@ -513,12 +513,12 @@ public:
Schema::SchemaTables<Schema::ScanProgress>::Materialize(txc.DB, NIceDb::EMaterializationMode::NonExisting);
} else {
db.Materialize<Schema>();
- }
-
+ }
+
if (isCreate) {
txc.DB.Alter().SetExecutorAllowLogBatching(gAllowLogBatchingDefaultValue);
txc.DB.Alter().SetExecutorLogFlushPeriod(TDuration::MicroSeconds(500));
-
+
Self->PersistSys(db, Schema::Sys_State, Self->State);
if (AppData(ctx)->FeatureFlags.GetEnableMvcc()) {
@@ -569,7 +569,7 @@ public:
void Complete(const TActorContext &ctx) override {
LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "TxInitSchema.Complete");
- Self->Execute(Self->CreateTxInit(), ctx);
+ Self->Execute(Self->CreateTxInit(), ctx);
}
};
@@ -600,9 +600,9 @@ public:
};
ITransaction* TDataShard::CreateTxInit() {
- return new TTxInit(this);
-}
-
+ return new TTxInit(this);
+}
+
ITransaction* TDataShard::CreateTxInitSchema() {
return new TTxInitSchema(this);
}
@@ -612,39 +612,39 @@ ITransaction* TDataShard::CreateTxInitSchemaDefaults() {
}
bool TDataShard::SyncSchemeOnFollower(TTransactionContext &txc, const TActorContext &ctx,
- NKikimrTxDataShard::TError::EKind & status, TString& errMessage)
-{
- status = NKikimrTxDataShard::TError::OK;
- errMessage.clear();
-
- const auto& scheme = txc.DB.GetScheme();
-
+ NKikimrTxDataShard::TError::EKind & status, TString& errMessage)
+{
+ status = NKikimrTxDataShard::TError::OK;
+ errMessage.clear();
+
+ const auto& scheme = txc.DB.GetScheme();
+
// Check that TxInit from leader has been already replicated to the follower
- // and all internal tables have already been created
- bool isInitialized = scheme.GetTableInfo(Schema::Sys::TableId);
- if (!isInitialized) {
- status = NKikimrTxDataShard::TError::WRONG_SHARD_STATE;
+ // and all internal tables have already been created
+ bool isInitialized = scheme.GetTableInfo(Schema::Sys::TableId);
+ if (!isInitialized) {
+ status = NKikimrTxDataShard::TError::WRONG_SHARD_STATE;
errMessage = Sprintf("Follower has not been initialized yet: tablet id: %" PRIu64, TabletID());
- return true;
- }
-
+ return true;
+ }
+
auto* userTablesSchema = scheme.GetTableInfo(Schema::UserTables::TableId);
Y_VERIFY(userTablesSchema, "UserTables");
- // Check if user tables schema has changed since last time we synchronized it
- ui64 lastSchemeUpdate = txc.DB.Head(Schema::UserTables::TableId).Serial;
+ // Check if user tables schema has changed since last time we synchronized it
+ ui64 lastSchemeUpdate = txc.DB.Head(Schema::UserTables::TableId).Serial;
if (lastSchemeUpdate > FollowerState.LastSchemeUpdate) {
- NIceDb::TNiceDb db(txc.DB);
- {
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD,
+ NIceDb::TNiceDb db(txc.DB);
+ {
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD,
"Updating tables metadata on follower, tabletId %" PRIu64
- " prevGen %" PRIu64 " prevStep %" PRIu64 " newGen %" PRIu64 " newStep %" PRIu64,
+ " prevGen %" PRIu64 " prevStep %" PRIu64 " newGen %" PRIu64 " newStep %" PRIu64,
TabletID(), FollowerState.LastSchemeUpdate >> 32,
FollowerState.LastSchemeUpdate & (ui32)-1,
- lastSchemeUpdate >> 32, lastSchemeUpdate & (ui32)-1);
-
- // Reload user tables metadata
- TableInfos.clear();
+ lastSchemeUpdate >> 32, lastSchemeUpdate & (ui32)-1);
+
+ // Reload user tables metadata
+ TableInfos.clear();
if (userTablesSchema->Columns.contains(Schema::UserTables::ShadowTid::ColumnId)) {
// New schema with ShadowTid column
@@ -654,7 +654,7 @@ bool TDataShard::SyncSchemeOnFollower(TTransactionContext &txc, const TActorCont
Schema::UserTables::Schema,
Schema::UserTables::ShadowTid>();
if (!rowset.IsReady())
- return false;
+ return false;
while (!rowset.EndOfSet()) {
ui64 tableId = rowset.GetValue<Schema::UserTables::Tid>();
ui32 localTid = rowset.GetValue<Schema::UserTables::LocalTid>();
@@ -687,11 +687,11 @@ bool TDataShard::SyncSchemeOnFollower(TTransactionContext &txc, const TActorCont
if (!rowset.Next())
return false;
}
- }
- }
+ }
+ }
FollowerState.LastSchemeUpdate = lastSchemeUpdate;
- }
-
+ }
+
// N.B. follower with snapshots support may be loaded in datashard without a snapshots table
if (scheme.GetTableInfo(Schema::Snapshots::TableId)) {
ui64 lastSnapshotsUpdate = txc.DB.Head(Schema::Snapshots::TableId).Serial;
@@ -718,7 +718,7 @@ bool TDataShard::SyncSchemeOnFollower(TTransactionContext &txc, const TActorCont
}
}
- return true;
-}
-
+ return true;
+}
+
}}
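
SyncSchemeOnFollower above keys its refresh off the serial of the replicated system table: the follower remembers the last serial it applied and reloads its in-memory metadata only when txc.DB.Head(...).Serial has moved past it, leaving the cached value untouched if the reload is not yet ready. A simplified standalone sketch of that rule (the types are stand-ins, not the real datashard structures):

#include <cstdint>
#include <functional>

struct TFollowerMetadataCache {
    uint64_t LastAppliedSerial = 0;

    // currentSerial models txc.DB.Head(table).Serial; reload re-reads the
    // metadata rows and returns false while the data is not yet in memory.
    bool SyncIfChanged(uint64_t currentSerial, const std::function<bool()>& reload) {
        if (currentSerial <= LastAppliedSerial) {
            return true;                  // nothing changed since the last sync
        }
        if (!reload()) {
            return false;                 // retry later; keep the old serial
        }
        LastAppliedSerial = currentSerial;
        return true;
    }
};
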
diff --git a/ydb/core/tx/datashard/datashard__kqp_scan.cpp b/ydb/core/tx/datashard/datashard__kqp_scan.cpp
index 503eff5406a..b6b6b7e337d 100644
--- a/ydb/core/tx/datashard/datashard__kqp_scan.cpp
+++ b/ydb/core/tx/datashard/datashard__kqp_scan.cpp
@@ -485,7 +485,7 @@ private:
        // send a batch and try to send an empty batch again without adding rows, then a copy of the batch will be sent
// instead. So we check Rows here.
if (Rows != 0) {
- Result->ArrowBatch = Tags.empty() ? NArrow::CreateNoColumnsBatch(Rows) : BatchBuilder->FlushBatch(true);
+ Result->ArrowBatch = Tags.empty() ? NArrow::CreateNoColumnsBatch(Rows) : BatchBuilder->FlushBatch(true);
}
}
diff --git a/ydb/core/tx/datashard/datashard__monitoring.cpp b/ydb/core/tx/datashard/datashard__monitoring.cpp
index aed25c35217..644f9a74199 100644
--- a/ydb/core/tx/datashard/datashard__monitoring.cpp
+++ b/ydb/core/tx/datashard/datashard__monitoring.cpp
@@ -1,17 +1,17 @@
-#include "datashard_impl.h"
+#include "datashard_impl.h"
#include "operation.h"
#include <ydb/core/tablet_flat/flat_stat_table.h>
#include <ydb/core/util/pb.h>
-
+
#include <library/cpp/mime/types/mime.h>
#include <library/cpp/resource/resource.h>
#include <library/cpp/html/pcdata/pcdata.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
+
class TDataShard::TTxMonitoring : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
NMon::TEvRemoteHttpInfo::TPtr Ev;
@@ -89,12 +89,12 @@ public:
stats.SetLastStatsUpdateTime(Self->LastDbStatsUpdateTime.ToStringLocalUpToSeconds());
stats.SetLastStatsReportTime(Self->LastDbStatsReportTime.ToStringLocalUpToSeconds());
}
-
- auto *resourceMetrics = Self->Executor()->GetResourceMetrics();
- if (resourceMetrics) {
- auto &metrics = *rec.MutableMetrics();
- resourceMetrics->Fill(metrics);
- }
+
+ auto *resourceMetrics = Self->Executor()->GetResourceMetrics();
+ if (resourceMetrics) {
+ auto &metrics = *rec.MutableMetrics();
+ resourceMetrics->Fill(metrics);
+ }
}
auto &info = *response->Record.MutableTabletInfo();
@@ -327,4 +327,4 @@ ITransaction *TDataShard::CreateTxGetOperation(TDataShard *self, TEvDataShard::T
return new TTxGetOperation(self, ev);
}
-}}
+}}
diff --git a/ydb/core/tx/datashard/datashard__op_rows.cpp b/ydb/core/tx/datashard/datashard__op_rows.cpp
index 109a148acda..3b3c9d9d6f9 100644
--- a/ydb/core/tx/datashard/datashard__op_rows.cpp
+++ b/ydb/core/tx/datashard/datashard__op_rows.cpp
@@ -1,53 +1,53 @@
-#include "datashard_impl.h"
+#include "datashard_impl.h"
#include "datashard_direct_transaction.h"
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
template <typename TEvRequest>
class TTxDirectBase : public TTransactionBase<TDataShard> {
TEvRequest Ev;
-
+
TOperation::TPtr Op;
TVector<EExecutionUnitKind> CompleteList;
-public:
+public:
TTxDirectBase(TDataShard* ds, TEvRequest ev)
- : TBase(ds)
- , Ev(ev)
+ : TBase(ds)
+ , Ev(ev)
{
}
-
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, "TTxDirectBase(" << GetTxType() << ") Execute"
<< ": at tablet# " << Self->TabletID());
if (Self->IsFollower()) {
return true; // TODO: report error
- }
-
+ }
+
if (Ev) {
const ui64 tieBreaker = Self->NextTieBreakerIndex++;
Op = new TDirectTransaction(tieBreaker, ctx.Now(), tieBreaker, Ev);
Op->BuildExecutionPlan(false);
Self->Pipeline.GetExecutionUnit(Op->GetCurrentUnit()).AddOperation(Op);
-
+
Ev = nullptr;
- }
-
+ }
+
auto status = Self->Pipeline.RunExecutionPlan(Op, CompleteList, txc, ctx);
if (!CompleteList.empty()) {
- return true;
+ return true;
} else if (status == EExecutionStatus::Restart) {
return false;
} else {
Op = nullptr;
return true;
- }
- }
-
+ }
+ }
+
void Complete(const TActorContext& ctx) override {
LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, "TTxDirectBase(" << GetTxType() << ") Complete"
<< ": at tablet# " << Self->TabletID());
@@ -56,7 +56,7 @@ public:
if (Self->Pipeline.CanRunAnotherOp()) {
Self->PlanQueue.Progress(ctx);
}
- }
+ }
}; // TTxDirectBase
@@ -64,8 +64,8 @@ class TDataShard::TTxUploadRows : public TTxDirectBase<TEvDataShard::TEvUploadRo
public:
using TTxDirectBase::TTxDirectBase;
TTxType GetTxType() const override { return TXTYPE_UPLOAD_ROWS; }
-};
-
+};
+
class TDataShard::TTxEraseRows : public TTxDirectBase<TEvDataShard::TEvEraseRowsRequest::TPtr> {
public:
using TTxDirectBase::TTxDirectBase;
@@ -91,11 +91,11 @@ static void WrongShardState(NKikimrTxDataShard::TEvEraseRowsResponse& response)
template <typename TEvResponse, typename TEvRequest>
static bool MaybeReject(TDataShard* self, TEvRequest& ev, const TActorContext& ctx, const TString& txDesc, bool isWrite) {
- NKikimrTxDataShard::TEvProposeTransactionResult::EStatus rejectStatus;
+ NKikimrTxDataShard::TEvProposeTransactionResult::EStatus rejectStatus;
TString rejectReason;
bool reject = self->CheckDataTxReject(txDesc, ctx, rejectStatus, rejectReason);
bool outOfSpace = false;
-
+
if (!reject && isWrite) {
if (self->IsAnyChannelYellowStop()) {
reject = true;
@@ -113,11 +113,11 @@ static bool MaybeReject(TDataShard* self, TEvRequest& ev, const TActorContext& c
if (!reject) {
return false;
}
-
+
LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, "Rejecting " << txDesc << " request on datashard"
<< ": tablet# " << self->TabletID()
<< ", error# " << rejectReason);
-
+
auto response = MakeHolder<TEvResponse>();
response->Record.SetTabletID(self->TabletID());
if (outOfSpace) {
@@ -127,7 +127,7 @@ static bool MaybeReject(TDataShard* self, TEvRequest& ev, const TActorContext& c
}
response->Record.SetErrorDescription(rejectReason);
ctx.Send(ev->Sender, std::move(response));
-
+
return true;
}
@@ -135,10 +135,10 @@ void TDataShard::Handle(TEvDataShard::TEvUploadRowsRequest::TPtr& ev, const TAct
if (!MaybeReject<TEvDataShard::TEvUploadRowsResponse>(this, ev, ctx, "bulk upsert", true)) {
Executor()->Execute(new TTxUploadRows(this, ev), ctx);
} else {
- IncCounter(COUNTER_BULK_UPSERT_OVERLOADED);
- }
-}
-
+ IncCounter(COUNTER_BULK_UPSERT_OVERLOADED);
+ }
+}
+
void TDataShard::Handle(TEvDataShard::TEvEraseRowsRequest::TPtr& ev, const TActorContext& ctx) {
if (!MaybeReject<TEvDataShard::TEvEraseRowsResponse>(this, ev, ctx, "erase", false)) {
Executor()->Execute(new TTxEraseRows(this, ev), ctx);
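The MaybeReject/TTxDirectBase pattern above gates direct requests (bulk upsert, erase): overload or disk-space checks either produce an immediate error reply or let the transaction be scheduled. Below is a minimal standalone sketch of that gating in plain C++; Response and Shard are hypothetical stand-ins, not the actual YDB event or actor API.

#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct Response {                 // stand-in for TEvUploadRowsResponse / TEvEraseRowsResponse
    int status = 0;
    std::string error;
};

struct Shard {                    // stand-in for the datashard state consulted by the check
    bool overloaded = false;
    bool diskSpaceYellow = false; // "yellow stop" channels: reject writes only
};

// Returns true when the request was rejected and a response was already sent.
bool MaybeReject(const Shard& self, bool isWrite,
                 const std::function<void(std::unique_ptr<Response>)>& send) {
    std::string reason;
    if (self.overloaded) {
        reason = "shard is overloaded";
    } else if (isWrite && self.diskSpaceYellow) {
        reason = "out of disk space";
    } else {
        return false;             // accepted: the caller schedules the transaction
    }
    auto resp = std::make_unique<Response>();
    resp->status = 1;
    resp->error = reason;
    send(std::move(resp));        // reply immediately instead of executing
    return true;
}

int main() {
    Shard shard;
    shard.diskSpaceYellow = true;
    bool rejected = MaybeReject(shard, /*isWrite=*/true,
                                [](std::unique_ptr<Response> r) {
                                    std::cout << "rejected: " << r->error << "\n";
                                });
    std::cout << (rejected ? "request rejected\n" : "request scheduled\n");
}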
diff --git a/ydb/core/tx/datashard/datashard__plan_step.cpp b/ydb/core/tx/datashard/datashard__plan_step.cpp
index a196e36c2bb..6399598865a 100644
--- a/ydb/core/tx/datashard/datashard__plan_step.cpp
+++ b/ydb/core/tx/datashard/datashard__plan_step.cpp
@@ -1,7 +1,7 @@
#include "datashard_txs.h"
-#include <util/string/vector.h>
-
+#include <util/string/vector.h>
+
namespace NKikimr {
namespace NDataShard {
@@ -19,43 +19,43 @@ TDataShard::TTxPlanStep::TTxPlanStep(TDataShard *self, TEvTxProcessing::TEvPlanS
bool TDataShard::TTxPlanStep::Execute(TTransactionContext &txc, const TActorContext &ctx) {
Y_VERIFY(Ev);
- // TEvPlanStep are strictly ordered by mediator so this Tx must not be retried not to break this ordering!
- txc.DB.NoMoreReadsForTx();
+ // TEvPlanStep events are strictly ordered by the mediator, so this Tx must not be retried, or that ordering would break!
+ txc.DB.NoMoreReadsForTx();
- TxByAck.clear();
- IsAccepted = false;
+ TxByAck.clear();
+ IsAccepted = false;
- const ui64 step = Ev->Get()->Record.GetStep();
+ const ui64 step = Ev->Get()->Record.GetStep();
Self->LastKnownMediator = Ev->Get()->Record.GetMediatorID();
-
+
TVector<ui64> txIds;
txIds.reserve(Ev->Get()->Record.TransactionsSize());
- for (const auto& tx : Ev->Get()->Record.GetTransactions()) {
- Y_VERIFY(tx.HasTxId());
- Y_VERIFY(tx.HasAckTo());
-
+ for (const auto& tx : Ev->Get()->Record.GetTransactions()) {
+ Y_VERIFY(tx.HasTxId());
+ Y_VERIFY(tx.HasAckTo());
+
txIds.push_back(tx.GetTxId());
-
+
TActorId txOwner = ActorIdFromProto(tx.GetAckTo());
TxByAck[txOwner].push_back(tx.GetTxId());
- }
+ }
- if (Self->State != TShardState::Offline && Self->State != TShardState::PreOffline) {
- // The DS is completing Drop, so we just ignore PlanStep assuming that it might only contain
- // transactions that have already been executed.
- // NOTE: There is a scenario when because of retries the Coordinator might send some old Tx with
- // a new Step.
+ if (Self->State != TShardState::Offline && Self->State != TShardState::PreOffline) {
+ // The DS is completing Drop, so we just ignore PlanStep assuming that it might only contain
+ // transactions that have already been executed.
+ // NOTE: There is a scenario when because of retries the Coordinator might send some old Tx with
+ // a new Step.
IsAccepted = Self->Pipeline.PlanTxs(step, txIds, txc, ctx);
- }
-
+ }
+
if (! IsAccepted) {
LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD,
"Ignore old txIds [" << JoinStrings(txIds.begin(), txIds.end(), ", ")
<< "] for step " << step << " outdated step " << Self->Pipeline.OutdatedCleanupStep()
<< " at tablet " << Self->TabletID());
Self->IncCounter(COUNTER_PLAN_STEP_IGNORED);
- return true;
- }
+ return true;
+ }
for (ui64 txId : txIds) {
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
@@ -63,9 +63,9 @@ bool TDataShard::TTxPlanStep::Execute(TTransactionContext &txc, const TActorCont
<< " at tablet " << Self->TabletID() << " " << Ev->Get()->Record);
}
- Self->PlanQueue.Progress(ctx);
+ Self->PlanQueue.Progress(ctx);
Self->IncCounter(COUNTER_PLAN_STEP_ACCEPTED);
- return true;
+ return true;
}
void TDataShard::TTxPlanStep::Complete(const TActorContext &ctx) {
@@ -76,14 +76,14 @@ void TDataShard::TTxPlanStep::Complete(const TActorContext &ctx) {
THolder<TEvTxProcessing::TEvPlanStepAck> ack =
MakeHolder<TEvTxProcessing::TEvPlanStepAck>(Self->TabletID(), step, kv.second.begin(), kv.second.end());
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Sending '" << ack->ToString());
-
+
ctx.Send(kv.first, ack.Release()); // Ack to Tx coordinator
}
THolder<TEvTxProcessing::TEvPlanStepAccepted> accepted =
MakeHolder<TEvTxProcessing::TEvPlanStepAccepted>(Self->TabletID(), step);
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, "Sending '" << accepted->ToString());
-
+
ctx.Send(Ev->Sender, accepted.Release()); // Reply to the mediator
if (IsAccepted) {
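TTxPlanStep::Complete above fans acks out per owner: planned tx ids are grouped by their AckTo actor, one TEvPlanStepAck is sent per owner, and a single TEvPlanStepAccepted goes back to the mediator. The following is a minimal sketch of the grouping step only, in plain C++ with hypothetical ActorId/PlannedTx types (not the YDB actor API).

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct ActorId {                              // stand-in for TActorId
    uint64_t raw;
    bool operator<(const ActorId& o) const { return raw < o.raw; }
};

struct PlannedTx {                            // one entry of the plan step
    uint64_t txId;
    ActorId ackTo;
};

int main() {
    std::vector<PlannedTx> step = {{10, {1}}, {11, {2}}, {12, {1}}};

    // TxByAck: one ack per owner, carrying all of that owner's tx ids.
    std::map<ActorId, std::vector<uint64_t>> txByAck;
    for (const auto& tx : step)
        txByAck[tx.ackTo].push_back(tx.txId);

    for (const auto& [owner, txIds] : txByAck) {
        std::cout << "ack to actor " << owner.raw << ":";
        for (uint64_t id : txIds) std::cout << ' ' << id;
        std::cout << '\n';
    }
    // A single "plan step accepted" reply would then go back to the mediator.
}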
diff --git a/ydb/core/tx/datashard/datashard__progress_tx.cpp b/ydb/core/tx/datashard/datashard__progress_tx.cpp
index 6d071faaa9a..3f1fab6b46e 100644
--- a/ydb/core/tx/datashard/datashard__progress_tx.cpp
+++ b/ydb/core/tx/datashard/datashard__progress_tx.cpp
@@ -131,7 +131,7 @@ void TDataShard::TTxProgressTransaction::Complete(const TActorContext &ctx) {
Self->PlanQueue.Progress(ctx);
}
- Self->CheckSplitCanStart(ctx);
+ Self->CheckSplitCanStart(ctx);
Self->CheckMvccStateChangeCanStart(ctx);
}
diff --git a/ydb/core/tx/datashard/datashard__propose_tx_base.cpp b/ydb/core/tx/datashard/datashard__propose_tx_base.cpp
index 1e10155e696..7dfb1a294e6 100644
--- a/ydb/core/tx/datashard/datashard__propose_tx_base.cpp
+++ b/ydb/core/tx/datashard/datashard__propose_tx_base.cpp
@@ -38,18 +38,18 @@ bool TDataShard::TTxProposeTransactionBase::Execute(NTabletFlatExecutor::TTransa
// If tablet is in follower mode then we should sync scheme
// before we build and check operation.
if (Self->IsFollower()) {
- NKikimrTxDataShard::TError::EKind status = NKikimrTxDataShard::TError::OK;
- TString errMessage;
-
+ NKikimrTxDataShard::TError::EKind status = NKikimrTxDataShard::TError::OK;
+ TString errMessage;
+
if (!Self->SyncSchemeOnFollower(txc, ctx, status, errMessage))
return false;
-
- if (status != NKikimrTxDataShard::TError::OK) {
- auto kind = static_cast<NKikimrTxDataShard::ETransactionKind>(Kind);
+
+ if (status != NKikimrTxDataShard::TError::OK) {
+ auto kind = static_cast<NKikimrTxDataShard::ETransactionKind>(Kind);
result.Reset(new TEvDataShard::TEvProposeTransactionResult(kind, Self->TabletID(), TxId,
NKikimrTxDataShard::TEvProposeTransactionResult::ERROR));
- result->AddError(status, errMessage);
- }
+ result->AddError(status, errMessage);
+ }
}
if (result) {
@@ -147,8 +147,8 @@ bool TDataShard::TTxProposeTransactionBase::Execute(NTabletFlatExecutor::TTransa
Y_FAIL();
} catch (const TMemoryLimitExceededException &ex) {
Y_FAIL("there must be no leaked exceptions: TMemoryLimitExceededException");
- } catch (const std::exception &e) {
- Y_FAIL("there must be no leaked exceptions: %s", e.what());
+ } catch (const std::exception &e) {
+ Y_FAIL("there must be no leaked exceptions: %s", e.what());
} catch (...) {
Y_FAIL("there must be no leaked exceptions");
}
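The catch blocks above act as an exception firewall: any exception escaping Execute is turned into an immediate process failure instead of being propagated further. A minimal sketch of the same idea in plain C++ follows; std::abort stands in for Y_FAIL and the wrapper name is hypothetical.

#include <cstdio>
#include <cstdlib>
#include <exception>
#include <functional>

// RunNoThrow is a hypothetical wrapper; the real code inlines these catch blocks.
bool RunNoThrow(const std::function<bool()>& body) {
    try {
        return body();
    } catch (const std::exception& e) {
        std::fprintf(stderr, "there must be no leaked exceptions: %s\n", e.what());
        std::abort();             // stands in for Y_FAIL
    } catch (...) {
        std::fprintf(stderr, "there must be no leaked exceptions\n");
        std::abort();
    }
}

int main() {
    bool done = RunNoThrow([] { return true; });   // normal path: no exception escapes
    std::printf("executed: %d\n", done);
}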
diff --git a/ydb/core/tx/datashard/datashard__read_columns.cpp b/ydb/core/tx/datashard/datashard__read_columns.cpp
index 20ca671ae03..2b56a192d10 100644
--- a/ydb/core/tx/datashard/datashard__read_columns.cpp
+++ b/ydb/core/tx/datashard/datashard__read_columns.cpp
@@ -1,284 +1,284 @@
-#include "datashard_impl.h"
+#include "datashard_impl.h"
#include <ydb/core/formats/factory.h>
-#include <util/string/vector.h>
-
-namespace NKikimr {
+#include <util/string/vector.h>
+
+namespace NKikimr {
namespace NDataShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
class TTxReleaseSnaphotReference : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
- TSnapshotKey SnapshotKey;
-
-public:
+ TSnapshotKey SnapshotKey;
+
+public:
TTxReleaseSnaphotReference(TDataShard* self, const TSnapshotKey& snapshotKey)
: TTransactionBase<TDataShard>(self)
- , SnapshotKey(snapshotKey)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_READ_COLUMNS; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Self->GetSnapshotManager().ReleaseReference(SnapshotKey, txc.DB, ctx.Now());
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- Y_UNUSED(ctx);
- }
-};
-
-
-struct TKeyBoundary {
- TSerializedCellVec Key;
- bool Inclusive;
-};
-
-class TReadColumnsScan : public INoTxScan {
+ , SnapshotKey(snapshotKey)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_READ_COLUMNS; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Self->GetSnapshotManager().ReleaseReference(SnapshotKey, txc.DB, ctx.Now());
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ Y_UNUSED(ctx);
+ }
+};
+
+
+struct TKeyBoundary {
+ TSerializedCellVec Key;
+ bool Inclusive;
+};
+
+class TReadColumnsScan : public INoTxScan {
const TActorId ReplyTo;
const TActorId DatashardActorId;
- const TString TableName;
- const ui64 TabletId;
- const TKeyBoundary From;
- const TKeyBoundary To;
- const TVector<NTable::TTag> ValueColumns;
- const TVector<NScheme::TTypeId> ValueColumnTypes;
- const ui64 RowsLimit = 100000;
- const ui64 BytesLimit = 1024*1024;
- const TKeyBoundary ShardEnd;
- TMaybe<TSnapshotKey> SnapshotKey;
-
- std::unique_ptr<IBlockBuilder> BlockBuilder;
- ui64 Rows = 0;
- ui64 Bytes = 0;
- bool ShardFinished = true;
- TString LastKeySerialized;
- TAutoPtr<TEvDataShard::TEvReadColumnsResponse> Result;
-
- IDriver *Driver = nullptr;
+ const TString TableName;
+ const ui64 TabletId;
+ const TKeyBoundary From;
+ const TKeyBoundary To;
+ const TVector<NTable::TTag> ValueColumns;
+ const TVector<NScheme::TTypeId> ValueColumnTypes;
+ const ui64 RowsLimit = 100000;
+ const ui64 BytesLimit = 1024*1024;
+ const TKeyBoundary ShardEnd;
+ TMaybe<TSnapshotKey> SnapshotKey;
+
+ std::unique_ptr<IBlockBuilder> BlockBuilder;
+ ui64 Rows = 0;
+ ui64 Bytes = 0;
+ bool ShardFinished = true;
+ TString LastKeySerialized;
+ TAutoPtr<TEvDataShard::TEvReadColumnsResponse> Result;
+
+ IDriver *Driver = nullptr;
TIntrusiveConstPtr<TScheme> Scheme;
-
-public:
- TReadColumnsScan(const TKeyBoundary& keyFrom,
- const TKeyBoundary& keyTo,
- const TVector<NTable::TTag>& valueColumns,
- const TVector<NScheme::TTypeId> valueColumnTypes,
- std::unique_ptr<IBlockBuilder>&& blockBuilder,
- ui64 rowsLimit, ui64 bytesLimit,
- const TKeyBoundary& shardEnd,
+
+public:
+ TReadColumnsScan(const TKeyBoundary& keyFrom,
+ const TKeyBoundary& keyTo,
+ const TVector<NTable::TTag>& valueColumns,
+ const TVector<NScheme::TTypeId> valueColumnTypes,
+ std::unique_ptr<IBlockBuilder>&& blockBuilder,
+ ui64 rowsLimit, ui64 bytesLimit,
+ const TKeyBoundary& shardEnd,
const TActorId& replyTo,
const TActorId& datashardActorId,
- TMaybe<TSnapshotKey> snapshotKey,
- const TString& tableName,
- ui64 tabletId)
- : ReplyTo(replyTo)
- , DatashardActorId(datashardActorId)
- , TableName(tableName)
- , TabletId(tabletId)
- , From(keyFrom)
- , To(keyTo)
- , ValueColumns(valueColumns)
- , ValueColumnTypes(valueColumnTypes)
- , RowsLimit(rowsLimit)
- , BytesLimit(bytesLimit)
- , ShardEnd(shardEnd)
- , SnapshotKey(snapshotKey)
- , BlockBuilder(std::move(blockBuilder))
- {}
-
+ TMaybe<TSnapshotKey> snapshotKey,
+ const TString& tableName,
+ ui64 tabletId)
+ : ReplyTo(replyTo)
+ , DatashardActorId(datashardActorId)
+ , TableName(tableName)
+ , TabletId(tabletId)
+ , From(keyFrom)
+ , To(keyTo)
+ , ValueColumns(valueColumns)
+ , ValueColumnTypes(valueColumnTypes)
+ , RowsLimit(rowsLimit)
+ , BytesLimit(bytesLimit)
+ , ShardEnd(shardEnd)
+ , SnapshotKey(snapshotKey)
+ , BlockBuilder(std::move(blockBuilder))
+ {}
+
THello Prepare(IDriver* driver, TIntrusiveConstPtr<TScheme> scheme) noexcept override {
- Driver = driver;
- Scheme = std::move(scheme);
-
- THello hello;
- hello.Scan = EScan::Reset;
- return hello;
- }
-
- EScan Seek(TLead& lead, ui64 seq) noexcept override {
- Y_VERIFY(seq == 0, "Unexpected repeated Seek");
-
- lead.To(ValueColumns, From.Key.GetCells(), From.Inclusive ? NTable::ESeek::Lower : NTable::ESeek::Upper);
- lead.Until(To.Key.GetCells(), To.Inclusive);
-
- return EScan::Feed;
- }
-
- EScan Feed(TArrayRef<const TCell> key, const TRow& row) noexcept override {
- const auto& keyTypes = Scheme->Keys->BasicTypes();
-
- Y_VERIFY(key.size() == keyTypes.size());
- Y_VERIFY((*row).size() == ValueColumnTypes.size());
-
- TDbTupleRef rowKey(keyTypes.data(), key.data(), keyTypes.size());
- TDbTupleRef rowValues(ValueColumnTypes.data(), (*row).data(), ValueColumnTypes.size());
-
- BlockBuilder->AddRow(rowKey, rowValues);
-
- Rows++;
- Bytes = BlockBuilder->Bytes();
-
- if (Rows >= RowsLimit || Bytes >= BytesLimit) {
- ShardFinished = false;
- LastKeySerialized = TSerializedCellVec::Serialize(key);
- return EScan::Final;
- }
-
- return EScan::Feed;
- }
-
+ Driver = driver;
+ Scheme = std::move(scheme);
+
+ THello hello;
+ hello.Scan = EScan::Reset;
+ return hello;
+ }
+
+ EScan Seek(TLead& lead, ui64 seq) noexcept override {
+ Y_VERIFY(seq == 0, "Unexpected repeated Seek");
+
+ lead.To(ValueColumns, From.Key.GetCells(), From.Inclusive ? NTable::ESeek::Lower : NTable::ESeek::Upper);
+ lead.Until(To.Key.GetCells(), To.Inclusive);
+
+ return EScan::Feed;
+ }
+
+ EScan Feed(TArrayRef<const TCell> key, const TRow& row) noexcept override {
+ const auto& keyTypes = Scheme->Keys->BasicTypes();
+
+ Y_VERIFY(key.size() == keyTypes.size());
+ Y_VERIFY((*row).size() == ValueColumnTypes.size());
+
+ TDbTupleRef rowKey(keyTypes.data(), key.data(), keyTypes.size());
+ TDbTupleRef rowValues(ValueColumnTypes.data(), (*row).data(), ValueColumnTypes.size());
+
+ BlockBuilder->AddRow(rowKey, rowValues);
+
+ Rows++;
+ Bytes = BlockBuilder->Bytes();
+
+ if (Rows >= RowsLimit || Bytes >= BytesLimit) {
+ ShardFinished = false;
+ LastKeySerialized = TSerializedCellVec::Serialize(key);
+ return EScan::Final;
+ }
+
+ return EScan::Feed;
+ }
+
TAutoPtr<IDestructable> Finish(EAbort reason) noexcept override {
- Result = new TEvDataShard::TEvReadColumnsResponse(TabletId);
-
- if (reason == EAbort::None) {
- TString buffer = BlockBuilder->Finish();
- buffer.resize(BlockBuilder->Bytes());
- BlockBuilder.reset();
-
- Result->Record.SetBlocks(buffer);
- Result->Record.SetLastKey(ShardFinished ? ShardEnd.Key.GetBuffer() : LastKeySerialized);
- Result->Record.SetLastKeyInclusive(ShardFinished ? ShardEnd.Inclusive : true);
- Result->Record.SetEndOfShard(ShardFinished);
-
- LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_DATASHARD, TabletId
- << " Read columns scan result for table [" << TableName << "]: "
- << Rows << " rows, " << Bytes << " bytes (event size "
- << Result->Record.GetBlocks().size() << ") shardFinished: " << ShardFinished);
- } else {
- LOG_NOTICE_S(*TlsActivationContext, NKikimrServices::TX_DATASHARD, TabletId
- << " Read columns scan failed for table [" << TableName << "]");
-
- Result->Record.SetStatus(NKikimrTxDataShard::TError::WRONG_SHARD_STATE);
- Result->Record.SetErrorDescription("Scan aborted");
- }
-
+ Result = new TEvDataShard::TEvReadColumnsResponse(TabletId);
+
+ if (reason == EAbort::None) {
+ TString buffer = BlockBuilder->Finish();
+ buffer.resize(BlockBuilder->Bytes());
+ BlockBuilder.reset();
+
+ Result->Record.SetBlocks(buffer);
+ Result->Record.SetLastKey(ShardFinished ? ShardEnd.Key.GetBuffer() : LastKeySerialized);
+ Result->Record.SetLastKeyInclusive(ShardFinished ? ShardEnd.Inclusive : true);
+ Result->Record.SetEndOfShard(ShardFinished);
+
+ LOG_DEBUG_S(*TlsActivationContext, NKikimrServices::TX_DATASHARD, TabletId
+ << " Read columns scan result for table [" << TableName << "]: "
+ << Rows << " rows, " << Bytes << " bytes (event size "
+ << Result->Record.GetBlocks().size() << ") shardFinished: " << ShardFinished);
+ } else {
+ LOG_NOTICE_S(*TlsActivationContext, NKikimrServices::TX_DATASHARD, TabletId
+ << " Read columns scan failed for table [" << TableName << "]");
+
+ Result->Record.SetStatus(NKikimrTxDataShard::TError::WRONG_SHARD_STATE);
+ Result->Record.SetErrorDescription("Scan aborted");
+ }
+
TlsActivationContext->Send(new IEventHandle(ReplyTo, TActorId(), Result.Release()));
TlsActivationContext->Send(new IEventHandle(DatashardActorId, TActorId(), new TDataShard::TEvPrivate::TEvScanStats(Rows, Bytes)));
-
- return this;
- }
-
- EScan Exhausted() noexcept override {
- return EScan::Final;
- }
-
- void Describe(IOutputStream& str) const noexcept override {
- str << "ReadColumnsScan table: ["<< TableName << "]shard: " << TabletId;
- }
-
+
+ return this;
+ }
+
+ EScan Exhausted() noexcept override {
+ return EScan::Final;
+ }
+
+ void Describe(IOutputStream& str) const noexcept override {
+ str << "ReadColumnsScan table: ["<< TableName << "]shard: " << TabletId;
+ }
+
void OnFinished(TDataShard* self) override {
- if (SnapshotKey) {
- self->Execute(new TTxReleaseSnaphotReference(self, *SnapshotKey));
- }
- }
-};
-
-
+ if (SnapshotKey) {
+ self->Execute(new TTxReleaseSnaphotReference(self, *SnapshotKey));
+ }
+ }
+};
+
+
class TDataShard::TTxReadColumns : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvReadColumnsRequest::TPtr Ev;
- TAutoPtr<TEvDataShard::TEvReadColumnsResponse> Result;
- TSmallVec<TRawTypeValue> KeyFrom;
- TSmallVec<TRawTypeValue> KeyTo;
- bool InclusiveFrom;
- bool InclusiveTo;
+private:
+ TEvDataShard::TEvReadColumnsRequest::TPtr Ev;
+ TAutoPtr<TEvDataShard::TEvReadColumnsResponse> Result;
+ TSmallVec<TRawTypeValue> KeyFrom;
+ TSmallVec<TRawTypeValue> KeyTo;
+ bool InclusiveFrom;
+ bool InclusiveTo;
ui64 RowsLimit = 100000;
ui64 BytesLimit = 1024*1024;
- ui64 Restarts = 0;
+ ui64 Restarts = 0;
TRowVersion ReadVersion = TRowVersion::Max();
-
-public:
+
+public:
TTxReadColumns(TDataShard* ds, TEvDataShard::TEvReadColumnsRequest::TPtr ev)
- : TBase(ds)
- , Ev(ev)
+ : TBase(ds)
+ , Ev(ev)
{
if (Ev->Get()->Record.HasSnapshotStep() && Ev->Get()->Record.HasSnapshotTxId()) {
ReadVersion.Step = Ev->Get()->Record.GetSnapshotStep();
ReadVersion.TxId = Ev->Get()->Record.GetSnapshotTxId();
}
}
-
- TTxType GetTxType() const override { return TXTYPE_READ_COLUMNS; }
-
- bool Precharge(NTable::TDatabase& db, ui32 localTid, const TVector<NTable::TTag>& valueColumns) {
- bool ready = db.Precharge(localTid,
- KeyFrom,
- KeyTo,
- valueColumns,
- 0,
+
+ TTxType GetTxType() const override { return TXTYPE_READ_COLUMNS; }
+
+ bool Precharge(NTable::TDatabase& db, ui32 localTid, const TVector<NTable::TTag>& valueColumns) {
+ bool ready = db.Precharge(localTid,
+ KeyFrom,
+ KeyTo,
+ valueColumns,
+ 0,
RowsLimit, BytesLimit,
NTable::EDirection::Forward, ReadVersion);
- return ready;
- }
-
+ return ready;
+ }
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Result = new TEvDataShard::TEvReadColumnsResponse(Self->TabletID());
-
- bool useScan = Self->ReadColumnsScanEnabled;
-
+ Result = new TEvDataShard::TEvReadColumnsResponse(Self->TabletID());
+
+ bool useScan = Self->ReadColumnsScanEnabled;
+
if (Self->IsFollower()) {
- NKikimrTxDataShard::TError::EKind status = NKikimrTxDataShard::TError::OK;
- TString errMessage;
-
+ NKikimrTxDataShard::TError::EKind status = NKikimrTxDataShard::TError::OK;
+ TString errMessage;
+
if (!Self->SyncSchemeOnFollower(txc, ctx, status, errMessage))
- return false;
-
- if (status != NKikimrTxDataShard::TError::OK) {
- SetError(status, errMessage);
- return true;
- }
-
- if (!ReadVersion.IsMax()) {
- NIceDb::TNiceDb db(txc.DB);
- TRowVersion lastCompleteTx;
+ return false;
+
+ if (status != NKikimrTxDataShard::TError::OK) {
+ SetError(status, errMessage);
+ return true;
+ }
+
+ if (!ReadVersion.IsMax()) {
+ NIceDb::TNiceDb db(txc.DB);
+ TRowVersion lastCompleteTx;
if (!TDataShard::SysGetUi64(db, Schema::Sys_LastCompleteStep, lastCompleteTx.Step))
- return false;
+ return false;
if (!TDataShard::SysGetUi64(db, Schema::Sys_LastCompleteTx, lastCompleteTx.TxId))
- return false;
-
- if (ReadVersion > lastCompleteTx) {
- SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
- TStringBuilder() << "RO replica last version " << lastCompleteTx
- << " lags behind the requested snapshot " << ReadVersion
- << " shard " << Self->TabletID());
- return true;
- }
- }
-
- useScan = false;
- }
-
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Read columns: " << Ev->Get()->Record);
-
- if (Self->State != TShardState::Ready &&
- Self->State != TShardState::Readonly)
- {
- SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
- Sprintf("Wrong shard state: %s tablet id: %" PRIu64,
- DatashardStateName(Self->State).c_str(), Self->TabletID()));
- return true;
- }
-
- const ui64 tableId = Ev->Get()->Record.GetTableId();
- if (Ev->Get()->Record.GetMaxBytes()) {
- BytesLimit = Ev->Get()->Record.GetMaxBytes();
- }
-
- if (!Self->TableInfos.contains(tableId)) {
- SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, Sprintf("Unknown table id %" PRIu64, tableId));
- return true;
- }
-
- TMaybe<TSnapshotKey> snapshotKey;
+ return false;
+
+ if (ReadVersion > lastCompleteTx) {
+ SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
+ TStringBuilder() << "RO replica last version " << lastCompleteTx
+ << " lags behind the requested snapshot " << ReadVersion
+ << " shard " << Self->TabletID());
+ return true;
+ }
+ }
+
+ useScan = false;
+ }
+
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Read columns: " << Ev->Get()->Record);
+
+ if (Self->State != TShardState::Ready &&
+ Self->State != TShardState::Readonly)
+ {
+ SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
+ Sprintf("Wrong shard state: %s tablet id: %" PRIu64,
+ DatashardStateName(Self->State).c_str(), Self->TabletID()));
+ return true;
+ }
+
+ const ui64 tableId = Ev->Get()->Record.GetTableId();
+ if (Ev->Get()->Record.GetMaxBytes()) {
+ BytesLimit = Ev->Get()->Record.GetMaxBytes();
+ }
+
+ if (!Self->TableInfos.contains(tableId)) {
+ SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, Sprintf("Unknown table id %" PRIu64, tableId));
+ return true;
+ }
+
+ TMaybe<TSnapshotKey> snapshotKey;
if (!ReadVersion.IsMax()) {
// FIXME: protocol needs a full table id (both owner id and path id)
ui64 ownerId = Self->GetPathOwnerId();
- snapshotKey = TSnapshotKey(ownerId, tableId, ReadVersion.Step, ReadVersion.TxId);
+ snapshotKey = TSnapshotKey(ownerId, tableId, ReadVersion.Step, ReadVersion.TxId);
// Check if readVersion is a valid snapshot
- if (!Self->GetSnapshotManager().FindAvailable(*snapshotKey)) {
+ if (!Self->GetSnapshotManager().FindAvailable(*snapshotKey)) {
SetError(NKikimrTxDataShard::TError::SNAPSHOT_NOT_EXIST,
- TStringBuilder() << "Table id " << tableId << " has no snapshot at " << ReadVersion
+ TStringBuilder() << "Table id " << tableId << " has no snapshot at " << ReadVersion
<< " shard " << Self->TabletID() << (Self->IsFollower() ? " RO replica" : ""));
return true;
}
@@ -290,225 +290,225 @@ public:
return true;
}
- const ui32 localTableId = tableInfo.LocalTid;
- THashMap<TString, ui32> columnsByName;
- for (const auto& col : tableInfo.Columns) {
- columnsByName[col.second.Name] = col.first;
- }
-
- TString format = "clickhouse_native";
+ const ui32 localTableId = tableInfo.LocalTid;
+ THashMap<TString, ui32> columnsByName;
+ for (const auto& col : tableInfo.Columns) {
+ columnsByName[col.second.Name] = col.first;
+ }
+
+ TString format = "clickhouse_native";
if (Ev->Get()->Record.HasFormat()) {
format = Ev->Get()->Record.GetFormat();
}
- std::unique_ptr<IBlockBuilder> blockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
- if (!blockBuilder) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
- Sprintf("Unsupported block format \"%s\"", format.data()));
- return true;
- }
-
- // TODO: check schemas
-
- TSerializedCellVec fromKeyCells(Ev->Get()->Record.GetFromKey());
- KeyFrom.clear();
- for (ui32 i = 0; i < fromKeyCells.GetCells().size(); ++i) {
- KeyFrom.push_back(TRawTypeValue(fromKeyCells.GetCells()[i].AsRef(), tableInfo.KeyColumnTypes[i]));
- }
- KeyFrom.resize(tableInfo.KeyColumnTypes.size());
- InclusiveFrom = Ev->Get()->Record.GetFromKeyInclusive();
- KeyTo.clear();
- InclusiveTo = true;
-
- TSerializedCellVec toKeyCells;
-
- if (!useScan) {
- // Use histogram to limit the range for single request
- const auto& sizeHistogram = tableInfo.Stats.DataStats.DataSizeHistogram;
- auto histIt = LowerBound(sizeHistogram.begin(), sizeHistogram.end(), fromKeyCells,
- [&tableInfo] (const NTable::TBucket& bucket, const TSerializedCellVec& key) {
- TSerializedCellVec bk(bucket.EndKey);
- return CompareTypedCellVectors(
- bk.GetCells().data(), key.GetCells().data(),
- tableInfo.KeyColumnTypes.data(),
- bk.GetCells().size(), key.GetCells().size()) < 0;
- });
-
- if (histIt != sizeHistogram.end() && ++histIt != sizeHistogram.end()) {
- toKeyCells.Parse(histIt->EndKey);
- for (ui32 i = 0; i < toKeyCells.GetCells().size(); ++i) {
- KeyTo.push_back(TRawTypeValue(toKeyCells.GetCells()[i].AsRef(), tableInfo.KeyColumnTypes[i]));
- }
- }
- }
-
- TVector<NTable::TTag> valueColumns;
- TVector<NScheme::TTypeId> valueColumnTypes;
- TVector<std::pair<TString, NScheme::TTypeId>> columns;
-
- if (Ev->Get()->Record.GetColumns().empty()) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT, "Empty column list");
- return true;
- }
-
+ std::unique_ptr<IBlockBuilder> blockBuilder = AppData()->FormatFactory->CreateBlockBuilder(format);
+ if (!blockBuilder) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
+ Sprintf("Unsupported block format \"%s\"", format.data()));
+ return true;
+ }
+
+ // TODO: check schemas
+
+ TSerializedCellVec fromKeyCells(Ev->Get()->Record.GetFromKey());
+ KeyFrom.clear();
+ for (ui32 i = 0; i < fromKeyCells.GetCells().size(); ++i) {
+ KeyFrom.push_back(TRawTypeValue(fromKeyCells.GetCells()[i].AsRef(), tableInfo.KeyColumnTypes[i]));
+ }
+ KeyFrom.resize(tableInfo.KeyColumnTypes.size());
+ InclusiveFrom = Ev->Get()->Record.GetFromKeyInclusive();
+ KeyTo.clear();
+ InclusiveTo = true;
+
+ TSerializedCellVec toKeyCells;
+
+ if (!useScan) {
+ // Use the histogram to limit the range for a single request
+ const auto& sizeHistogram = tableInfo.Stats.DataStats.DataSizeHistogram;
+ auto histIt = LowerBound(sizeHistogram.begin(), sizeHistogram.end(), fromKeyCells,
+ [&tableInfo] (const NTable::TBucket& bucket, const TSerializedCellVec& key) {
+ TSerializedCellVec bk(bucket.EndKey);
+ return CompareTypedCellVectors(
+ bk.GetCells().data(), key.GetCells().data(),
+ tableInfo.KeyColumnTypes.data(),
+ bk.GetCells().size(), key.GetCells().size()) < 0;
+ });
+
+ if (histIt != sizeHistogram.end() && ++histIt != sizeHistogram.end()) {
+ toKeyCells.Parse(histIt->EndKey);
+ for (ui32 i = 0; i < toKeyCells.GetCells().size(); ++i) {
+ KeyTo.push_back(TRawTypeValue(toKeyCells.GetCells()[i].AsRef(), tableInfo.KeyColumnTypes[i]));
+ }
+ }
+ }
+
+ TVector<NTable::TTag> valueColumns;
+ TVector<NScheme::TTypeId> valueColumnTypes;
+ TVector<std::pair<TString, NScheme::TTypeId>> columns;
+
+ if (Ev->Get()->Record.GetColumns().empty()) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT, "Empty column list");
+ return true;
+ }
+
for (const auto& col : Ev->Get()->Record.GetColumns()) {
- if (!columnsByName.contains(col)) {
- SetError(NKikimrTxDataShard::TError::SCHEME_ERROR,
- Sprintf("Unknown column: %s", col.data()));
- return true;
- }
-
- NTable::TTag colId = columnsByName[col];
- valueColumns.push_back(colId);
- valueColumnTypes.push_back(tableInfo.Columns.at(colId).Type);
+ if (!columnsByName.contains(col)) {
+ SetError(NKikimrTxDataShard::TError::SCHEME_ERROR,
+ Sprintf("Unknown column: %s", col.data()));
+ return true;
+ }
+
+ NTable::TTag colId = columnsByName[col];
+ valueColumns.push_back(colId);
+ valueColumnTypes.push_back(tableInfo.Columns.at(colId).Type);
columns.push_back({col, tableInfo.Columns.at(colId).Type});
- }
-
- ui64 rowsPerBlock = Ev->Get()->Record.GetMaxRows() ? Ev->Get()->Record.GetMaxRows() : 64000;
- ui64 bytesPerBlock = 64000;
-
- TString err;
- if (!blockBuilder->Start(columns, rowsPerBlock, bytesPerBlock, err)) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
- Sprintf("Failed to create block builder \"%s\"", err.data()));
- return true;
- }
-
- tableInfo.Stats.AccessTime = TAppData::TimeProvider->Now();
-
- if (useScan) {
- if (snapshotKey) {
- if (!Self->GetSnapshotManager().AcquireReference(*snapshotKey)) {
- SetError(NKikimrTxDataShard::TError::SNAPSHOT_NOT_EXIST,
- TStringBuilder() << "Table id " << tableId << " has no snapshot at " << ReadVersion
+ }
+
+ ui64 rowsPerBlock = Ev->Get()->Record.GetMaxRows() ? Ev->Get()->Record.GetMaxRows() : 64000;
+ ui64 bytesPerBlock = 64000;
+
+ TString err;
+ if (!blockBuilder->Start(columns, rowsPerBlock, bytesPerBlock, err)) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
+ Sprintf("Failed to create block builder \"%s\"", err.data()));
+ return true;
+ }
+
+ tableInfo.Stats.AccessTime = TAppData::TimeProvider->Now();
+
+ if (useScan) {
+ if (snapshotKey) {
+ if (!Self->GetSnapshotManager().AcquireReference(*snapshotKey)) {
+ SetError(NKikimrTxDataShard::TError::SNAPSHOT_NOT_EXIST,
+ TStringBuilder() << "Table id " << tableId << " has no snapshot at " << ReadVersion
<< " shard " << Self->TabletID() << (Self->IsFollower() ? " RO replica" : ""));
- return true;
- }
- }
-
- auto* scan = new TReadColumnsScan(TKeyBoundary{fromKeyCells, InclusiveFrom},
- TKeyBoundary{toKeyCells, InclusiveTo},
- valueColumns, valueColumnTypes,
- std::move(blockBuilder), RowsLimit, BytesLimit,
- TKeyBoundary{tableInfo.Range.To, tableInfo.Range.ToInclusive},
- Ev->Sender, ctx.SelfID,
- snapshotKey,
- tableInfo.Path,
- Self->TabletID());
- auto opts = TScanOptions()
- .SetResourceBroker("scan", 10)
- .SetSnapshotRowVersion(ReadVersion)
- .SetActorPoolId(Self->ReadColumnsScanInUserPool ? AppData(ctx)->UserPoolId : AppData(ctx)->BatchPoolId)
- .SetReadAhead(512*1024, 1024*1024)
- .SetReadPrio(TScanOptions::EReadPrio::Low);
-
- ui64 cookie = -1; // Should be ignored
- Self->QueueScan(localTableId, scan, cookie, opts);
-
- Result.Destroy(); // Scan is now responsible for sending the result
-
- return true;
- }
-
- // TODO: make sure KeyFrom and KeyTo properly reference non-inline cells data
-
- if (!Precharge(txc.DB, localTableId, valueColumns))
- return false;
-
- size_t rows = 0;
- size_t bytes = 0;
- bool shardFinished = false;
-
- {
+ return true;
+ }
+ }
+
+ auto* scan = new TReadColumnsScan(TKeyBoundary{fromKeyCells, InclusiveFrom},
+ TKeyBoundary{toKeyCells, InclusiveTo},
+ valueColumns, valueColumnTypes,
+ std::move(blockBuilder), RowsLimit, BytesLimit,
+ TKeyBoundary{tableInfo.Range.To, tableInfo.Range.ToInclusive},
+ Ev->Sender, ctx.SelfID,
+ snapshotKey,
+ tableInfo.Path,
+ Self->TabletID());
+ auto opts = TScanOptions()
+ .SetResourceBroker("scan", 10)
+ .SetSnapshotRowVersion(ReadVersion)
+ .SetActorPoolId(Self->ReadColumnsScanInUserPool ? AppData(ctx)->UserPoolId : AppData(ctx)->BatchPoolId)
+ .SetReadAhead(512*1024, 1024*1024)
+ .SetReadPrio(TScanOptions::EReadPrio::Low);
+
+ ui64 cookie = -1; // Should be ignored
+ Self->QueueScan(localTableId, scan, cookie, opts);
+
+ Result.Destroy(); // Scan is now responsible for sending the result
+
+ return true;
+ }
+
+ // TODO: make sure KeyFrom and KeyTo properly reference non-inline cells data
+
+ if (!Precharge(txc.DB, localTableId, valueColumns))
+ return false;
+
+ size_t rows = 0;
+ size_t bytes = 0;
+ bool shardFinished = false;
+
+ {
NTable::TKeyRange iterRange;
iterRange.MinKey = KeyFrom;
iterRange.MinInclusive = InclusiveFrom;
-
+
auto iter = txc.DB.IterateRange(localTableId, iterRange, valueColumns, ReadVersion);
- TString lastKeySerialized;
- bool lastKeyInclusive = true;
- while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
- TDbTupleRef rowKey = iter->GetKey();
- lastKeySerialized = TSerializedCellVec::Serialize(rowKey.Cells());
-
- // Compare current row with right boundary
- int cmp = -1;// CompareTypedCellVectors(tuple.Columns, KeyTo.data(), tuple.Types, KeyTo.size());
-
- if (cmp == 0 && KeyTo.size() < rowKey.ColumnCount) {
- cmp = -1;
- }
- if (InclusiveTo) {
- if (cmp > 0)
- break; // Stop iff greater(cmp > 0)
- } else {
- if (cmp >= 0)
- break; // Stop iff equal(cmp == 0) or greater(cmp > 0)
- }
-
- // Skip erased row
+ TString lastKeySerialized;
+ bool lastKeyInclusive = true;
+ while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
+ TDbTupleRef rowKey = iter->GetKey();
+ lastKeySerialized = TSerializedCellVec::Serialize(rowKey.Cells());
+
+ // Compare current row with right boundary
+ int cmp = -1;// CompareTypedCellVectors(tuple.Columns, KeyTo.data(), tuple.Types, KeyTo.size());
+
+ if (cmp == 0 && KeyTo.size() < rowKey.ColumnCount) {
+ cmp = -1;
+ }
+ if (InclusiveTo) {
+ if (cmp > 0)
+ break; // Stop iff greater(cmp > 0)
+ } else {
+ if (cmp >= 0)
+ break; // Stop iff equal(cmp == 0) or greater(cmp > 0)
+ }
+
+ // Skip erased row
if (iter->Row().GetRowState() == NTable::ERowOp::Erase) {
- continue;
- }
-
- TDbTupleRef rowValues = iter->GetValues();
-
- blockBuilder->AddRow(rowKey, rowValues);
-
- rows++;
- bytes = blockBuilder->Bytes();
-
- if (rows >= RowsLimit || bytes >= BytesLimit)
- break;
- }
-
- // We don't want to do many restarts if pages weren't precharged
- // So we just return whatever we read so far and the client can request more rows
- if (iter->Last() == NTable::EReady::Page && rows < 1000 && bytes < 100000 && Restarts < 1) {
- ++Restarts;
- return false;
- }
-
- if (iter->Last() == NTable::EReady::Gone) {
- shardFinished = true;
- lastKeySerialized = tableInfo.Range.To.GetBuffer();
- lastKeyInclusive = tableInfo.Range.ToInclusive;
- }
-
- TString buffer = blockBuilder->Finish();
- buffer.resize(blockBuilder->Bytes());
-
- Result->Record.SetBlocks(buffer);
- Result->Record.SetLastKey(lastKeySerialized);
- Result->Record.SetLastKeyInclusive(lastKeyInclusive);
+ continue;
+ }
+
+ TDbTupleRef rowValues = iter->GetValues();
+
+ blockBuilder->AddRow(rowKey, rowValues);
+
+ rows++;
+ bytes = blockBuilder->Bytes();
+
+ if (rows >= RowsLimit || bytes >= BytesLimit)
+ break;
+ }
+
+ // We don't want to do many restarts if pages weren't precharged
+ // So we just return whatever we read so far and the client can request more rows
+ if (iter->Last() == NTable::EReady::Page && rows < 1000 && bytes < 100000 && Restarts < 1) {
+ ++Restarts;
+ return false;
+ }
+
+ if (iter->Last() == NTable::EReady::Gone) {
+ shardFinished = true;
+ lastKeySerialized = tableInfo.Range.To.GetBuffer();
+ lastKeyInclusive = tableInfo.Range.ToInclusive;
+ }
+
+ TString buffer = blockBuilder->Finish();
+ buffer.resize(blockBuilder->Bytes());
+
+ Result->Record.SetBlocks(buffer);
+ Result->Record.SetLastKey(lastKeySerialized);
+ Result->Record.SetLastKeyInclusive(lastKeyInclusive);
Result->Record.SetEndOfShard(shardFinished);
- }
-
- Self->IncCounter(COUNTER_READ_COLUMNS_ROWS, rows);
- Self->IncCounter(COUNTER_READ_COLUMNS_BYTES, bytes);
-
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID()
- << " Read columns result for table [" << tableInfo.Path << "]: "
- << rows << " rows, " << bytes << " bytes (event size "
- << Result->Record.GetBlocks().size() << ") shardFinished: " << shardFinished);
-
- return true;
- }
-
+ }
+
+ Self->IncCounter(COUNTER_READ_COLUMNS_ROWS, rows);
+ Self->IncCounter(COUNTER_READ_COLUMNS_BYTES, bytes);
+
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID()
+ << " Read columns result for table [" << tableInfo.Path << "]: "
+ << rows << " rows, " << bytes << " bytes (event size "
+ << Result->Record.GetBlocks().size() << ") shardFinished: " << shardFinished);
+
+ return true;
+ }
+
void Complete(const TActorContext& ctx) override {
- if (Result) {
- ctx.Send(Ev->Sender, Result.Release());
- }
- }
-
-private:
- void SetError(ui32 status, TString descr) {
- Result->Record.SetStatus(status);
- Result->Record.SetErrorDescription(descr);
- }
-};
-
+ if (Result) {
+ ctx.Send(Ev->Sender, Result.Release());
+ }
+ }
+
+private:
+ void SetError(ui32 status, TString descr) {
+ Result->Record.SetStatus(status);
+ Result->Record.SetErrorDescription(descr);
+ }
+};
+
void TDataShard::Handle(TEvDataShard::TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx) {
- Executor()->Execute(new TTxReadColumns(this, ev), ctx);
-}
-
-}}
+ Executor()->Execute(new TTxReadColumns(this, ev), ctx);
+}
+
+}}
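TTxReadColumns above pages through a key range: rows are appended to a block builder until a row or byte limit is hit, the last key is remembered as the resume point, and exhausting the iterator marks the shard as finished. Here is a minimal sketch of that paging contract over a std::map, with hypothetical Page/ReadPage names (not the NTable iterator API).

#include <cstddef>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct Page {                                  // stand-in for TEvReadColumnsResponse fields
    std::vector<std::pair<std::string, std::string>> rows;
    std::optional<std::string> lastKey;        // resume point for the next request
    bool endOfShard = false;
};

Page ReadPage(const std::map<std::string, std::string>& table,
              const std::string& fromKey, std::size_t rowsLimit, std::size_t bytesLimit) {
    Page page;
    std::size_t bytes = 0;
    for (auto it = table.lower_bound(fromKey); it != table.end(); ++it) {
        page.rows.push_back(*it);
        bytes += it->first.size() + it->second.size();
        page.lastKey = it->first;
        if (page.rows.size() >= rowsLimit || bytes >= bytesLimit)
            return page;                       // limit hit: the client resumes after lastKey
    }
    page.endOfShard = true;                    // iterator exhausted, like EReady::Gone
    return page;
}

int main() {
    std::map<std::string, std::string> table = {{"a", "1"}, {"b", "2"}, {"c", "3"}};
    Page p = ReadPage(table, "a", /*rowsLimit=*/2, /*bytesLimit=*/1024);
    std::cout << "rows: " << p.rows.size()
              << " lastKey: " << (p.lastKey ? *p.lastKey : std::string("<none>"))
              << " endOfShard: " << p.endOfShard << '\n';
}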
diff --git a/ydb/core/tx/datashard/datashard__s3.cpp b/ydb/core/tx/datashard/datashard__s3.cpp
index 6605a84d246..2f7a74bbab6 100644
--- a/ydb/core/tx/datashard/datashard__s3.cpp
+++ b/ydb/core/tx/datashard/datashard__s3.cpp
@@ -1,126 +1,126 @@
-#include "datashard_impl.h"
+#include "datashard_impl.h"
#include <util/string/vector.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
class TDataShard::TTxS3Listing : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvS3ListingRequest::TPtr Ev;
- TAutoPtr<TEvDataShard::TEvS3ListingResponse> Result;
-
- // Used to continue iteration from last known position instead of restarting from the beginning
- // This greatly improves performance for the cases with many deletion markers but sacrifices
- // consitency within the shard. This in not a big deal because listings are not consistent across shards.
- TString LastPath;
- TString LastCommonPath;
- ui32 RestartCount;
-
-public:
+private:
+ TEvDataShard::TEvS3ListingRequest::TPtr Ev;
+ TAutoPtr<TEvDataShard::TEvS3ListingResponse> Result;
+
+ // Used to continue iteration from last known position instead of restarting from the beginning
+ // This greatly improves performance for the cases with many deletion markers but sacrifices
+ // consistency within the shard. This is not a big deal because listings are not consistent across shards.
+ TString LastPath;
+ TString LastCommonPath;
+ ui32 RestartCount;
+
+public:
TTxS3Listing(TDataShard* ds, TEvDataShard::TEvS3ListingRequest::TPtr ev)
- : TBase(ds)
- , Ev(ev)
- , RestartCount(0)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_S3_LISTING; }
-
+ : TBase(ds)
+ , Ev(ev)
+ , RestartCount(0)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_S3_LISTING; }
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- ++RestartCount;
-
- if (!Result) {
- Result = new TEvDataShard::TEvS3ListingResponse(Self->TabletID());
- }
-
- if (Self->State != TShardState::Ready &&
- Self->State != TShardState::Readonly &&
+ ++RestartCount;
+
+ if (!Result) {
+ Result = new TEvDataShard::TEvS3ListingResponse(Self->TabletID());
+ }
+
+ if (Self->State != TShardState::Ready &&
+ Self->State != TShardState::Readonly &&
Self->State != TShardState::SplitSrcWaitForNoTxInFlight &&
Self->State != TShardState::Frozen)
- {
- SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
- Sprintf("Wrong shard state: %" PRIu32 " tablet id: %" PRIu64, Self->State, Self->TabletID()));
- return true;
- }
-
- const ui64 tableId = Ev->Get()->Record.GetTableId();
- const ui64 maxKeys = Ev->Get()->Record.GetMaxKeys();
-
+ {
+ SetError(NKikimrTxDataShard::TError::WRONG_SHARD_STATE,
+ Sprintf("Wrong shard state: %" PRIu32 " tablet id: %" PRIu64, Self->State, Self->TabletID()));
+ return true;
+ }
+
+ const ui64 tableId = Ev->Get()->Record.GetTableId();
+ const ui64 maxKeys = Ev->Get()->Record.GetMaxKeys();
+
if (!Self->TableInfos.contains(tableId)) {
- SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, Sprintf("Unknown table id %" PRIu64, tableId));
- return true;
- }
-
+ SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, Sprintf("Unknown table id %" PRIu64, tableId));
+ return true;
+ }
+
const TUserTable& tableInfo = *Self->TableInfos[tableId];
if (tableInfo.IsBackup) {
SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, "Cannot read from a backup table");
return true;
}
- const ui32 localTableId = tableInfo.LocalTid;
-
+ const ui32 localTableId = tableInfo.LocalTid;
+
TVector<TRawTypeValue> key;
TVector<TRawTypeValue> endKey;
-
- // TODO: check prefix column count against key column count
- const TSerializedCellVec prefixColumns(Ev->Get()->Record.GetSerializedKeyPrefix());
- for (ui32 ki = 0; ki < prefixColumns.GetCells().size(); ++ki) {
- // TODO: check prefix column type
- key.emplace_back(prefixColumns.GetCells()[ki].Data(), prefixColumns.GetCells()[ki].Size(), tableInfo.KeyColumnTypes[ki]);
+
+ // TODO: check prefix column count against key column count
+ const TSerializedCellVec prefixColumns(Ev->Get()->Record.GetSerializedKeyPrefix());
+ for (ui32 ki = 0; ki < prefixColumns.GetCells().size(); ++ki) {
+ // TODO: check prefix column type
+ key.emplace_back(prefixColumns.GetCells()[ki].Data(), prefixColumns.GetCells()[ki].Size(), tableInfo.KeyColumnTypes[ki]);
endKey.emplace_back(prefixColumns.GetCells()[ki].Data(), prefixColumns.GetCells()[ki].Size(), tableInfo.KeyColumnTypes[ki]);
- }
- const ui32 pathColPos = prefixColumns.GetCells().size();
-
- // TODO: check path column is present in schema and has Utf8 type
- const TString pathPrefix = Ev->Get()->Record.GetPathColumnPrefix();
- const TString pathSeparator = Ev->Get()->Record.GetPathColumnDelimiter();
- TSerializedCellVec suffixColumns;
- TString startAfterPath;
- if (Ev->Get()->Record.GetSerializedStartAfterKeySuffix().empty()) {
- key.emplace_back(pathPrefix.data(), pathPrefix.size(), NScheme::NTypeIds::Utf8);
+ }
+ const ui32 pathColPos = prefixColumns.GetCells().size();
+
+ // TODO: check path column is present in schema and has Utf8 type
+ const TString pathPrefix = Ev->Get()->Record.GetPathColumnPrefix();
+ const TString pathSeparator = Ev->Get()->Record.GetPathColumnDelimiter();
+ TSerializedCellVec suffixColumns;
+ TString startAfterPath;
+ if (Ev->Get()->Record.GetSerializedStartAfterKeySuffix().empty()) {
+ key.emplace_back(pathPrefix.data(), pathPrefix.size(), NScheme::NTypeIds::Utf8);
key.resize(txc.DB.GetScheme().GetTableInfo(localTableId)->KeyColumns.size());
- } else {
- suffixColumns.Parse(Ev->Get()->Record.GetSerializedStartAfterKeySuffix());
- size_t prefixSize = prefixColumns.GetCells().size();
- for (size_t i = 0; i < suffixColumns.GetCells().size(); ++i) {
- size_t ki = prefixSize + i;
- key.emplace_back(suffixColumns.GetCells()[i].Data(), suffixColumns.GetCells()[i].Size(), tableInfo.KeyColumnTypes[ki]);
- }
- startAfterPath = TString(suffixColumns.GetCells()[0].Data(), suffixColumns.GetCells()[0].Size());
- }
-
- TString lastCommonPath; // we will skip a common prefix iff it has been already returned from the prevoius shard
- if (Ev->Get()->Record.HasLastCommonPrefix()) {
- TSerializedCellVec lastCommonPrefix(Ev->Get()->Record.GetLastCommonPrefix());
- if (lastCommonPrefix.GetCells().size() > 0) {
- lastCommonPath = TString(lastCommonPrefix.GetCells()[0].Data(), lastCommonPrefix.GetCells()[0].Size());
- }
- }
-
- // If this trasaction has restarted we want to continue from the last seen key
- if (LastPath) {
- const size_t pathColIdx = prefixColumns.GetCells().size();
- key.resize(pathColIdx);
- key.emplace_back(LastPath.data(), LastPath.size(), NScheme::NTypeIds::Utf8);
- key.resize(txc.DB.GetScheme().GetTableInfo(localTableId)->KeyColumns.size());
-
- lastCommonPath = LastCommonPath;
- } else {
- LastCommonPath = lastCommonPath;
- }
-
- const TString pathEndPrefix = NextPrefix(pathPrefix);
+ } else {
+ suffixColumns.Parse(Ev->Get()->Record.GetSerializedStartAfterKeySuffix());
+ size_t prefixSize = prefixColumns.GetCells().size();
+ for (size_t i = 0; i < suffixColumns.GetCells().size(); ++i) {
+ size_t ki = prefixSize + i;
+ key.emplace_back(suffixColumns.GetCells()[i].Data(), suffixColumns.GetCells()[i].Size(), tableInfo.KeyColumnTypes[ki]);
+ }
+ startAfterPath = TString(suffixColumns.GetCells()[0].Data(), suffixColumns.GetCells()[0].Size());
+ }
+
+ TString lastCommonPath; // we will skip a common prefix iff it has already been returned from the previous shard
+ if (Ev->Get()->Record.HasLastCommonPrefix()) {
+ TSerializedCellVec lastCommonPrefix(Ev->Get()->Record.GetLastCommonPrefix());
+ if (lastCommonPrefix.GetCells().size() > 0) {
+ lastCommonPath = TString(lastCommonPrefix.GetCells()[0].Data(), lastCommonPrefix.GetCells()[0].Size());
+ }
+ }
+
+ // If this transaction has restarted we want to continue from the last seen key
+ if (LastPath) {
+ const size_t pathColIdx = prefixColumns.GetCells().size();
+ key.resize(pathColIdx);
+ key.emplace_back(LastPath.data(), LastPath.size(), NScheme::NTypeIds::Utf8);
+ key.resize(txc.DB.GetScheme().GetTableInfo(localTableId)->KeyColumns.size());
+
+ lastCommonPath = LastCommonPath;
+ } else {
+ LastCommonPath = lastCommonPath;
+ }
+
+ const TString pathEndPrefix = NextPrefix(pathPrefix);
if (pathEndPrefix) {
endKey.emplace_back(pathEndPrefix.data(), pathEndPrefix.size(), NScheme::NTypeIds::Utf8);
}
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: start at key ("
- << JoinVectorIntoString(key, " ") << "), end at key (" << JoinVectorIntoString(endKey, " ") << ")"
- << " restarted: " << RestartCount-1 << " last path: \"" << LastPath << "\""
- << " contents: " << Result->Record.ContentsRowsSize()
- << " common prefixes: " << Result->Record.CommonPrefixesRowsSize());
-
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: start at key ("
+ << JoinVectorIntoString(key, " ") << "), end at key (" << JoinVectorIntoString(endKey, " ") << ")"
+ << " restarted: " << RestartCount-1 << " last path: \"" << LastPath << "\""
+ << " contents: " << Result->Record.ContentsRowsSize()
+ << " common prefixes: " << Result->Record.CommonPrefixesRowsSize());
+
Result->Record.SetMoreRows(!IsKeyInRange(endKey, tableInfo));
if (!maxKeys) {
@@ -128,89 +128,89 @@ public:
return true;
}
- // Select path column and all user-requested columns
- const TVector<ui32> columnsToReturn(Ev->Get()->Record.GetColumnsToReturn().begin(), Ev->Get()->Record.GetColumnsToReturn().end());
-
+ // Select path column and all user-requested columns
+ const TVector<ui32> columnsToReturn(Ev->Get()->Record.GetColumnsToReturn().begin(), Ev->Get()->Record.GetColumnsToReturn().end());
+
NTable::TKeyRange keyRange;
keyRange.MinKey = key;
keyRange.MinInclusive = suffixColumns.GetCells().empty();
keyRange.MaxKey = endKey;
keyRange.MaxInclusive = false;
-
- if (LastPath) {
- // Don't include the last key in case of restart
- keyRange.MinInclusive = false;
- }
-
+
+ if (LastPath) {
+ // Don't include the last key in case of restart
+ keyRange.MinInclusive = false;
+ }
+
TAutoPtr<NTable::TTableIt> iter = txc.DB.IterateRange(localTableId, keyRange, columnsToReturn);
-
- ui64 foundKeys = Result->Record.ContentsRowsSize() + Result->Record.CommonPrefixesRowsSize();
- while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
- TDbTupleRef currentKey = iter->GetKey();
-
- // Check all columns that prefix columns are in the current key are equal to the specified values
- Y_VERIFY(currentKey.Cells().size() > prefixColumns.GetCells().size());
+
+ ui64 foundKeys = Result->Record.ContentsRowsSize() + Result->Record.CommonPrefixesRowsSize();
+ while (iter->Next(NTable::ENext::All) == NTable::EReady::Data) {
+ TDbTupleRef currentKey = iter->GetKey();
+
+ // Check that the prefix columns of the current key are equal to the specified values
+ Y_VERIFY(currentKey.Cells().size() > prefixColumns.GetCells().size());
Y_VERIFY_DEBUG(
0 == CompareTypedCellVectors(
- prefixColumns.GetCells().data(),
- currentKey.Cells().data(),
- currentKey.Types,
+ prefixColumns.GetCells().data(),
+ currentKey.Cells().data(),
+ currentKey.Types,
prefixColumns.GetCells().size()),
"Unexpected out of range key returned from iterator");
-
+
Y_VERIFY(currentKey.Types[pathColPos] == NScheme::NTypeIds::Utf8);
- const TCell& pathCell = currentKey.Cells()[pathColPos];
- TString path = TString((const char*)pathCell.Data(), pathCell.Size());
-
- LastPath = path;
-
- // Explicitly skip erased rows after saving LastPath. This allows to continue exactly from
- // this key in case of restart
+ const TCell& pathCell = currentKey.Cells()[pathColPos];
+ TString path = TString((const char*)pathCell.Data(), pathCell.Size());
+
+ LastPath = path;
+
+ // Explicitly skip erased rows after saving LastPath. This allows continuing exactly from
+ // this key in case of restart
if (iter->Row().GetRowState() == NTable::ERowOp::Erase) {
- continue;
- }
-
+ continue;
+ }
+
// Check that path begins with the specified prefix
Y_VERIFY_DEBUG(path.StartsWith(pathPrefix),
"Unexpected out of range key returned from iterator");
-
- bool isLeafPath = true;
- if (!pathSeparator.empty()) {
- size_t separatorPos = path.find_first_of(pathSeparator, pathPrefix.length());
- if (separatorPos != TString::npos) {
- path.resize(separatorPos + pathSeparator.length());
- isLeafPath = false;
- }
- }
-
- TDbTupleRef value = iter->GetValues();
- LOG_TRACE_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: "
- "\"" << path << "\"" << (isLeafPath ? " -> " + DbgPrintTuple(value, *AppData(ctx)->TypeRegistry) : TString()));
-
- if (isLeafPath) {
- Y_VERIFY(value.Cells()[0].Size() >= 1);
- Y_VERIFY(path == TStringBuf((const char*)value.Cells()[0].Data(), value.Cells()[0].Size()),
- "Path column must be requested at pos 0");
-
- TString newContentsRow = TSerializedCellVec::Serialize(value.Cells());
-
- if (Result->Record.GetContentsRows().empty() ||
- *Result->Record.GetContentsRows().rbegin() != newContentsRow)
- {
- // Add a row with path column and all columns requested by user
- Result->Record.AddContentsRows(newContentsRow);
- if (++foundKeys >= maxKeys)
- break;
- }
- } else {
- // For prefix save a row with 1 column
- if (path > startAfterPath && path != lastCommonPath) {
- LastCommonPath = path;
- Result->Record.AddCommonPrefixesRows(TSerializedCellVec::Serialize({TCell(path.data(), path.size())}));
+
+ bool isLeafPath = true;
+ if (!pathSeparator.empty()) {
+ size_t separatorPos = path.find_first_of(pathSeparator, pathPrefix.length());
+ if (separatorPos != TString::npos) {
+ path.resize(separatorPos + pathSeparator.length());
+ isLeafPath = false;
+ }
+ }
+
+ TDbTupleRef value = iter->GetValues();
+ LOG_TRACE_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: "
+ "\"" << path << "\"" << (isLeafPath ? " -> " + DbgPrintTuple(value, *AppData(ctx)->TypeRegistry) : TString()));
+
+ if (isLeafPath) {
+ Y_VERIFY(value.Cells()[0].Size() >= 1);
+ Y_VERIFY(path == TStringBuf((const char*)value.Cells()[0].Data(), value.Cells()[0].Size()),
+ "Path column must be requested at pos 0");
+
+ TString newContentsRow = TSerializedCellVec::Serialize(value.Cells());
+
+ if (Result->Record.GetContentsRows().empty() ||
+ *Result->Record.GetContentsRows().rbegin() != newContentsRow)
+ {
+ // Add a row with path column and all columns requested by user
+ Result->Record.AddContentsRows(newContentsRow);
+ if (++foundKeys >= maxKeys)
+ break;
+ }
+ } else {
+ // For prefix save a row with 1 column
+ if (path > startAfterPath && path != lastCommonPath) {
+ LastCommonPath = path;
+ Result->Record.AddCommonPrefixesRows(TSerializedCellVec::Serialize({TCell(path.data(), path.size())}));
if (++foundKeys >= maxKeys)
break;
- }
-
+ }
+
TString lookup = NextPrefix(path);
if (!lookup) {
// May only happen if path is equal to separator, which consists of only '\xff'
@@ -218,36 +218,36 @@ public:
// other path exists after the current prefix.
break;
}
-
+
// Skip to the next key after path+separator
- key.resize(prefixColumns.GetCells().size());
- key.emplace_back(lookup.data(), lookup.size(), NScheme::NTypeIds::Utf8);
+ key.resize(prefixColumns.GetCells().size());
+ key.emplace_back(lookup.data(), lookup.size(), NScheme::NTypeIds::Utf8);
if (!iter->SkipTo(key, /* inclusive = */ true)) {
return false;
}
- }
- }
-
+ }
+ }
+
return iter->Last() != NTable::EReady::Page;
- }
-
+ }
+
void Complete(const TActorContext& ctx) override {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: finished "
- << " status: " << Result->Record.GetStatus()
- << " description: \"" << Result->Record.GetErrorDescription() << "\""
- << " contents: " << Result->Record.ContentsRowsSize()
- << " common prefixes: " << Result->Record.CommonPrefixesRowsSize());
- ctx.Send(Ev->Sender, Result.Release());
- }
-
-private:
- void SetError(ui32 status, TString descr) {
- Result = new TEvDataShard::TEvS3ListingResponse(Self->TabletID());
-
- Result->Record.SetStatus(status);
- Result->Record.SetErrorDescription(descr);
- }
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " S3 Listing: finished "
+ << " status: " << Result->Record.GetStatus()
+ << " description: \"" << Result->Record.GetErrorDescription() << "\""
+ << " contents: " << Result->Record.ContentsRowsSize()
+ << " common prefixes: " << Result->Record.CommonPrefixesRowsSize());
+ ctx.Send(Ev->Sender, Result.Release());
+ }
+
+private:
+ void SetError(ui32 status, TString descr) {
+ Result = new TEvDataShard::TEvS3ListingResponse(Self->TabletID());
+
+ Result->Record.SetStatus(status);
+ Result->Record.SetErrorDescription(descr);
+ }
static bool IsKeyInRange(TArrayRef<const TRawTypeValue> key, const TUserTable& tableInfo) {
if (!key) {
@@ -283,10 +283,10 @@ private:
return p;
}
-};
-
+};
+
void TDataShard::Handle(TEvDataShard::TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx) {
- Executor()->Execute(new TTxS3Listing(this, ev), ctx);
-}
-
-}}
+ Executor()->Execute(new TTxS3Listing(this, ev), ctx);
+}
+
+}}
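TTxS3Listing above implements S3-style listing: keys under the requested path prefix become contents entries, while keys containing the delimiter after the prefix are collapsed into common prefixes (and the scan then skips ahead past each prefix). Below is a minimal sketch of the contents/common-prefix split only, in plain C++ with hypothetical names; the skip-ahead optimization and paging are omitted.

#include <cstddef>
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Listing {
    std::vector<std::string> contents;         // leaf objects under the prefix
    std::set<std::string> commonPrefixes;      // "directories" collapsed at the delimiter
};

Listing List(const std::vector<std::string>& keys,
             const std::string& prefix, const std::string& delimiter) {
    Listing out;
    for (const auto& key : keys) {
        if (key.compare(0, prefix.size(), prefix) != 0)
            continue;                                     // outside the requested prefix
        std::size_t pos = delimiter.empty()
                              ? std::string::npos
                              : key.find(delimiter, prefix.size());
        if (pos == std::string::npos)
            out.contents.push_back(key);                  // leaf path: return the full row
        else
            out.commonPrefixes.insert(key.substr(0, pos + delimiter.size()));
    }
    return out;
}

int main() {
    Listing l = List({"photos/2021/a.jpg", "photos/2021/b.jpg", "photos/readme"},
                     "photos/", "/");
    for (const auto& p : l.commonPrefixes) std::cout << "PRE " << p << '\n';
    for (const auto& c : l.contents)       std::cout << "OBJ " << c << '\n';
}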
diff --git a/ydb/core/tx/datashard/datashard__schema_changed.cpp b/ydb/core/tx/datashard/datashard__schema_changed.cpp
index f9fe6fd000b..3d6b2a31cbc 100644
--- a/ydb/core/tx/datashard/datashard__schema_changed.cpp
+++ b/ydb/core/tx/datashard/datashard__schema_changed.cpp
@@ -13,14 +13,14 @@ public:
, TxId(0)
{}
- TTxType GetTxType() const override { return TXTYPE_SCHEMA_CHANGED; }
-
+ TTxType GetTxType() const override { return TXTYPE_SCHEMA_CHANGED; }
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
TxId = Ev->Get()->Record.GetTxId();
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Got TEvSchemaChangedResult from SS at "
- << Self->TabletID());
-
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Got TEvSchemaChangedResult from SS at "
+ << Self->TabletID());
+
NIceDb::TNiceDb db(txc.DB);
Self->Pipeline.CompleteSchemaTx(db, TxId);
return true;
@@ -28,7 +28,7 @@ public:
void Complete(const TActorContext& ctx) override {
NTabletPipe::CloseAndForgetClient(Self->SelfId(), Self->SchemeShardPipe);
- Self->CheckStateChange(ctx);
+ Self->CheckStateChange(ctx);
}
private:
diff --git a/ydb/core/tx/datashard/datashard__stats.cpp b/ydb/core/tx/datashard/datashard__stats.cpp
index 88873fd09d7..129a14dca5f 100644
--- a/ydb/core/tx/datashard/datashard__stats.cpp
+++ b/ydb/core/tx/datashard/datashard__stats.cpp
@@ -1,96 +1,96 @@
-#include "datashard_impl.h"
+#include "datashard_impl.h"
#include <ydb/core/tablet_flat/flat_stat_table.h>
#include <ydb/core/tablet_flat/flat_dbase_sz_env.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
-
-class TAsyncTableStatsBuilder : public TActorBootstrapped<TAsyncTableStatsBuilder> {
-public:
+
+
+class TAsyncTableStatsBuilder : public TActorBootstrapped<TAsyncTableStatsBuilder> {
+public:
TAsyncTableStatsBuilder(TActorId replyTo, ui64 tableId, ui64 indexSize, const TAutoPtr<NTable::TSubset> subset,
ui64 memRowCount, ui64 memDataSize,
ui64 rowCountResolution, ui64 dataSizeResolution, ui64 searchHeight, TInstant statsUpdateTime)
- : ReplyTo(replyTo)
- , TableId(tableId)
- , IndexSize(indexSize)
- , StatsUpdateTime(statsUpdateTime)
- , Subset(subset)
+ : ReplyTo(replyTo)
+ , TableId(tableId)
+ , IndexSize(indexSize)
+ , StatsUpdateTime(statsUpdateTime)
+ , Subset(subset)
, MemRowCount(memRowCount)
, MemDataSize(memDataSize)
- , RowCountResolution(rowCountResolution)
- , DataSizeResolution(dataSizeResolution)
+ , RowCountResolution(rowCountResolution)
+ , DataSizeResolution(dataSizeResolution)
, SearchHeight(searchHeight)
- {}
-
+ {}
+
static constexpr auto ActorActivityType() {
return NKikimrServices::TActivity::DATASHARD_STATS_BUILDER;
- }
-
- void Bootstrap(const TActorContext& ctx) {
+ }
+
+ void Bootstrap(const TActorContext& ctx) {
THolder<TDataShard::TEvPrivate::TEvAsyncTableStats> ev = MakeHolder<TDataShard::TEvPrivate::TEvAsyncTableStats>();
- ev->TableId = TableId;
- ev->IndexSize = IndexSize;
- ev->StatsUpdateTime = StatsUpdateTime;
+ ev->TableId = TableId;
+ ev->IndexSize = IndexSize;
+ ev->StatsUpdateTime = StatsUpdateTime;
ev->PartCount = Subset->Flatten.size() + Subset->ColdParts.size();
ev->MemRowCount = MemRowCount;
ev->MemDataSize = MemDataSize;
ev->SearchHeight = SearchHeight;
-
+
NTable::GetPartOwners(*Subset, ev->PartOwners);
- NTable::TSizeEnv szEnv;
+ NTable::TSizeEnv szEnv;
Subset->ColdParts.clear(); // stats won't include cold parts, if any
- NTable::BuildStats(*Subset, ev->Stats, RowCountResolution, DataSizeResolution, &szEnv);
-
- ctx.Send(ReplyTo, ev.Release());
-
- return Die(ctx);
- }
-
-private:
+ NTable::BuildStats(*Subset, ev->Stats, RowCountResolution, DataSizeResolution, &szEnv);
+
+ ctx.Send(ReplyTo, ev.Release());
+
+ return Die(ctx);
+ }
+
+private:
TActorId ReplyTo;
ui64 TableId;
- ui64 IndexSize;
- TInstant StatsUpdateTime;
+ ui64 IndexSize;
+ TInstant StatsUpdateTime;
TAutoPtr<NTable::TSubset> Subset;
ui64 MemRowCount;
ui64 MemDataSize;
- ui64 RowCountResolution;
- ui64 DataSizeResolution;
+ ui64 RowCountResolution;
+ ui64 DataSizeResolution;
ui64 SearchHeight;
-};
-
-
+};
+
+
class TDataShard::TTxGetTableStats : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvGetTableStats::TPtr Ev;
- TAutoPtr<TEvDataShard::TEvGetTableStatsResult> Result;
-
-public:
+private:
+ TEvDataShard::TEvGetTableStats::TPtr Ev;
+ TAutoPtr<TEvDataShard::TEvGetTableStatsResult> Result;
+
+public:
TTxGetTableStats(TDataShard* ds, TEvDataShard::TEvGetTableStats::TPtr ev)
- : TBase(ds)
- , Ev(ev)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_GET_TABLE_STATS; }
-
+ : TBase(ds)
+ , Ev(ev)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_GET_TABLE_STATS; }
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(ctx);
-
- ui64 tableId = Ev->Get()->Record.GetTableId();
-
+ Y_UNUSED(ctx);
+
+ ui64 tableId = Ev->Get()->Record.GetTableId();
+
Result = new TEvDataShard::TEvGetTableStatsResult(Self->TabletID(), Self->PathOwnerId, tableId);
-
+
if (!Self->TableInfos.contains(tableId))
- return true;
-
- if (Ev->Get()->Record.GetCollectKeySample()) {
- Self->EnableKeyAccessSampling(ctx, AppData(ctx)->TimeProvider->Now() + TDuration::Seconds(60));
- }
-
+ return true;
+
+ if (Ev->Get()->Record.GetCollectKeySample()) {
+ Self->EnableKeyAccessSampling(ctx, AppData(ctx)->TimeProvider->Now() + TDuration::Seconds(60));
+ }
+
const TUserTable& tableInfo = *Self->TableInfos[tableId];
-
+
auto indexSize = txc.DB.GetTableIndexSize(tableInfo.LocalTid);
auto memSize = txc.DB.GetTableMemSize(tableInfo.LocalTid);
@@ -101,148 +101,148 @@ public:
Result->Record.MutableTableStats()->SetIndexSize(indexSize);
Result->Record.MutableTableStats()->SetInMemSize(memSize);
- Result->Record.MutableTableStats()->SetLastAccessTime(tableInfo.Stats.AccessTime.MilliSeconds());
- Result->Record.MutableTableStats()->SetLastUpdateTime(tableInfo.Stats.UpdateTime.MilliSeconds());
-
- tableInfo.Stats.DataSizeResolution = Ev->Get()->Record.GetDataSizeResolution();
- tableInfo.Stats.RowCountResolution = Ev->Get()->Record.GetRowCountResolution();
-
- // Check if first stats update has been completed
- bool ready = (tableInfo.Stats.StatsUpdateTime != TInstant());
- Result->Record.SetFullStatsReady(ready);
- if (!ready)
- return true;
-
- const NTable::TStats& stats = tableInfo.Stats.DataStats;
- Result->Record.MutableTableStats()->SetDataSize(stats.DataSize);
- Result->Record.MutableTableStats()->SetRowCount(stats.RowCount);
- FillHistogram(stats.DataSizeHistogram, *Result->Record.MutableTableStats()->MutableDataSizeHistogram());
- FillHistogram(stats.RowCountHistogram, *Result->Record.MutableTableStats()->MutableRowCountHistogram());
- // Fill key access sample if it was collected not too long ago
- if (Self->StopKeyAccessSamplingAt + TDuration::Seconds(30) >= AppData(ctx)->TimeProvider->Now()) {
- FillKeyAccessSample(tableInfo.Stats.AccessStats, *Result->Record.MutableTableStats()->MutableKeyAccessSample());
- }
-
- Result->Record.MutableTableStats()->SetPartCount(tableInfo.Stats.PartCount);
+ Result->Record.MutableTableStats()->SetLastAccessTime(tableInfo.Stats.AccessTime.MilliSeconds());
+ Result->Record.MutableTableStats()->SetLastUpdateTime(tableInfo.Stats.UpdateTime.MilliSeconds());
+
+ tableInfo.Stats.DataSizeResolution = Ev->Get()->Record.GetDataSizeResolution();
+ tableInfo.Stats.RowCountResolution = Ev->Get()->Record.GetRowCountResolution();
+
+ // Check if first stats update has been completed
+ bool ready = (tableInfo.Stats.StatsUpdateTime != TInstant());
+ Result->Record.SetFullStatsReady(ready);
+ if (!ready)
+ return true;
+
+ const NTable::TStats& stats = tableInfo.Stats.DataStats;
+ Result->Record.MutableTableStats()->SetDataSize(stats.DataSize);
+ Result->Record.MutableTableStats()->SetRowCount(stats.RowCount);
+ FillHistogram(stats.DataSizeHistogram, *Result->Record.MutableTableStats()->MutableDataSizeHistogram());
+ FillHistogram(stats.RowCountHistogram, *Result->Record.MutableTableStats()->MutableRowCountHistogram());
+ // Fill key access sample if it was collected not too long ago
+ if (Self->StopKeyAccessSamplingAt + TDuration::Seconds(30) >= AppData(ctx)->TimeProvider->Now()) {
+ FillKeyAccessSample(tableInfo.Stats.AccessStats, *Result->Record.MutableTableStats()->MutableKeyAccessSample());
+ }
+
+ Result->Record.MutableTableStats()->SetPartCount(tableInfo.Stats.PartCount);
Result->Record.MutableTableStats()->SetSearchHeight(tableInfo.Stats.SearchHeight);
Result->Record.MutableTableStats()->SetLastFullCompactionTs(tableInfo.Stats.LastFullCompaction.Seconds());
-
- Result->Record.SetShardState(Self->State);
- for (const auto& pi : tableInfo.Stats.PartOwners) {
- Result->Record.AddUserTablePartOwners(pi);
- }
-
- for (const auto& pi : Self->SysTablesPartOnwers) {
- Result->Record.AddSysTablesPartOwners(pi);
- }
-
- return true;
- }
-
+
+ Result->Record.SetShardState(Self->State);
+ for (const auto& pi : tableInfo.Stats.PartOwners) {
+ Result->Record.AddUserTablePartOwners(pi);
+ }
+
+ for (const auto& pi : Self->SysTablesPartOnwers) {
+ Result->Record.AddSysTablesPartOwners(pi);
+ }
+
+ return true;
+ }
+
void Complete(const TActorContext& ctx) override {
- ctx.Send(Ev->Sender, Result.Release());
- }
-
-private:
- static void FillHistogram(const NTable::THistogram& h, NKikimrTableStats::THistogram& pb) {
- for (auto& b : h) {
- auto bucket = pb.AddBuckets();
- bucket->SetKey(b.EndKey);
- bucket->SetValue(b.Value);
- }
- }
-
- static void FillKeyAccessSample(const NTable::TKeyAccessSample& s, NKikimrTableStats::THistogram& pb) {
- for (const auto& k : s.GetSample()) {
- auto bucket = pb.AddBuckets();
- bucket->SetKey(k.first);
- bucket->SetValue(1);
- }
- }
-};
-
+ ctx.Send(Ev->Sender, Result.Release());
+ }
+
+private:
+ static void FillHistogram(const NTable::THistogram& h, NKikimrTableStats::THistogram& pb) {
+ for (auto& b : h) {
+ auto bucket = pb.AddBuckets();
+ bucket->SetKey(b.EndKey);
+ bucket->SetValue(b.Value);
+ }
+ }
+
+ static void FillKeyAccessSample(const NTable::TKeyAccessSample& s, NKikimrTableStats::THistogram& pb) {
+ for (const auto& k : s.GetSample()) {
+ auto bucket = pb.AddBuckets();
+ bucket->SetKey(k.first);
+ bucket->SetValue(1);
+ }
+ }
+};
+
void TDataShard::Handle(TEvDataShard::TEvGetTableStats::TPtr& ev, const TActorContext& ctx) {
- Executor()->Execute(new TTxGetTableStats(this, ev), ctx);
-}
-
-template <class TTables>
-void ListTableNames(const TTables& tables, TStringBuilder& names) {
- for (auto& t : tables) {
- if (!names.Empty()) {
- names << ", ";
- }
- names << "[" << t.second->Path << "]";
- }
-}
-
+ Executor()->Execute(new TTxGetTableStats(this, ev), ctx);
+}
+
+template <class TTables>
+void ListTableNames(const TTables& tables, TStringBuilder& names) {
+ for (auto& t : tables) {
+ if (!names.Empty()) {
+ names << ", ";
+ }
+ names << "[" << t.second->Path << "]";
+ }
+}
+
void TDataShard::Handle(TEvPrivate::TEvAsyncTableStats::TPtr& ev, const TActorContext& ctx) {
ui64 tableId = ev->Get()->TableId;
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Stats rebuilt at datashard %" PRIu64, TabletID());
-
- i64 dataSize = 0;
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Stats rebuilt at datashard %" PRIu64, TabletID());
+
+ i64 dataSize = 0;
if (TableInfos.contains(tableId)) {
const TUserTable& tableInfo = *TableInfos[tableId];
-
- if (!tableInfo.StatsUpdateInProgress) {
- // How can this happen?
- LOG_ERROR(ctx, NKikimrServices::TX_DATASHARD,
- "Unexpected async stats update at datashard %" PRIu64, TabletID());
- }
- tableInfo.Stats.Update(std::move(ev->Get()->Stats), ev->Get()->IndexSize,
- std::move(ev->Get()->PartOwners), ev->Get()->PartCount,
- ev->Get()->StatsUpdateTime);
+
+ if (!tableInfo.StatsUpdateInProgress) {
+ // How can this happen?
+ LOG_ERROR(ctx, NKikimrServices::TX_DATASHARD,
+ "Unexpected async stats update at datashard %" PRIu64, TabletID());
+ }
+ tableInfo.Stats.Update(std::move(ev->Get()->Stats), ev->Get()->IndexSize,
+ std::move(ev->Get()->PartOwners), ev->Get()->PartCount,
+ ev->Get()->StatsUpdateTime);
tableInfo.Stats.MemRowCount = ev->Get()->MemRowCount;
tableInfo.Stats.MemDataSize = ev->Get()->MemDataSize;
- dataSize += tableInfo.Stats.DataStats.DataSize;
-
+ dataSize += tableInfo.Stats.DataStats.DataSize;
+
UpdateSearchHeightStats(tableInfo.Stats, ev->Get()->SearchHeight);
- tableInfo.StatsUpdateInProgress = false;
-
- SendPeriodicTableStats(ctx);
- }
-
- if (dataSize > HighDataSizeReportThreshlodBytes) {
- TInstant now = AppData(ctx)->TimeProvider->Now();
-
- if (LastDataSizeWarnTime + TDuration::Seconds(HighDataSizeReportIntervalSeconds) > now)
- return;
-
- LastDataSizeWarnTime = now;
-
- TStringBuilder names;
- ListTableNames(GetUserTables(), names);
-
- LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, "Data size " << dataSize
- << " is higher than threshold of " << (i64)HighDataSizeReportThreshlodBytes
- << " at datashard: " << TabletID()
- << " table: " << names
- << " consider reconfiguring table partitioning settings");
- }
-}
-
-
+ tableInfo.StatsUpdateInProgress = false;
+
+ SendPeriodicTableStats(ctx);
+ }
+
+ if (dataSize > HighDataSizeReportThreshlodBytes) {
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+
+ if (LastDataSizeWarnTime + TDuration::Seconds(HighDataSizeReportIntervalSeconds) > now)
+ return;
+
+ LastDataSizeWarnTime = now;
+
+ TStringBuilder names;
+ ListTableNames(GetUserTables(), names);
+
+ LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, "Data size " << dataSize
+ << " is higher than threshold of " << (i64)HighDataSizeReportThreshlodBytes
+ << " at datashard: " << TabletID()
+ << " table: " << names
+ << " consider reconfiguring table partitioning settings");
+ }
+}
+
+
class TDataShard::TTxInitiateStatsUpdate : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvGetTableStats::TPtr Ev;
- TAutoPtr<TEvDataShard::TEvGetTableStatsResult> Result;
-
-public:
+private:
+ TEvDataShard::TEvGetTableStats::TPtr Ev;
+ TAutoPtr<TEvDataShard::TEvGetTableStatsResult> Result;
+
+public:
TTxInitiateStatsUpdate(TDataShard* ds)
- : TBase(ds)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_INITIATE_STATS_UPDATE; }
-
+ : TBase(ds)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_INITIATE_STATS_UPDATE; }
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- if (Self->State != TShardState::Ready)
- return true;
-
- for (auto& ti : Self->TableInfos) {
+ if (Self->State != TShardState::Ready)
+ return true;
+
+ for (auto& ti : Self->TableInfos) {
const ui32 localTableId = ti.second->LocalTid;
const ui32 shadowTableId = ti.second->ShadowTid;
-
+
if (ti.second->StatsUpdateInProgress) {
// We don't want to update mem counters during updates, since
// it would result in value inconsistencies
@@ -257,46 +257,46 @@ public:
memDataSize += txc.DB.GetTableMemSize(shadowTableId);
searchHeight = 0;
}
-
+
Self->UpdateFullCompactionTsMetric(ti.second->Stats);
if (!ti.second->StatsNeedUpdate) {
ti.second->Stats.MemRowCount = memRowCount;
ti.second->Stats.MemDataSize = memDataSize;
Self->UpdateSearchHeightStats(ti.second->Stats, searchHeight);
- continue;
+ continue;
}
-
+
ui64 tableId = ti.first;
- ui64 rowCountResolution = gDbStatsRowCountResolution;
- ui64 dataSizeResolution = gDbStatsDataSizeResolution;
-
- const ui64 MaxBuckets = 500;
-
- if (ti.second->Stats.DataSizeResolution &&
- ti.second->Stats.DataStats.DataSize / ti.second->Stats.DataSizeResolution <= MaxBuckets)
- {
- dataSizeResolution = ti.second->Stats.DataSizeResolution;
- }
-
- if (ti.second->Stats.RowCountResolution &&
- ti.second->Stats.DataStats.RowCount / ti.second->Stats.RowCountResolution <= MaxBuckets)
- {
- rowCountResolution = ti.second->Stats.RowCountResolution;
- }
-
+ ui64 rowCountResolution = gDbStatsRowCountResolution;
+ ui64 dataSizeResolution = gDbStatsDataSizeResolution;
+
+ const ui64 MaxBuckets = 500;
+
+ if (ti.second->Stats.DataSizeResolution &&
+ ti.second->Stats.DataStats.DataSize / ti.second->Stats.DataSizeResolution <= MaxBuckets)
+ {
+ dataSizeResolution = ti.second->Stats.DataSizeResolution;
+ }
+
+ if (ti.second->Stats.RowCountResolution &&
+ ti.second->Stats.DataStats.RowCount / ti.second->Stats.RowCountResolution <= MaxBuckets)
+ {
+ rowCountResolution = ti.second->Stats.RowCountResolution;
+ }
+
ti.second->StatsUpdateInProgress = true;
ti.second->StatsNeedUpdate = false;
-
- ui64 indexSize = txc.DB.GetTableIndexSize(localTableId);
+
+ ui64 indexSize = txc.DB.GetTableIndexSize(localTableId);
if (shadowTableId) {
indexSize += txc.DB.GetTableIndexSize(shadowTableId);
}
-
+
TAutoPtr<NTable::TSubset> subsetForStats = txc.DB.Subset(localTableId, NTable::TEpoch::Max(), NTable::TRawVals(), NTable::TRawVals());
- // Remove memtables from the subset as we only want to look at indexes for parts
- subsetForStats->Frozen.clear();
-
+ // Remove memtables from the subset as we only want to look at indexes for parts
+ subsetForStats->Frozen.clear();
+
if (shadowTableId) {
// HACK: we combine subsets of different tables
// It's only safe to do as long as stats collector performs
@@ -313,54 +313,54 @@ public:
shadowSubset->ColdParts.end());
}
- auto* builder = new TAsyncTableStatsBuilder(ctx.SelfID,
- tableId,
- indexSize,
- subsetForStats,
+ auto* builder = new TAsyncTableStatsBuilder(ctx.SelfID,
+ tableId,
+ indexSize,
+ subsetForStats,
memRowCount,
memDataSize,
- rowCountResolution,
- dataSizeResolution,
+ rowCountResolution,
+ dataSizeResolution,
searchHeight,
- AppData(ctx)->TimeProvider->Now());
-
+ AppData(ctx)->TimeProvider->Now());
+
ctx.Register(builder, TMailboxType::HTSwap, AppData(ctx)->BatchPoolId);
- }
-
- Self->SysTablesPartOnwers.clear();
- for (ui32 sysTableId : Self->SysTablesToTransferAtSplit) {
- THashSet<ui64> sysPartOwners;
+ }
+
+ Self->SysTablesPartOnwers.clear();
+ for (ui32 sysTableId : Self->SysTablesToTransferAtSplit) {
+ THashSet<ui64> sysPartOwners;
auto subset = txc.DB.Subset(sysTableId, NTable::TEpoch::Max(), { }, { });
NTable::GetPartOwners(*subset, sysPartOwners);
- Self->SysTablesPartOnwers.insert(sysPartOwners.begin(), sysPartOwners.end());
- }
- return true;
- }
-
+ Self->SysTablesPartOnwers.insert(sysPartOwners.begin(), sysPartOwners.end());
+ }
+ return true;
+ }
+
void Complete(const TActorContext& ctx) override {
- Y_UNUSED(ctx);
- }
-};
-
+ Y_UNUSED(ctx);
+ }
+};
+
void TDataShard::UpdateTableStats(const TActorContext &ctx) {
- if (StatisticsDisabled)
- return;
-
- TInstant now = AppData(ctx)->TimeProvider->Now();
-
- if (LastDbStatsUpdateTime + gDbStatsReportInterval > now)
- return;
-
- if (State != TShardState::Ready)
- return;
-
- LastDbStatsUpdateTime = now;
-
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "UpdateTableStats at datashard %" PRIu64, TabletID());
-
- Executor()->Execute(new TTxInitiateStatsUpdate(this), ctx);
-}
-
+ if (StatisticsDisabled)
+ return;
+
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+
+ if (LastDbStatsUpdateTime + gDbStatsReportInterval > now)
+ return;
+
+ if (State != TShardState::Ready)
+ return;
+
+ LastDbStatsUpdateTime = now;
+
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "UpdateTableStats at datashard %" PRIu64, TabletID());
+
+ Executor()->Execute(new TTxInitiateStatsUpdate(this), ctx);
+}
+
void TDataShard::UpdateSearchHeightStats(TUserTable::TStats& stats, ui64 newSearchHeight) {
if (TabletCounters) {
if (stats.LastSearchHeightMetricSet)
@@ -375,7 +375,7 @@ void TDataShard::UpdateSearchHeightStats(TUserTable::TStats& stats, ui64 newSear
void TDataShard::UpdateFullCompactionTsMetric(TUserTable::TStats& stats) {
if (!TabletCounters)
return;
-
+
auto now = AppData()->TimeProvider->Now();
if (now < stats.LastFullCompaction) {
// extra sanity check
@@ -394,36 +394,36 @@ void TDataShard::UpdateFullCompactionTsMetric(TUserTable::TStats& stats) {
}
void TDataShard::CollectCpuUsage(const TActorContext &ctx) {
- auto* metrics = Executor()->GetResourceMetrics();
- TInstant now = AppData(ctx)->TimeProvider->Now();
-
- // advance CPU usage collector to the current time and report very-very small usage
- metrics->CPU.Increment(10, now);
- metrics->TryUpdate(ctx);
-
- if (!metrics->CPU.IsValueReady()) {
- return;
- }
-
- ui64 cpuUsec = metrics->CPU.GetValue();
- float cpuPercent = cpuUsec / 10000.0;
-
- if (cpuPercent > CpuUsageReportThreshlodPercent) {
- if (LastCpuWarnTime + TDuration::Seconds(CpuUsageReportIntervalSeconds) > now)
- return;
-
- LastCpuWarnTime = now;
-
- TStringBuilder names;
- ListTableNames(GetUserTables(), names);
-
- LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, "CPU usage " << cpuPercent
- << "% is higher than threshold of " << (i64)CpuUsageReportThreshlodPercent
- << "% in-flight Tx: " << TxInFly()
- << " immediate Tx: " << ImmediateInFly()
- << " at datashard: " << TabletID()
- << " table: " << names);
- }
-}
-
-}}
+ auto* metrics = Executor()->GetResourceMetrics();
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+
+ // advance CPU usage collector to the current time and report very-very small usage
+ metrics->CPU.Increment(10, now);
+ metrics->TryUpdate(ctx);
+
+ if (!metrics->CPU.IsValueReady()) {
+ return;
+ }
+
+ ui64 cpuUsec = metrics->CPU.GetValue();
+ float cpuPercent = cpuUsec / 10000.0;
+
+ if (cpuPercent > CpuUsageReportThreshlodPercent) {
+ if (LastCpuWarnTime + TDuration::Seconds(CpuUsageReportIntervalSeconds) > now)
+ return;
+
+ LastCpuWarnTime = now;
+
+ TStringBuilder names;
+ ListTableNames(GetUserTables(), names);
+
+ LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, "CPU usage " << cpuPercent
+ << "% is higher than threshold of " << (i64)CpuUsageReportThreshlodPercent
+ << "% in-flight Tx: " << TxInFly()
+ << " immediate Tx: " << ImmediateInFly()
+ << " at datashard: " << TabletID()
+ << " table: " << names);
+ }
+}
+
+}}
diff --git a/ydb/core/tx/datashard/datashard_active_transaction.cpp b/ydb/core/tx/datashard/datashard_active_transaction.cpp
index 01d86d0749d..0226bd42e25 100644
--- a/ydb/core/tx/datashard/datashard_active_transaction.cpp
+++ b/ydb/core/tx/datashard/datashard_active_transaction.cpp
@@ -4,7 +4,7 @@
#include "datashard_kqp.h"
#include "datashard_locks.h"
#include "datashard_impl.h"
-#include "datashard_failpoints.h"
+#include "datashard_failpoints.h"
#include "key_conflicts.h"
#include <library/cpp/actors/core/memory_track.h>
@@ -27,9 +27,9 @@ TValidatedDataTx::TValidatedDataTx(TDataShard *self,
, TxCacheUsage(0)
, IsReleased(false)
, IsReadOnly(true)
- , AllowCancelROwithReadsets(self->AllowCancelROwithReadsets())
- , Cancelled(false)
- , ReceivedAt_(receivedAt)
+ , AllowCancelROwithReadsets(self->AllowCancelROwithReadsets())
+ , Cancelled(false)
+ , ReceivedAt_(receivedAt)
{
bool success = Tx.ParseFromArray(TxBody.data(), TxBody.size());
if (!success) {
@@ -192,7 +192,7 @@ TValidatedDataTx::TValidatedDataTx(TDataShard *self,
ErrCode = ConvertErrCode(result);
}
- ComputeDeadline();
+ ComputeDeadline();
}
TValidatedDataTx::~TValidatedDataTx() {
@@ -268,16 +268,16 @@ ETxOrder TValidatedDataTx::CheckOrder(const TSysLocks& sysLocks, const TValidate
}
bool TValidatedDataTx::CanCancel() {
- if (!IsTxReadOnly()) {
- return false;
- }
-
- if (!AllowCancelROwithReadsets) {
- if (HasOutReadsets() || HasInReadsets()) {
- return false;
- }
- }
-
+ if (!IsTxReadOnly()) {
+ return false;
+ }
+
+ if (!AllowCancelROwithReadsets) {
+ if (HasOutReadsets() || HasInReadsets()) {
+ return false;
+ }
+ }
+
return true;
}
@@ -290,18 +290,18 @@ bool TValidatedDataTx::CheckCancelled() {
return false;
}
- TInstant now = AppData()->TimeProvider->Now();
- Cancelled = (now >= Deadline());
-
- Cancelled = Cancelled || gCancelTxFailPoint.Check(TabletId(), TxId());
-
- if (Cancelled) {
- LOG_NOTICE_S(*TlsActivationContext->ExecutorThread.ActorSystem, NKikimrServices::TX_DATASHARD,
- "CANCELLED TxId " << TxId() << " at " << TabletId());
- }
- return Cancelled;
-}
-
+ TInstant now = AppData()->TimeProvider->Now();
+ Cancelled = (now >= Deadline());
+
+ Cancelled = Cancelled || gCancelTxFailPoint.Check(TabletId(), TxId());
+
+ if (Cancelled) {
+ LOG_NOTICE_S(*TlsActivationContext->ExecutorThread.ActorSystem, NKikimrServices::TX_DATASHARD,
+ "CANCELLED TxId " << TxId() << " at " << TabletId());
+ }
+ return Cancelled;
+}
+
void TValidatedDataTx::ReleaseTxData() {
TxBody = "";
auto lock = Tx.GetLockTxId();
@@ -321,14 +321,14 @@ void TValidatedDataTx::ComputeTxSize() {
TxSize += Tx.ByteSize();
}
-void TValidatedDataTx::ComputeDeadline() {
- Deadline_ = Tx.GetCancelDeadlineMs() ? TInstant::MilliSeconds(Tx.GetCancelDeadlineMs()) : TInstant::Max();
- if (ReceivedAt_ && Tx.GetCancelAfterMs()) {
- // If local timeout is specified in CancelAfterMs then take it into account as well
- Deadline_ = Min(Deadline_, ReceivedAt_ + TDuration::MilliSeconds(Tx.GetCancelAfterMs()));
- }
-}
-
+void TValidatedDataTx::ComputeDeadline() {
+ Deadline_ = Tx.GetCancelDeadlineMs() ? TInstant::MilliSeconds(Tx.GetCancelDeadlineMs()) : TInstant::Max();
+ if (ReceivedAt_ && Tx.GetCancelAfterMs()) {
+ // If local timeout is specified in CancelAfterMs then take it into account as well
+ Deadline_ = Min(Deadline_, ReceivedAt_ + TDuration::MilliSeconds(Tx.GetCancelAfterMs()));
+ }
+}
+
//
TActiveTransaction::TActiveTransaction(const TBasicOpInfo &op,
diff --git a/ydb/core/tx/datashard/datashard_active_transaction.h b/ydb/core/tx/datashard/datashard_active_transaction.h
index 5551c8dcda5..43abd525e15 100644
--- a/ydb/core/tx/datashard/datashard_active_transaction.h
+++ b/ydb/core/tx/datashard/datashard_active_transaction.h
@@ -139,9 +139,9 @@ public:
bool Immediate() const { return Tx.GetImmediate(); }
bool ReadOnly() const { return Tx.GetReadOnly(); }
bool NeedDiagnostics() const { return Tx.GetNeedDiagnostics(); }
- bool CollectStats() const { return Tx.GetCollectStats(); }
- TInstant ReceivedAt() const { return ReceivedAt_; }
- TInstant Deadline() const { return Deadline_; }
+ bool CollectStats() const { return Tx.GetCollectStats(); }
+ TInstant ReceivedAt() const { return ReceivedAt_; }
+ TInstant Deadline() const { return Deadline_; }
TMaybe<ui64> PerShardKeysSizeLimitBytes() const { return PerShardKeysSizeLimitBytes_; }
bool Ready() const { return ErrCode == NKikimrTxDataShard::TError::OK; }
@@ -167,11 +167,11 @@ public:
NMiniKQL::IEngineFlat *GetEngine() { return EngineBay.GetEngine(); }
void DestroyEngine() { EngineBay.DestroyEngine(); }
- const NMiniKQL::TEngineHostCounters& GetCounters() { return EngineBay.GetCounters(); }
+ const NMiniKQL::TEngineHostCounters& GetCounters() { return EngineBay.GetCounters(); }
void ResetCounters() { EngineBay.ResetCounters(); }
bool CanCancel();
- bool CheckCancelled();
+ bool CheckCancelled();
void SetWriteVersion(TRowVersion writeVersion) { EngineBay.SetWriteVersion(writeVersion); }
void SetReadVersion(TRowVersion readVersion) { EngineBay.SetReadVersion(readVersion); }
@@ -221,9 +221,9 @@ public:
bool IsTxReadOnly() const { return IsReadOnly; }
- bool HasOutReadsets() const { return TxInfo().HasOutReadsets; }
- bool HasInReadsets() const { return TxInfo().HasInReadsets; }
-
+ bool HasOutReadsets() const { return TxInfo().HasOutReadsets; }
+ bool HasInReadsets() const { return TxInfo().HasInReadsets; }
+
const NMiniKQL::IEngineFlat::TValidationInfo& TxInfo() const { return EngineBay.TxInfo(); }
private:
@@ -240,13 +240,13 @@ private:
bool IsReleased;
TMaybe<ui64> PerShardKeysSizeLimitBytes_;
bool IsReadOnly;
- bool AllowCancelROwithReadsets;
- bool Cancelled;
- const TInstant ReceivedAt_; // For local timeout tracking
- TInstant Deadline_;
+ bool AllowCancelROwithReadsets;
+ bool Cancelled;
+ const TInstant ReceivedAt_; // For local timeout tracking
+ TInstant Deadline_;
void ComputeTxSize();
- void ComputeDeadline();
+ void ComputeDeadline();
};
enum class ERestoreDataStatus {
diff --git a/ydb/core/tx/datashard/datashard_common_upload.cpp b/ydb/core/tx/datashard/datashard_common_upload.cpp
index f705bcecc04..088da2b5213 100644
--- a/ydb/core/tx/datashard/datashard_common_upload.cpp
+++ b/ydb/core/tx/datashard/datashard_common_upload.cpp
@@ -21,11 +21,11 @@ bool TCommonUploadOps<TEvRequest, TEvResponse>::Execute(TDataShard* self, TTrans
Result = MakeHolder<TEvResponse>(self->TabletID());
TInstant deadline = TInstant::MilliSeconds(record.GetCancelDeadlineMs());
- if (deadline && deadline < AppData()->TimeProvider->Now()) {
- SetError(NKikimrTxDataShard::TError::EXECUTION_CANCELLED, "Deadline exceeded");
+ if (deadline && deadline < AppData()->TimeProvider->Now()) {
+ SetError(NKikimrTxDataShard::TError::EXECUTION_CANCELLED, "Deadline exceeded");
return true;
- }
-
+ }
+
const ui64 tableId = record.GetTableId();
const TTableId fullTableId(self->GetPathOwnerId(), tableId);
const ui64 localTableId = self->GetLocalTableId(fullTableId);
@@ -101,32 +101,32 @@ bool TCommonUploadOps<TEvRequest, TEvResponse>::Execute(TDataShard* self, TTrans
if (keyCells.GetCells().size() != tableInfo.KeyColumnTypes.size() ||
valueCells.GetCells().size() != valueCols.size())
{
- SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, "Cell count doesn't match row scheme");
+ SetError(NKikimrTxDataShard::TError::SCHEME_ERROR, "Cell count doesn't match row scheme");
return true;
}
key.clear();
size_t ki = 0;
- ui64 keyBytes = 0;
+ ui64 keyBytes = 0;
for (const auto& kt : tableInfo.KeyColumnTypes) {
- const TCell& c = keyCells.GetCells()[ki];
- if (kt == NScheme::NTypeIds::Uint8 && !c.IsNull() && c.AsValue<ui8>() > 127) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT, "Keys with Uint8 column values >127 are currently prohibited");
+ const TCell& c = keyCells.GetCells()[ki];
+ if (kt == NScheme::NTypeIds::Uint8 && !c.IsNull() && c.AsValue<ui8>() > 127) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT, "Keys with Uint8 column values >127 are currently prohibited");
return true;
- }
-
- keyBytes += c.Size();
- key.emplace_back(TRawTypeValue(c.AsRef(), kt));
+ }
+
+ keyBytes += c.Size();
+ key.emplace_back(TRawTypeValue(c.AsRef(), kt));
++ki;
}
- if (keyBytes > NLimits::MaxWriteKeySize) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
- Sprintf("Row key size of %" PRISZT " bytes is larger than the allowed threshold %" PRIu64,
- keyBytes, NLimits::MaxWriteKeySize));
+ if (keyBytes > NLimits::MaxWriteKeySize) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
+ Sprintf("Row key size of %" PRISZT " bytes is larger than the allowed threshold %" PRIu64,
+ keyBytes, NLimits::MaxWriteKeySize));
return true;
- }
-
+ }
+
if (readForTableShadow) {
rowState.Init(tagsForSelect.size());
@@ -148,13 +148,13 @@ bool TCommonUploadOps<TEvRequest, TEvResponse>::Execute(TDataShard* self, TTrans
value.clear();
size_t vi = 0;
for (const auto& vt : valueCols) {
- if (valueCells.GetCells()[vi].Size() > NLimits::MaxWriteValueSize) {
- SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
- Sprintf("Row cell size of %" PRISZT " bytes is larger than the allowed threshold %" PRIu64,
- valueCells.GetBuffer().Size(), NLimits::MaxWriteValueSize));
+ if (valueCells.GetCells()[vi].Size() > NLimits::MaxWriteValueSize) {
+ SetError(NKikimrTxDataShard::TError::BAD_ARGUMENT,
+ Sprintf("Row cell size of %" PRISZT " bytes is larger than the allowed threshold %" PRIu64,
+ valueCells.GetBuffer().Size(), NLimits::MaxWriteValueSize));
return true;
- }
-
+ }
+
bool allowUpdate = true;
if (readForTableShadow && rowState == NTable::ERowOp::Upsert && rowState.GetOp(vi) != NTable::ECellOp::Empty) {
            // We don't want to overwrite columns that already have some value
@@ -212,11 +212,11 @@ template <typename TEvRequest, typename TEvResponse>
void TCommonUploadOps<TEvRequest, TEvResponse>::SendResult(TDataShard* self, const TActorContext& ctx) {
Y_VERIFY(Result);
- if (Result->Record.GetStatus() == NKikimrTxDataShard::TError::OK) {
- self->IncCounter(COUNTER_BULK_UPSERT_SUCCESS);
- } else {
- self->IncCounter(COUNTER_BULK_UPSERT_ERROR);
- }
+ if (Result->Record.GetStatus() == NKikimrTxDataShard::TError::OK) {
+ self->IncCounter(COUNTER_BULK_UPSERT_SUCCESS);
+ } else {
+ self->IncCounter(COUNTER_BULK_UPSERT_ERROR);
+ }
ctx.Send(Ev->Sender, std::move(Result));
}
diff --git a/ydb/core/tx/datashard/datashard_failpoints.cpp b/ydb/core/tx/datashard/datashard_failpoints.cpp
index 580845f5677..a8f785dab51 100644
--- a/ydb/core/tx/datashard/datashard_failpoints.cpp
+++ b/ydb/core/tx/datashard/datashard_failpoints.cpp
@@ -1,9 +1,9 @@
-#include "datashard_failpoints.h"
-
-namespace NKikimr {
+#include "datashard_failpoints.h"
+
+namespace NKikimr {
namespace NDataShard {
-
-TCancelTxFailPoint gCancelTxFailPoint;
+
+TCancelTxFailPoint gCancelTxFailPoint;
TSkipRepliesFailPoint gSkipRepliesFailPoint;
-
-}}
+
+}}
diff --git a/ydb/core/tx/datashard/datashard_failpoints.h b/ydb/core/tx/datashard/datashard_failpoints.h
index a0bd847cf18..f6b8cac58bd 100644
--- a/ydb/core/tx/datashard/datashard_failpoints.h
+++ b/ydb/core/tx/datashard/datashard_failpoints.h
@@ -1,71 +1,71 @@
-#pragma once
-
+#pragma once
+
#include <ydb/core/base/defs.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
-// Allows cancelling a transaction by TabletID and TxId
-struct TCancelTxFailPoint {
- TAtomic Enabled;
- TSpinLock Lock;
- ui64 TabletId;
- ui64 TxId;
- ui64 FailAtCount;
- ui64 CurrentCount;
- bool Hit;
-
- TCancelTxFailPoint() {
- Disable();
- }
-
- void Enable(ui64 tabletId, ui64 txId, ui64 count) {
- Disable();
-
- TGuard<TSpinLock> g(Lock);
- TabletId = tabletId;
- TxId = txId;
- FailAtCount = count;
- CurrentCount = 0;
- Hit = false;
- AtomicSet(Enabled, 1);
- }
-
- void Disable() {
- TGuard<TSpinLock> g(Lock);
- AtomicSet(Enabled, 0);
- TabletId = 0;
- TxId = 0;
- FailAtCount = -1;
- CurrentCount = 0;
- Hit = false;
- }
-
- bool Check(ui64 tabletId, ui64 txId) {
- if (!AtomicGet(Enabled))
- return false;
-
- TGuard<TSpinLock> g(Lock);
-
- ui64 failTabletId = AtomicGet(TabletId);
- ui64 failTxId = AtomicGet(TxId);
- i64 failCount = AtomicGet(FailAtCount);
-
- if ((tabletId != failTabletId && failTabletId != (ui64)-1) ||
- (txId != failTxId && failTxId != (ui64)-1)) {
- return false;
- }
-
- i64 prevCount = CurrentCount++;
- if (prevCount == failCount) {
- Hit = true;
- return true;
- }
-
- return false;
- }
-};
-
+
+// Allows cancelling a transaction by TabletID and TxId
+struct TCancelTxFailPoint {
+ TAtomic Enabled;
+ TSpinLock Lock;
+ ui64 TabletId;
+ ui64 TxId;
+ ui64 FailAtCount;
+ ui64 CurrentCount;
+ bool Hit;
+
+ TCancelTxFailPoint() {
+ Disable();
+ }
+
+ void Enable(ui64 tabletId, ui64 txId, ui64 count) {
+ Disable();
+
+ TGuard<TSpinLock> g(Lock);
+ TabletId = tabletId;
+ TxId = txId;
+ FailAtCount = count;
+ CurrentCount = 0;
+ Hit = false;
+ AtomicSet(Enabled, 1);
+ }
+
+ void Disable() {
+ TGuard<TSpinLock> g(Lock);
+ AtomicSet(Enabled, 0);
+ TabletId = 0;
+ TxId = 0;
+ FailAtCount = -1;
+ CurrentCount = 0;
+ Hit = false;
+ }
+
+ bool Check(ui64 tabletId, ui64 txId) {
+ if (!AtomicGet(Enabled))
+ return false;
+
+ TGuard<TSpinLock> g(Lock);
+
+ ui64 failTabletId = AtomicGet(TabletId);
+ ui64 failTxId = AtomicGet(TxId);
+ i64 failCount = AtomicGet(FailAtCount);
+
+ if ((tabletId != failTabletId && failTabletId != (ui64)-1) ||
+ (txId != failTxId && failTxId != (ui64)-1)) {
+ return false;
+ }
+
+ i64 prevCount = CurrentCount++;
+ if (prevCount == failCount) {
+ Hit = true;
+ return true;
+ }
+
+ return false;
+ }
+};
+
// Allows skipping a specified number of replies from datashard by TabletID and TxId
struct TSkipRepliesFailPoint {
TAtomic Enabled;
@@ -119,7 +119,7 @@ struct TSkipRepliesFailPoint {
}
};
-extern TCancelTxFailPoint gCancelTxFailPoint;
+extern TCancelTxFailPoint gCancelTxFailPoint;
extern TSkipRepliesFailPoint gSkipRepliesFailPoint;
-
-}}
+
+}}
diff --git a/ydb/core/tx/datashard/datashard_impl.h b/ydb/core/tx/datashard/datashard_impl.h
index 2faeda0af69..11313474f80 100644
--- a/ydb/core/tx/datashard/datashard_impl.h
+++ b/ydb/core/tx/datashard/datashard_impl.h
@@ -57,58 +57,58 @@ using NTabletFlatExecutor::TScanOptions;
// For CopyTable and MoveShadow
class TTxTableSnapshotContext : public NTabletFlatExecutor::TTableSnapshotContext {
-public:
+public:
TTxTableSnapshotContext(ui64 step, ui64 txId, TVector<ui32>&& tables)
- : StepOrder(step, txId)
- , Tables(tables)
- {}
-
- const TStepOrder& GetStepOrder() const {
- return StepOrder;
- }
-
- virtual TConstArrayRef<ui32> TablesToSnapshot() const override {
- return Tables;
- }
-
-private:
- TStepOrder StepOrder;
+ : StepOrder(step, txId)
+ , Tables(tables)
+ {}
+
+ const TStepOrder& GetStepOrder() const {
+ return StepOrder;
+ }
+
+ virtual TConstArrayRef<ui32> TablesToSnapshot() const override {
+ return Tables;
+ }
+
+private:
+ TStepOrder StepOrder;
TVector<ui32> Tables;
-};
-
-// For Split
-class TSplitSnapshotContext : public NTabletFlatExecutor::TTableSnapshotContext {
-public:
+};
+
+// For Split
+class TSplitSnapshotContext : public NTabletFlatExecutor::TTableSnapshotContext {
+public:
TSplitSnapshotContext(ui64 txId, TVector<ui32> &&tables,
TRowVersion completeEdge = TRowVersion::Min(),
TRowVersion incompleteEdge = TRowVersion::Min(),
TRowVersion lowWatermark = TRowVersion::Min())
- : TxId(txId)
+ : TxId(txId)
, CompleteEdge(completeEdge)
, IncompleteEdge(incompleteEdge)
, LowWatermark(lowWatermark)
- , Tables(tables)
- {}
-
- virtual TConstArrayRef<ui32> TablesToSnapshot() const override {
- return Tables;
- }
-
- ui64 TxId;
+ , Tables(tables)
+ {}
+
+ virtual TConstArrayRef<ui32> TablesToSnapshot() const override {
+ return Tables;
+ }
+
+ ui64 TxId;
TRowVersion CompleteEdge;
TRowVersion IncompleteEdge;
TRowVersion LowWatermark;
-
-private:
+
+private:
TVector<ui32> Tables;
-};
-
-// Base class for non-Transactional scans of DataShard data
-class INoTxScan : public NTable::IScan {
-public:
+};
+
+// Base class for non-Transactional scans of DataShard data
+class INoTxScan : public NTable::IScan {
+public:
virtual void OnFinished(TDataShard* self) = 0;
-};
-
+};
+
struct TReadWriteVersions {
TReadWriteVersions(const TRowVersion& readVersion, const TRowVersion& writeVersion)
: ReadVersion(readVersion)
@@ -148,38 +148,38 @@ class TDataShard
class TTxProgressTransaction;
class TTxCleanupTransaction;
class TTxProposeDataTransaction;
- class TTxProposeSchemeTransaction;
- class TTxCancelTransactionProposal;
+ class TTxProposeSchemeTransaction;
+ class TTxCancelTransactionProposal;
class TTxProposeTransactionBase;
class TTxReadSet;
class TTxSchemaChanged;
- class TTxInitiateBorrowedPartsReturn;
- class TTxReturnBorrowedPart;
- class TTxReturnBorrowedPartAck;
- class TTxInitSplitMergeDestination;
- class TTxSplit;
- class TTxStartSplit;
- class TTxSplitSnapshotComplete;
+ class TTxInitiateBorrowedPartsReturn;
+ class TTxReturnBorrowedPart;
+ class TTxReturnBorrowedPartAck;
+ class TTxInitSplitMergeDestination;
+ class TTxSplit;
+ class TTxStartSplit;
+ class TTxSplitSnapshotComplete;
class TTxSplitReplicationSourceOffsets;
- class TTxSplitTransferSnapshot;
- class TTxSplitTransferSnapshotAck;
- class TTxSplitPartitioningChanged;
+ class TTxSplitTransferSnapshot;
+ class TTxSplitTransferSnapshotAck;
+ class TTxSplitPartitioningChanged;
class TTxStoreTablePath;
- class TTxGoOffline;
- class TTxGetTableStats;
- class TTxMonitoring;
+ class TTxGoOffline;
+ class TTxGetTableStats;
+ class TTxMonitoring;
class TTxMonitoringCleanupBorrowedParts;
class TTxMonitoringCleanupBorrowedPartsActor;
class TTxMonitoringResetSchemaVersion;
class TTxUndelivered;
- class TTxS3Listing;
+ class TTxS3Listing;
class TTxInterruptTransaction;
- class TTxInitiateStatsUpdate;
+ class TTxInitiateStatsUpdate;
class TTxCheckInReadSets;
class TTxRemoveOldInReadSets;
class TTxRead;
class TTxReadContinue;
- class TTxReadColumns;
+ class TTxReadColumns;
class TTxGetInfo;
class TTxListOperations;
class TTxGetOperation;
@@ -223,12 +223,12 @@ class TDataShard
friend class TDataShardMiniKQLFactory;
friend class TDataTransactionProcessor;
- friend class TSchemeTransactionProcessor;
+ friend class TSchemeTransactionProcessor;
friend class TScanTransactionProcessor;
friend class TDataShardEngineHost;
- friend class TTxS3Listing;
+ friend class TTxS3Listing;
friend class TExecuteKqpScanTxUnit;
- friend class TTableScan;
+ friend class TTableScan;
friend class TKqpScan;
friend class TTransQueue;
@@ -237,21 +237,21 @@ class TDataShard
friend class TLocksDataShardAdapter<TDataShard>;
friend class TActiveTransaction;
friend class TValidatedDataTx;
- friend class TEngineBay;
+ friend class TEngineBay;
friend class NMiniKQL::TKqpScanComputeContext;
friend class TSnapshotManager;
friend class TSchemaSnapshotManager;
friend class TReplicationSourceOffsetsClient;
friend class TReplicationSourceOffsetsServer;
- friend class TAsyncTableStatsBuilder;
+ friend class TAsyncTableStatsBuilder;
friend class TReadTableScan;
friend class TWaitForStreamClearanceUnit;
friend class TBuildIndexScan;
- friend class TReadColumnsScan;
+ friend class TReadColumnsScan;
friend class TCondEraseScan;
- friend class TDatashardKeySampler;
-
+ friend class TDatashardKeySampler;
+
friend class TS3UploadsManager;
friend class TS3DownloadsManager;
friend class TS3Downloader;
@@ -283,12 +283,12 @@ class TDataShard
EvFlushOperationCounters,
EvDelayedFlushOperationCounters,
EvProgressOperationHistogramScan,
- EvPeriodicWakeup,
- EvAsyncTableStats,
+ EvPeriodicWakeup,
+ EvAsyncTableStats,
EvRemoveOldInReadSets, // WARNING: tests use ES_PRIVATE + 9
EvRegisterScanActor,
EvNodeDisconnected,
- EvScanStats,
+ EvScanStats,
EvPersistScanState,
EvPersistScanStateAck,
EvConditionalEraseRowsRegistered,
@@ -318,20 +318,20 @@ class TDataShard
struct TEvDelayedFlushOperationCounters : public TEventLocal<TEvDelayedFlushOperationCounters, EvDelayedFlushOperationCounters> {};
struct TEvProgressOperationHistogramScan : public TEventLocal<TEvProgressOperationHistogramScan, EvProgressOperationHistogramScan> {};
-
- struct TEvPeriodicWakeup : public TEventLocal<TEvPeriodicWakeup, EvPeriodicWakeup> {};
-
- struct TEvAsyncTableStats : public TEventLocal<TEvAsyncTableStats, EvAsyncTableStats> {
+
+ struct TEvPeriodicWakeup : public TEventLocal<TEvPeriodicWakeup, EvPeriodicWakeup> {};
+
+ struct TEvAsyncTableStats : public TEventLocal<TEvAsyncTableStats, EvAsyncTableStats> {
ui64 TableId = -1;
- ui64 IndexSize = 0;
- TInstant StatsUpdateTime;
- NTable::TStats Stats;
- THashSet<ui64> PartOwners;
- ui64 PartCount = 0;
+ ui64 IndexSize = 0;
+ TInstant StatsUpdateTime;
+ NTable::TStats Stats;
+ THashSet<ui64> PartOwners;
+ ui64 PartCount = 0;
ui64 MemRowCount = 0;
ui64 MemDataSize = 0;
ui64 SearchHeight = 0;
- };
+ };
struct TEvRemoveOldInReadSets : public TEventLocal<TEvRemoveOldInReadSets, EvRemoveOldInReadSets> {};
@@ -352,12 +352,12 @@ class TDataShard
ui32 NodeId;
};
-
- struct TEvScanStats : public TEventLocal<TEvScanStats, EvScanStats> {
- TEvScanStats(ui64 rows, ui64 bytes) : Rows(rows), Bytes(bytes) {}
- ui64 Rows;
- ui64 Bytes;
- };
+
+ struct TEvScanStats : public TEventLocal<TEvScanStats, EvScanStats> {
+ TEvScanStats(ui64 rows, ui64 bytes) : Rows(rows), Bytes(bytes) {}
+ ui64 Rows;
+ ui64 Bytes;
+ };
// Also updates scan statistic, i.e. like TEvScanStats but persist state for given tx
struct TEvPersistScanState : public TEventLocal<TEvPersistScanState, EvPersistScanState> {
@@ -549,24 +549,24 @@ class TDataShard
using TColumns = TableColumns<TxId, Operation, Source, SourceTablet,
MinStep, MaxStep, PlanStep, ReadOnly, Success, Error, DataSize, Rows>;
};
-
- // Here we persist snapshots metadata to preserve it across Src datashard restarts
- struct SplitSrcSnapshots : Table<10> {
- struct DstTabletId : Column<1, NScheme::NTypeIds::Uint64> {};
+
+ // Here we persist snapshots metadata to preserve it across Src datashard restarts
+ struct SplitSrcSnapshots : Table<10> {
+ struct DstTabletId : Column<1, NScheme::NTypeIds::Uint64> {};
struct SnapshotMeta : Column<2, NScheme::NTypeIds::String> { using Type = TString; };
-
- using TKey = TableKey<DstTabletId>;
- using TColumns = TableColumns<DstTabletId, SnapshotMeta>;
- };
-
- // Here we persist the fact that snapshot has been received by Dst datashard
- struct SplitDstReceivedSnapshots : Table<11> {
- struct SrcTabletId : Column<1, NScheme::NTypeIds::Uint64> {};
-
- using TKey = TableKey<SrcTabletId>;
- using TColumns = TableColumns<SrcTabletId>;
- };
-
+
+ using TKey = TableKey<DstTabletId>;
+ using TColumns = TableColumns<DstTabletId, SnapshotMeta>;
+ };
+
+ // Here we persist the fact that snapshot has been received by Dst datashard
+ struct SplitDstReceivedSnapshots : Table<11> {
+ struct SrcTabletId : Column<1, NScheme::NTypeIds::Uint64> {};
+
+ using TKey = TableKey<SrcTabletId>;
+ using TColumns = TableColumns<SrcTabletId>;
+ };
+
// Additional tx artifacts which can be reused on tx restart.
struct TxArtifacts : Table<12> {
struct TxId : Column<1, NScheme::NTypeIds::Uint64> {};
@@ -755,14 +755,14 @@ class TDataShard
ReplicationSourceOffsets, ReplicationSources, DstReplicationSourceOffsetsReceived,
UserTablesStats, SchemaSnapshots>;
- // These settings are persisted on each Init. So we use empty settings in order not to overwrite what
- // was changed by the user
- struct EmptySettings {
- static void Materialize(NIceDb::TToughDb&) {}
- };
-
- using TSettings = SchemaSettings<EmptySettings>;
-
+ // These settings are persisted on each Init. So we use empty settings in order not to overwrite what
+ // was changed by the user
+ struct EmptySettings {
+ static void Materialize(NIceDb::TToughDb&) {}
+ };
+
+ using TSettings = SchemaSettings<EmptySettings>;
+
enum ESysTableKeys : ui64 {
Sys_Config = 1,
Sys_State,
@@ -775,13 +775,13 @@ class TDataShard
Sys_AliveStep, // Last known step we shouldn't drop at
Sys_TxReadSizeLimit_DEPRECATED, // 10// No longer used but is present in old tables
 Sys_CurrentSchemeShardId, // TabletID of the schemeshard that manages the datashard right now
- Sys_DstSplitDescription, // Split/Merge operation description at destination shard
- Sys_DstSplitOpId, // TxId of split operation at destination shard
- Sys_SrcSplitDescription, // Split/Merge operation description at source shard
- Sys_SrcSplitOpId, // TxId of split operation at source shard
- Sys_LastSchemeShardGeneration, // LastSchemeOpSeqNo.Generation
- Sys_LastSchemeShardRound, // LastSchemeOpSeqNo.Round
- Sys_TxReadSizeLimit, // Maximum size in bytes that is allowed to be read by a single Tx
+ Sys_DstSplitDescription, // Split/Merge operation description at destination shard
+ Sys_DstSplitOpId, // TxId of split operation at destination shard
+ Sys_SrcSplitDescription, // Split/Merge operation description at source shard
+ Sys_SrcSplitOpId, // TxId of split operation at source shard
+ Sys_LastSchemeShardGeneration, // LastSchemeOpSeqNo.Generation
+ Sys_LastSchemeShardRound, // LastSchemeOpSeqNo.Round
+ Sys_TxReadSizeLimit, // Maximum size in bytes that is allowed to be read by a single Tx
Sys_SubDomainInfo, //19 Subdomain setting which owns this table
Sys_StatisticsDisabled,
Sys_DstSplitSchemaInitialized,
@@ -817,7 +817,7 @@ class TDataShard
static constexpr ui64 MinLocalTid = TSysTables::SysTableMAX + 1; // 1000
- static constexpr const char* UserTablePrefix = "__user__";
+ static constexpr const char* UserTablePrefix = "__user__";
static constexpr const char* ShadowTablePrefix = "__shadow__";
};
@@ -855,14 +855,14 @@ class TDataShard
}
inline static bool SysGetBytes(NIceDb::TNiceDb& db, ui64 row, TString& value) {
- auto rowset = db.Table<Schema::Sys>().Key(row).Select<Schema::Sys::Bytes>();
- if (!rowset.IsReady())
- return false;
- if (rowset.IsValid())
- value = rowset.GetValue<Schema::Sys::Bytes>();
- return true;
- }
-
+ auto rowset = db.Table<Schema::Sys>().Key(row).Select<Schema::Sys::Bytes>();
+ if (!rowset.IsReady())
+ return false;
+ if (rowset.IsValid())
+ value = rowset.GetValue<Schema::Sys::Bytes>();
+ return true;
+ }
+
template <typename TEvHandle>
void ForwardEventToOperation(TAutoPtr<TEvHandle> ev, const TActorContext &ctx) {
TOperation::TPtr op = Pipeline.FindOp(ev->Get()->Record.GetTxId());
@@ -883,7 +883,7 @@ class TDataShard
void Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx);
void Handle(TEvDataShard::TEvGetShardState::TPtr &ev, const TActorContext &ctx);
void Handle(TEvDataShard::TEvSchemaChangedResult::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvDataShard::TEvStateChangedResult::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvDataShard::TEvStateChangedResult::TPtr &ev, const TActorContext &ctx);
void Handle(TEvDataShard::TEvProposeTransaction::TPtr &ev, const TActorContext &ctx);
void Handle(TEvDataShard::TEvProposeTransactionAttach::TPtr &ev, const TActorContext &ctx);
void HandleAsFollower(TEvDataShard::TEvProposeTransaction::TPtr &ev, const TActorContext &ctx);
@@ -897,7 +897,7 @@ class TDataShard
void Handle(TEvPrivate::TEvProgressResendReadSet::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPrivate::TEvRemoveOldInReadSets::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPrivate::TEvRegisterScanActor::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvPrivate::TEvScanStats::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvPrivate::TEvScanStats::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPrivate::TEvPersistScanState::TPtr &ev, const TActorContext &ctx);
void Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActorContext &ctx);
void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TActorContext &ctx);
@@ -905,20 +905,20 @@ class TDataShard
void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr &ev, const TActorContext &ctx);
void Handle(TEvMediatorTimecast::TEvRegisterTabletResult::TPtr& ev, const TActorContext& ctx);
void Handle(TEvMediatorTimecast::TEvNotifyPlanStep::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvCancelTransactionProposal::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvDataShard::TEvReturnBorrowedPart::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvReturnBorrowedPartAck::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvInitSplitMergeDestination::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplit::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplitTransferSnapshot::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvCancelTransactionProposal::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvDataShard::TEvReturnBorrowedPart::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvReturnBorrowedPartAck::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvInitSplitMergeDestination::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplit::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplitTransferSnapshot::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPrivate::TEvReplicationSourceOffsets::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplitTransferSnapshotAck::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplitPartitioningChanged::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvGetTableStats::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvPrivate::TEvAsyncTableStats::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplitTransferSnapshotAck::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplitPartitioningChanged::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvGetTableStats::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvPrivate::TEvAsyncTableStats::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvS3ListingRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvKqpScan::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvUploadRowsRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvUploadRowsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvEraseRowsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvConditionalEraseRowsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPrivate::TEvConditionalEraseRowsRegistered::TPtr& ev, const TActorContext& ctx);
@@ -926,7 +926,7 @@ class TDataShard
void Handle(TEvDataShard::TEvReadContinue::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvReadAck::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvReadCancel::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvReadColumnsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvGetInfoRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvListOperationsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvDataShard::TEvGetDataHistogramRequest::TPtr& ev, const TActorContext& ctx);
@@ -994,7 +994,7 @@ class TDataShard
void HandleByReplicationSourceOffsetsServer(STATEFN_SIG);
- void DoPeriodicTasks(const TActorContext &ctx);
+ void DoPeriodicTasks(const TActorContext &ctx);
TDuration GetDataTxCompleteLag()
{
@@ -1007,9 +1007,9 @@ class TDataShard
return TDuration::MilliSeconds(Pipeline.GetScanTxCompleteLag(mediatorTime));
}
- void UpdateLagCounters(const TActorContext &ctx);
+ void UpdateLagCounters(const TActorContext &ctx);
static NTabletPipe::TClientConfig GetPipeClientConfig();
-
+
void OnDetach(const TActorContext &ctx) override;
void OnTabletStop(TEvTablet::TEvTabletStop::TPtr &ev, const TActorContext &ctx) override;
void OnStopGuardStarting(const TActorContext &ctx);
@@ -1024,10 +1024,10 @@ class TDataShard
TMaybe<TInstant> GetTxPlanStartTimeAndCleanup(ui64 step);
void RestartPipeRS(ui64 tabletId, const TActorContext& ctx);
- void AckRSToDeletedTablet(ui64 tabletId, const TActorContext& ctx);
+ void AckRSToDeletedTablet(ui64 tabletId, const TActorContext& ctx);
void DefaultSignalTabletActive(const TActorContext &ctx) override {
- // This is overridden in order to postpone SignalTabletActive until TxInit completes
+ // This is overridden in order to postpone SignalTabletActive until TxInit completes
Y_UNUSED(ctx);
}
@@ -1039,8 +1039,8 @@ class TDataShard
void PersistUserTableFullCompactionTs(NIceDb::TNiceDb& db, ui64 tableId, ui64 ts);
void PersistMoveUserTable(NIceDb::TNiceDb& db, ui64 prevTableId, ui64 tableId, const TUserTable& tableInfo);
- void DropAllUserTables(TTransactionContext& txc);
- void PurgeTxTables(TTransactionContext& txc);
+ void DropAllUserTables(TTransactionContext& txc);
+ void PurgeTxTables(TTransactionContext& txc);
bool CheckMediatorAuthorisation(ui64 mediatorId);
@@ -1113,8 +1113,8 @@ public:
bool IsStateActive() const {
return State == TShardState::Ready ||
- State == TShardState::Readonly ||
- State == TShardState::WaitScheme ||
+ State == TShardState::Readonly ||
+ State == TShardState::WaitScheme ||
State == TShardState::SplitSrcWaitForNoTxInFlight ||
State == TShardState::Frozen;
}
@@ -1127,11 +1127,11 @@ public:
ui32 Generation() const { return Executor()->Generation(); }
bool IsFollower() const { return Executor()->GetStats().IsFollower; }
bool SyncSchemeOnFollower(NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx,
- NKikimrTxDataShard::TError::EKind & status, TString& errMessage);
+ NKikimrTxDataShard::TError::EKind & status, TString& errMessage);
ui64 GetMaxTxInFly() { return MaxTxInFly; }
-
- static constexpr ui64 DefaultTxStepDeadline() { return 30 * 1000; }
+
+ static constexpr ui64 DefaultTxStepDeadline() { return 30 * 1000; }
static constexpr ui64 PipeClientCachePoolLimit() { return 100; }
ui64 TxInFly() const { return TransQueue.TxInFly(); }
@@ -1146,20 +1146,20 @@ public:
}
bool CanDrop() const {
- Y_VERIFY(State != TShardState::Offline, "Unexpected repeated drop");
- return (TxInFly() == 1) && OutReadSets.Empty() && (State != TShardState::PreOffline);
+ Y_VERIFY(State != TShardState::Offline, "Unexpected repeated drop");
+ return (TxInFly() == 1) && OutReadSets.Empty() && (State != TShardState::PreOffline);
}
void UpdateProposeQueueSize() const;
void CheckDelayedProposeQueue(const TActorContext &ctx);
- bool CheckDataTxReject(const TString& opDescr,
+ bool CheckDataTxReject(const TString& opDescr,
const TActorContext &ctx,
NKikimrTxDataShard::TEvProposeTransactionResult::EStatus& rejectStatus,
TString &reason);
bool CheckDataTxRejectAndReply(TEvDataShard::TEvProposeTransaction* msg, const TActorContext& ctx);
-
+
TSysLocks& SysLocksTable() { return SysLocks; }
static const TString& GetUserTablePrefix() {
@@ -1198,10 +1198,10 @@ public:
return it == TableInfos.end() ? 0 : it->second->ShadowTid;
}
- ui64 GetTxReadSizeLimit() const {
+ ui64 GetTxReadSizeLimit() const {
return TxReadSizeLimit ? TxReadSizeLimit : (ui64)PerShardReadSizeLimit;
- }
-
+ }
+
ui64 GetDataTxProfileLogThresholdMs() const {
return DataTxProfileLogThresholdMs;
}
@@ -1250,34 +1250,34 @@ public:
return Executor()->BorrowSnapshot(tableId, ctx, from, to, loaner);
}
- void SnapshotComplete(TIntrusivePtr<NTabletFlatExecutor::TTableSnapshotContext> snapContext, const TActorContext &ctx) override;
- void CompactionComplete(ui32 tableId, const TActorContext &ctx) override;
+ void SnapshotComplete(TIntrusivePtr<NTabletFlatExecutor::TTableSnapshotContext> snapContext, const TActorContext &ctx) override;
+ void CompactionComplete(ui32 tableId, const TActorContext &ctx) override;
void CompletedLoansChanged(const TActorContext &ctx) override;
void ReplyCompactionWaiters(ui32 tableId, ui64 edge, const TActorContext &ctx);
TUserTable::TSpecialUpdate SpecialUpdates(const NTable::TDatabase& db, const TTableId& tableId) const;
- void SetTableAccessTime(const TTableId& tableId, TInstant ts);
- void SetTableUpdateTime(const TTableId& tableId, TInstant ts);
- void SampleKeyAccess(const TTableId& tableId, const TArrayRef<const TCell>& row);
- NMiniKQL::IKeyAccessSampler::TPtr GetKeyAccessSampler();
- void EnableKeyAccessSampling(const TActorContext &ctx, TInstant until);
- void UpdateTableStats(const TActorContext& ctx);
+ void SetTableAccessTime(const TTableId& tableId, TInstant ts);
+ void SetTableUpdateTime(const TTableId& tableId, TInstant ts);
+ void SampleKeyAccess(const TTableId& tableId, const TArrayRef<const TCell>& row);
+ NMiniKQL::IKeyAccessSampler::TPtr GetKeyAccessSampler();
+ void EnableKeyAccessSampling(const TActorContext &ctx, TInstant until);
+ void UpdateTableStats(const TActorContext& ctx);
void UpdateSearchHeightStats(TUserTable::TStats& stats, ui64 newSearchHeight);
void UpdateFullCompactionTsMetric(TUserTable::TStats& stats);
- void CollectCpuUsage(const TActorContext& ctx);
-
+ void CollectCpuUsage(const TActorContext& ctx);
+
void ScanComplete(NTable::EAbort status, TAutoPtr<IDestructable> prod, ui64 cookie, const TActorContext &ctx) override;
bool ReassignChannelsEnabled() const override;
ui64 GetMemoryUsage() const override;
- bool HasSharedBlobs() const;
+ bool HasSharedBlobs() const;
void CheckInitiateBorrowedPartsReturn(const TActorContext& ctx);
- void CheckStateChange(const TActorContext& ctx);
- void CheckSplitCanStart(const TActorContext& ctx);
+ void CheckStateChange(const TActorContext& ctx);
+ void CheckSplitCanStart(const TActorContext& ctx);
void CheckMvccStateChangeCanStart(const TActorContext& ctx);
-
+
ui32 GetState() const { return State; }
TSwitchState GetMvccSwitchState() { return MvccSwitchState; }
void SetPersistState(ui32 state, TTransactionContext &txc)
@@ -1421,8 +1421,8 @@ public:
TReadWriteVersions GetReadWriteVersions(TOperation* op = nullptr) const;
- void FillExecutionStats(const TExecutionProfile& execProfile, TEvDataShard::TEvProposeTransactionResult& result) const;
-
+ void FillExecutionStats(const TExecutionProfile& execProfile, TEvDataShard::TEvProposeTransactionResult& result) const;
+
// Executes TTxProgressTransaction without specific operation
void ExecuteProgressTx(const TActorContext& ctx);
@@ -1445,44 +1445,44 @@ public:
private:
///
- class TLoanReturnTracker {
- struct TLoanReturnInfo {
+ class TLoanReturnTracker {
+ struct TLoanReturnInfo {
TActorId PipeToOwner;
THashSet<TLogoBlobID> PartMeta;
- };
-
- ui64 MyTabletID;
- // TabletID -> non-acked loans
+ };
+
+ ui64 MyTabletID;
+ // TabletID -> non-acked loans
THashMap<ui64, TLoanReturnInfo> LoanReturns;
- // part -> owner
+ // part -> owner
THashMap<TLogoBlobID, ui64> LoanOwners;
NTabletPipe::TClientRetryPolicy PipeRetryPolicy;
-
- public:
- explicit TLoanReturnTracker(ui64 myTabletId)
- : MyTabletID(myTabletId)
+
+ public:
+ explicit TLoanReturnTracker(ui64 myTabletId)
+ : MyTabletID(myTabletId)
, PipeRetryPolicy{
.RetryLimitCount = 20,
.MinRetryTime = TDuration::MilliSeconds(10),
.MaxRetryTime = TDuration::MilliSeconds(500),
.BackoffMultiplier = 2}
- {}
-
- TLoanReturnTracker(const TLoanReturnTracker&) = delete;
- TLoanReturnTracker& operator=(const TLoanReturnTracker&) = delete;
-
- void Shutdown(const TActorContext& ctx) {
- for (auto& info : LoanReturns) {
- NTabletPipe::CloseClient(ctx, info.second.PipeToOwner);
- }
- LoanReturns.clear();
- }
-
+ {}
+
+ TLoanReturnTracker(const TLoanReturnTracker&) = delete;
+ TLoanReturnTracker& operator=(const TLoanReturnTracker&) = delete;
+
+ void Shutdown(const TActorContext& ctx) {
+ for (auto& info : LoanReturns) {
+ NTabletPipe::CloseClient(ctx, info.second.PipeToOwner);
+ }
+ LoanReturns.clear();
+ }
+
void ReturnLoan(ui64 ownerTabletId, const TVector<TLogoBlobID>& partMetaVec, const TActorContext& ctx) {
- TLoanReturnInfo& info = LoanReturns[ownerTabletId];
-
+ TLoanReturnInfo& info = LoanReturns[ownerTabletId];
+
TVector<TLogoBlobID> partsToReturn(Reserve(partMetaVec.size()));
- for (const auto& partMeta : partMetaVec) {
+ for (const auto& partMeta : partMetaVec) {
auto it = LoanOwners.find(partMeta);
if (it != LoanOwners.end()) {
Y_VERIFY(it->second == ownerTabletId,
@@ -1494,153 +1494,153 @@ private:
partsToReturn.emplace_back(partMeta);
}
}
-
+
if (partsToReturn.empty()) {
return;
- }
-
- if (!info.PipeToOwner) {
- NTabletPipe::TClientConfig clientConfig;
- clientConfig.CheckAliveness = true;
- clientConfig.RetryPolicy = PipeRetryPolicy;
- info.PipeToOwner = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, ownerTabletId, clientConfig));
- }
-
+ }
+
+ if (!info.PipeToOwner) {
+ NTabletPipe::TClientConfig clientConfig;
+ clientConfig.CheckAliveness = true;
+ clientConfig.RetryPolicy = PipeRetryPolicy;
+ info.PipeToOwner = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, ownerTabletId, clientConfig));
+ }
+
THolder<TEvDataShard::TEvReturnBorrowedPart> ev = MakeHolder<TEvDataShard::TEvReturnBorrowedPart>(MyTabletID, partMetaVec);
- NTabletPipe::SendData(ctx, info.PipeToOwner, ev.Release());
- }
-
- void ResendLoans(ui64 ownerTabletId, const TActorContext& ctx) {
+ NTabletPipe::SendData(ctx, info.PipeToOwner, ev.Release());
+ }
+
+ void ResendLoans(ui64 ownerTabletId, const TActorContext& ctx) {
if (!LoanReturns.contains(ownerTabletId))
- return;
-
+ return;
+
THashSet<TLogoBlobID> toResend;
- toResend.swap(LoanReturns[ownerTabletId].PartMeta);
-
- LoanReturns.erase(ownerTabletId);
-
- ReturnLoan(ownerTabletId, {toResend.begin(), toResend.end()}, ctx);
- }
-
- void AutoAckLoans(ui64 deadTabletId, const TActorContext& ctx) {
+ toResend.swap(LoanReturns[ownerTabletId].PartMeta);
+
+ LoanReturns.erase(ownerTabletId);
+
+ ReturnLoan(ownerTabletId, {toResend.begin(), toResend.end()}, ctx);
+ }
+
+ void AutoAckLoans(ui64 deadTabletId, const TActorContext& ctx) {
if (!LoanReturns.contains(deadTabletId))
- return;
-
- TVector<TLogoBlobID> partMetaVec(LoanReturns[deadTabletId].PartMeta.begin(), LoanReturns[deadTabletId].PartMeta.end());
-
- ctx.Send(ctx.SelfID, new TEvDataShard::TEvReturnBorrowedPartAck(partMetaVec));
- }
-
- void LoanDone(TLogoBlobID partMeta, const TActorContext& ctx) {
+ return;
+
+ TVector<TLogoBlobID> partMetaVec(LoanReturns[deadTabletId].PartMeta.begin(), LoanReturns[deadTabletId].PartMeta.end());
+
+ ctx.Send(ctx.SelfID, new TEvDataShard::TEvReturnBorrowedPartAck(partMetaVec));
+ }
+
+ void LoanDone(TLogoBlobID partMeta, const TActorContext& ctx) {
if (!LoanOwners.contains(partMeta))
- return;
-
- ui64 ownerTabletId = LoanOwners[partMeta];
- LoanOwners.erase(partMeta);
- LoanReturns[ownerTabletId].PartMeta.erase(partMeta);
-
- if (LoanReturns[ownerTabletId].PartMeta.empty()) {
- NTabletPipe::CloseClient(ctx, LoanReturns[ownerTabletId].PipeToOwner);
- LoanReturns.erase(ownerTabletId);
- }
- }
-
+ return;
+
+ ui64 ownerTabletId = LoanOwners[partMeta];
+ LoanOwners.erase(partMeta);
+ LoanReturns[ownerTabletId].PartMeta.erase(partMeta);
+
+ if (LoanReturns[ownerTabletId].PartMeta.empty()) {
+ NTabletPipe::CloseClient(ctx, LoanReturns[ownerTabletId].PipeToOwner);
+ LoanReturns.erase(ownerTabletId);
+ }
+ }
+
bool Has(ui64 ownerTabletId, TActorId pipeClientActorId) const {
return LoanReturns.contains(ownerTabletId) && LoanReturns.FindPtr(ownerTabletId)->PipeToOwner == pipeClientActorId;
- }
-
- bool Empty() const {
- return LoanReturns.empty();
- }
- };
-
+ }
+
+ bool Empty() const {
+ return LoanReturns.empty();
+ }
+ };
+
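// Illustrative sketch (standalone, not part of the diff above): the loan-return
// bookkeeping of TLoanReturnTracker modeled with plain STL containers. THashMap and
// THashSet are assumed to behave like std::unordered_map/std::unordered_set; the actor
// and pipe plumbing is omitted, and all names in this sketch are made up for clarity.
#include <cstdint>
#include <string>
#include <unordered_map>
#include <unordered_set>

struct LoanTrackerSketch {
    // owner tablet id -> parts whose return has not been acked yet (cf. LoanReturns)
    std::unordered_map<uint64_t, std::unordered_set<std::string>> PendingByOwner;
    // part id -> owner tablet id (cf. LoanOwners)
    std::unordered_map<std::string, uint64_t> OwnerByPart;

    void ReturnLoan(uint64_t owner, const std::string& part) {
        OwnerByPart[part] = owner;          // remember who lent the part
        PendingByOwner[owner].insert(part); // keep it until the owner acks
    }

    void LoanDone(const std::string& part) {
        auto it = OwnerByPart.find(part);
        if (it == OwnerByPart.end())
            return;                         // unknown part: nothing to do
        uint64_t owner = it->second;
        OwnerByPart.erase(it);
        auto& pending = PendingByOwner[owner];
        pending.erase(part);
        if (pending.empty())
            PendingByOwner.erase(owner);    // all parts acked: forget the owner
    }

    bool Empty() const { return PendingByOwner.empty(); }
};

int main() {
    LoanTrackerSketch t;
    t.ReturnLoan(72075186224037890ULL, "part-1");
    t.LoanDone("part-1");
    return t.Empty() ? 0 : 1;               // 0: nothing left to ack
}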
///
- class TSplitSrcSnapshotSender {
- public:
+ class TSplitSrcSnapshotSender {
+ public:
TSplitSrcSnapshotSender(TDataShard* self)
: Self(self)
{ }
- void AddDst(ui64 dstTabletId) {
- Dst.insert(dstTabletId);
- }
-
+ void AddDst(ui64 dstTabletId) {
+ Dst.insert(dstTabletId);
+ }
+
const THashSet<ui64>& GetDstSet() const {
- return Dst;
- }
-
- void SaveSnapshotForSending(ui64 dstTabletId, TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot) {
+ return Dst;
+ }
+
+ void SaveSnapshotForSending(ui64 dstTabletId, TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot) {
Y_VERIFY(Dst.contains(dstTabletId));
- DataToSend[dstTabletId] = snapshot;
- }
-
- void DoSend(const TActorContext &ctx) {
- Y_VERIFY(Dst.size() == DataToSend.size());
- for (const auto& ds : DataToSend) {
- ui64 dstTablet = ds.first;
- DoSend(dstTablet, ctx);
- }
- }
-
- void DoSend(ui64 dstTabletId, const TActorContext &ctx) {
+ DataToSend[dstTabletId] = snapshot;
+ }
+
+ void DoSend(const TActorContext &ctx) {
+ Y_VERIFY(Dst.size() == DataToSend.size());
+ for (const auto& ds : DataToSend) {
+ ui64 dstTablet = ds.first;
+ DoSend(dstTablet, ctx);
+ }
+ }
+
+ void DoSend(ui64 dstTabletId, const TActorContext &ctx) {
Y_VERIFY(Dst.contains(dstTabletId));
- NTabletPipe::TClientConfig clientConfig;
- PipesToDstShards[dstTabletId] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, dstTabletId, clientConfig));
-
+ NTabletPipe::TClientConfig clientConfig;
+ PipesToDstShards[dstTabletId] = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, dstTabletId, clientConfig));
+
THolder<TEvDataShard::TEvSplitTransferSnapshot> ev = MakeHolder<TEvDataShard::TEvSplitTransferSnapshot>(0);
- ev->Record.CopyFrom(*DataToSend[dstTabletId]);
+ ev->Record.CopyFrom(*DataToSend[dstTabletId]);
ev->Record.SetSrcTabletGeneration(Self->Generation());
-
- auto fnCalcTotalSize = [] (const TEvDataShard::TEvSplitTransferSnapshot& ev) {
- ui64 size = 0;
- for (ui32 i = 0; i < ev.Record.TableSnapshotSize(); ++i) {
- size += ev.Record.GetTableSnapshot(i).GetSnapshotData().size();
- }
- return size;
- };
-
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- "Sending snapshot for split opId " << ev->Record.GetOperationCookie()
- << " from datashard " << ev->Record.GetSrcTabletId()
- << " to datashard " << dstTabletId << " size " << fnCalcTotalSize(*ev));
-
- NTabletPipe::SendData(ctx, PipesToDstShards[dstTabletId], ev.Release());
- }
-
- void AckSnapshot(ui64 dstTabletId, const TActorContext &ctx) {
+
+ auto fnCalcTotalSize = [] (const TEvDataShard::TEvSplitTransferSnapshot& ev) {
+ ui64 size = 0;
+ for (ui32 i = 0; i < ev.Record.TableSnapshotSize(); ++i) {
+ size += ev.Record.GetTableSnapshot(i).GetSnapshotData().size();
+ }
+ return size;
+ };
+
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ "Sending snapshot for split opId " << ev->Record.GetOperationCookie()
+ << " from datashard " << ev->Record.GetSrcTabletId()
+ << " to datashard " << dstTabletId << " size " << fnCalcTotalSize(*ev));
+
+ NTabletPipe::SendData(ctx, PipesToDstShards[dstTabletId], ev.Release());
+ }
+
+ void AckSnapshot(ui64 dstTabletId, const TActorContext &ctx) {
if (!DataToSend.contains(dstTabletId))
- return;
-
- NTabletPipe::CloseClient(ctx, PipesToDstShards[dstTabletId]);
- PipesToDstShards.erase(dstTabletId);
- DataToSend.erase(dstTabletId);
- }
-
- bool AllAcked() const {
- return DataToSend.empty();
- }
-
+ return;
+
+ NTabletPipe::CloseClient(ctx, PipesToDstShards[dstTabletId]);
+ PipesToDstShards.erase(dstTabletId);
+ DataToSend.erase(dstTabletId);
+ }
+
+ bool AllAcked() const {
+ return DataToSend.empty();
+ }
+
bool Acked(ui64 dstTabletId) const {
return !DataToSend.contains(dstTabletId);
}
bool Has(ui64 dstTabletId, TActorId pipeClientActorId) const {
return PipesToDstShards.contains(dstTabletId) && *PipesToDstShards.FindPtr(dstTabletId) == pipeClientActorId;
- }
-
- void Shutdown(const TActorContext &ctx) {
- for (const auto& p : PipesToDstShards) {
- NTabletPipe::CloseClient(ctx, p.second);
- }
- }
-
- private:
+ }
+
+ void Shutdown(const TActorContext &ctx) {
+ for (const auto& p : PipesToDstShards) {
+ NTabletPipe::CloseClient(ctx, p.second);
+ }
+ }
+
+ private:
TDataShard* Self;
THashSet<ui64> Dst;
THashMap<ui64, TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot>> DataToSend;
THashMap<ui64, TActorId> PipesToDstShards;
- };
-
+ };
+
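// Illustrative sketch (standalone, not part of the diff above): the ack bookkeeping of
// TSplitSrcSnapshotSender reduced to STL containers. The protobuf snapshot is assumed to
// be an opaque string here; TotalSize mirrors the sum that fnCalcTotalSize logs in DoSend.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>

struct SplitSnapshotSenderSketch {
    std::unordered_map<uint64_t, std::string> DataToSend; // dst tablet -> serialized snapshot

    void Save(uint64_t dst, std::string snapshot) { DataToSend[dst] = std::move(snapshot); }

    std::size_t TotalSize(uint64_t dst) const {
        auto it = DataToSend.find(dst);
        return it == DataToSend.end() ? 0 : it->second.size();
    }

    void Ack(uint64_t dst) { DataToSend.erase(dst); }     // a duplicate ack is harmless
    bool AllAcked() const { return DataToSend.empty(); }  // split source may proceed when true
};

int main() {
    SplitSnapshotSenderSketch s;
    s.Save(1001, std::string(128, 'x'));
    std::printf("size=%zu allAcked=%d\n", s.TotalSize(1001), (int)s.AllAcked()); // 128, 0
    s.Ack(1001);
    std::printf("allAcked=%d\n", (int)s.AllAcked());                             // 1
}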
///
class TChangeSenderActivator {
public:
@@ -1786,7 +1786,7 @@ private:
TTabletCountersBase* TabletCounters;
TAutoPtr<TTabletCountersBase> TabletCountersPtr;
- TAlignedPagePoolCounters AllocCounters;
+ TAlignedPagePoolCounters AllocCounters;
TTxProgressIdempotentScalarQueue<TEvPrivate::TEvProgressTransaction> PlanQueue;
TTxProgressIdempotentScalarScheduleQueue<TEvPrivate::TEvCleanupTransaction> CleanupQueue;
@@ -1900,11 +1900,11 @@ private:
TSchemeOpSeqNo LastSchemeOpSeqNo;
TInstant LastDbStatsUpdateTime;
TInstant LastDbStatsReportTime;
- TInstant LastCpuWarnTime;
- TInstant LastDataSizeWarnTime;
+ TInstant LastCpuWarnTime;
+ TInstant LastDataSizeWarnTime;
TActorId DbStatsReportPipe;
TActorId TableResolvePipe;
- ui64 StatsReportRound = 0;
+ ui64 StatsReportRound = 0;
TActorId FindSubDomainPathIdActor;
@@ -1918,8 +1918,8 @@ private:
TSwitchState MvccSwitchState;
bool SplitSnapshotStarted; // Non-persistent flag that is used to restart snapshot in case of datashard restart
- TSplitSrcSnapshotSender SplitSrcSnapshotSender;
- // TODO: make this persistent
+ TSplitSrcSnapshotSender SplitSrcSnapshotSender;
+ // TODO: make this persistent
THashSet<ui64> ReceiveSnapshotsFrom;
ui64 DstSplitOpId;
ui64 SrcSplitOpId;
@@ -1928,15 +1928,15 @@ private:
std::shared_ptr<NKikimrTxDataShard::TSplitMergeDescription> SrcSplitDescription;
THashSet<TActorId> SrcAckSplitTo;
THashMap<TActorId, THashSet<ui64>> SrcAckPartitioningChangedTo;
- const ui32 SysTablesToTransferAtSplit[4] = {
- Schema::TxMain::TableId,
- Schema::TxDetails::TableId,
- // Schema::InReadSets::TableId, // need to fix InReadSets cleanup
- Schema::PlanQueue::TableId,
- Schema::DeadlineQueue::TableId
- };
- THashSet<ui64> SysTablesPartOnwers;
-
+ const ui32 SysTablesToTransferAtSplit[4] = {
+ Schema::TxMain::TableId,
+ Schema::TxDetails::TableId,
+ // Schema::InReadSets::TableId, // need to fix InReadSets cleanup
+ Schema::PlanQueue::TableId,
+ Schema::DeadlineQueue::TableId
+ };
+ THashSet<ui64> SysTablesPartOnwers;
+
// Sys table contents
ui32 State;
ui32 LastLocalTid;
@@ -1947,18 +1947,18 @@ private:
ui64 StatisticsDisabled;
bool Stopping = false;
- NMiniKQL::IKeyAccessSampler::TPtr DisabledKeySampler;
- NMiniKQL::IKeyAccessSampler::TPtr EnabledKeySampler;
- NMiniKQL::IKeyAccessSampler::TPtr CurrentKeySampler; // Points to enabled or disabled
- TInstant StartedKeyAccessSamplingAt;
- TInstant StopKeyAccessSamplingAt;
-
+ NMiniKQL::IKeyAccessSampler::TPtr DisabledKeySampler;
+ NMiniKQL::IKeyAccessSampler::TPtr EnabledKeySampler;
+ NMiniKQL::IKeyAccessSampler::TPtr CurrentKeySampler; // Points to enabled or disabled
+ TInstant StartedKeyAccessSamplingAt;
+ TInstant StopKeyAccessSamplingAt;
+
THashMap<ui64, TUserTable::TCPtr> TableInfos; // tableId -> local table info
TTransQueue TransQueue;
TOutReadSets OutReadSets;
TPipeline Pipeline;
TSysLocks SysLocks;
-
+
TSnapshotManager SnapshotManager;
TSchemaSnapshotManager SchemaSnapshotManager;
@@ -1972,23 +1972,23 @@ private:
TIntrusivePtr<TMediatorTimecastEntry> MediatorTimeCastEntry;
TSet<ui64> MediatorTimeCastWaitingSteps;
- TControlWrapper DisableByKeyFilter;
+ TControlWrapper DisableByKeyFilter;
TControlWrapper MaxTxInFly;
- TControlWrapper MaxTxLagMilliseconds;
+ TControlWrapper MaxTxLagMilliseconds;
TControlWrapper CanCancelROWithReadSets;
TControlWrapper PerShardReadSizeLimit;
- TControlWrapper CpuUsageReportThreshlodPercent;
- TControlWrapper CpuUsageReportIntervalSeconds;
- TControlWrapper HighDataSizeReportThreshlodBytes;
- TControlWrapper HighDataSizeReportIntervalSeconds;
-
+ TControlWrapper CpuUsageReportThreshlodPercent;
+ TControlWrapper CpuUsageReportIntervalSeconds;
+ TControlWrapper HighDataSizeReportThreshlodBytes;
+ TControlWrapper HighDataSizeReportIntervalSeconds;
+
TControlWrapper DataTxProfileLogThresholdMs;
TControlWrapper DataTxProfileBufferThresholdMs;
TControlWrapper DataTxProfileBufferSize;
- TControlWrapper ReadColumnsScanEnabled;
- TControlWrapper ReadColumnsScanInUserPool;
-
+ TControlWrapper ReadColumnsScanEnabled;
+ TControlWrapper ReadColumnsScanInUserPool;
+
TControlWrapper BackupReadAheadLo;
TControlWrapper BackupReadAheadHi;
@@ -2099,18 +2099,18 @@ protected:
// Redundant init state required by flat executor implementation
void StateInit(TAutoPtr<NActors::IEventHandle> &ev, const NActors::TActorContext &ctx) {
TRACE_EVENT(NKikimrServices::TX_DATASHARD);
- switch (ev->GetTypeRewrite()) {
+ switch (ev->GetTypeRewrite()) {
HFuncTraced(TEvents::TEvPoisonPill, Handle);
- default:
- StateInitImpl(ev, ctx);
- }
+ default:
+ StateInitImpl(ev, ctx);
+ }
}
- void Enqueue(STFUNC_SIG) override {
+ void Enqueue(STFUNC_SIG) override {
LOG_WARN_S(ctx, NKikimrServices::TX_DATASHARD, "TDataShard::StateInit unhandled event type: " << ev->GetTypeRewrite()
<< " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
- }
-
+ }
+
// In this state we are not handling external pipes to datashard tablet (it's just another init phase)
void StateInactive(TAutoPtr<NActors::IEventHandle> &ev, const NActors::TActorContext &ctx) {
TRACE_EVENT(NKikimrServices::TX_DATASHARD);
@@ -2134,7 +2134,7 @@ protected:
HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvDataShard::TEvGetShardState, Handle);
HFuncTraced(TEvDataShard::TEvSchemaChangedResult, Handle);
- HFuncTraced(TEvDataShard::TEvStateChangedResult, Handle);
+ HFuncTraced(TEvDataShard::TEvStateChangedResult, Handle);
HFuncTraced(TEvDataShard::TEvProposeTransaction, Handle);
HFuncTraced(TEvDataShard::TEvProposeTransactionAttach, Handle);
HFuncTraced(TEvDataShard::TEvCancelBackup, Handle);
@@ -2158,7 +2158,7 @@ protected:
HFuncTraced(TEvPrivate::TEvProgressResendReadSet, Handle);
HFuncTraced(TEvPrivate::TEvRemoveOldInReadSets, Handle);
HFuncTraced(TEvPrivate::TEvRegisterScanActor, Handle);
- HFuncTraced(TEvPrivate::TEvScanStats, Handle);
+ HFuncTraced(TEvPrivate::TEvScanStats, Handle);
HFuncTraced(TEvPrivate::TEvPersistScanState, Handle);
HFuncTraced(TEvTabletPipe::TEvClientConnected, Handle);
HFuncTraced(TEvTabletPipe::TEvClientDestroyed, Handle);
@@ -2168,19 +2168,19 @@ protected:
HFuncTraced(TEvMediatorTimecast::TEvNotifyPlanStep, Handle);
HFuncTraced(TEvDataShard::TEvCancelTransactionProposal, Handle);
HFuncTraced(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult, Handle);
- HFunc(TEvDataShard::TEvReturnBorrowedPart, Handle);
- HFunc(TEvDataShard::TEvReturnBorrowedPartAck, Handle);
- HFunc(TEvDataShard::TEvInitSplitMergeDestination, Handle);
- HFunc(TEvDataShard::TEvSplit, Handle);
- HFunc(TEvDataShard::TEvSplitTransferSnapshot, Handle);
+ HFunc(TEvDataShard::TEvReturnBorrowedPart, Handle);
+ HFunc(TEvDataShard::TEvReturnBorrowedPartAck, Handle);
+ HFunc(TEvDataShard::TEvInitSplitMergeDestination, Handle);
+ HFunc(TEvDataShard::TEvSplit, Handle);
+ HFunc(TEvDataShard::TEvSplitTransferSnapshot, Handle);
HFunc(TEvPrivate::TEvReplicationSourceOffsets, Handle);
- HFunc(TEvDataShard::TEvSplitTransferSnapshotAck, Handle);
- HFunc(TEvDataShard::TEvSplitPartitioningChanged, Handle);
- HFunc(TEvDataShard::TEvGetTableStats, Handle);
- HFunc(TEvPrivate::TEvAsyncTableStats, Handle);
- HFunc(TEvDataShard::TEvS3ListingRequest, Handle);
+ HFunc(TEvDataShard::TEvSplitTransferSnapshotAck, Handle);
+ HFunc(TEvDataShard::TEvSplitPartitioningChanged, Handle);
+ HFunc(TEvDataShard::TEvGetTableStats, Handle);
+ HFunc(TEvPrivate::TEvAsyncTableStats, Handle);
+ HFunc(TEvDataShard::TEvS3ListingRequest, Handle);
HFunc(TEvDataShard::TEvKqpScan, Handle);
- HFunc(TEvDataShard::TEvUploadRowsRequest, Handle);
+ HFunc(TEvDataShard::TEvUploadRowsRequest, Handle);
HFunc(TEvDataShard::TEvEraseRowsRequest, Handle);
HFunc(TEvDataShard::TEvConditionalEraseRowsRequest, Handle);
HFunc(TEvPrivate::TEvConditionalEraseRowsRegistered, Handle);
@@ -2188,7 +2188,7 @@ protected:
HFunc(TEvDataShard::TEvReadContinue, Handle);
HFunc(TEvDataShard::TEvReadAck, Handle);
HFunc(TEvDataShard::TEvReadCancel, Handle);
- HFunc(TEvDataShard::TEvReadColumnsRequest, Handle);
+ HFunc(TEvDataShard::TEvReadColumnsRequest, Handle);
HFunc(TEvDataShard::TEvGetInfoRequest, Handle);
HFunc(TEvDataShard::TEvListOperationsRequest, Handle);
HFunc(TEvDataShard::TEvGetDataHistogramRequest, Handle);
@@ -2202,7 +2202,7 @@ protected:
HFunc(TEvDataShard::TEvDiscardVolatileSnapshotRequest, Handle);
HFuncTraced(TEvDataShard::TEvBuildIndexCreateRequest, Handle);
HFunc(TEvPrivate::TEvAsyncJobComplete, Handle);
- CFunc(TEvPrivate::EvPeriodicWakeup, DoPeriodicTasks);
+ CFunc(TEvPrivate::EvPeriodicWakeup, DoPeriodicTasks);
HFunc(TEvents::TEvUndelivered, Handle);
IgnoreFunc(TEvInterconnect::TEvNodeConnected);
HFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
@@ -2237,26 +2237,26 @@ protected:
}
}
- // This is the main state
+ // This is the main state
void StateWorkAsFollower(TAutoPtr<NActors::IEventHandle> &ev, const NActors::TActorContext &ctx) {
TRACE_EVENT(NKikimrServices::TX_DATASHARD);
- switch (ev->GetTypeRewrite()) {
+ switch (ev->GetTypeRewrite()) {
hFunc(TEvents::TEvGone, Handle);
HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvDataShard::TEvProposeTransaction, HandleAsFollower);
HFuncTraced(TEvPrivate::TEvDelayedProposeTransaction, Handle);
- HFuncTraced(TEvDataShard::TEvReadColumnsRequest, Handle);
+ HFuncTraced(TEvDataShard::TEvReadColumnsRequest, Handle);
HFuncTraced(TEvTabletPipe::TEvServerConnected, Handle);
HFuncTraced(TEvTabletPipe::TEvServerDisconnected, Handle);
- default:
- if (!HandleDefaultEvents(ev, ctx)) {
+ default:
+ if (!HandleDefaultEvents(ev, ctx)) {
LOG_WARN_S(ctx, NKikimrServices::TX_DATASHARD, "TDataShard::StateWorkAsFollower unhandled event type: " << ev->GetTypeRewrite()
<< " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
- }
- break;
- }
- }
-
+ }
+ break;
+ }
+ }
+
// State after tablet takes poison pill
void StateBroken(TAutoPtr<NActors::IEventHandle> &ev, const NActors::TActorContext &ctx) {
TRACE_EVENT(NKikimrServices::TX_DATASHARD);
@@ -2294,9 +2294,9 @@ protected:
StopFindSubDomainPathId();
StopWatchingSubDomainPathId();
- LoanReturnTracker.Shutdown(ctx);
- Y_VERIFY(LoanReturnTracker.Empty());
- SplitSrcSnapshotSender.Shutdown(ctx);
+ LoanReturnTracker.Shutdown(ctx);
+ Y_VERIFY(LoanReturnTracker.Empty());
+ SplitSrcSnapshotSender.Shutdown(ctx);
return IActor::Die(ctx);
}
@@ -2309,7 +2309,7 @@ protected:
void SendViaSchemeshardPipe(const TActorContext &ctx, ui64 tabletId, THolder<TEvDataShard::TEvSchemaChanged> event) {
Y_VERIFY(tabletId);
Y_VERIFY(CurrentSchemeShardId == tabletId);
-
+
if (!SchemeShardPipe) {
NTabletPipe::TClientConfig clientConfig;
SchemeShardPipe = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, tabletId, clientConfig));
@@ -2317,108 +2317,108 @@ protected:
NTabletPipe::SendData(ctx, SchemeShardPipe, event.Release());
}
- void ReportState(const TActorContext &ctx, ui32 state) {
- LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, TabletID() << " Reporting state " << DatashardStateName(State)
+ void ReportState(const TActorContext &ctx, ui32 state) {
+ LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, TabletID() << " Reporting state " << DatashardStateName(State)
<< " to schemeshard " << CurrentSchemeShardId);
- Y_VERIFY(state != TShardState::Offline || !HasSharedBlobs(),
- "Datashard %" PRIu64 " tried to go offline while having shared blobs", TabletID());
- if (!StateReportPipe) {
- NTabletPipe::TClientConfig clientConfig;
- clientConfig.RetryPolicy = SchemeShardPipeRetryPolicy;
+ Y_VERIFY(state != TShardState::Offline || !HasSharedBlobs(),
+ "Datashard %" PRIu64 " tried to go offline while having shared blobs", TabletID());
+ if (!StateReportPipe) {
+ NTabletPipe::TClientConfig clientConfig;
+ clientConfig.RetryPolicy = SchemeShardPipeRetryPolicy;
StateReportPipe = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, CurrentSchemeShardId, clientConfig));
- }
- THolder<TEvDataShard::TEvStateChanged> ev(new TEvDataShard::TEvStateChanged(ctx.SelfID, TabletID(), state));
- NTabletPipe::SendData(ctx, StateReportPipe, ev.Release());
- }
-
- void SendPeriodicTableStats(const TActorContext &ctx) {
+ }
+ THolder<TEvDataShard::TEvStateChanged> ev(new TEvDataShard::TEvStateChanged(ctx.SelfID, TabletID(), state));
+ NTabletPipe::SendData(ctx, StateReportPipe, ev.Release());
+ }
+
+ void SendPeriodicTableStats(const TActorContext &ctx) {
if (StatisticsDisabled)
return;
- TInstant now = AppData(ctx)->TimeProvider->Now();
-
- if (LastDbStatsReportTime + gDbStatsReportInterval > now)
- return;
-
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+
+ if (LastDbStatsReportTime + gDbStatsReportInterval > now)
+ return;
+
auto* resourceMetrics = Executor()->GetResourceMetrics();
- for (const auto& t : TableInfos) {
- ui64 tableId = t.first;
-
+ for (const auto& t : TableInfos) {
+ ui64 tableId = t.first;
+
const TUserTable &ti = *t.second;
-
- // Don't report stats until they are built for the first time
- if (!ti.Stats.StatsUpdateTime)
- break;
-
- if (!DbStatsReportPipe) {
- NTabletPipe::TClientConfig clientConfig;
+
+ // Don't report stats until they are built for the first time
+ if (!ti.Stats.StatsUpdateTime)
+ break;
+
+ if (!DbStatsReportPipe) {
+ NTabletPipe::TClientConfig clientConfig;
DbStatsReportPipe = ctx.Register(NTabletPipe::CreateClient(ctx.SelfID, CurrentSchemeShardId, clientConfig));
- }
-
+ }
+
THolder<TEvDataShard::TEvPeriodicTableStats> ev(new TEvDataShard::TEvPeriodicTableStats(TabletID(), PathOwnerId, tableId));
- ev->Record.SetShardState(State);
- ev->Record.SetGeneration(Executor()->Generation());
- ev->Record.SetRound(StatsReportRound++);
- ev->Record.MutableTableStats()->SetRowCount(ti.Stats.DataStats.RowCount + ti.Stats.MemRowCount);
- ev->Record.MutableTableStats()->SetDataSize(ti.Stats.DataStats.DataSize + ti.Stats.MemDataSize);
- ev->Record.MutableTableStats()->SetIndexSize(ti.Stats.IndexSize);
- ev->Record.MutableTableStats()->SetLastAccessTime(ti.Stats.AccessTime.MilliSeconds());
- ev->Record.MutableTableStats()->SetLastUpdateTime(ti.Stats.UpdateTime.MilliSeconds());
-
- ev->Record.MutableTableStats()->SetImmediateTxCompleted(TabletCounters->Cumulative()[COUNTER_PREPARE_IMMEDIATE].Get());
- ev->Record.MutableTableStats()->SetPlannedTxCompleted(TabletCounters->Cumulative()[COUNTER_PLANNED_TX_COMPLETE].Get());
- ev->Record.MutableTableStats()->SetTxRejectedByOverload(TabletCounters->Cumulative()[COUNTER_PREPARE_OVERLOADED].Get());
- ev->Record.MutableTableStats()->SetTxRejectedBySpace(TabletCounters->Cumulative()[COUNTER_PREPARE_OUT_OF_SPACE].Get());
- ev->Record.MutableTableStats()->SetTxCompleteLagMsec(TabletCounters->Simple()[COUNTER_TX_COMPLETE_LAG].Get());
- ev->Record.MutableTableStats()->SetInFlightTxCount(TabletCounters->Simple()[COUNTER_TX_IN_FLY].Get() +
- TabletCounters->Simple()[COUNTER_IMMEDIATE_TX_IN_FLY].Get());
-
- ev->Record.MutableTableStats()->SetRowUpdates(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_UPDATE_ROW].Get() +
- TabletCounters->Cumulative()[COUNTER_UPLOAD_ROWS].Get());
- ev->Record.MutableTableStats()->SetRowDeletes(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_ERASE_ROW].Get());
- ev->Record.MutableTableStats()->SetRowReads(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_ROW].Get());
- ev->Record.MutableTableStats()->SetRangeReads(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_RANGE].Get());
- ev->Record.MutableTableStats()->SetRangeReadRows(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_RANGE_ROWS].Get());
+ ev->Record.SetShardState(State);
+ ev->Record.SetGeneration(Executor()->Generation());
+ ev->Record.SetRound(StatsReportRound++);
+ ev->Record.MutableTableStats()->SetRowCount(ti.Stats.DataStats.RowCount + ti.Stats.MemRowCount);
+ ev->Record.MutableTableStats()->SetDataSize(ti.Stats.DataStats.DataSize + ti.Stats.MemDataSize);
+ ev->Record.MutableTableStats()->SetIndexSize(ti.Stats.IndexSize);
+ ev->Record.MutableTableStats()->SetLastAccessTime(ti.Stats.AccessTime.MilliSeconds());
+ ev->Record.MutableTableStats()->SetLastUpdateTime(ti.Stats.UpdateTime.MilliSeconds());
+
+ ev->Record.MutableTableStats()->SetImmediateTxCompleted(TabletCounters->Cumulative()[COUNTER_PREPARE_IMMEDIATE].Get());
+ ev->Record.MutableTableStats()->SetPlannedTxCompleted(TabletCounters->Cumulative()[COUNTER_PLANNED_TX_COMPLETE].Get());
+ ev->Record.MutableTableStats()->SetTxRejectedByOverload(TabletCounters->Cumulative()[COUNTER_PREPARE_OVERLOADED].Get());
+ ev->Record.MutableTableStats()->SetTxRejectedBySpace(TabletCounters->Cumulative()[COUNTER_PREPARE_OUT_OF_SPACE].Get());
+ ev->Record.MutableTableStats()->SetTxCompleteLagMsec(TabletCounters->Simple()[COUNTER_TX_COMPLETE_LAG].Get());
+ ev->Record.MutableTableStats()->SetInFlightTxCount(TabletCounters->Simple()[COUNTER_TX_IN_FLY].Get() +
+ TabletCounters->Simple()[COUNTER_IMMEDIATE_TX_IN_FLY].Get());
+
+ ev->Record.MutableTableStats()->SetRowUpdates(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_UPDATE_ROW].Get() +
+ TabletCounters->Cumulative()[COUNTER_UPLOAD_ROWS].Get());
+ ev->Record.MutableTableStats()->SetRowDeletes(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_ERASE_ROW].Get());
+ ev->Record.MutableTableStats()->SetRowReads(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_ROW].Get());
+ ev->Record.MutableTableStats()->SetRangeReads(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_RANGE].Get());
+ ev->Record.MutableTableStats()->SetRangeReadRows(TabletCounters->Cumulative()[COUNTER_ENGINE_HOST_SELECT_RANGE_ROWS].Get());
if (resourceMetrics != nullptr) {
resourceMetrics->Fill(*ev->Record.MutableTabletMetrics());
}
-
- ev->Record.MutableTableStats()->SetPartCount(ti.Stats.PartCount);
+
+ ev->Record.MutableTableStats()->SetPartCount(ti.Stats.PartCount);
ev->Record.MutableTableStats()->SetSearchHeight(ti.Stats.SearchHeight);
ev->Record.MutableTableStats()->SetLastFullCompactionTs(ti.Stats.LastFullCompaction.Seconds());
-
+
if (!ti.Stats.PartOwners.contains(TabletID())) {
- ev->Record.AddUserTablePartOwners(TabletID());
- }
- for (const auto& pi : ti.Stats.PartOwners) {
- ev->Record.AddUserTablePartOwners(pi);
- }
- for (const auto& pi : SysTablesPartOnwers) {
- ev->Record.AddSysTablesPartOwners(pi);
- }
-
+ ev->Record.AddUserTablePartOwners(TabletID());
+ }
+ for (const auto& pi : ti.Stats.PartOwners) {
+ ev->Record.AddUserTablePartOwners(pi);
+ }
+ for (const auto& pi : SysTablesPartOnwers) {
+ ev->Record.AddSysTablesPartOwners(pi);
+ }
+
ev->Record.SetNodeId(ctx.ExecutorThread.ActorSystem->NodeId);
ev->Record.SetStartTime(StartTime().MilliSeconds());
- NTabletPipe::SendData(ctx, DbStatsReportPipe, ev.Release());
- }
-
- LastDbStatsReportTime = now;
- }
-
- bool OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) override;
+ NTabletPipe::SendData(ctx, DbStatsReportPipe, ev.Release());
+ }
+
+ LastDbStatsReportTime = now;
+ }
+
+ bool OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) override;
void SerializeHistogram(const TUserTable &tinfo,
const NTable::THistogram &histogram,
const NScheme::TTypeRegistry &typeRegistry,
NKikimrTxDataShard::TEvGetDataHistogramResponse::THistogram &hist);
- void SerializeKeySample(const TUserTable &tinfo,
- const NTable::TKeyAccessSample &keySample,
- const NScheme::TTypeRegistry &typeRegistry,
- NKikimrTxDataShard::TEvGetDataHistogramResponse::THistogram &hist);
+ void SerializeKeySample(const TUserTable &tinfo,
+ const NTable::TKeyAccessSample &keySample,
+ const NScheme::TTypeRegistry &typeRegistry,
+ NKikimrTxDataShard::TEvGetDataHistogramResponse::THistogram &hist);
- bool ByKeyFilterDisabled() const;
- bool AllowCancelROwithReadsets() const;
+ bool ByKeyFilterDisabled() const;
+ bool AllowCancelROwithReadsets() const;
void ResolveTablePath(const TActorContext &ctx);
};
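// Illustrative sketch (standalone, not part of the diff above): the rate limiting used by
// SendPeriodicTableStats, i.e. skip the wakeup unless a full report interval has passed
// since the last report. std::chrono stands in for TInstant and gDbStatsReportInterval.
#include <chrono>
#include <cstdio>

bool ShouldReport(std::chrono::steady_clock::time_point now,
                  std::chrono::steady_clock::time_point& lastReport,
                  std::chrono::milliseconds interval) {
    if (lastReport + interval > now)
        return false;       // too early, wait for the next periodic wakeup
    lastReport = now;       // corresponds to LastDbStatsReportTime = now
    return true;
}

int main() {
    using namespace std::chrono;
    steady_clock::time_point last{};               // never reported yet
    steady_clock::time_point t0{hours(1)};         // an arbitrary "now" for the example
    std::printf("%d\n", (int)ShouldReport(t0, last, seconds(10)));               // 1
    std::printf("%d\n", (int)ShouldReport(t0 + seconds(3), last, seconds(10)));  // 0
}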
diff --git a/ydb/core/tx/datashard/datashard_loans.cpp b/ydb/core/tx/datashard/datashard_loans.cpp
index 21e90b9f781..adbfc4769db 100644
--- a/ydb/core/tx/datashard/datashard_loans.cpp
+++ b/ydb/core/tx/datashard/datashard_loans.cpp
@@ -1,222 +1,222 @@
-#include "datashard_impl.h"
-
+#include "datashard_impl.h"
+
#include <ydb/core/tablet_flat/tablet_flat_executor.h>
-
+
#include <util/string/join.h>
-namespace NKikimr {
+namespace NKikimr {
namespace NDataShard {
-
-// Find and return parts that are no longer needed on the target datashard
+
+// Find and return parts that are no longer needed on the target datashard
class TDataShard::TTxInitiateBorrowedPartsReturn : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
+private:
THashMap<TLogoBlobID, NTabletFlatExecutor::TCompactedPartLoans> PartsToReturn;
-
-public:
+
+public:
TTxInitiateBorrowedPartsReturn(TDataShard* ds)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_INITIATE_BORROWED_PARTS_RETURN; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(txc);
- Y_UNUSED(ctx);
- // Prepare the list of parts to return
- PartsToReturn = *Self->Executor()->GetStats().CompactedPartLoans;
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- // group parts by owner tablet
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_INITIATE_BORROWED_PARTS_RETURN; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(txc);
+ Y_UNUSED(ctx);
+ // Prepare the list of parts to return
+ PartsToReturn = *Self->Executor()->GetStats().CompactedPartLoans;
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ // group parts by owner tablet
THashMap<ui64, TVector<TLogoBlobID>> perTabletParts;
- for (const auto& p : PartsToReturn) {
- ui64 ownerTabletId = p.second.Lender;
- TLogoBlobID partMeta = p.second.MetaInfoId;
-
- perTabletParts[ownerTabletId].push_back(partMeta);
- }
-
- for (const auto& batch : perTabletParts) {
- // open a pipe to the part owner and send part metadata batch
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " initiating parts " << batch.second << " return to " << batch.first);
- Self->LoanReturnTracker.ReturnLoan(batch.first, batch.second, ctx);
- }
- }
-};
-
+ for (const auto& p : PartsToReturn) {
+ ui64 ownerTabletId = p.second.Lender;
+ TLogoBlobID partMeta = p.second.MetaInfoId;
+
+ perTabletParts[ownerTabletId].push_back(partMeta);
+ }
+
+ for (const auto& batch : perTabletParts) {
+ // open a pipe to the part owner and send part metadata batch
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " initiating parts " << batch.second << " return to " << batch.first);
+ Self->LoanReturnTracker.ReturnLoan(batch.first, batch.second, ctx);
+ }
+ }
+};
+
NTabletFlatExecutor::ITransaction* TDataShard::CreateTxInitiateBorrowedPartsReturn() {
- return new TTxInitiateBorrowedPartsReturn(this);
-}
-
+ return new TTxInitiateBorrowedPartsReturn(this);
+}
+
void TDataShard::CompletedLoansChanged(const TActorContext &ctx) {
- Y_VERIFY(Executor()->GetStats().CompactedPartLoans);
-
+ Y_VERIFY(Executor()->GetStats().CompactedPartLoans);
+
CheckInitiateBorrowedPartsReturn(ctx);
-}
-
-// Accept returned part on the source datashard
+}
+
+// Accept returned part on the source datashard
class TDataShard::TTxReturnBorrowedPart : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvReturnBorrowedPart::TPtr Ev;
+private:
+ TEvDataShard::TEvReturnBorrowedPart::TPtr Ev;
TVector<TLogoBlobID> PartMetaVec;
- ui64 FromTabletId;
-public:
+ ui64 FromTabletId;
+public:
TTxReturnBorrowedPart(TDataShard* ds, TEvDataShard::TEvReturnBorrowedPart::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_RETURN_BORROWED_PART; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(ctx);
-
- FromTabletId = Ev->Get()->Record.GetFromTabletId();
- for (ui32 i = 0; i < Ev->Get()->Record.PartMetadataSize(); ++i) {
- TLogoBlobID partMeta = LogoBlobIDFromLogoBlobID(Ev->Get()->Record.GetPartMetadata(i));
- PartMetaVec.push_back(partMeta);
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " got returned parts " << PartMetaVec << " from " << FromTabletId);
-
+ , Ev(ev)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_RETURN_BORROWED_PART; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(ctx);
+
+ FromTabletId = Ev->Get()->Record.GetFromTabletId();
+ for (ui32 i = 0; i < Ev->Get()->Record.PartMetadataSize(); ++i) {
+ TLogoBlobID partMeta = LogoBlobIDFromLogoBlobID(Ev->Get()->Record.GetPartMetadata(i));
+ PartMetaVec.push_back(partMeta);
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " got returned parts " << PartMetaVec << " from " << FromTabletId);
+
txc.Env.CleanupLoan(partMeta, FromTabletId);
- }
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- // Send Ack
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ // Send Ack
TActorId ackTo = Ev->Sender;
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack parts " << PartMetaVec << " return to tablet " << FromTabletId);
-
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack parts " << PartMetaVec << " return to tablet " << FromTabletId);
+
ctx.Send(ackTo, new TEvDataShard::TEvReturnBorrowedPartAck(PartMetaVec), 0, Ev->Cookie);
- Self->CheckStateChange(ctx);
- }
-};
-
-// Forget the returned part on the target after the Ack from the source
+ Self->CheckStateChange(ctx);
+ }
+};
+
+// Forget the returned part on the target after the Ack from the source
class TDataShard::TTxReturnBorrowedPartAck : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvReturnBorrowedPartAck::TPtr Ev;
+private:
+ TEvDataShard::TEvReturnBorrowedPartAck::TPtr Ev;
TVector<TLogoBlobID> PartMetaVec;
-
-public:
+
+public:
TTxReturnBorrowedPartAck(TDataShard* ds, TEvDataShard::TEvReturnBorrowedPartAck::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_RETURN_BORROWED_PART_ACK; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(ctx);
-
- // Unregister the returned parts
- for (ui32 i = 0; i < Ev->Get()->Record.PartMetadataSize(); ++i) {
- TLogoBlobID partMeta = LogoBlobIDFromLogoBlobID(Ev->Get()->Record.GetPartMetadata(i));
- PartMetaVec.push_back(partMeta);
-
- TLogoBlobID borrowId;
+ , Ev(ev)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_RETURN_BORROWED_PART_ACK; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(ctx);
+
+ // Unregister the returned parts
+ for (ui32 i = 0; i < Ev->Get()->Record.PartMetadataSize(); ++i) {
+ TLogoBlobID partMeta = LogoBlobIDFromLogoBlobID(Ev->Get()->Record.GetPartMetadata(i));
+ PartMetaVec.push_back(partMeta);
+
+ TLogoBlobID borrowId;
txc.Env.ConfirmLoan(partMeta, borrowId);
- }
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " parts " << PartMetaVec << " return ack processed");
- for (const auto& partMeta : PartMetaVec) {
- Self->LoanReturnTracker.LoanDone(partMeta, ctx);
- }
- Self->CheckStateChange(ctx);
- }
-};
-
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " parts " << PartMetaVec << " return ack processed");
+ for (const auto& partMeta : PartMetaVec) {
+ Self->LoanReturnTracker.LoanDone(partMeta, ctx);
+ }
+ Self->CheckStateChange(ctx);
+ }
+};
+
void TDataShard::Handle(TEvDataShard::TEvReturnBorrowedPart::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxReturnBorrowedPart(this, ev), ctx);
-}
-
+ Execute(new TTxReturnBorrowedPart(this, ev), ctx);
+}
+
void TDataShard::Handle(TEvDataShard::TEvReturnBorrowedPartAck::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxReturnBorrowedPartAck(this, ev), ctx);
-}
-
+ Execute(new TTxReturnBorrowedPartAck(this, ev), ctx);
+}
+
bool TDataShard::HasSharedBlobs() const {
- const bool* hasSharedBlobsPtr = Executor()->GetStats().HasSharedBlobs;
+ const bool* hasSharedBlobsPtr = Executor()->GetStats().HasSharedBlobs;
if (!hasSharedBlobsPtr) {
Y_VERIFY(Executor()->GetStats().IsFollower);
return false;
}
- return *hasSharedBlobsPtr;
-}
-
-
-// Switch to Offline state and notify the schemeshard so that it can initiate tablet deletion
+ return *hasSharedBlobsPtr;
+}
+
+
+// Switch to Offline state and notify the schemeshard so that it can initiate tablet deletion
class TDataShard::TTxGoOffline : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-public:
+public:
explicit TTxGoOffline(TDataShard* ds)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_GO_OFFLINE; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(ctx);
-
- if (Self->State == TShardState::Offline)
- return true;
-
- Y_VERIFY(Self->State == TShardState::PreOffline, "Unexpected state %s tabletId %" PRIu64,
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_GO_OFFLINE; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(ctx);
+
+ if (Self->State == TShardState::Offline)
+ return true;
+
+ Y_VERIFY(Self->State == TShardState::PreOffline, "Unexpected state %s tabletId %" PRIu64,
DatashardStateName(Self->State).data(), Self->TabletID());
- Y_VERIFY(!Self->HasSharedBlobs(), "Cannot go offline while there are shared blobs at tablet %" PRIu64, Self->TabletID());
+ Y_VERIFY(!Self->HasSharedBlobs(), "Cannot go offline while there are shared blobs at tablet %" PRIu64, Self->TabletID());
Y_VERIFY(!Self->TransQueue.TxInFly(), "Cannot go offline while there is a Tx in flight at tablet %" PRIu64, Self->TabletID());
- Y_VERIFY(Self->OutReadSets.Empty(), "Cannot go offline while there is a non-Ack-ed readset at tablet %" PRIu64, Self->TabletID());
- Y_VERIFY(Self->TransQueue.GetSchemaOperations().empty(), "Cannot go offline while there is a schema Tx in flight at tablet %" PRIu64, Self->TabletID());
-
- LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Initiating switch from "
- << DatashardStateName(Self->State) << " to Offline state");
-
- Self->PurgeTxTables(txc);
-
- NIceDb::TNiceDb db(txc.DB);
-
- Self->State = TShardState::Offline;
+ Y_VERIFY(Self->OutReadSets.Empty(), "Cannot go offline while there is a non-Ack-ed readset at tablet %" PRIu64, Self->TabletID());
+ Y_VERIFY(Self->TransQueue.GetSchemaOperations().empty(), "Cannot go offline while there is a schema Tx in flight at tablet %" PRIu64, Self->TabletID());
+
+ LOG_INFO_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Initiating switch from "
+ << DatashardStateName(Self->State) << " to Offline state");
+
+ Self->PurgeTxTables(txc);
+
+ NIceDb::TNiceDb db(txc.DB);
+
+ Self->State = TShardState::Offline;
Self->PersistSys(db, TDataShard::Schema::Sys_State, Self->State);
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- Self->ReportState(ctx, Self->State);
- }
-};
-
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ Self->ReportState(ctx, Self->State);
+ }
+};
+
void TDataShard::CheckInitiateBorrowedPartsReturn(const TActorContext &ctx) {
- if (!Executor()->GetStats().CompactedPartLoans->empty()) {
- Execute(CreateTxInitiateBorrowedPartsReturn(), ctx);
- }
+ if (!Executor()->GetStats().CompactedPartLoans->empty()) {
+ Execute(CreateTxInitiateBorrowedPartsReturn(), ctx);
+ }
}
-
+
void TDataShard::CheckStateChange(const TActorContext& ctx) {
- if (State == TShardState::PreOffline) {
- auto fnListTxIds = [](const auto& txMap) {
- TStringStream str;
- str << "[";
- for (const auto& it : txMap) {
- str << " " << it.first;
- }
- str << " ]";
- return str.Str();
- };
-
+ if (State == TShardState::PreOffline) {
+ auto fnListTxIds = [](const auto& txMap) {
+ TStringStream str;
+ str << "[";
+ for (const auto& it : txMap) {
+ str << " " << it.first;
+ }
+ str << " ]";
+ return str.Str();
+ };
+
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, TabletID() << " in PreOffline state"
<< " HasSharedBobs: " << HasSharedBlobs()
- << " SchemaOperations: " << fnListTxIds(TransQueue.GetSchemaOperations())
+ << " SchemaOperations: " << fnListTxIds(TransQueue.GetSchemaOperations())
<< " OutReadSets count: " << OutReadSets.CountReadSets()
<< " ChangesQueue size: " << ChangesQueue.size()
<< " ChangeExchangeSplit: " << ChangeExchangeSplitter.Done()
<< " siblings to be activated: " << ChangeSenderActivator.Dump()
<< " wait to activation from: " << JoinSeq(", ", ReceiveActivationsFrom));
-
+
const bool hasSharedBlobs = HasSharedBlobs();
const bool hasSchemaOps = !TransQueue.GetSchemaOperations().empty();
const bool hasOutRs = !OutReadSets.Empty();
@@ -224,10 +224,10 @@ void TDataShard::CheckStateChange(const TActorContext& ctx) {
const bool mustActivateOthers = !ChangeSenderActivator.AllAcked();
if (!hasSharedBlobs && !hasSchemaOps && !hasOutRs && !hasChangeRecords && !mustActivateOthers) {
- Y_VERIFY(!TxInFly());
- Execute(new TTxGoOffline(this), ctx);
- }
- }
-}
-
-}}
+ Y_VERIFY(!TxInFly());
+ Execute(new TTxGoOffline(this), ctx);
+ }
+ }
+}
+
+}}
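// Illustrative sketch (standalone, not part of the diff above): the PreOffline -> Offline
// decision made in CheckStateChange. The shard schedules TTxGoOffline only when nothing is
// left to hand over or acknowledge; the flag names below paraphrase the locals above.
#include <cstdio>

bool CanGoOffline(bool hasSharedBlobs, bool hasSchemaOps, bool hasOutReadSets,
                  bool hasChangeRecords, bool mustActivateOthers) {
    return !hasSharedBlobs && !hasSchemaOps && !hasOutReadSets &&
           !hasChangeRecords && !mustActivateOthers;
}

int main() {
    std::printf("%d\n", (int)CanGoOffline(false, false, false, false, false)); // 1: go offline
    std::printf("%d\n", (int)CanGoOffline(true,  false, false, false, false)); // 0: shared blobs pending
}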
diff --git a/ydb/core/tx/datashard/datashard_outreadset.cpp b/ydb/core/tx/datashard/datashard_outreadset.cpp
index 8d60000de0e..36ded0e8972 100644
--- a/ydb/core/tx/datashard/datashard_outreadset.cpp
+++ b/ydb/core/tx/datashard/datashard_outreadset.cpp
@@ -8,43 +8,43 @@ namespace NDataShard {
void TOutReadSets::UpdateMonCounter() const {
Self->SetCounter(COUNTER_OUT_READSETS_IN_FLIGHT, CurrentReadSets.size());
-}
-
+}
+
bool TOutReadSets::LoadReadSets(NIceDb::TNiceDb& db) {
using Schema = TDataShard::Schema;
CurrentReadSets.clear(); // For idempotency
- CurrentReadSetInfos.clear();
+ CurrentReadSetInfos.clear();
// TODO[serxa]: this should be Range but it is not working right now
- auto rowset = db.Table<Schema::OutReadSets>().GreaterOrEqual(0).Select<
- Schema::OutReadSets::Seqno,
- Schema::OutReadSets::TxId,
- Schema::OutReadSets::Origin,
- Schema::OutReadSets::From,
- Schema::OutReadSets::To>();
+ auto rowset = db.Table<Schema::OutReadSets>().GreaterOrEqual(0).Select<
+ Schema::OutReadSets::Seqno,
+ Schema::OutReadSets::TxId,
+ Schema::OutReadSets::Origin,
+ Schema::OutReadSets::From,
+ Schema::OutReadSets::To>();
if (!rowset.IsReady())
return false;
while (!rowset.EndOfSet()) {
- ui64 seqNo = rowset.GetValue<Schema::OutReadSets::Seqno>();
- ui64 txId = rowset.GetValue<Schema::OutReadSets::TxId>();
- ui64 origin = rowset.GetValue<Schema::OutReadSets::Origin>();
- ui64 source = rowset.GetValue<Schema::OutReadSets::From>();
- ui64 target = rowset.GetValue<Schema::OutReadSets::To>();
-
+ ui64 seqNo = rowset.GetValue<Schema::OutReadSets::Seqno>();
+ ui64 txId = rowset.GetValue<Schema::OutReadSets::TxId>();
+ ui64 origin = rowset.GetValue<Schema::OutReadSets::Origin>();
+ ui64 source = rowset.GetValue<Schema::OutReadSets::From>();
+ ui64 target = rowset.GetValue<Schema::OutReadSets::To>();
+
TReadSetKey rsInfo(txId, origin, source, target);
-
+
Y_VERIFY(!CurrentReadSets.contains(seqNo));
Y_VERIFY(!CurrentReadSetInfos.contains(rsInfo));
-
- CurrentReadSets[seqNo] = rsInfo;
- CurrentReadSetInfos[rsInfo] = seqNo;
-
+
+ CurrentReadSets[seqNo] = rsInfo;
+ CurrentReadSetInfos[rsInfo] = seqNo;
+
if (!rowset.Next())
return false;
}
- UpdateMonCounter();
+ UpdateMonCounter();
return true;
}
@@ -53,12 +53,12 @@ void TOutReadSets::SaveReadSet(NIceDb::TNiceDb& db, ui64 seqNo, ui64 step, const
Y_VERIFY(!CurrentReadSets.contains(seqNo));
Y_VERIFY(!CurrentReadSetInfos.contains(rsInfo));
-
- CurrentReadSetInfos[rsInfo] = seqNo;
- CurrentReadSets[seqNo] = rsInfo;
-
- UpdateMonCounter();
-
+
+ CurrentReadSetInfos[rsInfo] = seqNo;
+ CurrentReadSets[seqNo] = rsInfo;
+
+ UpdateMonCounter();
+
db.Table<Schema::OutReadSets>().Key(seqNo).Update(
NIceDb::TUpdate<Schema::OutReadSets::Step>(step),
NIceDb::TUpdate<Schema::OutReadSets::TxId>(rsInfo.TxId),
@@ -71,45 +71,45 @@ void TOutReadSets::SaveReadSet(NIceDb::TNiceDb& db, ui64 seqNo, ui64 step, const
void TOutReadSets::AckForDeletedDestination(ui64 tabletId, ui64 seqNo, const TActorContext &ctx) {
const TReadSetKey* rsInfo = CurrentReadSets.FindPtr(seqNo);
- if (!rsInfo) {
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD,
- "Unknown seqNo %" PRIu64 " for readset to tablet %" PRIu64 " at tablet %" PRIu64,
- seqNo, tabletId, Self->TabletID());
- return;
- }
-
- TAutoPtr<TEvTxProcessing::TEvReadSetAck> ev = new TEvTxProcessing::TEvReadSetAck;
-
- ev->Record.SetSeqno(seqNo);
- ev->Record.SetTabletSource(rsInfo->From);
- ev->Record.SetTabletDest(rsInfo->To);
- ev->Record.SetTabletConsumer(rsInfo->Origin);
- ev->Record.SetTxId(rsInfo->TxId);
-
- SaveAck(ctx, ev);
-}
-
+ if (!rsInfo) {
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD,
+ "Unknown seqNo %" PRIu64 " for readset to tablet %" PRIu64 " at tablet %" PRIu64,
+ seqNo, tabletId, Self->TabletID());
+ return;
+ }
+
+ TAutoPtr<TEvTxProcessing::TEvReadSetAck> ev = new TEvTxProcessing::TEvReadSetAck;
+
+ ev->Record.SetSeqno(seqNo);
+ ev->Record.SetTabletSource(rsInfo->From);
+ ev->Record.SetTabletDest(rsInfo->To);
+ ev->Record.SetTabletConsumer(rsInfo->Origin);
+ ev->Record.SetTxId(rsInfo->TxId);
+
+ SaveAck(ctx, ev);
+}
+
void TOutReadSets::SaveAck(const TActorContext &ctx, TAutoPtr<TEvTxProcessing::TEvReadSetAck> ev) {
- ui64 seqno = ev->Record.GetSeqno();
- ui64 sender = ev->Record.GetTabletSource();
- ui64 dest = ev->Record.GetTabletDest();
- ui64 consumer = ev->Record.GetTabletConsumer();
- ui64 txId = ev->Record.GetTxId();
-
+ ui64 seqno = ev->Record.GetSeqno();
+ ui64 sender = ev->Record.GetTabletSource();
+ ui64 dest = ev->Record.GetTabletDest();
+ ui64 consumer = ev->Record.GetTabletConsumer();
+ ui64 txId = ev->Record.GetTxId();
+
LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD,
"Receive RS Ack at %" PRIu64 " source %" PRIu64 " dest %" PRIu64 " consumer %" PRIu64 " txId %" PRIu64,
Self->TabletID(), sender, dest, consumer, txId);
- ReadSetAcks.emplace_back(ev.Release());
+ ReadSetAcks.emplace_back(ev.Release());
AckedSeqno.insert(seqno);
-
+
if (CurrentReadSets.contains(seqno)) {
TReadSetKey rsInfo(txId, Self->TabletID(), sender, dest);
- Y_VERIFY(CurrentReadSetInfos[rsInfo] == seqno);
-
- CurrentReadSets.erase(seqno);
- CurrentReadSetInfos.erase(rsInfo);
- }
+ Y_VERIFY(CurrentReadSetInfos[rsInfo] == seqno);
+
+ CurrentReadSets.erase(seqno);
+ CurrentReadSetInfos.erase(rsInfo);
+ }
}
void TOutReadSets::Cleanup(NIceDb::TNiceDb& db, const TActorContext& ctx) {
@@ -133,8 +133,8 @@ void TOutReadSets::Cleanup(NIceDb::TNiceDb& db, const TActorContext& ctx) {
}
ReadSetAcks.clear();
AckedSeqno.clear();
-
- UpdateMonCounter();
+
+ UpdateMonCounter();
}
void TOutReadSets::ResendAll(const TActorContext& ctx) {
diff --git a/ydb/core/tx/datashard/datashard_outreadset.h b/ydb/core/tx/datashard/datashard_outreadset.h
index b4854f4227e..901fd4d827a 100644
--- a/ydb/core/tx/datashard/datashard_outreadset.h
+++ b/ydb/core/tx/datashard/datashard_outreadset.h
@@ -55,8 +55,8 @@ public:
bool LoadReadSets(NIceDb::TNiceDb& db);
void SaveReadSet(NIceDb::TNiceDb& db, ui64 seqNo, ui64 step, const TReadSetKey& rsKey, TString body);
- void SaveAck(const TActorContext& ctx, TAutoPtr<TEvTxProcessing::TEvReadSetAck> ev);
- void AckForDeletedDestination(ui64 tabletId, ui64 seqNo, const TActorContext &ctx);
+ void SaveAck(const TActorContext& ctx, TAutoPtr<TEvTxProcessing::TEvReadSetAck> ev);
+ void AckForDeletedDestination(ui64 tabletId, ui64 seqNo, const TActorContext &ctx);
bool ResendRS(NTabletFlatExecutor::TTransactionContext& txc, const TActorContext& ctx, ui64 seqNo);
void ResendAll(const TActorContext& ctx);
void Cleanup(NIceDb::TNiceDb& db, const TActorContext& ctx);
@@ -69,9 +69,9 @@ public:
ui64 CountAcks() const { return ReadSetAcks.size(); }
private:
- void UpdateMonCounter() const;
-
-private:
+ void UpdateMonCounter() const;
+
+private:
TDataShard * Self;
THashMap<ui64, TReadSetKey> CurrentReadSets; // SeqNo -> Info
THashMap<TReadSetKey, ui64> CurrentReadSetInfos; // Info -> SeqNo
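// Illustrative sketch (standalone, not part of the diff above): TOutReadSets keeps the two
// maps above strictly mirrored, so a readset can be looked up by SeqNo or by key, and the
// in-flight counter is just the map size. TReadSetKey is reduced to a txId in this sketch.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>

struct OutReadSetsSketch {
    std::map<uint64_t, uint64_t> BySeqNo; // SeqNo -> txId (cf. CurrentReadSets)
    std::map<uint64_t, uint64_t> ByKey;   // txId -> SeqNo (cf. CurrentReadSetInfos)

    void Save(uint64_t seqNo, uint64_t txId) {
        BySeqNo[seqNo] = txId;            // both directions are updated together,
        ByKey[txId] = seqNo;              // so the maps never diverge
    }

    void Ack(uint64_t seqNo) {
        auto it = BySeqNo.find(seqNo);
        if (it == BySeqNo.end())
            return;                       // already acked or unknown
        ByKey.erase(it->second);
        BySeqNo.erase(it);
    }

    std::size_t InFlight() const { return BySeqNo.size(); } // what UpdateMonCounter exports
};

int main() {
    OutReadSetsSketch rs;
    rs.Save(7, 42);
    rs.Ack(7);
    std::printf("in flight: %zu\n", rs.InFlight()); // 0
}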
diff --git a/ydb/core/tx/datashard/datashard_pipeline.cpp b/ydb/core/tx/datashard/datashard_pipeline.cpp
index fa4b0369189..3498162c17e 100644
--- a/ydb/core/tx/datashard/datashard_pipeline.cpp
+++ b/ydb/core/tx/datashard/datashard_pipeline.cpp
@@ -44,7 +44,7 @@ TPipeline::~TPipeline()
bool TPipeline::Load(NIceDb::TNiceDb& db) {
using Schema = TDataShard::Schema;
- Y_VERIFY(!SchemaTx);
+ Y_VERIFY(!SchemaTx);
LOAD_SYS_UI64(db, Schema::Sys_LastPlannedStep, LastPlannedTx.Step);
LOAD_SYS_UI64(db, Schema::Sys_LastPlannedTx, LastPlannedTx.TxId);
LOAD_SYS_UI64(db, Schema::Sys_LastCompleteStep, LastCompleteTx.Step);
@@ -133,8 +133,8 @@ TDuration TPipeline::CleanupTimeout() const {
ECleanupStatus TPipeline::Cleanup(NIceDb::TNiceDb& db, const TActorContext& ctx) {
bool foundExpired = false;
TOperation::TPtr op;
- ui64 step = 0;
- ui64 txId = 0;
+ ui64 step = 0;
+ ui64 txId = 0;
while (!op) {
Self->TransQueue.GetPlannedTxId(step, txId);
@@ -157,10 +157,10 @@ ECleanupStatus TPipeline::Cleanup(NIceDb::TNiceDb& db, const TActorContext& ctx)
db.NoMoreReadsForTx();
foundExpired = true;
- // Local DB Tx doesn't see its own updates, so if we erase a row we must move to the next key
- ++txId;
- if (txId == 0)
- ++step;
+ // Local DB Tx doesn't see its own updates, so if we erase a row we must move to the next key
+ ++txId;
+ if (txId == 0)
+ ++step;
}
}
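// Illustrative sketch (standalone, not part of the diff above): the "move to the next key"
// step in TPipeline::Cleanup treats (step, txId) as one lexicographically ordered key, so
// its successor increments txId and carries into step when txId wraps around to 0.
#include <cstdint>
#include <cstdio>
#include <utility>

std::pair<uint64_t, uint64_t> NextPlannedKey(uint64_t step, uint64_t txId) {
    ++txId;
    if (txId == 0)   // txId overflowed past the maximum ui64
        ++step;
    return {step, txId};
}

int main() {
    auto k = NextPlannedKey(5, UINT64_MAX);
    std::printf("step=%llu txId=%llu\n",
                (unsigned long long)k.first, (unsigned long long)k.second); // step=6 txId=0
}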
diff --git a/ydb/core/tx/datashard/datashard_pipeline.h b/ydb/core/tx/datashard/datashard_pipeline.h
index 80a0a987da5..67c35260b28 100644
--- a/ydb/core/tx/datashard/datashard_pipeline.h
+++ b/ydb/core/tx/datashard/datashard_pipeline.h
@@ -156,7 +156,7 @@ public:
bool HasDrop() const { return SchemaTx && SchemaTx->IsDrop(); }
bool HasBackup() const { return SchemaTx && SchemaTx->IsBackup(); }
bool HasRestore() const { return SchemaTx && SchemaTx->IsRestore(); }
- bool HasCopy() const { return SchemaTx && SchemaTx->IsCopy(); }
+ bool HasCopy() const { return SchemaTx && SchemaTx->IsCopy(); }
bool HasCreatePersistentSnapshot() const { return SchemaTx && SchemaTx->IsCreatePersistentSnapshot(); }
bool HasDropPersistentSnapshot() const { return SchemaTx && SchemaTx->IsDropPersistentSnapshot(); }
bool HasInitiateBuilIndex() const { return SchemaTx && SchemaTx->IsInitiateBuildIndex(); }
diff --git a/ydb/core/tx/datashard/datashard_split_dst.cpp b/ydb/core/tx/datashard/datashard_split_dst.cpp
index 089de291a39..65750810013 100644
--- a/ydb/core/tx/datashard/datashard_split_dst.cpp
+++ b/ydb/core/tx/datashard/datashard_split_dst.cpp
@@ -1,33 +1,33 @@
-#include "datashard_impl.h"
-
+#include "datashard_impl.h"
+
#include <ydb/core/tablet_flat/tablet_flat_executor.h>
-
+
#include <util/string/escape.h>
-namespace NKikimr {
+namespace NKikimr {
namespace NDataShard {
-
-
+
+
class TDataShard::TTxInitSplitMergeDestination : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvInitSplitMergeDestination::TPtr Ev;
-
-public:
+private:
+ TEvDataShard::TEvInitSplitMergeDestination::TPtr Ev;
+
+public:
TTxInitSplitMergeDestination(TDataShard* ds, TEvDataShard::TEvInitSplitMergeDestination::TPtr ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_INIT_SPLIT_MERGE_DESTINATION; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- Y_UNUSED(ctx);
-
- if (Self->State != TShardState::WaitScheme) {
- // TODO: check if this is really a repeated message and not a buggy one
- return true;
- }
-
+ , Ev(ev)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_INIT_SPLIT_MERGE_DESTINATION; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ Y_UNUSED(ctx);
+
+ if (Self->State != TShardState::WaitScheme) {
+ // TODO: check if this is really a repeated message and not a buggy one
+ return true;
+ }
+
NIceDb::TNiceDb db(txc.DB);
const bool initializeSchema = Ev->Get()->Record.HasCreateTable();
@@ -54,29 +54,29 @@ public:
}
Self->DstSplitDescription = std::make_shared<NKikimrTxDataShard::TSplitMergeDescription>(Ev->Get()->Record.GetSplitDescription());
-
- for (ui32 i = 0; i < Self->DstSplitDescription->SourceRangesSize(); ++i) {
- ui64 srcTabletId = Self->DstSplitDescription->GetSourceRanges(i).GetTabletID();
- Self->ReceiveSnapshotsFrom.insert(srcTabletId);
- }
-
- // Persist split description
+
+ for (ui32 i = 0; i < Self->DstSplitDescription->SourceRangesSize(); ++i) {
+ ui64 srcTabletId = Self->DstSplitDescription->GetSourceRanges(i).GetTabletID();
+ Self->ReceiveSnapshotsFrom.insert(srcTabletId);
+ }
+
+ // Persist split description
TString splitDescr;
- bool serializeOk = Self->DstSplitDescription->SerializeToString(&splitDescr);
- Y_VERIFY(serializeOk, "Failed to serialize split/merge description");
+ bool serializeOk = Self->DstSplitDescription->SerializeToString(&splitDescr);
+ Y_VERIFY(serializeOk, "Failed to serialize split/merge description");
Self->PersistSys(db, Schema::Sys_DstSplitDescription, splitDescr);
-
+
if (initializeSchema) {
Self->DstSplitSchemaInitialized = true;
Self->PersistSys(db, Schema::Sys_DstSplitSchemaInitialized, ui64(1));
}
- Self->State = TShardState::SplitDstReceivingSnapshot;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
-
+ Self->State = TShardState::SplitDstReceivingSnapshot;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+
Self->CurrentSchemeShardId = Ev->Get()->Record.GetSchemeshardTabletId();
Self->PersistSys(db, Schema::Sys_CurrentSchemeShardId, Self->CurrentSchemeShardId);
-
+
if (!Self->ProcessingParams && Ev->Get()->Record.HasProcessingParams()) {
Self->ProcessingParams.reset(new NKikimrSubDomains::TProcessingParams());
Self->ProcessingParams->CopyFrom(Ev->Get()->Record.GetProcessingParams());
@@ -91,66 +91,66 @@ public:
Self->StartFindSubDomainPathId();
}
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- // Send Ack
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ // Send Ack
TActorId ackTo = Ev->Sender;
- ui64 opId = Ev->Get()->Record.GetOperationCookie();
-
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack init split/merge destination OpId " << opId);
-
- ctx.Send(ackTo, new TEvDataShard::TEvInitSplitMergeDestinationAck(opId, Self->TabletID()));
+ ui64 opId = Ev->Get()->Record.GetOperationCookie();
+
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack init split/merge destination OpId " << opId);
+
+ ctx.Send(ackTo, new TEvDataShard::TEvInitSplitMergeDestinationAck(opId, Self->TabletID()));
Self->SendRegistrationRequestTimeCast(ctx);
- }
-};
-
-
+ }
+};
+
+
class TDataShard::TTxSplitTransferSnapshot : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvSplitTransferSnapshot::TPtr Ev;
- bool LastSnapshotReceived;
-
-public:
+private:
+ TEvDataShard::TEvSplitTransferSnapshot::TPtr Ev;
+ bool LastSnapshotReceived;
+
+public:
TTxSplitTransferSnapshot(TDataShard* ds, TEvDataShard::TEvSplitTransferSnapshot::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- , LastSnapshotReceived(false)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_SPLIT_TRANSFER_SNAPSHOT; }
-
+ , Ev(ev)
+ , LastSnapshotReceived(false)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_SPLIT_TRANSFER_SNAPSHOT; }
+
/**
* Initialize schema based on the first received snapshot
*
* Legacy code path for splits initiated by old-style schemeshard
*/
void LegacyInitSchema(TTransactionContext& txc) {
- const auto& tableScheme = Ev->Get()->Record.GetUserTableScheme();
+ const auto& tableScheme = Ev->Get()->Record.GetUserTableScheme();
TString tableName = TDataShard::Schema::UserTablePrefix + tableScheme.GetName();
if (!txc.DB.GetScheme().TableNames.contains(tableName)) { // TODO: properly check if table has already been created
NKikimrSchemeOp::TTableDescription newTableScheme(tableScheme);
-
- // Get this shard's range boundaries from the split/merge description
+
+ // Get this shard's range boundaries from the split/merge description
TString rangeBegin, rangeEnd;
- for (ui32 di = 0; di < Self->DstSplitDescription->DestinationRangesSize(); ++di) {
- const auto& dstRange = Self->DstSplitDescription->GetDestinationRanges(di);
- if (dstRange.GetTabletID() != Self->TabletID())
- continue;
- rangeBegin = dstRange.GetKeyRangeBegin();
- rangeEnd = dstRange.GetKeyRangeEnd();
- }
-
- newTableScheme.SetPartitionRangeBegin(rangeBegin);
- newTableScheme.SetPartitionRangeEnd(rangeEnd);
- newTableScheme.SetPartitionRangeBeginIsInclusive(true);
- newTableScheme.SetPartitionRangeEndIsInclusive(false);
-
+ for (ui32 di = 0; di < Self->DstSplitDescription->DestinationRangesSize(); ++di) {
+ const auto& dstRange = Self->DstSplitDescription->GetDestinationRanges(di);
+ if (dstRange.GetTabletID() != Self->TabletID())
+ continue;
+ rangeBegin = dstRange.GetKeyRangeBegin();
+ rangeEnd = dstRange.GetKeyRangeEnd();
+ }
+
+ newTableScheme.SetPartitionRangeBegin(rangeBegin);
+ newTableScheme.SetPartitionRangeEnd(rangeEnd);
+ newTableScheme.SetPartitionRangeBeginIsInclusive(true);
+ newTableScheme.SetPartitionRangeEndIsInclusive(false);
+
Self->CreateUserTable(txc, newTableScheme);
- }
+ }
}
-
+
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
const auto& record = Ev->Get()->Record;
@@ -170,15 +170,15 @@ public:
LegacyInitSchema(txc);
}
- for (ui32 i = 0 ; i < Ev->Get()->Record.TableSnapshotSize(); ++i) {
- ui32 localTableId = Ev->Get()->Record.GetTableSnapshot(i).GetTableId();
- TString compressedBody = Ev->Get()->Record.GetTableSnapshot(i).GetSnapshotData();
- TString snapBody = NBlockCodecs::Codec("lz4fast")->Decode(compressedBody);
+ for (ui32 i = 0 ; i < Ev->Get()->Record.TableSnapshotSize(); ++i) {
+ ui32 localTableId = Ev->Get()->Record.GetTableSnapshot(i).GetTableId();
+ TString compressedBody = Ev->Get()->Record.GetTableSnapshot(i).GetSnapshotData();
+ TString snapBody = NBlockCodecs::Codec("lz4fast")->Decode(compressedBody);
txc.Env.LoanTable(localTableId, snapBody);
- }
-
- NIceDb::TNiceDb db(txc.DB);
-
+ }
+
+ NIceDb::TNiceDb db(txc.DB);
+
// Choose the highest write version, so we won't overwrite any important data
TRowVersion minWriteVersion(record.GetMinWriteVersionStep(), record.GetMinWriteVersionTxId());
@@ -230,18 +230,18 @@ public:
}
}
- // Persist the fact that the snapshot has been received, so that a duplicate event can be ignored
- db.Table<Schema::SplitDstReceivedSnapshots>().Key(srcTabletId).Update();
- Self->ReceiveSnapshotsFrom.erase(srcTabletId);
-
+ // Persist the fact that the snapshot has been received, so that a duplicate event can be ignored
+ db.Table<Schema::SplitDstReceivedSnapshots>().Key(srcTabletId).Update();
+ Self->ReceiveSnapshotsFrom.erase(srcTabletId);
+
if (record.GetWaitForActivation()) {
Self->ReceiveActivationsFrom.insert(srcTabletId);
db.Table<Schema::DstChangeSenderActivations>().Key(srcTabletId).Update();
}
- if (Self->ReceiveSnapshotsFrom.empty()) {
- LastSnapshotReceived = true;
-
+ if (Self->ReceiveSnapshotsFrom.empty()) {
+ LastSnapshotReceived = true;
+
const auto minVersion = mvcc ? Self->GetSnapshotManager().GetLowWatermark()
: Self->GetSnapshotManager().GetMinWriteVersion();
@@ -271,32 +271,32 @@ public:
kv.second.OptimizeSplitKeys(rdb);
}
- Self->State = TShardState::Ready;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
- }
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
+ Self->State = TShardState::Ready;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
TActorId ackTo = Ev->Sender;
- ui64 opId = Ev->Get()->Record.GetOperationCookie();
-
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack snapshot OpId " << opId);
-
- ctx.Send(ackTo, new TEvDataShard::TEvSplitTransferSnapshotAck(opId, Self->TabletID()));
-
- if (LastSnapshotReceived) {
- // We have received all the data, reload everything from the received system tables
- Self->Execute(Self->CreateTxInit(), ctx);
- }
- }
-};
-
+ ui64 opId = Ev->Get()->Record.GetOperationCookie();
+
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack snapshot OpId " << opId);
+
+ ctx.Send(ackTo, new TEvDataShard::TEvSplitTransferSnapshotAck(opId, Self->TabletID()));
+
+ if (LastSnapshotReceived) {
+ // We have received all the data, reload everything from the received system tables
+ Self->Execute(Self->CreateTxInit(), ctx);
+ }
+ }
+};
+
class TDataShard::TTxSplitReplicationSourceOffsets : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
private:
TEvPrivate::TEvReplicationSourceOffsets::TPtr Ev;
-
+
public:
TTxSplitReplicationSourceOffsets(TDataShard* ds, TEvPrivate::TEvReplicationSourceOffsets::TPtr& ev)
: TTransactionBase(ds)
@@ -508,9 +508,9 @@ public:
};
void TDataShard::Handle(TEvDataShard::TEvInitSplitMergeDestination::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxInitSplitMergeDestination(this, ev), ctx);
-}
-
+ Execute(new TTxInitSplitMergeDestination(this, ev), ctx);
+}
+
void TDataShard::Handle(TEvDataShard::TEvSplitTransferSnapshot::TPtr& ev, const TActorContext& ctx) {
const auto* msg = ev->Get();
const ui64 srcTabletId = msg->Record.GetSrcTabletId();
@@ -544,12 +544,12 @@ void TDataShard::Handle(TEvDataShard::TEvSplitTransferSnapshot::TPtr& ev, const
}
}
- Execute(new TTxSplitTransferSnapshot(this, ev), ctx);
-}
-
+ Execute(new TTxSplitTransferSnapshot(this, ev), ctx);
+}
+
void TDataShard::Handle(TEvPrivate::TEvReplicationSourceOffsets::TPtr& ev, const TActorContext& ctx) {
Actors.erase(ev->Sender);
Execute(new TTxSplitReplicationSourceOffsets(this, ev), ctx);
}
-}}
+}}
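The destination-side Execute() above decodes each table snapshot body with NBlockCodecs::Codec("lz4fast")->Decode() before loaning it into the local table, mirroring the Encode() call on the source side in datashard_split_src.cpp below. A minimal stand-alone sketch of that round-trip, assuming the usual library/cpp/blockcodecs/codecs.h header location; the include path and the main() wrapper are illustrative and not part of this commit:

// Sketch only: round-trips a snapshot body through the codec used for split snapshot transfer.
#include <library/cpp/blockcodecs/codecs.h>   // assumed header location
#include <util/generic/string.h>
#include <util/system/yassert.h>

int main() {
    const NBlockCodecs::ICodec* codec = NBlockCodecs::Codec("lz4fast");
    TString snapBody = "serialized table snapshot bytes";
    TString compressedBody = codec->Encode(snapBody);  // source shard: before SetSnapshotData()
    TString restored = codec->Decode(compressedBody);  // destination shard: before LoanTable()
    Y_VERIFY(restored == snapBody);
    return 0;
}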
diff --git a/ydb/core/tx/datashard/datashard_split_src.cpp b/ydb/core/tx/datashard/datashard_split_src.cpp
index a55e1aa9448..a7349fc9cf3 100644
--- a/ydb/core/tx/datashard/datashard_split_src.cpp
+++ b/ydb/core/tx/datashard/datashard_split_src.cpp
@@ -1,48 +1,48 @@
-#include "datashard_impl.h"
-
+#include "datashard_impl.h"
+
#include <ydb/core/tablet_flat/tablet_flat_executor.h>
#include <ydb/core/util/pb.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NDataShard {
-
-
+
+
class TDataShard::TTxSplit : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvSplit::TPtr Ev;
- bool SplitAlreadyFinished;
-
-public:
+private:
+ TEvDataShard::TEvSplit::TPtr Ev;
+ bool SplitAlreadyFinished;
+
+public:
TTxSplit(TDataShard* ds, TEvDataShard::TEvSplit::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- , SplitAlreadyFinished(false)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_SPLIT; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- ui64 opId = Ev->Get()->Record.GetOperationCookie();
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " received split OpId " << opId
- << " at state " << DatashardStateName(Self->State));
-
- NIceDb::TNiceDb db(txc.DB);
-
- if (Self->State == TShardState::Ready) {
+ , Ev(ev)
+ , SplitAlreadyFinished(false)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_SPLIT; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ ui64 opId = Ev->Get()->Record.GetOperationCookie();
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " received split OpId " << opId
+ << " at state " << DatashardStateName(Self->State));
+
+ NIceDb::TNiceDb db(txc.DB);
+
+ if (Self->State == TShardState::Ready) {
Self->SrcAckSplitTo.insert(Ev->Sender);
- Self->SrcSplitOpId = opId;
+ Self->SrcSplitOpId = opId;
Self->SrcSplitDescription = std::make_shared<NKikimrTxDataShard::TSplitMergeDescription>(Ev->Get()->Record.GetSplitDescription());
-
- // Persist split description
+
+ // Persist split description
TString splitDescr;
- bool serilaizeOk = Self->SrcSplitDescription->SerializeToString(&splitDescr);
- Y_VERIFY(serilaizeOk, "Failed to serialize split/merge description");
- db.Table<Schema::Sys>().Key(Schema::Sys_SrcSplitDescription).Update(NIceDb::TUpdate<Schema::Sys::Bytes>(splitDescr));
-
- Self->PersistSys(db, Schema::Sys_SrcSplitOpId, Self->SrcSplitOpId);
-
- Self->State = TShardState::SplitSrcWaitForNoTxInFlight;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
+ bool serilaizeOk = Self->SrcSplitDescription->SerializeToString(&splitDescr);
+ Y_VERIFY(serilaizeOk, "Failed to serialize split/merge description");
+ db.Table<Schema::Sys>().Key(Schema::Sys_SrcSplitDescription).Update(NIceDb::TUpdate<Schema::Sys::Bytes>(splitDescr));
+
+ Self->PersistSys(db, Schema::Sys_SrcSplitOpId, Self->SrcSplitOpId);
+
+ Self->State = TShardState::SplitSrcWaitForNoTxInFlight;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
// Wake up immediate ops, so they abort as soon as possible
for (const auto& kv : Self->Pipeline.GetImmediateOps()) {
@@ -50,48 +50,48 @@ public:
Self->Pipeline.AddCandidateOp(op);
Self->PlanQueue.Progress(ctx);
}
- } else {
- // Check that this is the same split request
- Y_VERIFY(opId == Self->SrcSplitOpId,
- "Datashard %" PRIu64 " got unexpected split request opId %" PRIu64 " while already executing split request opId %" PRIu64,
- Self->TabletID(), opId, Self->SrcSplitOpId);
-
+ } else {
+ // Check that this is the same split request
+ Y_VERIFY(opId == Self->SrcSplitOpId,
+ "Datashard %" PRIu64 " got unexpected split request opId %" PRIu64 " while already executing split request opId %" PRIu64,
+ Self->TabletID(), opId, Self->SrcSplitOpId);
+
Self->SrcAckSplitTo.insert(Ev->Sender);
-
- // Already waiting?
- if (Self->State == TShardState::SplitSrcWaitForNoTxInFlight ||
- Self->State == TShardState::SplitSrcMakeSnapshot) {
- SplitAlreadyFinished = false;
- return true;
- } else if (Self->State == TShardState::SplitSrcSendingSnapshot) {
- Y_VERIFY(!Self->SplitSrcSnapshotSender.AllAcked(), "State should have changed at the moment when the last ack was received");
- // Do nothing because we are still waiting for acks from DSTs
- } else {
- Y_VERIFY(
- Self->State == TShardState::SplitSrcWaitForPartitioningChanged ||
- Self->State == TShardState::PreOffline ||
- Self->State == TShardState::Offline);
-
- SplitAlreadyFinished = true;
- }
- }
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- if (SplitAlreadyFinished) {
- // Send the Ack
+
+ // Already waiting?
+ if (Self->State == TShardState::SplitSrcWaitForNoTxInFlight ||
+ Self->State == TShardState::SplitSrcMakeSnapshot) {
+ SplitAlreadyFinished = false;
+ return true;
+ } else if (Self->State == TShardState::SplitSrcSendingSnapshot) {
+ Y_VERIFY(!Self->SplitSrcSnapshotSender.AllAcked(), "State should have changed at the moment when the last ack was received");
+ // Do nothing because we are still waiting for acks from DSTs
+ } else {
+ Y_VERIFY(
+ Self->State == TShardState::SplitSrcWaitForPartitioningChanged ||
+ Self->State == TShardState::PreOffline ||
+ Self->State == TShardState::Offline);
+
+ SplitAlreadyFinished = true;
+ }
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ if (SplitAlreadyFinished) {
+ // Send the Ack
for (const TActorId& ackTo : Self->SrcAckSplitTo) {
LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack split to schemeshard " << Self->SrcSplitOpId);
ctx.Send(ackTo, new TEvDataShard::TEvSplitAck(Self->SrcSplitOpId, Self->TabletID()));
}
- } else {
- Self->CheckSplitCanStart(ctx);
- }
- }
-};
-
+ } else {
+ Self->CheckSplitCanStart(ctx);
+ }
+ }
+};
+
void TDataShard::CheckSplitCanStart(const TActorContext& ctx) {
if (State == TShardState::SplitSrcWaitForNoTxInFlight) {
ui64 txInFly = TxInFly();
@@ -101,78 +101,78 @@ void TDataShard::CheckSplitCanStart(const TActorContext& ctx) {
if (txInFly == 0 && immediateTxInFly == 0 && !Pipeline.HasWaitingSchemeOps()) {
Execute(CreateTxStartSplit(), ctx);
}
- }
-}
-
-
+ }
+}
+
+
class TDataShard::TTxStartSplit : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-public:
+public:
explicit TTxStartSplit(TDataShard* ds)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_START_SPLIT; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- if (Self->State != TShardState::SplitSrcWaitForNoTxInFlight &&
- Self->State != TShardState::SplitSrcMakeSnapshot) {
- // Already initiated
- return true;
- }
-
- if (Self->State == TShardState::SplitSrcMakeSnapshot && Self->SplitSnapshotStarted) {
- // Already making snapshot
- return true;
- }
-
- Y_VERIFY(Self->TxInFly() == 0, "Currently a split operation shouldn't start while there are in-flight transactions");
-
- ui64 opId = Self->SrcSplitOpId;
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " starting snapshot for split OpId " << opId);
-
- NIceDb::TNiceDb db(txc.DB);
-
-#define VERIFY_TABLE_IS_EMPTY(table, isStrictCheck) \
- { \
- auto rowset = db.Table<Schema::table>().Range().Select(); \
- if (!rowset.IsReady()) \
- return false; \
- TStringStream str; \
- THolder<NScheme::TTypeRegistry> tr; \
- while (!rowset.EndOfSet()) { \
- if (!tr) \
- tr.Reset(new NScheme::TTypeRegistry()); \
- str << rowset.DbgPrint(*tr) << "\n"; \
- if (!rowset.Next()) \
- return false; \
- } \
- if (isStrictCheck) { \
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_START_SPLIT; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ if (Self->State != TShardState::SplitSrcWaitForNoTxInFlight &&
+ Self->State != TShardState::SplitSrcMakeSnapshot) {
+ // Already initiated
+ return true;
+ }
+
+ if (Self->State == TShardState::SplitSrcMakeSnapshot && Self->SplitSnapshotStarted) {
+ // Already making snapshot
+ return true;
+ }
+
+ Y_VERIFY(Self->TxInFly() == 0, "Currently a split operation shouldn't start while there are in-flight transactions");
+
+ ui64 opId = Self->SrcSplitOpId;
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " starting snapshot for split OpId " << opId);
+
+ NIceDb::TNiceDb db(txc.DB);
+
+#define VERIFY_TABLE_IS_EMPTY(table, isStrictCheck) \
+ { \
+ auto rowset = db.Table<Schema::table>().Range().Select(); \
+ if (!rowset.IsReady()) \
+ return false; \
+ TStringStream str; \
+ THolder<NScheme::TTypeRegistry> tr; \
+ while (!rowset.EndOfSet()) { \
+ if (!tr) \
+ tr.Reset(new NScheme::TTypeRegistry()); \
+ str << rowset.DbgPrint(*tr) << "\n"; \
+ if (!rowset.Next()) \
+ return false; \
+ } \
+ if (isStrictCheck) { \
Y_VERIFY(str.empty(), #table " table is not empty when starting Split at tablet %" PRIu64 " : \n%s", Self->TabletID(), str.Str().data()); \
- } else if (!str.empty()) { \
- LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, \
- #table " table is not empty when starting Split at tablet " << Self->TabletID() << " : " << str.Str()); \
- } \
- }
-
- VERIFY_TABLE_IS_EMPTY(TxMain, true);
- VERIFY_TABLE_IS_EMPTY(TxDetails, true);
- VERIFY_TABLE_IS_EMPTY(PlanQueue, true);
- VERIFY_TABLE_IS_EMPTY(DeadlineQueue, true);
-
- // InReadSets could contain data for already completed Tx's in case of lost Ack's and retries on readset source tablets
- // Just ignore it for now, but we should only persist readsets for known TxId's
- VERIFY_TABLE_IS_EMPTY(InReadSets, false);
-
- TVector<ui32> tablesToSnapshot(Self->SysTablesToTransferAtSplit,
- Self->SysTablesToTransferAtSplit + Y_ARRAY_SIZE(Self->SysTablesToTransferAtSplit));
-
- for (const auto& ti : Self->TableInfos) {
+ } else if (!str.empty()) { \
+ LOG_ERROR_S(ctx, NKikimrServices::TX_DATASHARD, \
+ #table " table is not empty when starting Split at tablet " << Self->TabletID() << " : " << str.Str()); \
+ } \
+ }
+
+ VERIFY_TABLE_IS_EMPTY(TxMain, true);
+ VERIFY_TABLE_IS_EMPTY(TxDetails, true);
+ VERIFY_TABLE_IS_EMPTY(PlanQueue, true);
+ VERIFY_TABLE_IS_EMPTY(DeadlineQueue, true);
+
+ // InReadSets could contain data for already completed Tx's in case of lost Ack's and retries on readset source tablets
+ // Just ignore it for now, but we should only persist readsets for known TxId's
+ VERIFY_TABLE_IS_EMPTY(InReadSets, false);
+
+ TVector<ui32> tablesToSnapshot(Self->SysTablesToTransferAtSplit,
+ Self->SysTablesToTransferAtSplit + Y_ARRAY_SIZE(Self->SysTablesToTransferAtSplit));
+
+ for (const auto& ti : Self->TableInfos) {
tablesToSnapshot.push_back(ti.second->LocalTid);
if (ti.second->ShadowTid) {
tablesToSnapshot.push_back(ti.second->ShadowTid);
}
- }
-
+ }
+
TIntrusivePtr<NTabletFlatExecutor::TTableSnapshotContext> snapContext;
if (Self->IsMvccEnabled()) {
snapContext = new TSplitSnapshotContext(opId, std::move(tablesToSnapshot),
@@ -184,52 +184,52 @@ public:
}
txc.Env.MakeSnapshot(snapContext);
-
- Self->SplitSnapshotStarted = true;
- Self->State = TShardState::SplitSrcMakeSnapshot;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
-
- for (ui32 i = 0; i < Self->SrcSplitDescription->DestinationRangesSize(); ++i) {
- ui64 dstTablet = Self->SrcSplitDescription->GetDestinationRanges(i).GetTabletID();
- Self->SplitSrcSnapshotSender.AddDst(dstTablet);
- }
-
- return true;
- }
-
+
+ Self->SplitSnapshotStarted = true;
+ Self->State = TShardState::SplitSrcMakeSnapshot;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+
+ for (ui32 i = 0; i < Self->SrcSplitDescription->DestinationRangesSize(); ++i) {
+ ui64 dstTablet = Self->SrcSplitDescription->GetDestinationRanges(i).GetTabletID();
+ Self->SplitSrcSnapshotSender.AddDst(dstTablet);
+ }
+
+ return true;
+ }
+
void Complete(const TActorContext &) override {
- }
-};
-
-
+ }
+};
+
+
NTabletFlatExecutor::ITransaction* TDataShard::CreateTxStartSplit() {
- return new TTxStartSplit(this);
-}
-
+ return new TTxStartSplit(this);
+}
+
class TDataShard::TTxSplitSnapshotComplete : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TIntrusivePtr<TSplitSnapshotContext> SnapContext;
+private:
+ TIntrusivePtr<TSplitSnapshotContext> SnapContext;
bool ChangeExchangeSplit;
-
-public:
+
+public:
TTxSplitSnapshotComplete(TDataShard* ds, TIntrusivePtr<TSplitSnapshotContext> snapContext)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , SnapContext(snapContext)
+ , SnapContext(snapContext)
, ChangeExchangeSplit(false)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_SPLIT_SNASHOT_COMPLETE; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- ui64 opId = Self->SrcSplitOpId;
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " snapshot complete for split OpId " << opId);
-
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_SPLIT_SNASHOT_COMPLETE; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ ui64 opId = Self->SrcSplitOpId;
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " snapshot complete for split OpId " << opId);
+
Y_VERIFY(Self->State == TShardState::SplitSrcMakeSnapshot, "Datashard in unexpected state %s", DatashardStateName(Self->State).data());
-
+
txc.Env.ClearSnapshot(*SnapContext);
- NIceDb::TNiceDb db(txc.DB);
-
+ NIceDb::TNiceDb db(txc.DB);
+
ui64 sourceOffsetsBytes = 0;
for (const auto& kv : Self->ReplicatedTables) {
for (const auto& kvSource : kv.second.SourceById) {
@@ -237,79 +237,79 @@ public:
}
}
- bool needToReadPages = false;
- ui64 totalSnapshotSize = 0;
- // Build snapshot data of all tables for each destination shard
- for (ui32 i = 0; i < Self->SrcSplitDescription->DestinationRangesSize(); ++i) {
- const auto& dstRangeDescr = Self->SrcSplitDescription->GetDestinationRanges(i);
+ bool needToReadPages = false;
+ ui64 totalSnapshotSize = 0;
+ // Build snapshot data of all tables for each destination shard
+ for (ui32 i = 0; i < Self->SrcSplitDescription->DestinationRangesSize(); ++i) {
+ const auto& dstRangeDescr = Self->SrcSplitDescription->GetDestinationRanges(i);
const ui64 dstTablet = dstRangeDescr.GetTabletID();
-
- TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot = new NKikimrTxDataShard::TEvSplitTransferSnapshot;
- snapshot->SetSrcTabletId(Self->TabletID());
- snapshot->SetOperationCookie(opId);
-
- // Fill user table scheme
- Y_VERIFY(Self->TableInfos.size() == 1, "Support for more than 1 user table in a datashard is not implemented here");
+
+ TAutoPtr<NKikimrTxDataShard::TEvSplitTransferSnapshot> snapshot = new NKikimrTxDataShard::TEvSplitTransferSnapshot;
+ snapshot->SetSrcTabletId(Self->TabletID());
+ snapshot->SetOperationCookie(opId);
+
+ // Fill user table scheme
+ Y_VERIFY(Self->TableInfos.size() == 1, "Support for more than 1 user table in a datashard is not implemented here");
const TUserTable& tableInfo = *Self->TableInfos.begin()->second;
tableInfo.GetSchema(*snapshot->MutableUserTableScheme());
-
- for (ui32 localTableId : SnapContext->TablesToSnapshot()) {
+
+ for (ui32 localTableId : SnapContext->TablesToSnapshot()) {
TString snapBody;
- if (localTableId > Schema::MinLocalTid) {
- // Extract dst range from split/merge description to pass it to BorrowSnapshot
- TSerializedCellVec fromCells(dstRangeDescr.GetKeyRangeBegin());
- TSerializedCellVec toCells(dstRangeDescr.GetKeyRangeEnd());
+ if (localTableId > Schema::MinLocalTid) {
+ // Extract dst range from split/merge description to pass it to BorrowSnapshot
+ TSerializedCellVec fromCells(dstRangeDescr.GetKeyRangeBegin());
+ TSerializedCellVec toCells(dstRangeDescr.GetKeyRangeEnd());
- auto cellsToRawValues = [&tableInfo] (const TSerializedCellVec& cells) {
+ auto cellsToRawValues = [&tableInfo] (const TSerializedCellVec& cells) {
TVector<TRawTypeValue> rawVals;
- ui32 ki = 0;
- for (; ki < cells.GetCells().size(); ++ki) {
- rawVals.push_back(
- cells.GetCells()[ki].IsNull() ?
- TRawTypeValue() :
- TRawTypeValue(cells.GetCells()[ki].Data(), cells.GetCells()[ki].Size(), tableInfo.KeyColumnTypes[ki])
- );
- }
- // Extend with NULLs if needed
- for (; ki < tableInfo.KeyColumnTypes.size(); ++ki) {
- rawVals.push_back(TRawTypeValue());
- }
- return rawVals;
- };
-
- TVector<TRawTypeValue> from = cellsToRawValues(fromCells);
- TVector<TRawTypeValue> to;
- if (!toCells.GetCells().empty()) { // special case: empty vec means +INF
- to = cellsToRawValues(toCells);
- }
-
- // Apply dst range to user table
+ ui32 ki = 0;
+ for (; ki < cells.GetCells().size(); ++ki) {
+ rawVals.push_back(
+ cells.GetCells()[ki].IsNull() ?
+ TRawTypeValue() :
+ TRawTypeValue(cells.GetCells()[ki].Data(), cells.GetCells()[ki].Size(), tableInfo.KeyColumnTypes[ki])
+ );
+ }
+ // Extend with NULLs if needed
+ for (; ki < tableInfo.KeyColumnTypes.size(); ++ki) {
+ rawVals.push_back(TRawTypeValue());
+ }
+ return rawVals;
+ };
+
+ TVector<TRawTypeValue> from = cellsToRawValues(fromCells);
+ TVector<TRawTypeValue> to;
+ if (!toCells.GetCells().empty()) { // special case: empty vec means +INF
+ to = cellsToRawValues(toCells);
+ }
+
+ // Apply dst range to user table
snapBody = Self->Executor()->BorrowSnapshot(localTableId, *SnapContext, from, to, dstTablet);
- } else {
- // Transfer full contents of system table
+ } else {
+ // Transfer full contents of system table
snapBody = Self->Executor()->BorrowSnapshot(localTableId, *SnapContext, {}, {}, dstTablet);
- }
-
- if (snapBody.empty()) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot needs to load pages for table "
- << localTableId << " for split OpId " << opId);
- needToReadPages = true;
- } else {
- totalSnapshotSize += snapBody.size();
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot: table "
- << localTableId << " snapshot size is " << snapBody.size() << " total snapshot size is "
- << totalSnapshotSize << " for split OpId " << opId);
- }
-
- if (!needToReadPages) {
- auto* tableSnapshot = snapshot->AddTableSnapshot();
- tableSnapshot->SetTableId(localTableId);
- TString compressedBody = NBlockCodecs::Codec("lz4fast")->Encode(snapBody);
- tableSnapshot->SetSnapshotData(compressedBody);
- }
- }
-
- if (!needToReadPages) {
+ }
+
+ if (snapBody.empty()) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot needs to load pages for table "
+ << localTableId << " for split OpId " << opId);
+ needToReadPages = true;
+ } else {
+ totalSnapshotSize += snapBody.size();
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot: table "
+ << localTableId << " snapshot size is " << snapBody.size() << " total snapshot size is "
+ << totalSnapshotSize << " for split OpId " << opId);
+ }
+
+ if (!needToReadPages) {
+ auto* tableSnapshot = snapshot->AddTableSnapshot();
+ tableSnapshot->SetTableId(localTableId);
+ TString compressedBody = NBlockCodecs::Codec("lz4fast")->Encode(snapBody);
+ tableSnapshot->SetSnapshotData(compressedBody);
+ }
+ }
+
+ if (!needToReadPages) {
// Send version at which data is not protected by persistent snapshots
if (auto minVersion = Self->GetSnapshotManager().GetMinWriteVersion()) {
snapshot->SetMinWriteVersionStep(minVersion.Step);
@@ -355,97 +355,97 @@ public:
snapshot->SetReplicationSourceOffsetsBytes(sourceOffsetsBytes);
}
- // Persist snapshot data so that it can be sent if this datashard restarts
- TString snapshotMeta;
+ // Persist snapshot data so that it can be sent if this datashard restarts
+ TString snapshotMeta;
Y_PROTOBUF_SUPPRESS_NODISCARD snapshot->SerializeToString(&snapshotMeta);
- db.Table<Schema::SplitSrcSnapshots>()
- .Key(dstTablet)
- .Update(NIceDb::TUpdate<Schema::SplitSrcSnapshots::SnapshotMeta>(snapshotMeta));
-
- Self->SplitSrcSnapshotSender.SaveSnapshotForSending(dstTablet, snapshot);
- }
- }
-
+ db.Table<Schema::SplitSrcSnapshots>()
+ .Key(dstTablet)
+ .Update(NIceDb::TUpdate<Schema::SplitSrcSnapshots::SnapshotMeta>(snapshotMeta));
+
+ Self->SplitSrcSnapshotSender.SaveSnapshotForSending(dstTablet, snapshot);
+ }
+ }
+
ChangeExchangeSplit = !Self->ChangesQueue && !Self->ChangeExchangeSplitter.Done();
- if (needToReadPages) {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot is restarting for split OpId " << opId);
- return false;
- } else {
+ if (needToReadPages) {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " BorrowSnapshot is restarting for split OpId " << opId);
+ return false;
+ } else {
txc.Env.DropSnapshot(SnapContext);
-
- Self->State = TShardState::SplitSrcSendingSnapshot;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
-
- return true;
- }
- }
-
- void Complete(const TActorContext &ctx) override {
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Sending snapshots from src for split OpId " << Self->SrcSplitOpId);
- Self->SplitSrcSnapshotSender.DoSend(ctx);
+
+ Self->State = TShardState::SplitSrcSendingSnapshot;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+
+ return true;
+ }
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " Sending snapshots from src for split OpId " << Self->SrcSplitOpId);
+ Self->SplitSrcSnapshotSender.DoSend(ctx);
if (ChangeExchangeSplit) {
Self->ChangeExchangeSplitter.DoSplit(ctx);
}
- }
-};
-
-
+ }
+};
+
+
NTabletFlatExecutor::ITransaction* TDataShard::CreateTxSplitSnapshotComplete(TIntrusivePtr<TSplitSnapshotContext> snapContext) {
- return new TTxSplitSnapshotComplete(this, snapContext);
-}
-
-
+ return new TTxSplitSnapshotComplete(this, snapContext);
+}
+
+
class TDataShard::TTxSplitTransferSnapshotAck : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvSplitTransferSnapshotAck::TPtr Ev;
- bool AllDstAcksReceived;
+private:
+ TEvDataShard::TEvSplitTransferSnapshotAck::TPtr Ev;
+ bool AllDstAcksReceived;
bool Activate;
-
-public:
+
+public:
TTxSplitTransferSnapshotAck(TDataShard* ds, TEvDataShard::TEvSplitTransferSnapshotAck::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- , AllDstAcksReceived(false)
+ , Ev(ev)
+ , AllDstAcksReceived(false)
, Activate(false)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_SPLIT_TRANSFER_SNAPSHOT_ACK; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
- NIceDb::TNiceDb db(txc.DB);
-
- ui64 opId = Ev->Get()->Record.GetOperationCookie();
- ui64 dstTabletId = Ev->Get()->Record.GetTabletId();
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
- Self->TabletID() << " Received snapshot Ack from dst " << dstTabletId << " for split OpId " << opId);
-
- Self->SplitSrcSnapshotSender.AckSnapshot(dstTabletId, ctx);
-
- if (Self->SplitSrcSnapshotSender.AllAcked()) {
- AllDstAcksReceived = true;
- Self->State = TShardState::SplitSrcWaitForPartitioningChanged;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
- }
-
- // Remove the row for acked snapshot
- db.Table<Schema::SplitSrcSnapshots>().Key(dstTabletId).Delete();
-
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_SPLIT_TRANSFER_SNAPSHOT_ACK; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ NIceDb::TNiceDb db(txc.DB);
+
+ ui64 opId = Ev->Get()->Record.GetOperationCookie();
+ ui64 dstTabletId = Ev->Get()->Record.GetTabletId();
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD,
+ Self->TabletID() << " Received snapshot Ack from dst " << dstTabletId << " for split OpId " << opId);
+
+ Self->SplitSrcSnapshotSender.AckSnapshot(dstTabletId, ctx);
+
+ if (Self->SplitSrcSnapshotSender.AllAcked()) {
+ AllDstAcksReceived = true;
+ Self->State = TShardState::SplitSrcWaitForPartitioningChanged;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+ }
+
+ // Remove the row for acked snapshot
+ db.Table<Schema::SplitSrcSnapshots>().Key(dstTabletId).Delete();
+
if (!Self->ChangesQueue && Self->ChangeExchangeSplitter.Done()) {
Activate = !Self->ChangeSenderActivator.Acked(dstTabletId);
}
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
- if (AllDstAcksReceived) {
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
+ if (AllDstAcksReceived) {
for (const TActorId& ackTo : Self->SrcAckSplitTo) {
- ui64 opId = Self->SrcSplitOpId;
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack split to schemeshard " << opId);
- ctx.Send(ackTo, new TEvDataShard::TEvSplitAck(opId, Self->TabletID()));
- }
- }
+ ui64 opId = Self->SrcSplitOpId;
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack split to schemeshard " << opId);
+ ctx.Send(ackTo, new TEvDataShard::TEvSplitAck(opId, Self->TabletID()));
+ }
+ }
if (Activate) {
const ui64 dstTabletId = Ev->Get()->Record.GetTabletId();
@@ -453,29 +453,29 @@ public:
Self->ChangeSenderActivator.DoSend(dstTabletId, ctx);
}
}
- }
-};
-
-
+ }
+};
+
+
class TDataShard::TTxSplitPartitioningChanged : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-private:
- TEvDataShard::TEvSplitPartitioningChanged::TPtr Ev;
+private:
+ TEvDataShard::TEvSplitPartitioningChanged::TPtr Ev;
bool DelayPartitioningChangedAck = false;
-
-public:
+
+public:
TTxSplitPartitioningChanged(TDataShard* ds, TEvDataShard::TEvSplitPartitioningChanged::TPtr& ev)
: NTabletFlatExecutor::TTransactionBase<TDataShard>(ds)
- , Ev(ev)
- {}
-
- TTxType GetTxType() const override { return TXTYPE_SPLIT_PARTITIONING_CHANGED; }
-
- bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
+ , Ev(ev)
+ {}
+
+ TTxType GetTxType() const override { return TXTYPE_SPLIT_PARTITIONING_CHANGED; }
+
+ bool Execute(TTransactionContext& txc, const TActorContext& ctx) override {
ui64 opId = Ev->Get()->Record.GetOperationCookie();
- LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Got TEvSplitPartitioningChanged opId %" PRIu64 " at datashard %" PRIu64 " state %s",
+ LOG_DEBUG(ctx, NKikimrServices::TX_DATASHARD, "Got TEvSplitPartitioningChanged opId %" PRIu64 " at datashard %" PRIu64 " state %s",
opId, Self->TabletID(), DatashardStateName(Self->State).data());
-
+
if (Self->ChangesQueue || !Self->ChangeSenderActivator.AllAcked()) {
LOG_NOTICE_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " delay partitioning changed ack"
<< " ChangesQueue size: " << Self->ChangesQueue.size()
@@ -485,50 +485,50 @@ public:
Self->SrcAckPartitioningChangedTo[Ev->Sender].insert(opId);
}
- // TODO: At this point Src should start rejecting all new Tx with SchemaChanged status
- if (Self->State != TShardState::SplitSrcWaitForPartitioningChanged) {
- Y_VERIFY(Self->State == TShardState::PreOffline || Self->State == TShardState::Offline,
- "Unexpected TEvSplitPartitioningChanged opId %" PRIu64 " at datashard %" PRIu64 " state %s",
+ // TODO: At this point Src should start rejecting all new Tx with SchemaChanged status
+ if (Self->State != TShardState::SplitSrcWaitForPartitioningChanged) {
+ Y_VERIFY(Self->State == TShardState::PreOffline || Self->State == TShardState::Offline,
+ "Unexpected TEvSplitPartitioningChanged opId %" PRIu64 " at datashard %" PRIu64 " state %s",
Ev->Get()->Record.GetOperationCookie(), Self->TabletID(), DatashardStateName(Self->State).data());
-
- return true;
- }
-
- Self->DropAllUserTables(txc);
-
- NIceDb::TNiceDb db(txc.DB);
- Self->State = TShardState::PreOffline;
- Self->PersistSys(db, Schema::Sys_State, Self->State);
-
- return true;
- }
-
- void Complete(const TActorContext &ctx) override {
+
+ return true;
+ }
+
+ Self->DropAllUserTables(txc);
+
+ NIceDb::TNiceDb db(txc.DB);
+ Self->State = TShardState::PreOffline;
+ Self->PersistSys(db, Schema::Sys_State, Self->State);
+
+ return true;
+ }
+
+ void Complete(const TActorContext &ctx) override {
TActorId ackTo = Ev->Sender;
- ui64 opId = Ev->Get()->Record.GetOperationCookie();
-
+ ui64 opId = Ev->Get()->Record.GetOperationCookie();
+
if (DelayPartitioningChangedAck) {
return;
}
- LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack split partitioning changed to schemeshard " << opId);
- ctx.Send(ackTo, new TEvDataShard::TEvSplitPartitioningChangedAck(opId, Self->TabletID()));
-
- // TODO: properly check if there are no loans
- Self->CheckStateChange(ctx);
- }
-};
-
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_DATASHARD, Self->TabletID() << " ack split partitioning changed to schemeshard " << opId);
+ ctx.Send(ackTo, new TEvDataShard::TEvSplitPartitioningChangedAck(opId, Self->TabletID()));
+
+ // TODO: properly check if there are no loans
+ Self->CheckStateChange(ctx);
+ }
+};
+
void TDataShard::Handle(TEvDataShard::TEvSplit::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxSplit(this, ev), ctx);
-}
-
+ Execute(new TTxSplit(this, ev), ctx);
+}
+
void TDataShard::Handle(TEvDataShard::TEvSplitTransferSnapshotAck::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxSplitTransferSnapshotAck(this, ev), ctx);
-}
-
+ Execute(new TTxSplitTransferSnapshotAck(this, ev), ctx);
+}
+
void TDataShard::Handle(TEvDataShard::TEvSplitPartitioningChanged::TPtr& ev, const TActorContext& ctx) {
- Execute(new TTxSplitPartitioningChanged(this, ev), ctx);
-}
-
-}}
+ Execute(new TTxSplitPartitioningChanged(this, ev), ctx);
+}
+
+}}
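The cellsToRawValues lambda in TTxSplitSnapshotComplete above converts a destination range boundary into raw key values and pads it with NULLs up to the full key width before BorrowSnapshot is called. A simplified sketch of just that padding step, using hypothetical stand-in types (std::optional cells instead of the real TCell/TRawTypeValue), purely for illustration:

#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-in: a "cell" is std::nullopt when the key component is NULL.
using TFakeCell = std::optional<std::string>;

// Mirrors the "Extend with NULLs if needed" loop: a short key prefix is padded
// to keyColumnCount so it can serve as a range boundary over the full key.
std::vector<TFakeCell> PadKeyPrefix(std::vector<TFakeCell> prefix, size_t keyColumnCount) {
    while (prefix.size() < keyColumnCount) {
        prefix.push_back(std::nullopt);
    }
    return prefix;
}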
diff --git a/ydb/core/tx/datashard/datashard_trans_queue.cpp b/ydb/core/tx/datashard/datashard_trans_queue.cpp
index 915c8a6c72d..d7096c25b32 100644
--- a/ydb/core/tx/datashard/datashard_trans_queue.cpp
+++ b/ydb/core/tx/datashard/datashard_trans_queue.cpp
@@ -21,7 +21,7 @@ void TTransQueue::AddTxInFly(TOperation::TPtr op) {
Self->SetCounter(COUNTER_TX_IN_FLY, TxsInFly.size());
}
-void TTransQueue::RemoveTxInFly(ui64 txId) {
+void TTransQueue::RemoveTxInFly(ui64 txId) {
auto it = TxsInFly.find(txId);
if (it != TxsInFly.end()) {
if (!it->second->GetStep()) {
@@ -36,16 +36,16 @@ void TTransQueue::RemoveTxInFly(ui64 txId) {
bool TTransQueue::Load(NIceDb::TNiceDb& db) {
using Schema = TDataShard::Schema;
- // Load must be idempotent
- Y_VERIFY(TxsInFly.empty());
- Y_VERIFY(SchemaOps.empty());
+ // Load must be idempotent
+ Y_VERIFY(TxsInFly.empty());
+ Y_VERIFY(SchemaOps.empty());
Y_VERIFY(PlannedTxs.empty());
Y_VERIFY(DeadlineQueue.empty());
Y_VERIFY(ProposeDelayers.empty());
Y_VERIFY(PlanWaitingTxCount == 0);
-
- TInstant now = AppData()->TimeProvider->Now();
-
+
+ TInstant now = AppData()->TimeProvider->Now();
+
THashSet<ui64> schemaTxs;
{
auto rowset = db.Table<Schema::TxMain>().Range().Select();
@@ -188,15 +188,15 @@ bool TTransQueue::Load(NIceDb::TNiceDb& db) {
void TTransQueue::ProposeSchemaTx(NIceDb::TNiceDb& db, const TSchemaOperation& op) {
using Schema = TDataShard::Schema;
- // Auto-ack previous schema operation
- if (!SchemaOps.empty()) {
- Y_VERIFY(SchemaOps.begin()->first != op.TxId, "Duplicate Tx %" PRIu64 " wasn't properly handled", op.TxId);
- Y_VERIFY(SchemaOps.size() == 1, "Cannot have multiple un-Ack-ed previous schema operations");
- Y_VERIFY(SchemaOps.begin()->second.Done,
- "Previous Tx %" PRIu64 " must be in state when it only waits for Ack", SchemaOps.begin()->first);
- RemoveSchemaOperation(db, SchemaOps.begin()->second.TxId);
- }
-
+ // Auto-ack previous schema operation
+ if (!SchemaOps.empty()) {
+ Y_VERIFY(SchemaOps.begin()->first != op.TxId, "Duplicate Tx %" PRIu64 " wasn't properly handled", op.TxId);
+ Y_VERIFY(SchemaOps.size() == 1, "Cannot have multiple un-Ack-ed previous schema operations");
+ Y_VERIFY(SchemaOps.begin()->second.Done,
+ "Previous Tx %" PRIu64 " must be in state when it only waits for Ack", SchemaOps.begin()->first);
+ RemoveSchemaOperation(db, SchemaOps.begin()->second.TxId);
+ }
+
auto saved = SchemaOps.insert(std::make_pair(op.TxId, op));
db.Table<Schema::SchemaOperations>().Key(op.TxId).Update(
NIceDb::TUpdate<Schema::SchemaOperations::TxId>(op.TxId),
@@ -423,8 +423,8 @@ ECleanupStatus TTransQueue::CleanupOutdated(NIceDb::TNiceDb& db, ui64 outdatedSt
}
for (ui64 txId : outdatedTxs) {
RemoveTxInFly(txId);
- }
-
+ }
+
Self->IncCounter(COUNTER_TX_PROGRESS_OUTDATED, outdatedTxs.size());
return ECleanupStatus::Success;
}
diff --git a/ydb/core/tx/datashard/datashard_trans_queue.h b/ydb/core/tx/datashard/datashard_trans_queue.h
index d472c699b19..1d392676da3 100644
--- a/ydb/core/tx/datashard/datashard_trans_queue.h
+++ b/ydb/core/tx/datashard/datashard_trans_queue.h
@@ -31,21 +31,21 @@ public:
: Self(self)
{}
- void Reset() {
- TxsInFly.clear();
- SchemaOps.clear();
+ void Reset() {
+ TxsInFly.clear();
+ SchemaOps.clear();
PlannedTxs.clear();
DeadlineQueue.clear();
ProposeDelayers.clear();
PlanWaitingTxCount = 0;
- }
-
+ }
+
bool Load(NIceDb::TNiceDb& db);
const THashMap<ui64, TOperation::TPtr> &GetTxsInFly() const { return TxsInFly; }
ui64 TxInFly() const { return TxsInFly.size(); }
void AddTxInFly(TOperation::TPtr op);
- void RemoveTxInFly(ui64 txId);
+ void RemoveTxInFly(ui64 txId);
TOperation::TPtr FindTxInFly(ui64 txId) const
{
auto it = TxsInFly.find(txId);
diff --git a/ydb/core/tx/datashard/datashard_txs.h b/ydb/core/tx/datashard/datashard_txs.h
index 8c7ab38f256..4a9ab008789 100644
--- a/ydb/core/tx/datashard/datashard_txs.h
+++ b/ydb/core/tx/datashard/datashard_txs.h
@@ -31,7 +31,7 @@ public:
TTxGetShardState(TDataShard* ds, TEvDataShard::TEvGetShardState::TPtr ev);
bool Execute(TTransactionContext& txc, const TActorContext& ctx) override;
void Complete(const TActorContext &ctx) override;
- TTxType GetTxType() const override { return TXTYPE_GET_STARD_STATE; }
+ TTxType GetTxType() const override { return TXTYPE_GET_STARD_STATE; }
private:
TEvDataShard::TEvGetShardState::TPtr Ev;
THolder<TEvDataShard::TEvGetShardStateResult> Result;
@@ -101,7 +101,7 @@ protected:
const TInstant ReceivedAt;
const ui64 TieBreakerIndex;
EOperationKind Kind;
- ui64 TxId;
+ ui64 TxId;
TVector<EExecutionUnitKind> CompleteList;
TInstant CommitStart;
bool Acked;
@@ -127,21 +127,21 @@ public:
TTxProgressResendRS(TDataShard *self, ui64 seqno);
bool Execute(TTransactionContext &txc, const TActorContext &ctx) override;
void Complete(const TActorContext &ctx) override;
- TTxType GetTxType() const override { return TXTYPE_PROGRESS_RESEND_RS; }
+ TTxType GetTxType() const override { return TXTYPE_PROGRESS_RESEND_RS; }
private:
const ui64 Seqno;
};
class TDataShard::TTxCancelTransactionProposal : public NTabletFlatExecutor::TTransactionBase<TDataShard> {
-public:
+public:
TTxCancelTransactionProposal(TDataShard *self, ui64 txId);
- bool Execute(TTransactionContext &txc, const TActorContext &ctx) override;
- void Complete(const TActorContext &ctx) override;
- TTxType GetTxType() const override { return TXTYPE_CANCEL_TX_PROPOSAL; }
-private:
- const ui64 TxId;
-};
-
+ bool Execute(TTransactionContext &txc, const TActorContext &ctx) override;
+ void Complete(const TActorContext &ctx) override;
+ TTxType GetTxType() const override { return TXTYPE_CANCEL_TX_PROPOSAL; }
+private:
+ const ui64 TxId;
+};
+
inline bool MaybeRequestMoreTxMemory(ui64 usage, NTabletFlatExecutor::TTransactionContext &txc) {
if (usage > txc.GetMemoryLimit()) {
ui64 request = Max(usage - txc.GetMemoryLimit(), txc.GetMemoryLimit() * MEMORY_REQUEST_FACTOR);
diff --git a/ydb/core/tx/datashard/datashard_user_table.cpp b/ydb/core/tx/datashard/datashard_user_table.cpp
index 40b982c8364..70bc9400733 100644
--- a/ydb/core/tx/datashard/datashard_user_table.cpp
+++ b/ydb/core/tx/datashard/datashard_user_table.cpp
@@ -408,7 +408,7 @@ void TUserTable::DoApplyCreate(
}
// N.B. some settings only apply to the main table
-
+
if (!shadow) {
if (partConfig.HasExecutorCacheSize()) {
alter.SetExecutorCacheSize(partConfig.GetExecutorCacheSize());
@@ -552,11 +552,11 @@ void TUserTable::ApplyAlter(
alter.SetExecutorResourceProfile(configDelta.GetResourceProfile());
}
- if (configDelta.HasExecutorFastLogPolicy()) {
- config.SetExecutorFastLogPolicy(configDelta.GetExecutorFastLogPolicy());
- alter.SetExecutorFastLogPolicy(configDelta.GetExecutorFastLogPolicy());
- }
-
+ if (configDelta.HasExecutorFastLogPolicy()) {
+ config.SetExecutorFastLogPolicy(configDelta.GetExecutorFastLogPolicy());
+ alter.SetExecutorFastLogPolicy(configDelta.GetExecutorFastLogPolicy());
+ }
+
if (configDelta.HasEnableEraseCache() || configDelta.HasEraseCacheMinRows() || configDelta.HasEraseCacheMaxBytes()) {
if (configDelta.HasEnableEraseCache()) {
config.SetEnableEraseCache(configDelta.GetEnableEraseCache());
diff --git a/ydb/core/tx/datashard/datashard_user_table.h b/ydb/core/tx/datashard/datashard_user_table.h
index f4e4b823a43..0dab30cb84f 100644
--- a/ydb/core/tx/datashard/datashard_user_table.h
+++ b/ydb/core/tx/datashard/datashard_user_table.h
@@ -104,7 +104,7 @@ struct TUserTable : public TThrRefBase {
return 1;
}
- ui32 OuterChannel() const {
+ ui32 OuterChannel() const {
if (!*Room) {
return OuterChannelByStorageEnum();
}
@@ -113,16 +113,16 @@ struct TUserTable : public TThrRefBase {
}
ui32 OuterChannelByStorageEnum() const {
- switch (Storage) {
+ switch (Storage) {
case NKikimrSchemeOp::EColumnStorage::ColumnStorage1Med2Ext2:
case NKikimrSchemeOp::EColumnStorage::ColumnStorage2Med2Ext2:
- return 2;
- default:
- break;
- }
+ return 2;
+ default:
+ break;
+ }
return MainChannelByStorageEnum();
- }
-
+ }
+
ui32 ExternalChannel() const {
if (!*Room) {
return ExternalChannelByStorageEnum();
@@ -300,36 +300,36 @@ struct TUserTable : public TThrRefBase {
}
};
- struct TStats {
- NTable::TStats DataStats;
- ui64 IndexSize = 0;
- ui64 MemRowCount = 0;
- ui64 MemDataSize = 0;
- TInstant AccessTime;
- TInstant UpdateTime;
+ struct TStats {
+ NTable::TStats DataStats;
+ ui64 IndexSize = 0;
+ ui64 MemRowCount = 0;
+ ui64 MemDataSize = 0;
+ TInstant AccessTime;
+ TInstant UpdateTime;
TInstant LastFullCompaction;
- THashSet<ui64> PartOwners;
- ui64 PartCount = 0;
+ THashSet<ui64> PartOwners;
+ ui64 PartCount = 0;
ui64 SearchHeight = 0;
- TInstant StatsUpdateTime;
- ui64 DataSizeResolution = 0;
- ui64 RowCountResolution = 0;
+ TInstant StatsUpdateTime;
+ ui64 DataSizeResolution = 0;
+ ui64 RowCountResolution = 0;
ui64 BackgroundCompactionRequests = 0;
- NTable::TKeyAccessSample AccessStats;
-
+ NTable::TKeyAccessSample AccessStats;
+
bool LastSearchHeightMetricSet = false;
std::optional<ui32> HoursSinceFullCompaction;
- void Update(NTable::TStats&& dataStats, ui64 indexSize, THashSet<ui64>&& partOwners, ui64 partCount, TInstant statsUpdateTime) {
- DataStats = dataStats;
- IndexSize = indexSize;
- PartOwners = partOwners;
- PartCount = partCount;
- StatsUpdateTime = statsUpdateTime;
- }
- };
-
+ void Update(NTable::TStats&& dataStats, ui64 indexSize, THashSet<ui64>&& partOwners, ui64 partCount, TInstant statsUpdateTime) {
+ DataStats = dataStats;
+ IndexSize = indexSize;
+ PartOwners = partOwners;
+ PartCount = partCount;
+ StatsUpdateTime = statsUpdateTime;
+ }
+ };
+
struct TSpecialUpdate {
bool HasUpdates = false;
diff --git a/ydb/core/tx/datashard/datashard_ut_common.cpp b/ydb/core/tx/datashard/datashard_ut_common.cpp
index 72e81ea750c..d754f7ad0e2 100644
--- a/ydb/core/tx/datashard/datashard_ut_common.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_common.cpp
@@ -65,8 +65,8 @@ TTester::TTester(ESchema schema, const TOptions& opts)
{
Setup(Runtime, opts);
Sender = Runtime.AllocateEdgeActor();
-
- // Schemeshard is only used to receive notifications
+
+ // Schemeshard is only used to receive notifications
CreateTestBootstrapper(Runtime, CreateTestTabletInfo(FAKE_SCHEMESHARD_TABLET_ID, TTabletTypes::FLAT_SCHEMESHARD), &CreateFlatTxSchemeShard);
CreateTestBootstrapper(Runtime, CreateTestTabletInfo(FAKE_TX_ALLOCATOR_TABLET_ID, TTabletTypes::TX_ALLOCATOR), &CreateTxAllocator);
CreateSchema(schema, opts);
@@ -84,8 +84,8 @@ TTester::TTester(ESchema schema, const TString& dispatchName, std::function<void
AllowIncompleteResult = (dispatchName != INITIAL_TEST_DISPATCH_NAME);
ActiveZone = &activeZone;
DispatchName = dispatchName;
-
- // Schemeshard is only used to receive notifications
+
+ // Schemeshard is only used to receive notifications
CreateTestBootstrapper(Runtime, CreateTestTabletInfo(FAKE_SCHEMESHARD_TABLET_ID, TTabletTypes::FLAT_SCHEMESHARD), &CreateFlatTxSchemeShard);
CreateTestBootstrapper(Runtime, CreateTestTabletInfo(FAKE_TX_ALLOCATOR_TABLET_ID, TTabletTypes::TX_ALLOCATOR), &CreateTxAllocator);
CreateSchema(schema, opts);
diff --git a/ydb/core/tx/datashard/datashard_ut_locks.cpp b/ydb/core/tx/datashard/datashard_ut_locks.cpp
index 232dc4036ed..1d25c39f99d 100644
--- a/ydb/core/tx/datashard/datashard_ut_locks.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_locks.cpp
@@ -687,7 +687,7 @@ void CheckLocksCacheUsage(bool waitForLocksStore) {
bool operator()(IEventHandle& ev)
{
if (ev.GetTypeRewrite() == TEvTablet::EvCommitResult) {
- if (ev.Cookie == (ui64)NKikimr::NTabletFlatExecutor::ECommit::Redo) {
+ if (ev.Cookie == (ui64)NKikimr::NTabletFlatExecutor::ECommit::Redo) {
if (!Recipient)
Recipient = ev.Recipient;
else if (Recipient != ev.Recipient)
diff --git a/ydb/core/tx/datashard/datashard_ut_minikql.cpp b/ydb/core/tx/datashard/datashard_ut_minikql.cpp
index e548b4ca0f1..7974e474861 100644
--- a/ydb/core/tx/datashard/datashard_ut_minikql.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_minikql.cpp
@@ -161,7 +161,7 @@ Y_UNIT_TEST(ReadSpecialColumns) {
Y_UNIT_TEST(ReadNonExisting) {
TTester t(TTester::ESchema_KV);
TFakeMiniKQLProxy proxy(t);
-
+
{
auto programText = R"___((
(let row '('('key (Uint32 '42))))
@@ -171,16 +171,16 @@ Y_UNIT_TEST(ReadNonExisting) {
))
(return pgmReturn)
))___";
-
+
NKikimrMiniKQL::TResult res;
UNIT_ASSERT_EQUAL(proxy.Execute(programText, res), IEngineFlat::EStatus::Complete);
TValue value = TValue::Create(res.GetValue(), res.GetType());
TValue row = value["myRes"];
UNIT_ASSERT(!row.HaveValue());
- }
+ }
}
-
+
Y_UNIT_TEST(WriteEraseRead) {
TTester t(TTester::ESchema_KV);
TFakeMiniKQLProxy proxy(t);
@@ -266,10 +266,10 @@ Y_UNIT_TEST(SelectRange) {
))
(return pgmReturn)
))___";
-
+
UNIT_ASSERT_EQUAL(proxy.Execute(programText), IEngineFlat::EStatus::Complete);
}
-
+
{
auto programText = R"___((
(let range '('ExcFrom '('key (Uint32 '2) (Void))))
@@ -492,7 +492,7 @@ Y_UNIT_TEST(SelectRangeWithNotFullKey) {
(let $18 (AsList $17))
(return $18)
))___";
-
+
NKikimrMiniKQL::TResult res;
UNIT_ASSERT_EQUAL(proxy.Execute(programText, res), IEngineFlat::EStatus::Complete);
@@ -502,7 +502,7 @@ Y_UNIT_TEST(SelectRangeWithNotFullKey) {
TValue rsl = rs0["List"];
UNIT_ASSERT_EQUAL(rsl.Size(), 0);
}
-
+
{
auto programText = R"___((
(let r1 '('key1 (Uint32 '345) (Uint32 '347)))
@@ -626,7 +626,7 @@ Y_UNIT_TEST(SelectRangeWithNotFullKey) {
UNIT_ASSERT_EQUAL(TString(row1["value"]), "Tables");
UNIT_ASSERT_EQUAL(TString(row2["value"]), "Paulson");
}
-
+
// (345,inf).. (347,inf)
{
auto programText = R"___((
@@ -639,7 +639,7 @@ Y_UNIT_TEST(SelectRangeWithNotFullKey) {
))
(return pgmReturn)
))___";
-
+
NKikimrMiniKQL::TResult res;
UNIT_ASSERT_EQUAL(proxy.Execute(programText, res), IEngineFlat::EStatus::Complete);
@@ -721,21 +721,21 @@ Y_UNIT_TEST(WriteAndReadMultipleShards) {
void GetTableStats(TTestActorRuntime &runtime, ui64 tabletId, ui64 tableId, NKikimrTableStats::TTableStats& stats) {
TAutoPtr<TEvDataShard::TEvGetTableStats> ev(new TEvDataShard::TEvGetTableStats(tableId));
-
+
TActorId edge = runtime.AllocateEdgeActor();
runtime.SendToPipe(tabletId, edge, ev.Release());
TAutoPtr<IEventHandle> handle;
TEvDataShard::TEvGetTableStatsResult* response = runtime.GrabEdgeEventRethrow<TEvDataShard::TEvGetTableStatsResult>(handle);
stats.Swap(response->Record.MutableTableStats());
}
-
+
Y_UNIT_TEST(TableStats) {
TTester t(TTester::ESchema_MultiShardKV);
TFakeMiniKQLProxy proxy(t);
-
+
ui64 tableId = 13;
ui64 ds1 = TTestTxConfig::TxTablet0;
-
+
{
auto programText = R"___((
(let row1 '('('key (Uint32 '100))))
@@ -754,10 +754,10 @@ Y_UNIT_TEST(TableStats) {
))
(return pgmReturn)
))___";
-
+
UNIT_ASSERT_EQUAL(proxy.Execute(programText), IEngineFlat::EStatus::Complete);
}
-
+
NKikimrTableStats::TTableStats statsAfterUpdate;
GetTableStats(t.Runtime, ds1, tableId, statsAfterUpdate);
Cerr << statsAfterUpdate << Endl;
@@ -766,13 +766,13 @@ Y_UNIT_TEST(TableStats) {
UNIT_ASSERT_C(statsAfterUpdate.GetLastUpdateTime() > 0, "LastUpdateTime wasn't set");
UNIT_ASSERT_VALUES_EQUAL_C(statsAfterUpdate.GetLastAccessTime(), statsAfterUpdate.GetLastUpdateTime(),
"After update LastAccessTime should be equal to LastUpdateTime ");
-
+
// Non-existing table
NKikimrTableStats::TTableStats stats;
GetTableStats(t.Runtime, ds1, tableId+42, stats);
Cerr << stats << Endl;
UNIT_ASSERT_C(!stats.HasIndexSize() && !stats.HasInMemSize(), "Unknown table shouldn't have stats");
-
+
{
auto programText = R"___((
(let row1 '('('key (Uint32 '100))))
@@ -784,11 +784,11 @@ Y_UNIT_TEST(TableStats) {
(SelectRow 'table1 row1 select)
(SelectRow 'table1 row2 select)
(SelectRow 'table1 row3 select)
- ))
+ ))
))
(return pgmReturn)
))___";
-
+
NKikimrMiniKQL::TResult res;
UNIT_ASSERT_EQUAL(proxy.Execute(programText, res), IEngineFlat::EStatus::Complete);
@@ -800,8 +800,8 @@ Y_UNIT_TEST(TableStats) {
UNIT_ASSERT_EQUAL(TString(row1["value"]), "ImInShard1");
UNIT_ASSERT_EQUAL(TString(row2["value"]), "ImInShard2");
UNIT_ASSERT_EQUAL(TString(row3["value"]), "ImInShard3");
- }
-
+ }
+
NKikimrTableStats::TTableStats statsAfterRead;
GetTableStats(t.Runtime, ds1, tableId, statsAfterRead);
Cerr << statsAfterRead << Endl;
@@ -810,11 +810,11 @@ Y_UNIT_TEST(TableStats) {
UNIT_ASSERT_C(statsAfterRead.GetLastAccessTime() > statsAfterUpdate.GetLastAccessTime(),
"LastAccessTime should change after read");
}
-
+
Y_UNIT_TEST(TableStatsHistograms) {
TTester t(TTester::ESchema_MultiShardKV);
TFakeMiniKQLProxy proxy(t);
-
+
ui64 tableId = 13;
ui64 ds1 = TTestTxConfig::TxTablet0;
@@ -831,23 +831,23 @@ Y_UNIT_TEST(TableStatsHistograms) {
'('value (Utf8 'ImInShard333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333))))
(let pgmReturn (AsList
(UpdateRow 'table1 row1 myUpd1)
-# (UpdateRow 'table1 row2 myUpd2)
-# (UpdateRow 'table1 row3 myUpd3)
+# (UpdateRow 'table1 row2 myUpd2)
+# (UpdateRow 'table1 row3 myUpd3)
))
(return pgmReturn)
))___";
-
+
TString query = Sprintf(programText, i, 1+1000, i+2000);
-
+
UNIT_ASSERT_EQUAL(proxy.Execute(query), IEngineFlat::EStatus::Complete);
Cerr << ".";
- }
-
+ }
+
NKikimrTableStats::TTableStats statsAfterUpdate;
GetTableStats(t.Runtime, ds1, tableId, statsAfterUpdate);
Cerr << statsAfterUpdate << Endl;
}
-
+
//
void InitCrossShard(TFakeMiniKQLProxy& proxy) {
diff --git a/ydb/core/tx/datashard/finish_propose_unit.cpp b/ydb/core/tx/datashard/finish_propose_unit.cpp
index ec553bcd604..de0e9e4ad2e 100644
--- a/ydb/core/tx/datashard/finish_propose_unit.cpp
+++ b/ydb/core/tx/datashard/finish_propose_unit.cpp
@@ -155,8 +155,8 @@ void TFinishProposeUnit::CompleteRequest(TOperation::TPtr op,
if (op->HasNeedDiagnosticsFlag())
AddDiagnosticsResult(res);
- DataShard.FillExecutionStats(op->GetExecutionProfile(), *res);
-
+ DataShard.FillExecutionStats(op->GetExecutionProfile(), *res);
+
DataShard.IncCounter(COUNTER_TX_RESULT_SIZE, res->Record.GetTxResult().size());
if (!gSkipRepliesFailPoint.Check(DataShard.TabletID(), op->GetTxId()))
diff --git a/ydb/core/tx/datashard/ya.make b/ydb/core/tx/datashard/ya.make
index 3f759545a69..9c98e8e79b0 100644
--- a/ydb/core/tx/datashard/ya.make
+++ b/ydb/core/tx/datashard/ya.make
@@ -60,24 +60,24 @@ SRCS(
datashard__propose_tx_base.cpp
datashard__readset.cpp
datashard__read_iterator.cpp
- datashard__read_columns.cpp
- datashard__s3.cpp
+ datashard__read_columns.cpp
+ datashard__s3.cpp
datashard__s3_download_txs.cpp
datashard__s3_upload_txs.cpp
datashard__kqp_scan.cpp
datashard__snapshot_txs.cpp
- datashard__stats.cpp
+ datashard__stats.cpp
datashard__store_table_path.cpp
datashard__store_scan_state.cpp
datashard_change_receiving.cpp
datashard_change_sender_activation.cpp
datashard_change_sending.cpp
datashard_counters.cpp
- datashard_loans.cpp
+ datashard_loans.cpp
datashard_locks.h
datashard_locks.cpp
- datashard_split_dst.cpp
- datashard_split_src.cpp
+ datashard_split_dst.cpp
+ datashard_split_src.cpp
datashard_switch_mvcc_state.cpp
datashard_trans_queue.cpp
datashard_trans_queue.h
@@ -91,8 +91,8 @@ SRCS(
datashard_direct_erase.cpp
datashard_direct_upload.cpp
datashard_distributed_erase.cpp
- datashard_failpoints.cpp
- datashard_failpoints.h
+ datashard_failpoints.cpp
+ datashard_failpoints.h
datashard_dep_tracker.cpp
datashard_dep_tracker.h
datashard_pipeline.cpp
diff --git a/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp b/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp
index beef022a7cc..b3e5967eaf9 100644
--- a/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp
+++ b/ydb/core/tx/long_tx_service/acquire_snapshot_impl.cpp
@@ -38,7 +38,7 @@ namespace NLongTxService {
private:
void PassAway() override {
Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
- TActor::PassAway();
+ TActor::PassAway();
}
private:
diff --git a/ydb/core/tx/long_tx_service/commit_impl.cpp b/ydb/core/tx/long_tx_service/commit_impl.cpp
index e99ed9f2e36..3a2872bad07 100644
--- a/ydb/core/tx/long_tx_service/commit_impl.cpp
+++ b/ydb/core/tx/long_tx_service/commit_impl.cpp
@@ -44,7 +44,7 @@ namespace NLongTxService {
if (Services.LeaderPipeCache) {
Send(Services.LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
}
- TActor::PassAway();
+ TActor::PassAway();
}
void SendAllocateTxId() {
diff --git a/ydb/core/tx/mediator/execute_queue.cpp b/ydb/core/tx/mediator/execute_queue.cpp
index a6a9d4ec925..07740e9f826 100644
--- a/ydb/core/tx/mediator/execute_queue.cpp
+++ b/ydb/core/tx/mediator/execute_queue.cpp
@@ -137,7 +137,7 @@ namespace NTxMediator {
for (TVector<std::pair<TTabletId, std::size_t>>::const_iterator it = step->TabletsToTransaction.begin(), end = step->TabletsToTransaction.end(); it != end; ++it) {
if (activeTablet != it->first) {
if (activeTablet)
- SendStepToBucket<TEvTxMediator::TEvOoOTabletStep>(activeTablet, step->Step, currentTx, ctx);
+ SendStepToBucket<TEvTxMediator::TEvOoOTabletStep>(activeTablet, step->Step, currentTx, ctx);
activeTablet = it->first;
currentTx.clear();
}
@@ -147,7 +147,7 @@ namespace NTxMediator {
}
if (activeTablet)
- SendStepToBucket<TEvTxMediator::TEvOoOTabletStep>(activeTablet, step->Step, currentTx, ctx);
+ SendStepToBucket<TEvTxMediator::TEvOoOTabletStep>(activeTablet, step->Step, currentTx, ctx);
}
void Handle(TEvMediatorTimecast::TEvWatch::TPtr &ev, const TActorContext &ctx) {
diff --git a/ydb/core/tx/mediator/tablet_queue.cpp b/ydb/core/tx/mediator/tablet_queue.cpp
index a56bc32bcc3..b28179517ca 100644
--- a/ydb/core/tx/mediator/tablet_queue.cpp
+++ b/ydb/core/tx/mediator/tablet_queue.cpp
@@ -408,7 +408,7 @@ void TTxMediatorTabletQueue::TTabletEntry::MergeOutOfOrder(TStep *sx) {
++oooIt;
++planIt;
} else {
- Y_FAIL("Inconsistency: Plan TxId %" PRIu64 " > OutOfOrder TxId %" PRIu64, planIt->TxId, oooIt->TxId);
+ Y_FAIL("Inconsistency: Plan TxId %" PRIu64 " > OutOfOrder TxId %" PRIu64, planIt->TxId, oooIt->TxId);
}
}
OutOfOrder.erase(ox);
diff --git a/ydb/core/tx/message_seqno.h b/ydb/core/tx/message_seqno.h
index 270866c80ff..a0151301ee7 100644
--- a/ydb/core/tx/message_seqno.h
+++ b/ydb/core/tx/message_seqno.h
@@ -1,55 +1,55 @@
-#pragma once
-
-#include "defs.h"
-
+#pragma once
+
+#include "defs.h"
+
#include <util/stream/output.h>
-namespace NKikimr {
- // A helper for checking the order of messages sent by a tablet
- struct TMessageSeqNo {
- ui64 Generation;
- ui64 Round;
-
+namespace NKikimr {
+ // A helper for checking the order of messages sent by a tablet
+ struct TMessageSeqNo {
+ ui64 Generation;
+ ui64 Round;
+
TMessageSeqNo()
: Generation(0)
, Round(0)
{}
TMessageSeqNo(ui64 gen, ui64 round)
- : Generation(gen)
- , Round(round)
- {}
-
- operator bool () const {
- return Generation != 0;
- }
-
- bool operator == (const TMessageSeqNo& other) const {
- return Generation == other.Generation &&
- Round == other.Round;
- }
-
- bool operator != (const TMessageSeqNo& other) const {
- return !(*this == other);
- }
-
- bool operator < (const TMessageSeqNo& other) const {
- return Generation < other.Generation ||
- (Generation == other.Generation && Round < other.Round);
- }
-
- bool operator > (const TMessageSeqNo& other) const {
- return (other < *this);
- }
-
- bool operator <= (const TMessageSeqNo& other) const {
+ : Generation(gen)
+ , Round(round)
+ {}
+
+ operator bool () const {
+ return Generation != 0;
+ }
+
+ bool operator == (const TMessageSeqNo& other) const {
+ return Generation == other.Generation &&
+ Round == other.Round;
+ }
+
+ bool operator != (const TMessageSeqNo& other) const {
+ return !(*this == other);
+ }
+
+ bool operator < (const TMessageSeqNo& other) const {
+ return Generation < other.Generation ||
+ (Generation == other.Generation && Round < other.Round);
+ }
+
+ bool operator > (const TMessageSeqNo& other) const {
+ return (other < *this);
+ }
+
+ bool operator <= (const TMessageSeqNo& other) const {
return Generation < other.Generation ||
(Generation == other.Generation && Round <= other.Round);
- }
-
- bool operator >= (const TMessageSeqNo& other) const {
- return (other <= *this);
- }
+ }
+
+ bool operator >= (const TMessageSeqNo& other) const {
+ return (other <= *this);
+ }
TMessageSeqNo& operator ++ () {
if (0 == ++Round) {
@@ -61,9 +61,9 @@ namespace NKikimr {
void Out(IOutputStream& o) const {
o << Generation << ":" << Round;
}
- };
-
-}
+ };
+
+}
template<>
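The message_seqno.h file above defines TMessageSeqNo, ordered lexicographically by (Generation, Round); operator++ bumps Generation when Round wraps to zero. A minimal standalone sketch of that ordering, using std::uint64_t in place of ui64 (an assumption for portability, not the ydb type itself):

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>

// Standalone sketch of the (Generation, Round) ordering shown in message_seqno.h.
struct SeqNo {
    std::uint64_t Generation = 0;
    std::uint64_t Round = 0;

    // Lexicographic order: generation first, then round within a generation.
    bool operator<(const SeqNo& other) const {
        return std::tie(Generation, Round) < std::tie(other.Generation, other.Round);
    }

    // Roll Round over into the next Generation, mirroring operator++ above.
    SeqNo& operator++() {
        if (0 == ++Round) {
            ++Generation;
        }
        return *this;
    }
};

int main() {
    SeqNo a{1, 5}, b{2, 0};
    std::cout << std::boolalpha << (a < b) << "\n";       // true: generation 1 < generation 2
    ++a;
    std::cout << a.Generation << ":" << a.Round << "\n";  // 1:6
    return 0;
}
```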
diff --git a/ydb/core/tx/scheme_board/cache.cpp b/ydb/core/tx/scheme_board/cache.cpp
index 6e6246c0061..8e778a4a1d8 100644
--- a/ydb/core/tx/scheme_board/cache.cpp
+++ b/ydb/core/tx/scheme_board/cache.cpp
@@ -205,7 +205,7 @@ namespace {
entry.KesusInfo.Drop();
entry.SolomonVolumeInfo.Drop();
entry.PQGroupInfo.Drop();
- entry.OlapStoreInfo.Drop();
+ entry.OlapStoreInfo.Drop();
entry.OlapTableInfo.Drop();
entry.CdcStreamInfo.Drop();
entry.SequenceInfo.Drop();
@@ -704,7 +704,7 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
KesusInfo.Drop();
SolomonVolumeInfo.Drop();
PQGroupInfo.Drop();
- OlapStoreInfo.Drop();
+ OlapStoreInfo.Drop();
OlapTableInfo.Drop();
CdcStreamInfo.Drop();
SequenceInfo.Drop();
@@ -762,16 +762,16 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
}
void FillTableInfoFromOlapStore(const NKikimrSchemeOp::TPathDescription& pathDesc) {
- if (pathDesc.HasDomainDescription()) {
- DomainInfo = new NSchemeCache::TDomainInfo(pathDesc.GetDomainDescription());
- }
- }
+ if (pathDesc.HasDomainDescription()) {
+ DomainInfo = new NSchemeCache::TDomainInfo(pathDesc.GetDomainDescription());
+ }
+ }
void FillTableInfoFromOlapTable(const NKikimrSchemeOp::TPathDescription& pathDesc) {
const auto& desc = pathDesc.GetColumnTableDescription();
THashMap<TString, ui32> nameToId;
- const auto& schemaDesc = desc.GetSchema();
+ const auto& schemaDesc = desc.GetSchema();
for (const auto& columnDesc : schemaDesc.GetColumns()) {
auto& column = Columns[columnDesc.GetId()];
column.Id = columnDesc.GetId();
@@ -1371,9 +1371,9 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
break;
case NKikimrSchemeOp::EPathTypeColumnTable:
Kind = TNavigate::KindOlapTable;
- if (Created) {
- FillTableInfoFromOlapTable(pathDesc);
- }
+ if (Created) {
+ FillTableInfoFromOlapTable(pathDesc);
+ }
FillInfo(Kind, OlapTableInfo, std::move(*pathDesc.MutableColumnTableDescription()));
if (OlapTableInfo->Description.HasColumnStorePathId()) {
auto& p = OlapTableInfo->Description.GetColumnStorePathId();
@@ -1555,13 +1555,13 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
}
entry.Kind = TNavigate::KindTable;
- if (target == NSysView::ISystemViewResolver::ETarget::OlapStore ||
- target == NSysView::ISystemViewResolver::ETarget::OlapTable)
- {
- // OLAP sys views are represented by OLAP tables
- entry.Kind =TNavigate::KindOlapTable;
- }
-
+ if (target == NSysView::ISystemViewResolver::ETarget::OlapStore ||
+ target == NSysView::ISystemViewResolver::ETarget::OlapTable)
+ {
+ // OLAP sys views are represented by OLAP tables
+ entry.Kind =TNavigate::KindOlapTable;
+ }
+
entry.Columns = std::move(schema->Columns);
if (entry.RequestType == TNavigate::TEntry::ERequestType::ByPath) {
@@ -1606,21 +1606,21 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
}
} else if (Kind == TNavigate::KindSubdomain || Kind == TNavigate::KindExtSubdomain) {
return FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::SubDomain);
- } else if (Kind == TNavigate::KindOlapStore) {
- FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapStore);
- entry.OlapStoreInfo = OlapStoreInfo;
- return;
- } else if (Kind == TNavigate::KindOlapTable) {
- FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapTable);
- entry.OlapStoreInfo = OlapStoreInfo;
- entry.OlapTableInfo = OlapTableInfo;
- return;
+ } else if (Kind == TNavigate::KindOlapStore) {
+ FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapStore);
+ entry.OlapStoreInfo = OlapStoreInfo;
+ return;
+ } else if (Kind == TNavigate::KindOlapTable) {
+ FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapTable);
+ entry.OlapStoreInfo = OlapStoreInfo;
+ entry.OlapTableInfo = OlapTableInfo;
+ return;
}
return SetError(context, entry, TNavigate::EStatus::PathErrorUnknown);
}
- const bool isTable = Kind == TNavigate::KindTable || Kind == TNavigate::KindOlapTable;
+ const bool isTable = Kind == TNavigate::KindTable || Kind == TNavigate::KindOlapTable;
if (entry.Operation == TNavigate::OpTable && !isTable) {
return SetError(context, entry, TNavigate::EStatus::PathNotTable);
}
@@ -1675,7 +1675,7 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
entry.KesusInfo = KesusInfo;
entry.SolomonVolumeInfo = SolomonVolumeInfo;
entry.PQGroupInfo = PQGroupInfo;
- entry.OlapStoreInfo = OlapStoreInfo;
+ entry.OlapStoreInfo = OlapStoreInfo;
entry.OlapTableInfo = OlapTableInfo;
entry.CdcStreamInfo = CdcStreamInfo;
entry.SequenceInfo = SequenceInfo;
@@ -1803,22 +1803,22 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
}
} else if (Kind == TNavigate::KindSubdomain || Kind == TNavigate::KindExtSubdomain) {
return FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::SubDomain);
- } else if (Kind == TNavigate::KindOlapStore) {
- FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapStore);
- // Add all shards of the OLAP store
- for (ui64 columnShard : OlapStoreInfo->Description.GetColumnShards()) {
- keyDesc.Partitions.push_back(TKeyDesc::TPartitionInfo(columnShard));
- keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
- }
- return;
- } else if (Kind == TNavigate::KindOlapTable) {
- FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapTable);
- // Add all shards of the OLAP table
- for (ui64 columnShard : OlapTableInfo->Description.GetSharding().GetColumnShards()) {
- keyDesc.Partitions.push_back(TKeyDesc::TPartitionInfo(columnShard));
- keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
- }
- return;
+ } else if (Kind == TNavigate::KindOlapStore) {
+ FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapStore);
+ // Add all shards of the OLAP store
+ for (ui64 columnShard : OlapStoreInfo->Description.GetColumnShards()) {
+ keyDesc.Partitions.push_back(TKeyDesc::TPartitionInfo(columnShard));
+ keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
+ }
+ return;
+ } else if (Kind == TNavigate::KindOlapTable) {
+ FillSystemViewEntry(context, entry, NSysView::ISystemViewResolver::ETarget::OlapTable);
+ // Add all shards of the OLAP table
+ for (ui64 columnShard : OlapTableInfo->Description.GetSharding().GetColumnShards()) {
+ keyDesc.Partitions.push_back(TKeyDesc::TPartitionInfo(columnShard));
+ keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
+ }
+ return;
}
return SetError(context, entry, TResolve::EStatus::PathErrorNotExist, TKeyDesc::EStatus::NotExists);
@@ -1849,12 +1849,12 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
++context->Request->ErrorCount;
}
}
- } else if (OlapTableInfo) {
- // TODO: return proper partitioning info (KIKIMR-11069)
- for (ui64 columnShard : OlapTableInfo->Description.GetSharding().GetColumnShards()) {
+ } else if (OlapTableInfo) {
+ // TODO: return proper partitioning info (KIKIMR-11069)
+ for (ui64 columnShard : OlapTableInfo->Description.GetSharding().GetColumnShards()) {
keyDesc.Partitions.push_back(TKeyDesc::TPartitionInfo(columnShard));
- keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
- }
+ keyDesc.Partitions.back().Range = TKeyDesc::TPartitionRangeInfo();
+ }
} else {
if (Partitioning) {
FillRangePartitioning(keyDesc.Range, keyDesc.Partitions);
@@ -1930,10 +1930,10 @@ class TSchemeCache: public TMonitorableActor<TSchemeCache> {
// PQ specific
TIntrusivePtr<TNavigate::TPQGroupInfo> PQGroupInfo;
- // OlapStore specific
- TIntrusivePtr<TNavigate::TOlapStoreInfo> OlapStoreInfo;
+ // OlapStore specific
+ TIntrusivePtr<TNavigate::TOlapStoreInfo> OlapStoreInfo;
TIntrusivePtr<TNavigate::TOlapTableInfo> OlapTableInfo;
-
+
// CDC specific
TIntrusivePtr<TNavigate::TCdcStreamInfo> CdcStreamInfo;
diff --git a/ydb/core/tx/scheme_cache/scheme_cache.h b/ydb/core/tx/scheme_cache/scheme_cache.h
index 0615be969aa..26a07897a11 100644
--- a/ydb/core/tx/scheme_cache/scheme_cache.h
+++ b/ydb/core/tx/scheme_cache/scheme_cache.h
@@ -174,11 +174,11 @@ struct TSchemeCacheNavigate {
NKikimrSchemeOp::TSolomonVolumeDescription Description;
};
- struct TOlapStoreInfo : public TAtomicRefCount<TOlapStoreInfo> {
- EKind Kind = KindUnknown;
+ struct TOlapStoreInfo : public TAtomicRefCount<TOlapStoreInfo> {
+ EKind Kind = KindUnknown;
NKikimrSchemeOp::TColumnStoreDescription Description;
- };
-
+ };
+
struct TOlapTableInfo : public TAtomicRefCount<TOlapTableInfo> {
EKind Kind = KindUnknown;
NKikimrSchemeOp::TColumnTableDescription Description;
@@ -240,7 +240,7 @@ struct TSchemeCacheNavigate {
TIntrusiveConstPtr<TRtmrVolumeInfo> RTMRVolumeInfo;
TIntrusiveConstPtr<TKesusInfo> KesusInfo;
TIntrusiveConstPtr<TSolomonVolumeInfo> SolomonVolumeInfo;
- TIntrusiveConstPtr<TOlapStoreInfo> OlapStoreInfo;
+ TIntrusiveConstPtr<TOlapStoreInfo> OlapStoreInfo;
TIntrusiveConstPtr<TOlapTableInfo> OlapTableInfo;
TIntrusiveConstPtr<TCdcStreamInfo> CdcStreamInfo;
TIntrusiveConstPtr<TSequenceInfo> SequenceInfo;
@@ -522,7 +522,7 @@ public:
{}
};
};
-
+
inline TActorId MakeSchemeCacheID() {
return TActorId(0, TStringBuf("SchmCcheSrv"));
}
diff --git a/ydb/core/tx/schemeshard/schemeshard.h b/ydb/core/tx/schemeshard/schemeshard.h
index bac07d2d432..d33173508ce 100644
--- a/ydb/core/tx/schemeshard/schemeshard.h
+++ b/ydb/core/tx/schemeshard/schemeshard.h
@@ -1,5 +1,5 @@
-#pragma once
-#include "defs.h"
+#pragma once
+#include "defs.h"
#include <ydb/core/base/path.h>
#include <ydb/core/base/storage_pools.h>
@@ -9,37 +9,37 @@
#include <ydb/core/protos/tx_scheme.pb.h>
#include <ydb/core/protos/flat_tx_scheme.pb.h>
#include <ydb/core/scheme/scheme_tablecell.h>
-
+
#include <library/cpp/deprecated/enum_codegen/enum_codegen.h>
-
+
#include "schemeshard_identificators.h"
-namespace NKikimr {
+namespace NKikimr {
namespace NSchemeShard {
-
+
static constexpr ui64 RootSchemeShardId = 0;
static constexpr ui64 RootPathId = 1;
struct TSchemeLimits;
struct TEvSchemeShard {
- enum EEv {
+ enum EEv {
EvModifySchemeTransaction = EventSpaceBegin(TKikimrEvents::ES_FLAT_TX_SCHEMESHARD), // 271122432
- EvModifySchemeTransactionResult = EvModifySchemeTransaction + 1 * 512,
- EvDescribeScheme,
- EvDescribeSchemeResult,
+ EvModifySchemeTransactionResult = EvModifySchemeTransaction + 1 * 512,
+ EvDescribeScheme,
+ EvDescribeSchemeResult,
EvFindTabletSubDomainPathId,
EvFindTabletSubDomainPathIdResult,
-
- EvInitRootShard = EvModifySchemeTransaction + 5 * 512,
- EvInitRootShardResult,
- EvUpdateConfig,
- EvUpdateConfigResult,
- EvNotifyTxCompletion,
- EvNotifyTxCompletionRegistered,
- EvNotifyTxCompletionResult,
- EvMeasureSelfResponseTime,
- EvWakeupToMeasureSelfResponseTime,
+
+ EvInitRootShard = EvModifySchemeTransaction + 5 * 512,
+ EvInitRootShardResult,
+ EvUpdateConfig,
+ EvUpdateConfigResult,
+ EvNotifyTxCompletion,
+ EvNotifyTxCompletionRegistered,
+ EvNotifyTxCompletionResult,
+ EvMeasureSelfResponseTime,
+ EvWakeupToMeasureSelfResponseTime,
EvInitTenantSchemeShard,
EvInitTenantSchemeShardResult, // 271125002
EvSyncTenantSchemeShard,
@@ -54,44 +54,44 @@ struct TEvSchemeShard {
EvPublishTenantResult, // 271125012
EvLogin,
EvLoginResult,
-
+
EvBackupDatashard = EvModifySchemeTransaction + 6 * 512,
EvBackupDatashardResult,
EvCancelTx,
EvCancelTxResult,
- EvEnd
- };
-
- static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_FLAT_TX_SCHEMESHARD), "expect EvEnd < EventSpaceEnd(TKikimrEvents::ES_FLAT_TX_SCHEMESHARD)");
-
- struct TEvModifySchemeTransaction : public TEventPB<TEvModifySchemeTransaction,
+ EvEnd
+ };
+
+ static_assert(EvEnd < EventSpaceEnd(TKikimrEvents::ES_FLAT_TX_SCHEMESHARD), "expect EvEnd < EventSpaceEnd(TKikimrEvents::ES_FLAT_TX_SCHEMESHARD)");
+
+ struct TEvModifySchemeTransaction : public TEventPB<TEvModifySchemeTransaction,
NKikimrScheme::TEvModifySchemeTransaction,
- EvModifySchemeTransaction> {
- TEvModifySchemeTransaction()
- {}
-
+ EvModifySchemeTransaction> {
+ TEvModifySchemeTransaction()
+ {}
+
TEvModifySchemeTransaction(ui64 txid, ui64 tabletId)
- {
- Record.SetTxId(txid);
- Record.SetTabletId(tabletId);
- }
-
+ {
+ Record.SetTxId(txid);
+ Record.SetTabletId(tabletId);
+ }
+
TString ToString() const {
- TStringStream str;
- str << "{TEvModifySchemeTransaction";
- if (Record.HasTxId()) {
- str << " txid# " << Record.GetTxId();
- }
- if (Record.HasTabletId()) {
- str << " TabletId# " << Record.GetTabletId();
- }
- str << "}";
- return str.Str();
- }
- };
-
+ TStringStream str;
+ str << "{TEvModifySchemeTransaction";
+ if (Record.HasTxId()) {
+ str << " txid# " << Record.GetTxId();
+ }
+ if (Record.HasTabletId()) {
+ str << " TabletId# " << Record.GetTabletId();
+ }
+ str << "}";
+ return str.Str();
+ }
+ };
+
struct TEvCancelTx
: public TEventPB<TEvCancelTx,
NKikimrScheme::TEvCancelTx,
@@ -112,28 +112,28 @@ struct TEvSchemeShard {
};
using EStatus = NKikimrScheme::EStatus;
-
- struct TEvModifySchemeTransactionResult : public TEventPB<TEvModifySchemeTransactionResult,
+
+ struct TEvModifySchemeTransactionResult : public TEventPB<TEvModifySchemeTransactionResult,
NKikimrScheme::TEvModifySchemeTransactionResult,
- EvModifySchemeTransactionResult> {
-
- TEvModifySchemeTransactionResult()
- {}
-
+ EvModifySchemeTransactionResult> {
+
+ TEvModifySchemeTransactionResult()
+ {}
+
TEvModifySchemeTransactionResult(TTxId txid, TTabletId schemeshardId) {
Record.SetTxId(ui64(txid));
Record.SetSchemeshardId(ui64(schemeshardId));
}
- TEvModifySchemeTransactionResult(EStatus status, ui64 txid, ui64 schemeshardId, const TStringBuf& reason = TStringBuf())
+ TEvModifySchemeTransactionResult(EStatus status, ui64 txid, ui64 schemeshardId, const TStringBuf& reason = TStringBuf())
: TEvModifySchemeTransactionResult(TTxId(txid), TTabletId(schemeshardId))
- {
- Record.SetStatus(status);
+ {
+ Record.SetStatus(status);
if (reason.size() > 0) {
Record.SetReason(reason.data(), reason.size());
- }
- }
-
+ }
+ }
+
bool IsAccepted() const {
return Record.GetReason().empty() && (Record.GetStatus() == EStatus::StatusAccepted);
}
@@ -164,75 +164,75 @@ struct TEvSchemeShard {
void SetPathId(ui64 pathId) { Record.SetPathId(pathId); }
TString ToString() const {
- TStringStream str;
- str << "{TEvModifySchemeTransactionResult";
- if (Record.HasStatus()) {
- str << " Status# " << Record.GetStatus();
- }
- if (Record.HasTxId()) {
- str << " txid# " << Record.GetTxId();
- }
- if (Record.HasReason()) {
- str << " Reason# " << Record.GetReason();
- }
- str << "}";
- return str.Str();
- }
- };
-
- struct TEvInitRootShard : public TEventPB<TEvInitRootShard, NKikimrTxScheme::TEvInitRootShard, EvInitRootShard> {
- TEvInitRootShard()
- {}
-
+ TStringStream str;
+ str << "{TEvModifySchemeTransactionResult";
+ if (Record.HasStatus()) {
+ str << " Status# " << Record.GetStatus();
+ }
+ if (Record.HasTxId()) {
+ str << " txid# " << Record.GetTxId();
+ }
+ if (Record.HasReason()) {
+ str << " Reason# " << Record.GetReason();
+ }
+ str << "}";
+ return str.Str();
+ }
+ };
+
+ struct TEvInitRootShard : public TEventPB<TEvInitRootShard, NKikimrTxScheme::TEvInitRootShard, EvInitRootShard> {
+ TEvInitRootShard()
+ {}
+
TEvInitRootShard(const TActorId& source, ui32 rootTag, const TString& rootTagName)
- {
+ {
ActorIdToProto(source, Record.MutableSource());
- Record.SetRootTag(rootTag);
- Record.SetRootTagName(rootTagName);
- }
- };
-
- struct TEvInitRootShardResult : public TEventPB<TEvInitRootShardResult,
- NKikimrTxScheme::TEvInitRootShardResult, EvInitRootShardResult> {
- enum EStatus {
- StatusUnknown,
- StatusSuccess,
- StatusAlreadyInitialized,
- StatusBadArgument
- };
-
- TEvInitRootShardResult()
- {}
-
- TEvInitRootShardResult(ui64 origin, EStatus status)
- {
- Record.SetOrigin(origin);
- Record.SetStatus(status);
- }
- };
-
- struct TEvDescribeScheme : public TEventPB<TEvDescribeScheme,
+ Record.SetRootTag(rootTag);
+ Record.SetRootTagName(rootTagName);
+ }
+ };
+
+ struct TEvInitRootShardResult : public TEventPB<TEvInitRootShardResult,
+ NKikimrTxScheme::TEvInitRootShardResult, EvInitRootShardResult> {
+ enum EStatus {
+ StatusUnknown,
+ StatusSuccess,
+ StatusAlreadyInitialized,
+ StatusBadArgument
+ };
+
+ TEvInitRootShardResult()
+ {}
+
+ TEvInitRootShardResult(ui64 origin, EStatus status)
+ {
+ Record.SetOrigin(origin);
+ Record.SetStatus(status);
+ }
+ };
+
+ struct TEvDescribeScheme : public TEventPB<TEvDescribeScheme,
NKikimrSchemeOp::TDescribePath,
- EvDescribeScheme> {
- TEvDescribeScheme()
- {}
-
+ EvDescribeScheme> {
+ TEvDescribeScheme()
+ {}
+
TEvDescribeScheme(const NKikimrSchemeOp::TDescribePath& describePath)
{
Record.CopyFrom(describePath);
}
TEvDescribeScheme(const TString& path)
- {
- Record.SetPath(path);
- }
-
- TEvDescribeScheme(ui64 tabletId, ui64 pathId)
- {
- Record.SetSchemeshardId(tabletId);
- Record.SetPathId(pathId);
- }
-
+ {
+ Record.SetPath(path);
+ }
+
+ TEvDescribeScheme(ui64 tabletId, ui64 pathId)
+ {
+ Record.SetSchemeshardId(tabletId);
+ Record.SetPathId(pathId);
+ }
+
TEvDescribeScheme(TTableId tableId)
{
Record.SetSchemeshardId(tableId.PathId.OwnerId);
@@ -244,22 +244,22 @@ struct TEvSchemeShard {
Record.SetSchemeshardId(pathId.OwnerId);
Record.SetPathId(pathId.LocalPathId);
}
- };
-
+ };
+
struct TEvDescribeSchemeResult : public TEventPreSerializedPB<TEvDescribeSchemeResult,
NKikimrScheme::TEvDescribeSchemeResult,
EvDescribeSchemeResult> {
TEvDescribeSchemeResult() = default;
-
+
TEvDescribeSchemeResult(const TString& path, ui64 pathOwner, TPathId pathId)
- {
- Record.SetPath(path);
+ {
+ Record.SetPath(path);
Record.SetPathOwner(pathOwner);
Record.SetPathId(pathId.LocalPathId);
Record.SetPathOwnerId(pathId.OwnerId);
- }
- };
-
+ }
+ };
+
struct TEvDescribeSchemeResultBuilder : TEvDescribeSchemeResult {
using TBase::Record;
@@ -271,38 +271,38 @@ struct TEvSchemeShard {
}
};
- struct TEvNotifyTxCompletion : public TEventPB<TEvNotifyTxCompletion,
+ struct TEvNotifyTxCompletion : public TEventPB<TEvNotifyTxCompletion,
NKikimrScheme::TEvNotifyTxCompletion,
- EvNotifyTxCompletion> {
- explicit TEvNotifyTxCompletion(ui64 txId = 0)
- {
- Record.SetTxId(txId);
- }
- };
-
- struct TEvNotifyTxCompletionRegistered : public TEventPB<TEvNotifyTxCompletionRegistered,
+ EvNotifyTxCompletion> {
+ explicit TEvNotifyTxCompletion(ui64 txId = 0)
+ {
+ Record.SetTxId(txId);
+ }
+ };
+
+ struct TEvNotifyTxCompletionRegistered : public TEventPB<TEvNotifyTxCompletionRegistered,
NKikimrScheme::TEvNotifyTxCompletionRegistered,
- EvNotifyTxCompletionRegistered> {
- explicit TEvNotifyTxCompletionRegistered(ui64 txId = 0)
- {
- Record.SetTxId(txId);
- }
- };
-
- struct TEvNotifyTxCompletionResult : public TEventPB<TEvNotifyTxCompletionResult,
+ EvNotifyTxCompletionRegistered> {
+ explicit TEvNotifyTxCompletionRegistered(ui64 txId = 0)
+ {
+ Record.SetTxId(txId);
+ }
+ };
+
+ struct TEvNotifyTxCompletionResult : public TEventPB<TEvNotifyTxCompletionResult,
NKikimrScheme::TEvNotifyTxCompletionResult,
- EvNotifyTxCompletionResult> {
- explicit TEvNotifyTxCompletionResult(ui64 txId = 0)
- {
- Record.SetTxId(txId);
- }
- };
-
- struct TEvMeasureSelfResponseTime : public TEventLocal<TEvMeasureSelfResponseTime, EvMeasureSelfResponseTime> {
- };
-
- struct TEvWakeupToMeasureSelfResponseTime : public TEventLocal<TEvWakeupToMeasureSelfResponseTime, EvWakeupToMeasureSelfResponseTime> {
- };
+ EvNotifyTxCompletionResult> {
+ explicit TEvNotifyTxCompletionResult(ui64 txId = 0)
+ {
+ Record.SetTxId(txId);
+ }
+ };
+
+ struct TEvMeasureSelfResponseTime : public TEventLocal<TEvMeasureSelfResponseTime, EvMeasureSelfResponseTime> {
+ };
+
+ struct TEvWakeupToMeasureSelfResponseTime : public TEventLocal<TEvWakeupToMeasureSelfResponseTime, EvWakeupToMeasureSelfResponseTime> {
+ };
struct TEvInitTenantSchemeShard: public TEventPB<TEvInitTenantSchemeShard,
NKikimrScheme::TEvInitTenantSchemeShard,
@@ -536,9 +536,9 @@ struct TEvSchemeShard {
struct TEvLoginResult : TEventPB<TEvLoginResult, NKikimrScheme::TEvLoginResult, EvLoginResult> {
TEvLoginResult() = default;
};
-};
-
-}
+};
+
+}
IActor* CreateFlatTxSchemeShard(const TActorId &tablet, TTabletStorageInfo *info);
bool PartitionConfigHasExternalBlobsEnabled(const NKikimrSchemeOp::TPartitionConfig &partitionConfig);
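The schemeshard.h hunks restore the TEvSchemeShard::EEv enum, whose values are laid out as 512-wide blocks offset from the first event of the ES_FLAT_TX_SCHEMESHARD event space (hence the 271122432 comment). A standalone sketch of that layout; treating EventSpaceBegin as `space << 16` and the space id as 4137 are assumptions inferred from the inline comment, not taken from the ydb headers:

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical illustration of the event-id layout used in the enum above:
// each event space is a contiguous range, and related events are grouped
// into 512-wide blocks offset from the first event of the space.
constexpr std::uint32_t EventSpaceBegin(std::uint32_t space) { return space << 16; }  // assumption
constexpr std::uint32_t ES_FLAT_TX_SCHEMESHARD = 4137;  // assumption: chosen so the base matches 271122432

int main() {
    constexpr auto base = EventSpaceBegin(ES_FLAT_TX_SCHEMESHARD);
    std::cout << "EvModifySchemeTransaction       = " << base << "\n";            // 271122432
    std::cout << "EvModifySchemeTransactionResult = " << base + 1 * 512 << "\n";
    std::cout << "EvInitRootShard                 = " << base + 5 * 512 << "\n";
    std::cout << "EvBackupDatashard               = " << base + 6 * 512 << "\n";
    return 0;
}
```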
diff --git a/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp b/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
index fc907940932..855553cddc8 100644
--- a/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
@@ -15,9 +15,9 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
: TRwTxBase(self)
, Ev(ev)
, ShardIdx(self->MakeLocalId(TLocalShardIdx(Ev->Get()->Record.GetTxId_Deprecated()))) // We use the TxId field as a cookie where we store shardIdx
- , TabletId(InvalidTabletId)
- , Status(Ev->Get()->Record.GetStatus())
- , HiveId(Ev->Get()->Record.GetOrigin())
+ , TabletId(InvalidTabletId)
+ , Status(Ev->Get()->Record.GetStatus())
+ , HiveId(Ev->Get()->Record.GetOrigin())
{
if (Ev->Get()->Record.HasShardOwnerId()) {
Y_VERIFY(Ev->Get()->Record.ShardLocalIdxSize() == 1);
@@ -32,7 +32,7 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
TTxType GetTxType() const override { return TXTYPE_FREE_TABLET_RESULT; }
void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
- if (Status != NKikimrProto::OK) {
+ if (Status != NKikimrProto::OK) {
if (Status == NKikimrProto::INVALID_OWNER) {
LOG_WARN_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Got DeleteTabletReply with Forward response from Hive " << HiveId << " to Hive " << ForwardToHiveId << " shardIdx " << ShardIdx);
@@ -40,13 +40,13 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
Self->ShardDeleter.RedirectDeleteRequest(HiveId, ForwardToHiveId, ShardIdx, Self->ShardInfos, ctx);
return;
}
- // WTF could happen that hive failed to delete the freaking tablet?
+ // WTF could happen that hive failed to delete the freaking tablet?
LOG_ALERT_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Got DeleteTabletReply from Hive " << HiveId << " shardIdx " << ShardIdx << " status " << Status);
return;
}
- // "Forget" the deleted shard
+ // "Forget" the deleted shard
if (Self->ShardInfos.contains(ShardIdx)) {
auto tabletType = Self->ShardInfos[ShardIdx].TabletType;
switch (tabletType) {
@@ -136,9 +136,9 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
}
TabletId = shardInfo.TabletID;
- Self->TabletIdToShardIdx[TabletId] = ShardIdx;
+ Self->TabletIdToShardIdx[TabletId] = ShardIdx;
- Self->ShardInfos.erase(ShardIdx);
+ Self->ShardInfos.erase(ShardIdx);
Self->DecrementPathDbRefCount(pathId, "shard deleted");
@@ -154,24 +154,24 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
NIceDb::TNiceDb db(txc.DB);
Self->PersistUnknownShardDeleted(db, ShardIdx);
}
- }
+ }
- void DoComplete(const TActorContext &ctx) override {
- if (Status == NKikimrProto::OK) {
+ void DoComplete(const TActorContext &ctx) override {
+ if (Status == NKikimrProto::OK) {
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Deleted shardIdx " << ShardIdx);
- Self->ShardDeleter.ShardDeleted(ShardIdx, ctx);
-
- if (TabletId != InvalidTabletId) {
+ Self->ShardDeleter.ShardDeleted(ShardIdx, ctx);
+
+ if (TabletId != InvalidTabletId) {
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Close pipe to deleted shardIdx " << ShardIdx << " tabletId " << TabletId);
Self->PipeClientCache->ForceClose(ctx, ui64(TabletId));
- }
+ }
}
}
-private:
+private:
TShardIdx ShardIdx;
TTabletId TabletId;
NKikimrProto::EReplyStatus Status;
diff --git a/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp b/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp
index c847a047ea0..e6d96356527 100644
--- a/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp
@@ -3,16 +3,16 @@
#include <util/stream/format.h>
-namespace NKikimr {
+namespace NKikimr {
namespace NSchemeShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
struct TSchemeShard::TTxDescribeScheme : public TSchemeShard::TRwTxBase {
const TActorId Sender;
const ui64 Cookie;
TPathDescriber PathDescriber;
-
+
THolder<TEvSchemeShard::TEvDescribeSchemeResultBuilder> Result;
TTxDescribeScheme(TSelf *self, TEvSchemeShard::TEvDescribeScheme::TPtr &ev)
@@ -20,8 +20,8 @@ struct TSchemeShard::TTxDescribeScheme : public TSchemeShard::TRwTxBase {
, Sender(ev->Sender)
, Cookie(ev->Cookie)
, PathDescriber(self, std::move(ev->Get()->Record))
- {}
-
+ {}
+
TTxType GetTxType() const override { return TXTYPE_DESCRIBE_SCHEME; }
void DoExecute(TTransactionContext& /*txc*/, const TActorContext& ctx) override {
@@ -42,13 +42,13 @@ struct TSchemeShard::TTxDescribeScheme : public TSchemeShard::TRwTxBase {
<< " describe pathId " << params.GetPathId()
<< " took " << HumanReadable(ExecuteDuration)
<< " result status " <<NKikimrScheme::EStatus_Name(Result->Record.GetStatus()));
- } else {
+ } else {
LOG_INFO_S(ctx, NKikimrServices::SCHEMESHARD_DESCRIBE,
"Tablet " << Self->TabletID()
<< " describe path \"" << params.GetPath() << "\""
<< " took " << HumanReadable(ExecuteDuration)
<< " result status " <<NKikimrScheme::EStatus_Name(Result->Record.GetStatus()));
- }
+ }
LOG_DEBUG_S(ctx, NKikimrServices::SCHEMESHARD_DESCRIBE,
"TTxDescribeScheme DoComplete"
@@ -58,10 +58,10 @@ struct TSchemeShard::TTxDescribeScheme : public TSchemeShard::TRwTxBase {
ctx.Send(Sender, std::move(Result), 0, Cookie);
}
-};
-
+};
+
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxDescribeScheme(TEvSchemeShard::TEvDescribeScheme::TPtr &ev) {
- return new TTxDescribeScheme(this, ev);
-}
-
-}}
+ return new TTxDescribeScheme(this, ev);
+}
+
+}}
diff --git a/ydb/core/tx/schemeshard/schemeshard__init.cpp b/ydb/core/tx/schemeshard/schemeshard__init.cpp
index 69dbde67b10..650506549e0 100644
--- a/ydb/core/tx/schemeshard/schemeshard__init.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__init.cpp
@@ -3,12 +3,12 @@
#include <ydb/core/tablet/tablet_exception.h>
#include <ydb/core/tablet_flat/flat_cxx_database.h>
#include <ydb/core/util/pb.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TSideEffects OnComplete;
TMemoryChanges MemChanges;
@@ -18,20 +18,20 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TDeque<TPathId> BlockStoreVolumesToClean;
TVector<ui64> ExportsToResume;
TVector<ui64> ImportsToResume;
-
- explicit TTxInit(TSelf *self)
- : TBase(self)
- {}
-
- bool CreateScheme(TTransactionContext &txc) {
+
+ explicit TTxInit(TSelf *self)
+ : TBase(self)
+ {}
+
+ bool CreateScheme(TTransactionContext &txc) {
if (!txc.DB.GetScheme().IsEmpty())
- return false;
-
+ return false;
+
NIceDb::TNiceDb(txc.DB).Materialize<Schema>();
-
- return true;
- }
-
+
+ return true;
+ }
+
void CollectObjectsToClean() {
THashSet<TPathId> underOperation;
for (auto& item : Self->TxInFlight) {
@@ -187,7 +187,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
rows.GetValue<Schema::Paths::ParentId>());
TString name = rows.GetValue<Schema::Paths::Name>();
-
+
TPathElement::EPathType pathType = (TPathElement::EPathType)rows.GetValue<Schema::Paths::PathType>();
TStepId stepCreated = rows.GetValueOrDefault<Schema::Paths::StepCreated>(InvalidStepId);
@@ -214,7 +214,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
stepCreated, txIdCreated, stepDropped, txIdDropped,
acl, lastTxId, dirAlterVer, userAttrsAlterVer, aclAlterVer);
}
-
+
if (!rows.Next()) {
return false;
}
@@ -1356,7 +1356,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TPathesRows pathesRows;
if (!LoadPathes(db, pathesRows)) {
return false;
- }
+ }
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxInit for Pathes"
@@ -1396,8 +1396,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Self->AttachChild(path);
Self->PathsById[path->PathId] = path;
}
- }
-
+ }
+
// Read user attrs
{
TUserAttrsRows userAttrsRows;
@@ -1454,7 +1454,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
// Read SubDomains
- {
+ {
TSchemeLimits rootLimits = TSchemeShard::DefaultLimits;
if (Self->PathsById.contains(Self->RootPathId()) && Self->IsDomainSchemeShard) {
@@ -1732,7 +1732,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
{
TTableRows tableRows;
if (!LoadTables(db, tableRows)) {
- return false;
+ return false;
}
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -1746,7 +1746,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Y_VERIFY_S(Self->PathsById.contains(pathId), "Path doesn't exist, pathId: " << pathId);
Y_VERIFY_S(Self->PathsById.at(pathId)->IsTable(), "Path is not a table, pathId: " << pathId);
Y_VERIFY_S(Self->Tables.FindPtr(pathId) == nullptr, "Table duplicated in DB, pathId: " << pathId);
-
+
TTableInfo::TPtr tableInfo = new TTableInfo();
tableInfo->NextColumnId = std::get<1>(rec);
tableInfo->AlterVersion = std::get<2>(rec);
@@ -1755,7 +1755,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
if (partitionConfig) {
auto& config = tableInfo->MutablePartitionConfig();
bool parseOk = ParseFromStringNoSizeLimit(config, partitionConfig);
- Y_VERIFY(parseOk);
+ Y_VERIFY(parseOk);
if (config.ColumnFamiliesSize() > 1) {
// Fix any incorrect legacy config at load time
@@ -1765,7 +1765,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
if (config.HasCrossDataCenterFollowerCount()) {
config.ClearFollowerCount();
}
- }
+ }
TString alterTabletFull = std::get<4>(rec);
TString alterTabletDiff = std::get<5>(rec);
@@ -1804,15 +1804,15 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Self->TTLEnabledTables[pathId] = tableInfo;
Self->TabletCounters->Simple()[COUNTER_TTL_ENABLED_TABLE_COUNT].Add(1);
}
- }
+ }
+
+ }
- }
-
- // Read table columns
- {
+ // Read table columns
+ {
TColumnRows columnRows;
if (!LoadColumns(db, columnRows)) {
- return false;
+ return false;
}
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -1836,12 +1836,12 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Y_VERIFY_S(Self->PathsById.contains(pathId), "Path doesn't exist, pathId: " << pathId);
Y_VERIFY_S(Self->PathsById.at(pathId)->IsTable(), "Path is not a table, pathId: " << pathId);
Y_VERIFY_S(Self->Tables.FindPtr(pathId), "Table doesn't exist, pathId: " << pathId);
-
+
TTableInfo::TPtr tableInfo = Self->Tables[pathId];
Y_VERIFY_S(colId < tableInfo->NextColumnId, "Column id should be less than NextColId"
<< ", columnId: " << colId
<< ", NextColId: " << tableInfo->NextColumnId);
-
+
TTableInfo::TColumn colInfo(colName, colId, typeId);
colInfo.KeyOrder = keyOrder;
colInfo.CreateVersion = createVersion;
@@ -1851,15 +1851,15 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
colInfo.DefaultValue = defaultValue;
colInfo.NotNull = notNull;
- tableInfo->Columns[colId] = colInfo;
+ tableInfo->Columns[colId] = colInfo;
+
+ if (colInfo.KeyOrder != (ui32)-1) {
+ tableInfo->KeyColumnIds.resize(Max<ui32>(tableInfo->KeyColumnIds.size(), colInfo.KeyOrder + 1));
+ tableInfo->KeyColumnIds[colInfo.KeyOrder] = colId;
+ }
+ }
+ }
- if (colInfo.KeyOrder != (ui32)-1) {
- tableInfo->KeyColumnIds.resize(Max<ui32>(tableInfo->KeyColumnIds.size(), colInfo.KeyOrder + 1));
- tableInfo->KeyColumnIds[colInfo.KeyOrder] = colId;
- }
- }
- }
-
// Read table columns' alters
{
TColumnRows columnRows;
@@ -1914,20 +1914,20 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
THashMap<TPathId, TShardIdx> fileStoreShards; // pathId -> shardIdx
THashMap<TPathId, TShardIdx> kesusShards; // pathId -> shardIdx
THashMap<TPathId, TVector<TShardIdx>> olapColumnShards;
- {
+ {
TShardsRows shards;
if (!LoadShards(db, shards)) {
- return false;
+ return false;
}
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxInit for Shards"
<< ", readed records: " << shards.size()
<< ", at schemeshard: " << Self->TabletID());
-
+
for (auto& rec: shards) {
TShardIdx idx = std::get<0>(rec);
-
+
LOG_TRACE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxInit for Shards"
<< ", read: " << idx
@@ -1993,13 +1993,13 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
}
- // Read partitions
- {
+ // Read partitions
+ {
TTablePartitionsRows tablePartitions;
if (!LoadTablePartitions(db, tablePartitions)) {
- return false;
+ return false;
}
-
+
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxInit for TablePartitions"
<< ", readed records: " << tablePartitions.size()
@@ -2007,7 +2007,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TPathId prevTableId;
TVector<TTableShardInfo> partitions;
-
+
const auto now = ctx.Now();
for (auto& rec: tablePartitions) {
TPathId tableId = std::get<0>(rec);
@@ -2019,20 +2019,20 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
if (tableId != prevTableId) {
if (prevTableId) {
- Y_VERIFY(!partitions.empty());
+ Y_VERIFY(!partitions.empty());
Y_VERIFY(Self->Tables.contains(prevTableId));
TTableInfo::TPtr tableInfo = Self->Tables.at(prevTableId);
Self->SetPartitioning(prevTableId, tableInfo, std::move(partitions));
- partitions.clear();
- }
+ partitions.clear();
+ }
prevTableId = tableId;
- }
-
- // TODO: check that table exists
- if (partitions.size() <= id) {
- partitions.resize(id+1);
- }
+ }
+
+ // TODO: check that table exists
+ if (partitions.size() <= id) {
+ partitions.resize(id+1);
+ }
partitions[id] = TTableShardInfo(datashardIdx, rangeEnd, lastCondErase, nextCondErase);
@@ -2047,18 +2047,18 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Self->TabletCounters->Percentile()[COUNTER_NUM_SHARDS_BY_TTL_LAG].IncrementFor(lag->Seconds());
}
- // TODO: check rangeEnd validity
- // TODO: check datashard idx existence
- }
-
+ // TODO: check rangeEnd validity
+ // TODO: check datashard idx existence
+ }
+
if (prevTableId) {
- Y_VERIFY(!partitions.empty());
+ Y_VERIFY(!partitions.empty());
Y_VERIFY(Self->Tables.contains(prevTableId));
TTableInfo::TPtr tableInfo = Self->Tables.at(prevTableId);
Self->SetPartitioning(prevTableId, tableInfo, std::move(partitions));
- }
- }
-
+ }
+ }
+
// Read partition config patches
{
TTableShardPartitionConfigRows tablePartitions;
@@ -2155,8 +2155,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
stats.RangeReads = rowSet.GetValue<Schema::TablePartitionStats::RangeReads>();
stats.RangeReadRows = rowSet.GetValue<Schema::TablePartitionStats::RangeReadRows>();
- TInstant now = AppData(ctx)->TimeProvider->Now();
- stats.SetCurrentRawCpuUsage(rowSet.GetValue<Schema::TablePartitionStats::CPU>(), now);
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+ stats.SetCurrentRawCpuUsage(rowSet.GetValue<Schema::TablePartitionStats::CPU>(), now);
stats.Memory = rowSet.GetValue<Schema::TablePartitionStats::Memory>();
stats.Network = rowSet.GetValue<Schema::TablePartitionStats::Network>();
stats.Storage = rowSet.GetValue<Schema::TablePartitionStats::Storage>();
@@ -3099,15 +3099,15 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TVector<TOperationId> splitOpIds;
TVector<TOperationId> forceDropOpIds;
THashSet<TPathId> pathsUnderOperation;
- // Read in-flight txid
- {
+ // Read in-flight txid
+ {
auto txInFlightRowset = db.Table<Schema::TxInFlightV2>().Range().Select();
- if (!txInFlightRowset.IsReady())
- return false;
- while (!txInFlightRowset.EndOfSet()) {
+ if (!txInFlightRowset.IsReady())
+ return false;
+ while (!txInFlightRowset.EndOfSet()) {
auto operationId = TOperationId(txInFlightRowset.GetValue<Schema::TxInFlightV2::TxId>(),
txInFlightRowset.GetValue<Schema::TxInFlightV2::TxPartId>());
-
+
TTxState& txState = Self->TxInFlight[operationId];
txState.TxType = (TTxState::ETxType)txInFlightRowset.GetValue<Schema::TxInFlightV2::TxType>();
@@ -3128,7 +3128,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
txState.DataTotalSize = txInFlightRowset.GetValueOrDefault<Schema::TxInFlightV2::DataTotalSize>(0);
txState.Cancel = txInFlightRowset.GetValueOrDefault<Schema::TxInFlightV2::CancelBackup>(false);
txState.BuildIndexId = txInFlightRowset.GetValueOrDefault<Schema::TxInFlightV2::BuildIndexId>();
-
+
txState.SourcePathId = TPathId(txInFlightRowset.GetValueOrDefault<Schema::TxInFlightV2::SourceOwnerId>(),
txInFlightRowset.GetValueOrDefault<Schema::TxInFlightV2::SourceLocalPathId>());
@@ -3161,22 +3161,22 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Y_VERIFY(subDomainInfo->GetAlter());
}
} else if (txState.TxType == TTxState::TxSplitTablePartition || txState.TxType == TTxState::TxMergeTablePartition) {
- Y_VERIFY(!extraData.empty(), "Split Tx must have non-empty split description");
+ Y_VERIFY(!extraData.empty(), "Split Tx must have non-empty split description");
txState.SplitDescription = std::make_shared<NKikimrTxDataShard::TSplitMergeDescription>();
- bool deserializeRes = ParseFromStringNoSizeLimit(*txState.SplitDescription, extraData);
- Y_VERIFY(deserializeRes);
+ bool deserializeRes = ParseFromStringNoSizeLimit(*txState.SplitDescription, extraData);
+ Y_VERIFY(deserializeRes);
splitOpIds.push_back(operationId);
-
+
Y_VERIFY(Self->Tables.contains(txState.TargetPathId));
TTableInfo::TPtr tableInfo = Self->Tables.at(txState.TargetPathId);
tableInfo->RegisterSplitMegreOp(operationId, txState);
- } else if (txState.TxType == TTxState::TxAlterTable) {
- if (txState.State <= TTxState::Propose) {
- // If state is >=Propose then alter has already been applied to the table
- // and AlterData should be cleared
-
+ } else if (txState.TxType == TTxState::TxAlterTable) {
+ if (txState.State <= TTxState::Propose) {
+ // If state is >=Propose then alter has already been applied to the table
+ // and AlterData should be cleared
+
TPathId tablePathId = txState.TargetPathId;
-
+
Y_VERIFY_S(Self->PathsById.contains(tablePathId), "Path doesn't exist, pathId: " << tablePathId);
Y_VERIFY_S(Self->PathsById.at(tablePathId)->IsTable(), "Path is not a table, pathId: " << tablePathId);
Y_VERIFY_S(Self->Tables.FindPtr(tablePathId), "Table doesn't exist, pathId: " << tablePathId);
@@ -3185,8 +3185,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Y_VERIFY(Self->Tables.contains(tablePathId));
TTableInfo::TPtr tableInfo = Self->Tables.at(tablePathId);
tableInfo->InitAlterData();
- tableInfo->DeserializeAlterExtraData(extraData);
- }
+ tableInfo->DeserializeAlterExtraData(extraData);
+ }
} else if (txState.TxType == TTxState::TxBackup || txState.TxType == TTxState::TxRestore) {
auto byShardBackupStatus = db.Table<Schema::ShardBackupStatus>().Range(operationId.GetTxId()).Select();
auto byMigratedShardBackupStatus = db.Table<Schema::MigratedShardBackupStatus>().Range(operationId.GetTxId()).Select();
@@ -3214,8 +3214,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
if (!path->UserAttrs->AlterData) {
path->UserAttrs->AlterData = new TUserAttributes(path->UserAttrs->AlterVersion + 1);
}
- }
-
+ }
+
Y_VERIFY(txState.TxType != TTxState::TxInvalid);
Y_VERIFY(txState.State != TTxState::Invalid);
@@ -3259,11 +3259,11 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
ISubOperationBase::TPtr part = operation->RestorePart(txState.TxType, txState.State);
operation->AddPart(part);
- if (!txInFlightRowset.Next())
- return false;
- }
- }
-
+ if (!txInFlightRowset.Next())
+ return false;
+ }
+ }
+
// Read tx's shards
{
TTxShardsRows txShardsRows;
@@ -3344,30 +3344,30 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
srcPath->PathState = TPathElement::EPathState::EPathStateCopying;
srcPath->DbRefCount++;
}
- }
+ }
}
}
- // After all shard operations are loaded we can fill range ends for shards participating in split operations
+ // After all shard operations are loaded we can fill range ends for shards participating in split operations
for (TOperationId opId : splitOpIds) {
THashMap<TShardIdx, TString> shardIdxToRangeEnd;
TTxState* txState = Self->FindTx(opId);
Y_VERIFY_S(txState, "No txState for split/merge opId, txId: " << opId.GetTxId());
- Y_VERIFY(txState->SplitDescription);
- for (ui32 i = 0; i < txState->SplitDescription->DestinationRangesSize(); ++i) {
- const auto& dst = txState->SplitDescription->GetDestinationRanges(i);
+ Y_VERIFY(txState->SplitDescription);
+ for (ui32 i = 0; i < txState->SplitDescription->DestinationRangesSize(); ++i) {
+ const auto& dst = txState->SplitDescription->GetDestinationRanges(i);
auto localShardIdx = TLocalShardIdx(dst.GetShardIdx());
auto shardIdx = Self->MakeLocalId(localShardIdx);
- shardIdxToRangeEnd[shardIdx] = dst.GetKeyRangeEnd();
- }
- for (TTxState::TShardOperation& shardOp : txState->Shards) {
- if (shardOp.Operation == TTxState::CreateParts) {
+ shardIdxToRangeEnd[shardIdx] = dst.GetKeyRangeEnd();
+ }
+ for (TTxState::TShardOperation& shardOp : txState->Shards) {
+ if (shardOp.Operation == TTxState::CreateParts) {
Y_VERIFY(shardIdxToRangeEnd.contains(shardOp.Idx));
shardOp.RangeEnd = shardIdxToRangeEnd.at(shardOp.Idx);
- }
- }
- }
-
+ }
+ }
+ }
+
// After all txs and splitTxs have been loaded and processed, it is valid to initiate force drops
for (TOperationId opId: forceDropOpIds) {
TTxState* txState = Self->FindTx(opId);
@@ -3376,16 +3376,16 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Self->MarkAsDroping(pathes, opId.GetTxId(), ctx);
}
- // Read txid dependencies
- {
+ // Read txid dependencies
+ {
auto txDependenciesRowset = db.Table<Schema::TxDependencies>().Range().Select();
- if (!txDependenciesRowset.IsReady())
- return false;
+ if (!txDependenciesRowset.IsReady())
+ return false;
- while (!txDependenciesRowset.EndOfSet()) {
+ while (!txDependenciesRowset.EndOfSet()) {
auto txId = txDependenciesRowset.GetValue<Schema::TxDependencies::TxId>();
auto dependentTxId = txDependenciesRowset.GetValue<Schema::TxDependencies::DependentTxId>();
-
+
Y_VERIFY_S(Self->Operations.contains(txId), "Parent operation is not found"
<< ", parent txId " << txId
<< ", dependentTxId " << dependentTxId);
@@ -3393,20 +3393,20 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Y_VERIFY_S(Self->Operations.contains(dependentTxId), "Dependent operation is not found"
<< ", dependent txId:" << dependentTxId
<< ", parent txId " << txId);
-
+
Self->Operations.at(txId)->DependentOperations.insert(dependentTxId);
Self->Operations.at(dependentTxId)->WaitOperations.insert(txId);
-
- if (!txDependenciesRowset.Next())
- return false;
- }
- }
-
- // Read shards to delete
- {
+
+ if (!txDependenciesRowset.Next())
+ return false;
+ }
+ }
+
+ // Read shards to delete
+ {
TShardsToDeleteRows shardsToDelete;
if (!LoadShardsToDelete(db, shardsToDelete)) {
- return false;
+ return false;
}
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -3416,9 +3416,9 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
for (auto& rec: shardsToDelete) {
OnComplete.DeleteShard(std::get<0>(rec));
- }
- }
-
+ }
+ }
+
// Read backup settings
{
TBackupSettingsRows backupSettings;
@@ -3645,13 +3645,13 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
}
- // Other persistent params
- for (const auto& si : Self->ShardInfos) {
+ // Other persistent params
+ for (const auto& si : Self->ShardInfos) {
auto shardIdx = si.first;
auto tabletId = si.second.TabletID;
auto pathId = si.second.PathId;
Self->TabletIdToShardIdx[tabletId] = shardIdx;
-
+
Y_VERIFY(Self->PathsById.contains(pathId));
auto path = Self->PathsById.at(pathId); // path shouldn't be dropped?
path->IncShardsInside();
@@ -3659,21 +3659,21 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
auto domainInfo = Self->ResolveDomainInfo(pathId); // domain shouldn't be dropped?
domainInfo->AddInternalShard(shardIdx);
- switch (si.second.TabletType) {
+ switch (si.second.TabletType) {
case ETabletType::DataShard:
- {
- const auto table = Self->Tables.FindPtr(pathId);
- if (tabletId != InvalidTabletId) {
+ {
+ const auto table = Self->Tables.FindPtr(pathId);
+ if (tabletId != InvalidTabletId) {
bool active = !path->Dropped() &&
!path->PlannedToDrop() &&
table && (*table)->GetStats().PartitionStats.contains(shardIdx);
- Self->TabletCounters->Simple()[active ? COUNTER_TABLE_SHARD_ACTIVE_COUNT : COUNTER_TABLE_SHARD_INACTIVE_COUNT].Add(1);
- }
- break;
- }
+ Self->TabletCounters->Simple()[active ? COUNTER_TABLE_SHARD_ACTIVE_COUNT : COUNTER_TABLE_SHARD_INACTIVE_COUNT].Add(1);
+ }
+ break;
+ }
case ETabletType::PersQueue:
- Self->TabletCounters->Simple()[COUNTER_PQ_SHARD_COUNT].Add(1);
- break;
+ Self->TabletCounters->Simple()[COUNTER_PQ_SHARD_COUNT].Add(1);
+ break;
case ETabletType::PersQueueReadBalancer:
Self->TabletCounters->Simple()[COUNTER_PQ_RB_SHARD_COUNT].Add(1);
break;
@@ -3730,16 +3730,16 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
<< ", pathId: " << pathId
<< ", shardId: " << shardIdx
<< ", tabletId: " << tabletId);
- }
- }
-
+ }
+ }
+
for (const auto& item : Self->PathsById) {
auto& path = item.second;
if (path->Dropped()) {
- continue;
- }
-
+ continue;
+ }
+
TPathElement::TPtr parent = Self->PathsById.at(path->ParentPathId);
TPathElement::TPtr inclusiveDomainPath = Self->PathsById.at(Self->ResolveDomainId(parent)); // take upper domain id info even when the path is domain by itself
TSubDomainInfo::TPtr inclusivedomainInfo = Self->ResolveDomainInfo(parent);
@@ -3782,11 +3782,11 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
if (path->IsDirectory()) {
- Self->TabletCounters->Simple()[COUNTER_DIR_COUNT].Add(1);
+ Self->TabletCounters->Simple()[COUNTER_DIR_COUNT].Add(1);
} else if (path->IsTable()) {
- Self->TabletCounters->Simple()[COUNTER_TABLE_COUNT].Add(1);
+ Self->TabletCounters->Simple()[COUNTER_TABLE_COUNT].Add(1);
} else if (path->IsPQGroup()) {
- Self->TabletCounters->Simple()[COUNTER_PQ_GROUP_COUNT].Add(1);
+ Self->TabletCounters->Simple()[COUNTER_PQ_GROUP_COUNT].Add(1);
} if (path->IsSubDomainRoot()) {
Self->TabletCounters->Simple()[COUNTER_SUB_DOMAIN_COUNT].Add(1);
} if (path->IsExternalSubDomainRoot()) {
@@ -3805,11 +3805,11 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
Self->TabletCounters->Simple()[COUNTER_SEQUENCE_COUNT].Add(1);
} else if (path->IsReplication()) {
Self->TabletCounters->Simple()[COUNTER_REPLICATION_COUNT].Add(1);
- }
+ }
path->ApplySpecialAttributes();
- }
-
+ }
+
for (const auto& kv : Self->BlockStoreVolumes) {
auto itPath = Self->PathsById.find(kv.first);
if (itPath == Self->PathsById.end() || itPath->second->Dropped()) {
@@ -3820,12 +3820,12 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
domainDir->ChangeVolumeSpaceBegin(volumeSpace, { });
}
- // Find all operations that were in the process of execution
+ // Find all operations that were in the process of execution
for (auto& item : Self->TxInFlight) {
const TTxState& txState = item.second;
ui32 inFlightCounter = TTxState::TxTypeInFlightCounter(txState.TxType);
- Self->TabletCounters->Simple()[inFlightCounter].Add(1);
+ Self->TabletCounters->Simple()[inFlightCounter].Add(1);
}
// Publications
@@ -4534,35 +4534,35 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
TOperationContext context{Self, txc, ctx, OnComplete, MemChanges, DbChanges};
part->ProgressState(context);
}
- }
-
+ }
+
CollectObjectsToClean();
OnComplete.ApplyOnExecute(Self, txc, ctx);
DbChanges.Apply(Self, txc, ctx);
- return true;
- }
-
+ return true;
+ }
+
TTxType GetTxType() const override { return TXTYPE_INIT; }
- bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
- try {
- bool newScheme = CreateScheme(txc);
- if (newScheme)
- return true;
+ bool Execute(TTransactionContext &txc, const TActorContext &ctx) override {
+ try {
+ bool newScheme = CreateScheme(txc);
+ if (newScheme)
+ return true;
return ReadEverything(txc, ctx);
- } catch (const TNotReadyTabletException &) {
- return false;
- } catch (const TSchemeErrorTabletException &ex) {
- Y_FAIL("there must be no leaked scheme error exceptions: %s", ex.what());
- } catch (const std::exception& ex) {
- Y_FAIL("there must be no leaked exceptions: %s", ex.what());
- } catch (...) {
+ } catch (const TNotReadyTabletException &) {
+ return false;
+ } catch (const TSchemeErrorTabletException &ex) {
+ Y_FAIL("there must be no leaked scheme error exceptions: %s", ex.what());
+ } catch (const std::exception& ex) {
+ Y_FAIL("there must be no leaked exceptions: %s", ex.what());
+ } catch (...) {
Y_FAIL("there must be no leaked exceptions");
- }
- }
-
- void Complete(const TActorContext &ctx) override {
+ }
+ }
+
+ void Complete(const TActorContext &ctx) override {
auto delayPublications = OnComplete.ExtractPublicationsToSchemeBoard(); // there is no Populator yet
for (auto& [txId, pathIds] : Publications) {
std::move(pathIds.begin(), pathIds.end(), std::back_inserter(delayPublications[txId]));
@@ -4586,11 +4586,11 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
ExportsToResume, ImportsToResume,
std::move(TablesToClean), std::move(BlockStoreVolumesToClean)
);
- }
-};
-
+ }
+};
+
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxInit() {
- return new TTxInit(this);
-}
-
-}}
+ return new TTxInit(this);
+}
+
+}}
diff --git a/ydb/core/tx/schemeshard/schemeshard__init_root.cpp b/ydb/core/tx/schemeshard/schemeshard__init_root.cpp
index fb50afbea7e..74399680cda 100644
--- a/ydb/core/tx/schemeshard/schemeshard__init_root.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__init_root.cpp
@@ -4,22 +4,22 @@
#include <ydb/core/tablet/tablet_exception.h>
#include <ydb/core/tablet_flat/flat_cxx_database.h>
#include <ydb/library/aclib/aclib.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
struct TSchemeShard::TTxInitRoot : public TSchemeShard::TRwTxBase {
TTxInitRoot(TSelf *self)
: TRwTxBase(self)
- {}
-
+ {}
+
TTxType GetTxType() const override { return TXTYPE_INIT_ROOT; }
void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
- NIceDb::TNiceDb db(txc.DB);
-
+ NIceDb::TNiceDb db(txc.DB);
+
const TDomainsInfo::TDomain& selfDomain = Self->GetDomainDescription(ctx);
TString rootName = selfDomain.Name;
@@ -52,7 +52,7 @@ struct TSchemeShard::TTxInitRoot : public TSchemeShard::TRwTxBase {
Self->NextLocalShardIdx = 1;
Self->ShardInfos.clear();
Self->RootPathElemets = std::move(rootPathElemets);
-
+
TSubDomainInfo::TPtr newDomain = new TSubDomainInfo(0, Self->RootPathId());
newDomain->InitializeAsGlobal(Self->CreateRootProcessingParams(ctx));
@@ -66,9 +66,9 @@ struct TSchemeShard::TTxInitRoot : public TSchemeShard::TRwTxBase {
Self->InitState = TTenantInitState::Done;
Self->PersistInitState(db);
- }
-
- void DoComplete(const TActorContext &ctx) override {
+ }
+
+ void DoComplete(const TActorContext &ctx) override {
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxInitRoot DoComplete"
<< ", at schemeshard: " << Self->TabletID());
@@ -76,13 +76,13 @@ struct TSchemeShard::TTxInitRoot : public TSchemeShard::TRwTxBase {
Self->SignalTabletActive(ctx);
Self->ActivateAfterInitialization(ctx);
- }
-};
-
+ }
+};
+
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxInitRoot() {
return new TTxInitRoot(this);
-}
-
+}
+
struct TSchemeShard::TTxInitRootCompatibility : public TSchemeShard::TRwTxBase {
TEvSchemeShard::TEvInitRootShard::TPtr Ev;
TSideEffects OnComplete;
@@ -685,4 +685,4 @@ struct TSchemeShard::TTxMigrate : public TSchemeShard::TRwTxBase {
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxMigrate(TEvSchemeShard::TEvMigrateSchemeShard::TPtr &ev) {
return new TTxMigrate(this, ev);
}
-}}
+}}
diff --git a/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp b/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp
index 8f47aaea8b9..76e73182496 100644
--- a/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp
@@ -1,5 +1,5 @@
-#include "schemeshard_impl.h"
-
+#include "schemeshard_impl.h"
+
#include <ydb/core/base/tablet_pipecache.h>
#include <ydb/core/protos/tx_datashard.pb.h>
#include <ydb/core/tx/datashard/range_ops.h>
@@ -7,7 +7,7 @@
#include <library/cpp/html/pcdata/pcdata.h>
#include <util/string/cast.h>
-
+
static ui64 TryParseTabletId(TStringBuf tabletIdParam) {
if (tabletIdParam.StartsWith("0x"))
return IntFromString<ui64, 16>(tabletIdParam.substr(2));
@@ -15,9 +15,9 @@ static ui64 TryParseTabletId(TStringBuf tabletIdParam) {
return FromStringWithDefault<ui64>(tabletIdParam, ui64(NKikimr::NSchemeShard::InvalidTabletId));
}
-namespace NKikimr {
+namespace NKikimr {
namespace NSchemeShard {
-
+
struct TCgi {
struct TParam {
const TStringBuf Name;
@@ -228,19 +228,19 @@ private:
};
struct TSchemeShard::TTxMonitoring : public NTabletFlatExecutor::TTransactionBase<TSchemeShard> {
- NMon::TEvRemoteHttpInfo::TPtr Ev;
+ NMon::TEvRemoteHttpInfo::TPtr Ev;
TStringStream Answer;
-public:
+public:
TTxMonitoring(TSchemeShard *self, NMon::TEvRemoteHttpInfo::TPtr ev)
- : TBase(self)
- , Ev(ev)
+ : TBase(self)
+ , Ev(ev)
{
}
-
- bool Execute(NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx) override {
- Y_UNUSED(txc);
-
+
+ bool Execute(NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx) override {
+ Y_UNUSED(txc);
+
const TCgiParameters& cgi = Ev->Get()->Cgi();
const TString page = cgi.Has(TCgi::Page) ? cgi.Get(TCgi::Page) : ToString(TCgi::TPages::MainPage);
@@ -296,30 +296,30 @@ public:
else if (page == TCgi::TPages::AdminPage)
{
OutputAdminPage(Answer);
- }
+ }
else if (page == TCgi::TPages::BuildIndexInfo)
{
auto id = TIndexBuildId(FromStringWithDefault<ui64>(cgi.Get(TCgi::BuildIndexId), ui64(InvalidIndexBuildId)));
BuildIndexInfoPage(id, Answer);
}
-
- return true;
- }
-
+
+ return true;
+ }
+
void Complete(const TActorContext &ctx) override {
if (Answer) {
ctx.Send(Ev->Sender, new NMon::TEvRemoteHttpInfoRes(Answer.Str()));
}
}
-private:
+private:
void LinkToMain(TStringStream& str) const {
str << "<a href='app?" << TCgi::TabletID.AsCgiParam(Self->TabletID())
<< "&" << TCgi::Page.AsCgiParam(TCgi::TPages::MainPage) << "'>";
str << "Back to main scheme shard page";
str << "</a><br>";
}
-
+
void OutputAdminRequestPage(TStringStream& str, NIceDb::TNiceDb& db, const TCgiParameters& cgi, const TActorContext& ctx) const {
if (cgi.Has(TCgi::IsReadOnlyMode)) {
TString rowStr = cgi.Get(TCgi::IsReadOnlyMode);
@@ -539,7 +539,7 @@ private:
}
void OutputMainPage(TStringStream& str) const {
- HTML(str) {
+ HTML(str) {
H3() {str << "SchemeShard main page:";}
{
@@ -806,36 +806,36 @@ private:
void TableTxInfly(TStringStream& str) const {
HTML(str) {
- TABLE_SORTABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
+ TABLE_SORTABLE_CLASS("table") {
+ TABLEHEAD() {
+ TABLER() {
TABLEH() {str << "OpId";}
- TABLEH() {str << "Type";}
- TABLEH() {str << "State";}
- TABLEH() {str << "Shards in progress";}
- }
- str << "\n";
- }
-
- for (const auto& tx : Self->TxInFlight) {
+ TABLEH() {str << "Type";}
+ TABLEH() {str << "State";}
+ TABLEH() {str << "Shards in progress";}
+ }
+ str << "\n";
+ }
+
+ for (const auto& tx : Self->TxInFlight) {
TOperationId opId = tx.first;
const TTxState txState = tx.second;
- TABLER() {
+ TABLER() {
TABLED() { str << "<a href='app?" << TCgi::Page.AsCgiParam(TCgi::TPages::TransactionInfo)
<< "&" << TCgi::TabletID.AsCgiParam(Self->TabletID())
<< "&" << TCgi::TxId.AsCgiParam(opId.GetTxId())
<< "&" << TCgi::PartId.AsCgiParam(opId.GetSubTxId())
<< "'>" << opId << "</a>"; }
- TABLED() { str << TTxState::TypeName(txState.TxType); }
- TABLED() { str << TTxState::StateName(txState.State); }
- TABLED() { str << txState.ShardsInProgress.size(); }
- }
- str << "\n";
- }
- }
- }
- }
-
+ TABLED() { str << TTxState::TypeName(txState.TxType); }
+ TABLED() { str << TTxState::StateName(txState.State); }
+ TABLED() { str << txState.ShardsInProgress.size(); }
+ }
+ str << "\n";
+ }
+ }
+ }
+ }
+
void OutputTxListPage(TStringStream& str) const {
HTML(str) {
H3() {str << "Transactions in flight:";}
@@ -845,28 +845,28 @@ private:
}
void OutputTxInfoPage(TOperationId operationId, TStringStream& str) const {
- HTML(str) {
+ HTML(str) {
H3() {str << "Transaction " << operationId;}
-
+
auto txInfo = Self->FindTx(operationId);
- if (!txInfo) {
- PRE() {
- str << "Unknown Tx\n";
- }
- } else {
+ if (!txInfo) {
+ PRE() {
+ str << "Unknown Tx\n";
+ }
+ } else {
const TTxState txState = *txInfo;
- H3() {str << "Shards in progress : " << txState.ShardsInProgress.size() << "\n";}
- TABLE_SORTABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
+ H3() {str << "Shards in progress : " << txState.ShardsInProgress.size() << "\n";}
+ TABLE_SORTABLE_CLASS("table") {
+ TABLEHEAD() {
+ TABLER() {
TABLEH() {str << "OwnerShardIdx";}
TABLEH() {str << "LocalShardIdx";}
- TABLEH() {str << "TabletId";}
- }
- }
+ TABLEH() {str << "TabletId";}
+ }
+ }
for (auto shardIdx : txState.ShardsInProgress) {
- TABLER() {
- TABLED() {
+ TABLER() {
+ TABLED() {
str << "<a href='../tablets/app?" << TCgi::TabletID.AsCgiParam(Self->TabletID())
<< "&" << TCgi::Page.AsCgiParam(TCgi::TPages::ShardInfoByShardIdx)
<< "&" << TCgi::OwnerShardIdx.AsCgiParam(shardIdx.GetOwnerId())
@@ -879,18 +879,18 @@ private:
str << "<a href='../tablets?"
<< TCgi::TabletID.AsCgiParam(tabletId)
<< "'>" << tabletId <<"</a>";
- } else {
- str << "UNKNOWN_TABLET!";
- }
- }
- }
- str << "\n";
- }
- }
- }
- }
- }
-
+ } else {
+ str << "UNKNOWN_TABLET!";
+ }
+ }
+ }
+ str << "\n";
+ }
+ }
+ }
+ }
+ }
+
void OutputShardInfo(TShardIdx shardIdx, TStringStream& str) const {
HTML(str) {
if (!Self->ShardInfos.contains(shardIdx)) {
@@ -1323,20 +1323,20 @@ private:
}
}
- TTxType GetTxType() const override { return TXTYPE_MONITORING; }
-};
-
+ TTxType GetTxType() const override { return TXTYPE_MONITORING; }
+};
+
bool TSchemeShard::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) {
- if (!Executor() || !Executor()->GetStats().IsActive)
- return false;
-
- if (!ev)
- return true;
-
+ if (!Executor() || !Executor()->GetStats().IsActive)
+ return false;
+
+ if (!ev)
+ return true;
+
LOG_DEBUG(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "Handle TEvRemoteHttpInfo: %s", ev->Get()->Query.data());
- Execute(new TTxMonitoring(this, ev), ctx);
-
- return true;
-}
-
-}}
+ Execute(new TTxMonitoring(this, ev), ctx);
+
+ return true;
+}
+
+}}
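The monitoring hunks above are whitespace-only, but TryParseTabletId at the top of the file is worth a gloss: it accepts a tablet id either as 0x-prefixed hex or as plain decimal, and falls back to an invalid-id sentinel when parsing fails. A minimal standalone sketch of the same parsing rule, assuming the YDB helpers IntFromString and FromStringWithDefault can be approximated with strtoull and assuming 0 as the sentinel value:

    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Hypothetical sentinel standing in for NKikimr::NSchemeShard::InvalidTabletId.
    constexpr uint64_t kInvalidTabletId = 0;

    // Parse "0x..." as hexadecimal, anything else as decimal; return the sentinel
    // when the input is not a clean number.
    uint64_t ParseTabletIdSketch(const std::string& param) {
        const bool isHex = param.rfind("0x", 0) == 0;
        const char* begin = param.c_str() + (isHex ? 2 : 0);
        char* end = nullptr;
        const uint64_t value = std::strtoull(begin, &end, isHex ? 16 : 10);
        if (end == begin || *end != '\0')
            return kInvalidTabletId;  // empty, non-numeric, or trailing junk
        return value;
    }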
diff --git a/ydb/core/tx/schemeshard/schemeshard__notify.cpp b/ydb/core/tx/schemeshard/schemeshard__notify.cpp
index c24335c5f76..d390367947a 100644
--- a/ydb/core/tx/schemeshard/schemeshard__notify.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__notify.cpp
@@ -1,26 +1,26 @@
#include "schemeshard_impl.h"
#include <ydb/core/base/appdata.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
-using namespace NTabletFlatExecutor;
-
+
+using namespace NTabletFlatExecutor;
+
struct TSchemeShard::TTxNotifyCompletion : public TSchemeShard::TRwTxBase {
TEvSchemeShard::TEvNotifyTxCompletion::TPtr Ev;
- TAutoPtr<IEventBase> Result;
-
+ TAutoPtr<IEventBase> Result;
+
TTxNotifyCompletion(TSelf *self, TEvSchemeShard::TEvNotifyTxCompletion::TPtr &ev)
: TRwTxBase(self)
- , Ev(ev)
- {}
-
- void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
+ , Ev(ev)
+ {}
+
+ void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
Y_UNUSED(txc);
Y_UNUSED(ctx);
auto rawTxId = Ev->Get()->Record.GetTxId();
-
+
if (Self->Operations.contains(TTxId(rawTxId))) {
auto txId = TTxId(rawTxId);
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -126,23 +126,23 @@ struct TSchemeShard::TTxNotifyCompletion : public TSchemeShard::TRwTxBase {
Result = new TEvSchemeShard::TEvNotifyTxCompletionResult(rawTxId);
return;
}
-
+
LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"NotifyTxCompletion"
<< " transaction is registered"
<< ", txId: " << rawTxId
<< ", at schemeshard: " << Self->TabletID());
- }
-
- void DoComplete(const TActorContext &ctx) override {
- if (Result) {
- ctx.Send(Ev->Sender, Result.Release());
- }
- }
-};
-
+ }
+
+ void DoComplete(const TActorContext &ctx) override {
+ if (Result) {
+ ctx.Send(Ev->Sender, Result.Release());
+ }
+ }
+};
+
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxNotifyTxCompletion(TEvSchemeShard::TEvNotifyTxCompletion::TPtr &ev) {
- return new TTxNotifyCompletion(this, ev);
-}
-
-}}
+ return new TTxNotifyCompletion(this, ev);
+}
+
+}}
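TTxNotifyCompletion above follows a subscribe-or-reply pattern: when the requested transaction is no longer tracked it prepares a TEvNotifyTxCompletionResult right away and DoComplete sends it, otherwise it registers the request and the answer is produced later, when the operation finishes. A reduced sketch of that pattern with the actor-system events replaced by a plain callback; the type and member names below are stand-ins, not the real schemeshard API:

    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    using TTxId = uint64_t;
    using TReply = std::function<void(TTxId)>;  // models sending TEvNotifyTxCompletionResult

    struct TNotifyRegistrySketch {
        std::unordered_set<TTxId> InFlight;                      // models Self->Operations
        std::unordered_map<TTxId, std::vector<TReply>> Waiters;  // registered subscribers

        // Reply at once for unknown (already finished) transactions,
        // otherwise remember the subscriber until completion.
        void HandleNotifyRequest(TTxId txId, TReply reply) {
            if (InFlight.count(txId) == 0) {
                reply(txId);
                return;
            }
            Waiters[txId].push_back(std::move(reply));
        }

        void OnTxCompleted(TTxId txId) {
            InFlight.erase(txId);
            auto it = Waiters.find(txId);
            if (it == Waiters.end())
                return;
            for (auto& reply : it->second)
                reply(txId);
            Waiters.erase(it);
        }
    };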
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_olap_table.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_olap_table.cpp
index 0b37ceafdc6..2e89cad9869 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_olap_table.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_olap_table.cpp
@@ -47,38 +47,38 @@ TOlapTableInfo::TPtr ParseParams(
if (alter.HasAlterTtlSettings()) {
const NKikimrSchemeOp::TColumnTableSchema* tableSchema = nullptr;
- if (tableInfo->Description.HasSchema()) {
- tableSchema = &tableInfo->Description.GetSchema();
- } else {
- auto& preset = storeInfo->SchemaPresets.at(tableInfo->Description.GetSchemaPresetId());
- auto& presetProto = storeInfo->Description.GetSchemaPresets(preset.ProtoIndex);
- tableSchema = &presetProto.GetSchema();
- }
-
+ if (tableInfo->Description.HasSchema()) {
+ tableSchema = &tableInfo->Description.GetSchema();
+ } else {
+ auto& preset = storeInfo->SchemaPresets.at(tableInfo->Description.GetSchemaPresetId());
+ auto& presetProto = storeInfo->Description.GetSchemaPresets(preset.ProtoIndex);
+ tableSchema = &presetProto.GetSchema();
+ }
+
THashSet<TString> knownTiers;
for (const auto& tier : tableSchema->GetStorageTiers()) {
knownTiers.insert(tier.GetName());
}
- THashMap<ui32, TOlapSchema::TColumn> columns;
- THashMap<TString, ui32> columnsByName;
- for (const auto& col : tableSchema->GetColumns()) {
- ui32 id = col.GetId();
- TString name = col.GetName();
- columns[id] = TOlapSchema::TColumn{id, name, static_cast<NScheme::TTypeId>(col.GetTypeId()), Max<ui32>()};
- columnsByName[name] = id;
- }
-
+ THashMap<ui32, TOlapSchema::TColumn> columns;
+ THashMap<TString, ui32> columnsByName;
+ for (const auto& col : tableSchema->GetColumns()) {
+ ui32 id = col.GetId();
+ TString name = col.GetName();
+ columns[id] = TOlapSchema::TColumn{id, name, static_cast<NScheme::TTypeId>(col.GetTypeId()), Max<ui32>()};
+ columnsByName[name] = id;
+ }
+
if (!ValidateTtlSettings(alter.GetAlterTtlSettings(), columns, columnsByName, knownTiers, errStr)) {
status = NKikimrScheme::StatusInvalidParameter;
- return nullptr;
- }
-
+ return nullptr;
+ }
+
if (!ValidateTtlSettingsChange(tableInfo->Description.GetTtlSettings(), alter.GetAlterTtlSettings(), errStr)) {
status = NKikimrScheme::StatusInvalidParameter;
- return nullptr;
- }
-
+ return nullptr;
+ }
+
*alterData->Description.MutableTtlSettings() = alter.GetAlterTtlSettings();
alterData->Description.MutableTtlSettings()->SetVersion(currentTtlVersion + 1);
#if 0
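Before validating AlterTtlSettings, the code above resolves the effective column table schema (either inline or from the store's schema preset) and builds two lookup maps over its columns, one keyed by column id and one by column name. A simplified sketch of that indexing step, plus a hypothetical existence check in the spirit of ValidateTtlSettings; the real validation also checks column types and the known storage tiers:

    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Reduced column record; the real TOlapSchema::TColumn carries more fields.
    struct TColumnSketch {
        uint32_t Id = 0;
        std::string Name;
        int TypeId = 0;  // stands in for NScheme::TTypeId
    };

    struct TColumnIndexSketch {
        std::unordered_map<uint32_t, TColumnSketch> ById;
        std::unordered_map<std::string, uint32_t> IdByName;
    };

    // Mirror of the loop over tableSchema->GetColumns() above.
    TColumnIndexSketch BuildColumnIndex(const std::vector<TColumnSketch>& schemaColumns) {
        TColumnIndexSketch index;
        for (const auto& col : schemaColumns) {
            index.ById[col.Id] = col;
            index.IdByName[col.Name] = col.Id;
        }
        return index;
    }

    // Hypothetical helper: the TTL column must at least exist in the schema.
    bool TtlColumnExists(const TColumnIndexSketch& index, const std::string& ttlColumn) {
        return index.IdByName.count(ttlColumn) != 0;
    }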
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp
index c2d355298e0..b6c0b6c8b25 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp
@@ -28,39 +28,39 @@ bool CheckFreezeStateAlredySet(const TTableInfo::TPtr table, const NKikimrScheme
return false;
}
-bool IsSuperUser(const NACLib::TUserToken* userToken) {
- if (!userToken)
- return false;
-
- const auto& adminSids = AppData()->AdministrationAllowedSIDs;
- auto hasSid = [userToken](const TString& sid) -> bool {
- return userToken->IsExist(sid);
- };
- auto it = std::find_if(adminSids.begin(), adminSids.end(), hasSid);
- return (it != adminSids.end());
-}
-
+bool IsSuperUser(const NACLib::TUserToken* userToken) {
+ if (!userToken)
+ return false;
+
+ const auto& adminSids = AppData()->AdministrationAllowedSIDs;
+ auto hasSid = [userToken](const TString& sid) -> bool {
+ return userToken->IsExist(sid);
+ };
+ auto it = std::find_if(adminSids.begin(), adminSids.end(), hasSid);
+ return (it != adminSids.end());
+}
+
TTableInfo::TAlterDataPtr ParseParams(const TPath& path, TTableInfo::TPtr table, const NKikimrSchemeOp::TTableDescription& alter,
const bool shadowDataAllowed,
TString& errStr, NKikimrScheme::EStatus& status, TOperationContext& context) {
const TAppData* appData = AppData(context.Ctx);
- if (!path.IsCommonSensePath()) {
+ if (!path.IsCommonSensePath()) {
Y_VERIFY_DEBUG(IsSuperUser(context.UserToken.Get()) || context.IsAllowedPrivateTables, "Only superuser can alter index impl table");
-
- if (alter.ColumnsSize() != 0 || alter.DropColumnsSize() != 0) {
- errStr = "Adding or dropping columns in index table is not supported";
+
+ if (alter.ColumnsSize() != 0 || alter.DropColumnsSize() != 0) {
+ errStr = "Adding or dropping columns in index table is not supported";
status = NKikimrScheme::StatusInvalidParameter;
- return nullptr;
- }
+ return nullptr;
+ }
if (alter.HasTTLSettings()) {
errStr = "TTL on index table is not supported";
status = NKikimrScheme::StatusInvalidParameter;
return nullptr;
}
- }
-
+ }
+
auto copyAlter = alter;
const bool hasSchemaChanges = (
@@ -510,12 +510,12 @@ public:
.IsResolved()
.NotDeleted()
.IsTable()
- .NotUnderOperation();
+ .NotUnderOperation();
if (!context.IsAllowedPrivateTables && !IsSuperUser(context.UserToken.Get())) {
- checks.IsCommonSensePath(); //forbid alter impl index tables
- }
-
+ checks.IsCommonSensePath(); //forbid alter impl index tables
+ }
+
if (!checks) {
TString explain = TStringBuilder() << "path fail checks"
<< ", path: " << path.PathString();
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_store.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_store.cpp
index 8d23a04fffb..a3e6896b6f1 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_store.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_store.cpp
@@ -196,11 +196,11 @@ TOlapStoreInfo::TPtr CreateOlapStore(const NKikimrSchemeOp::TColumnStoreDescript
if (op.GetRESERVED_MetaShardCount() != 0) {
status = NKikimrScheme::StatusSchemeError;
- errStr = Sprintf("trying to create OLAP store with meta shards (not supported yet)");
- return nullptr;
- }
-
- if (!op.HasColumnShardCount()) {
+ errStr = Sprintf("trying to create OLAP store with meta shards (not supported yet)");
+ return nullptr;
+ }
+
+ if (!op.HasColumnShardCount()) {
status = NKikimrScheme::StatusSchemeError;
errStr = Sprintf("trying to create OLAP store without shards number specified");
return nullptr;
@@ -415,8 +415,8 @@ public:
context.SS->ClearDescribePathCaches(parentDir);
context.OnComplete.PublishToSchemeBoard(OperationId, parentDir->PathId);
- ++path->DirAlterVersion;
- context.SS->PersistPathDirAlterVersion(db, path);
+ ++path->DirAlterVersion;
+ context.SS->PersistPathDirAlterVersion(db, path);
context.SS->ClearDescribePathCaches(path);
context.OnComplete.PublishToSchemeBoard(OperationId, path->PathId);
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_table.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_table.cpp
index 256f33f7f76..09f556c6558 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_table.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_create_olap_table.cpp
@@ -222,14 +222,14 @@ TOlapTableInfo::TPtr CreateOlapTable(
op.MutableTtlSettings()->SetVersion(1);
}
- // Validate ttl settings and schema compatibility
- if (op.HasTtlSettings()) {
+ // Validate ttl settings and schema compatibility
+ if (op.HasTtlSettings()) {
if (!ValidateTtlSettings(op.GetTtlSettings(), pSchema->Columns, pSchema->ColumnsByName, storageTiers, errStr)) {
status = NKikimrScheme::StatusInvalidParameter;
- return nullptr;
- }
- }
-
+ return nullptr;
+ }
+ }
+
if (op.HasSharding()) {
tableInfo->Sharding = std::move(*op.MutableSharding());
op.ClearSharding();
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp
index 1b41841a71a..a2afe4741af 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp
@@ -172,12 +172,12 @@ public:
checks
.IsResolved()
.NotUnderDeleting()
- .FailOnExist({
- TPathElement::EPathType::EPathTypeDir,
- TPathElement::EPathType::EPathTypeSubDomain,
- TPathElement::EPathType::EPathTypeExtSubDomain,
+ .FailOnExist({
+ TPathElement::EPathType::EPathTypeDir,
+ TPathElement::EPathType::EPathTypeSubDomain,
+ TPathElement::EPathType::EPathTypeExtSubDomain,
TPathElement::EPathType::EPathTypeColumnStore
- }, acceptExisted);
+ }, acceptExisted);
} else {
checks
.NotEmpty()
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_part.h b/ydb/core/tx/schemeshard/schemeshard__operation_part.h
index d6fba01af4b..4a639e63429 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_part.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_part.h
@@ -83,7 +83,7 @@ struct TOperationContext {
TSideEffects& OnComplete;
TMemoryChanges& MemChanges;
TStorageChanges& DbChanges;
- TAutoPtr<NACLib::TUserToken> UserToken = nullptr;
+ TAutoPtr<NACLib::TUserToken> UserToken = nullptr;
bool IsAllowedPrivateTables = false;
};
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp
index 57c2146f192..645d7f37e19 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp
@@ -138,20 +138,20 @@ public:
<< " alterVersion: " << alterVersion
<< " at tablet: " << context.SS->TabletID());
- const ui32 rangeIdx = getDstRangeIdx(datashardId);
- const auto& rangeDescr = splitDescr.GetDestinationRanges(rangeIdx);
-
- // For each destination shard we construct an individual description
- // that contains all src shards and only this one dst shard with its range
- NKikimrTxDataShard::TSplitMergeDescription splitDescForShard;
- splitDescForShard.MutableSourceRanges()->CopyFrom(txState->SplitDescription->GetSourceRanges());
- splitDescForShard.AddDestinationRanges()->CopyFrom(rangeDescr);
-
+ const ui32 rangeIdx = getDstRangeIdx(datashardId);
+ const auto& rangeDescr = splitDescr.GetDestinationRanges(rangeIdx);
+
+ // For each destination shard we construct an individual description
+ // that contains all src shards and only this one dst shard with its range
+ NKikimrTxDataShard::TSplitMergeDescription splitDescForShard;
+ splitDescForShard.MutableSourceRanges()->CopyFrom(txState->SplitDescription->GetSourceRanges());
+ splitDescForShard.AddDestinationRanges()->CopyFrom(rangeDescr);
+
Y_VERIFY(txState->SplitDescription);
THolder<TEvDataShard::TEvInitSplitMergeDestination> event =
THolder(new TEvDataShard::TEvInitSplitMergeDestination(ui64(OperationId.GetTxId()), context.SS->TabletID(),
subDomainPathId,
- splitDescForShard,
+ splitDescForShard,
context.SS->SelectProcessingPrarams(txState->TargetPathId)));
// Add a new-style CreateTable with correct per-shard settings
@@ -789,7 +789,7 @@ public:
return result;
}
- const THashMap<TShardIdx, ui64>& shardIdx2partition = tableInfo->GetShard2PartitionIdx();
+ const THashMap<TShardIdx, ui64>& shardIdx2partition = tableInfo->GetShard2PartitionIdx();
TVector<ui64> srcPartitionIdxs;
i64 totalSrcPartCount = 0;
@@ -835,7 +835,7 @@ public:
return result;
}
- if (context.SS->SplitSettings.SplitMergePartCountLimit != -1) {
+ if (context.SS->SplitSettings.SplitMergePartCountLimit != -1) {
const auto* stats = tableInfo->GetStats().PartitionStats.FindPtr(srcShardIdx);
if (!stats || stats->ShardState != NKikimrTxDataShard::Ready) {
TString errMsg = TStringBuilder() << "Src TabletId " << srcTabletId << " is not in Ready state";
@@ -851,9 +851,9 @@ public:
srcPartitionIdxs.push_back(pi);
}
- if (context.SS->SplitSettings.SplitMergePartCountLimit != -1 &&
- totalSrcPartCount >= context.SS->SplitSettings.SplitMergePartCountLimit)
- {
+ if (context.SS->SplitSettings.SplitMergePartCountLimit != -1 &&
+ totalSrcPartCount >= context.SS->SplitSettings.SplitMergePartCountLimit)
+ {
result->SetError(NKikimrScheme::StatusNotAvailable,
Sprintf("Split/Merge operation involves too many parts: %" PRIu64, totalSrcPartCount));
diff --git a/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp b/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp
index e91b2d0a862..ffa6e34d509 100644
--- a/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp
@@ -10,14 +10,14 @@ namespace NSchemeShard {
using namespace NTabletFlatExecutor;
struct TSchemeShard::TTxShardStateChanged : public TSchemeShard::TRwTxBase {
- TEvDataShard::TEvStateChanged::TPtr Ev;
+ TEvDataShard::TEvStateChanged::TPtr Ev;
TSideEffects SideEffects;
-
- TTxShardStateChanged(TSelf *self, TEvDataShard::TEvStateChanged::TPtr& ev)
+
+ TTxShardStateChanged(TSelf *self, TEvDataShard::TEvStateChanged::TPtr& ev)
: TRwTxBase(self)
- , Ev(ev)
- {}
-
+ , Ev(ev)
+ {}
+
TTxType GetTxType() const override { return TXTYPE_DATASHARD_STATE_RESULT; }
void DeleteShard(TTabletId tabletId, const TActorContext &ctx) {
@@ -72,10 +72,10 @@ struct TSchemeShard::TTxShardStateChanged : public TSchemeShard::TRwTxBase {
}
}
- void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
+ void DoExecute(TTransactionContext &txc, const TActorContext &ctx) override {
auto tabletId = TTabletId(Ev->Get()->Record.GetTabletId());
- auto state = Ev->Get()->Record.GetState();
-
+ auto state = Ev->Get()->Record.GetState();
+
LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxShardStateChanged DoExecute"
<< ", datashard informs about state changing"
@@ -91,20 +91,20 @@ struct TSchemeShard::TTxShardStateChanged : public TSchemeShard::TRwTxBase {
if (state == NDataShard::TShardState::Offline) {
DeleteShard(tabletId, ctx);
ProgressDependentOperation(tabletId, ctx);
- }
+ }
SideEffects.ApplyOnExecute(Self, txc, ctx);
- }
-
- void DoComplete(const TActorContext &ctx) override {
+ }
+
+ void DoComplete(const TActorContext &ctx) override {
SideEffects.ApplyOnComplete(Self, ctx);
- }
-};
-
+ }
+};
+
NTabletFlatExecutor::ITransaction* TSchemeShard::CreateTxShardStateChanged(
TEvDataShard::TEvStateChanged::TPtr& ev)
{
return new TTxShardStateChanged(this, ev);
-}
-
+}
+
}}
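TTxShardStateChanged above reacts to datashard state reports; the visible logic deletes the shard and advances any operation waiting on it once the shard reports Offline, with the accumulated side effects applied on execute and on complete. A reduced model of that decision, with DeleteShard and ProgressDependentOperation abstracted as callbacks:

    #include <cstdint>
    #include <functional>

    enum class EShardStateSketch { Ready, Offline /* other datashard states elided */ };

    struct TShardStateHandlerSketch {
        // Stand-ins for the private helpers in the transaction above; the real code
        // also collects TSideEffects and applies them in DoExecute/DoComplete.
        std::function<void(uint64_t)> DeleteShard;
        std::function<void(uint64_t)> ProgressDependentOperation;

        void OnStateChanged(uint64_t tabletId, EShardStateSketch state) {
            if (state != EShardStateSketch::Offline)
                return;  // only the final Offline report triggers cleanup
            DeleteShard(tabletId);
            ProgressDependentOperation(tabletId);
        }
    };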
diff --git a/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp b/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp
index 2238783919f..b2e7db88b79 100644
--- a/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp
@@ -1,10 +1,10 @@
-#include "schemeshard_impl.h"
+#include "schemeshard_impl.h"
#include <ydb/core/base/appdata.h>
#include <ydb/core/protos/sys_view.pb.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
+
template <typename T>
static ui64 GetThroughput(const T& c) {
ui64 acc = 0;
@@ -35,7 +35,7 @@ auto TSchemeShard::BuildStatsForCollector(TPathId pathId, TShardIdx shardIdx, TT
sysStats.SetDataSize(stats.DataSize);
sysStats.SetRowCount(stats.RowCount);
sysStats.SetIndexSize(stats.IndexSize);
- sysStats.SetCPUCores(std::min(stats.GetCurrentRawCpuUsage() / 1000000., 1.0));
+ sysStats.SetCPUCores(std::min(stats.GetCurrentRawCpuUsage() / 1000000., 1.0));
sysStats.SetTabletId(ui64(datashardId));
sysStats.SetAccessTime(stats.LastAccessTime.MilliSeconds());
sysStats.SetUpdateTime(stats.LastUpdateTime.MilliSeconds());
@@ -125,47 +125,47 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
tableId = Self->MakeLocalId(TLocalPathId(rec.GetTableLocalId()));
}
- const auto& tableStats = rec.GetTableStats();
+ const auto& tableStats = rec.GetTableStats();
const auto& tabletMetrics = rec.GetTabletMetrics();
- ui64 dataSize = tableStats.GetDataSize();
- ui64 rowCount = tableStats.GetRowCount();
-
+ ui64 dataSize = tableStats.GetDataSize();
+ ui64 rowCount = tableStats.GetRowCount();
+
if (!Self->Tables.contains(tableId)) {
return true;
}
-
+
TTableInfo::TPtr table = Self->Tables[tableId];
-
+
if (!Self->TabletIdToShardIdx.contains(datashardId)) {
return true;
}
-
+
auto shardIdx = Self->TabletIdToShardIdx[datashardId];
-
- TTableInfo::TPartitionStats newStats;
- newStats.SeqNo = TMessageSeqNo(rec.GetGeneration(), rec.GetRound());
-
- newStats.RowCount = tableStats.GetRowCount();
- newStats.DataSize = tableStats.GetDataSize();
- newStats.IndexSize = tableStats.GetIndexSize();
- newStats.LastAccessTime = TInstant::MilliSeconds(tableStats.GetLastAccessTime());
- newStats.LastUpdateTime = TInstant::MilliSeconds(tableStats.GetLastUpdateTime());
-
- newStats.ImmediateTxCompleted = tableStats.GetImmediateTxCompleted();
- newStats.PlannedTxCompleted = tableStats.GetPlannedTxCompleted();
- newStats.TxRejectedByOverload = tableStats.GetTxRejectedByOverload();
- newStats.TxRejectedBySpace = tableStats.GetTxRejectedBySpace();
- newStats.TxCompleteLag = TDuration::MilliSeconds(tableStats.GetTxCompleteLagMsec());
- newStats.InFlightTxCount = tableStats.GetInFlightTxCount();
-
- newStats.RowUpdates = tableStats.GetRowUpdates();
- newStats.RowDeletes = tableStats.GetRowDeletes();
- newStats.RowReads = tableStats.GetRowReads();
- newStats.RangeReads = tableStats.GetRangeReads();
- newStats.RangeReadRows = tableStats.GetRangeReadRows();
-
- TInstant now = AppData(ctx)->TimeProvider->Now();
- newStats.SetCurrentRawCpuUsage(tabletMetrics.GetCPU(), now);
+
+ TTableInfo::TPartitionStats newStats;
+ newStats.SeqNo = TMessageSeqNo(rec.GetGeneration(), rec.GetRound());
+
+ newStats.RowCount = tableStats.GetRowCount();
+ newStats.DataSize = tableStats.GetDataSize();
+ newStats.IndexSize = tableStats.GetIndexSize();
+ newStats.LastAccessTime = TInstant::MilliSeconds(tableStats.GetLastAccessTime());
+ newStats.LastUpdateTime = TInstant::MilliSeconds(tableStats.GetLastUpdateTime());
+
+ newStats.ImmediateTxCompleted = tableStats.GetImmediateTxCompleted();
+ newStats.PlannedTxCompleted = tableStats.GetPlannedTxCompleted();
+ newStats.TxRejectedByOverload = tableStats.GetTxRejectedByOverload();
+ newStats.TxRejectedBySpace = tableStats.GetTxRejectedBySpace();
+ newStats.TxCompleteLag = TDuration::MilliSeconds(tableStats.GetTxCompleteLagMsec());
+ newStats.InFlightTxCount = tableStats.GetInFlightTxCount();
+
+ newStats.RowUpdates = tableStats.GetRowUpdates();
+ newStats.RowDeletes = tableStats.GetRowDeletes();
+ newStats.RowReads = tableStats.GetRowReads();
+ newStats.RangeReads = tableStats.GetRangeReads();
+ newStats.RangeReadRows = tableStats.GetRangeReadRows();
+
+ TInstant now = AppData(ctx)->TimeProvider->Now();
+ newStats.SetCurrentRawCpuUsage(tabletMetrics.GetCPU(), now);
newStats.Memory = tabletMetrics.GetMemory();
newStats.Network = tabletMetrics.GetNetwork();
newStats.Storage = tabletMetrics.GetStorage();
@@ -173,23 +173,23 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
newStats.WriteThroughput = GetThroughput(tabletMetrics.GetGroupWriteThroughput());
newStats.ReadIops = GetIops(tabletMetrics.GetGroupReadIops());
newStats.WriteIops = GetIops(tabletMetrics.GetGroupWriteIops());
- newStats.PartCount = tableStats.GetPartCount();
+ newStats.PartCount = tableStats.GetPartCount();
newStats.SearchHeight = tableStats.GetSearchHeight();
- newStats.StartTime = TInstant::MilliSeconds(rec.GetStartTime());
- for (ui64 tabletId : rec.GetUserTablePartOwners()) {
+ newStats.StartTime = TInstant::MilliSeconds(rec.GetStartTime());
+ for (ui64 tabletId : rec.GetUserTablePartOwners()) {
newStats.PartOwners.insert(TTabletId(tabletId));
if (tabletId != rec.GetDatashardId()) {
newStats.HasBorrowed = true;
}
- }
- for (ui64 tabletId : rec.GetSysTablesPartOwners()) {
+ }
+ for (ui64 tabletId : rec.GetSysTablesPartOwners()) {
newStats.PartOwners.insert(TTabletId(tabletId));
- }
- newStats.ShardState = rec.GetShardState();
-
+ }
+ newStats.ShardState = rec.GetShardState();
+
auto oldAggrStats = table->GetStats().Aggregated;
- table->UpdateShardStats(shardIdx, newStats);
-
+ table->UpdateShardStats(shardIdx, newStats);
+
if (Self->CompactionQueue) {
TShardCompactionInfo compactionInfo(shardIdx, newStats.SearchHeight);
if (newStats.SearchHeight >= Self->CompactionSearchHeightThreshold) {
@@ -256,7 +256,7 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
}
TVector<TShardIdx> shardsToMerge;
- if (table->CheckCanMergePartitions(Self->SplitSettings, shardIdx, shardsToMerge)) {
+ if (table->CheckCanMergePartitions(Self->SplitSettings, shardIdx, shardsToMerge)) {
TTxId txId = Self->GetCachedTxId(ctx);
if (!txId) {
@@ -266,8 +266,8 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
<< ", shardIdx: " << shardIdx
<< ", size of merge: " << shardsToMerge.size());
return true;
- }
-
+ }
+
auto request = MergeRequest(Self, txId, Self->ShardInfos[shardIdx].PathId, shardsToMerge);
TMemoryChanges memChanges;
@@ -280,29 +280,29 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
MergeOpSideEffects.ApplyOnExecute(Self, txc, ctx);
return true;
- }
-
- if (rec.GetShardState() != NKikimrTxDataShard::Ready) {
+ }
+
+ if (rec.GetShardState() != NKikimrTxDataShard::Ready) {
return true;
- }
-
- ui64 dataSizeResolution = 0; // Datashard will use default resolution
- ui64 rowCountResolution = 0; // Datashard will use default resolution
- bool collectKeySample = false;
+ }
+
+ ui64 dataSizeResolution = 0; // Datashard will use default resolution
+ ui64 rowCountResolution = 0; // Datashard will use default resolution
+ bool collectKeySample = false;
if (table->CheckFastSplitForPartition(Self->SplitSettings, shardIdx, dataSize, rowCount)) {
- dataSizeResolution = Max<ui64>(dataSize / 100, 100*1024);
- rowCountResolution = Max<ui64>(rowCount / 100, 1000);
- collectKeySample = true;
- } else if (table->CheckSplitByLoad(Self->SplitSettings, shardIdx, dataSize, rowCount)) {
- collectKeySample = true;
- } else if (dataSize < table->GetShardSizeToSplit()) {
+ dataSizeResolution = Max<ui64>(dataSize / 100, 100*1024);
+ rowCountResolution = Max<ui64>(rowCount / 100, 1000);
+ collectKeySample = true;
+ } else if (table->CheckSplitByLoad(Self->SplitSettings, shardIdx, dataSize, rowCount)) {
+ collectKeySample = true;
+ } else if (dataSize < table->GetShardSizeToSplit()) {
return true;
}
- if (table->GetPartitions().size() >= table->GetMaxPartitionsCount()) {
+ if (table->GetPartitions().size() >= table->GetMaxPartitionsCount()) {
return true;
- }
-
+ }
+
{
constexpr ui64 deltaShards = 2;
TPathElement::TPtr path = Self->PathsById.at(tableId);
@@ -339,11 +339,11 @@ bool TTxStorePartitionStats::Execute(TTransactionContext& txc, const TActorConte
}
// Request histograms from the datashard
- GetStatsEv.Reset(new TEvDataShard::TEvGetTableStats(tableId.LocalPathId, dataSizeResolution, rowCountResolution, collectKeySample));
-
+ GetStatsEv.Reset(new TEvDataShard::TEvGetTableStats(tableId.LocalPathId, dataSizeResolution, rowCountResolution, collectKeySample));
+
return true;
-}
-
+}
+
void TTxStorePartitionStats::Complete(const TActorContext& ctx) {
MergeOpSideEffects.ApplyOnComplete(Self, ctx);
@@ -389,4 +389,4 @@ void TSchemeShard::Handle(TEvDataShard::TEvPeriodicTableStats::TPtr& ev, const T
Execute(new TTxStorePartitionStats(this, ev), ctx);
}
-}}
+}}
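TTxStorePartitionStats above turns each periodic stats report into a decision about which histograms to request back from the datashard: a fast split asks for roughly one hundred buckets but never finer than 100 KiB or 1000 rows per bucket, split-by-load asks for a key access sample, and shards below the size-to-split threshold ask for nothing. A sketch of that rule; CheckFastSplitForPartition and CheckSplitByLoad are represented here by boolean inputs, and the max-partitions cap checked by the real code is omitted:

    #include <algorithm>
    #include <cstdint>

    struct TStatsRequestSketch {
        bool Request = false;
        uint64_t DataSizeResolution = 0;  // 0 means "let the datashard use its default"
        uint64_t RowCountResolution = 0;
        bool CollectKeySample = false;
    };

    TStatsRequestSketch DecideStatsRequest(uint64_t dataSize, uint64_t rowCount,
                                           uint64_t shardSizeToSplit,
                                           bool fastSplit, bool splitByLoad) {
        TStatsRequestSketch out;
        if (fastSplit) {
            // ~100 buckets, but no finer than 100 KiB of data or 1000 rows per bucket.
            out.DataSizeResolution = std::max<uint64_t>(dataSize / 100, 100 * 1024);
            out.RowCountResolution = std::max<uint64_t>(rowCount / 100, 1000);
            out.CollectKeySample = true;
        } else if (splitByLoad) {
            out.CollectKeySample = true;
        } else if (dataSize < shardSizeToSplit) {
            return out;  // shard is small enough: no histogram request at all
        }
        out.Request = true;
        return out;
    }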
diff --git a/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp b/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
index 74cf03b08b8..a5fb3bdac4b 100644
--- a/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp
@@ -1,207 +1,207 @@
-#include "schemeshard_impl.h"
+#include "schemeshard_impl.h"
#include <ydb/core/base/appdata.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
-static bool IsIntegerType(NScheme::TTypeId typeId) {
- switch (typeId) {
- case NScheme::NTypeIds::Bool:
-
- case NScheme::NTypeIds::Int8:
- case NScheme::NTypeIds::Uint8:
- case NScheme::NTypeIds::Int16:
- case NScheme::NTypeIds::Uint16:
- case NScheme::NTypeIds::Int32:
- case NScheme::NTypeIds::Uint32:
- case NScheme::NTypeIds::Int64:
- case NScheme::NTypeIds::Uint64:
-
- case NScheme::NTypeIds::Date:
- case NScheme::NTypeIds::Datetime:
- case NScheme::NTypeIds::Timestamp:
- case NScheme::NTypeIds::Interval:
- return true;
-
- default:
- return false;
- }
-}
-
-TSerializedCellVec ChooseSplitKeyByHistogram(const NKikimrTableStats::THistogram& histogram, const TConstArrayRef<NScheme::TTypeId> &keyColumnTypes) {
- ui64 bucketsCount = histogram.BucketsSize();
- ui64 idxLo = bucketsCount * 0.33;
- ui64 idxMed = bucketsCount * 0.5;
- ui64 idxHi = bucketsCount * 0.66;
-
- TSerializedCellVec keyLo(histogram.GetBuckets(idxLo).GetKey());
- TSerializedCellVec keyMed(histogram.GetBuckets(idxMed).GetKey());
- TSerializedCellVec keyHi(histogram.GetBuckets(idxHi).GetKey());
-
- TVector<TCell> splitKey(keyMed.GetCells().size());
-
- for (size_t i = 0; i < keyMed.GetCells().size(); ++i) {
- NScheme::TTypeId columnType = keyColumnTypes[i];
-
- if (0 == CompareTypedCells(keyLo.GetCells()[i], keyHi.GetCells()[i], columnType)) {
- // lo == hi, so we add this value and proceed to the next column
- splitKey[i] = keyLo.GetCells()[i];
- continue;
- }
-
- if (0 != CompareTypedCells(keyLo.GetCells()[i], keyMed.GetCells()[i], columnType)) {
- // med != lo
- splitKey[i] = keyMed.GetCells()[i];
- } else {
- // med == lo and med != hi, so we want to find a value that is > med and <= hi
- if (IsIntegerType(columnType) && !keyMed.GetCells()[i].IsNull()) {
-                // For integer types we can add 1 to med
- ui64 val = 0;
- size_t sz = keyMed.GetCells()[i].Size();
- memcpy(&val, keyMed.GetCells()[i].Data(), sz);
- val++;
- splitKey[i] = TCell((const char*)&val, sz);
- } else {
- // For other types let's do binary search between med and hi to find smallest key > med
-
- // Compares only i-th cell in keys
- auto fnCmpCurrentCell = [i, columnType] (const auto& bucket1, const auto& bucket2) {
- TSerializedCellVec key1(bucket1.GetKey());
- TSerializedCellVec key2(bucket2.GetKey());
- return CompareTypedCells(key1.GetCells()[i], key2.GetCells()[i], columnType) < 0;
- };
- const auto bucketsBegin = histogram.GetBuckets().begin();
- const auto it = UpperBound(
- bucketsBegin + idxMed,
- bucketsBegin + idxHi,
- histogram.GetBuckets(idxMed),
- fnCmpCurrentCell);
- TSerializedCellVec keyFound(it->GetKey());
- splitKey[i] = keyFound.GetCells()[i];
- }
- }
- break;
- }
-
- return TSerializedCellVec(TSerializedCellVec::Serialize(splitKey));
-}
-
-TSerializedCellVec DoFindSplitKey(const TVector<std::pair<TSerializedCellVec, ui64>>& keysHist,
- const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes,
- const size_t prefixSize)
-{
- ui64 total = keysHist.back().second;
-
- // Compares bucket value
- auto fnValueLess = [] (ui64 val, const auto& bucket) {
- return val < bucket.second;
- };
-
- // Find the position of total/2
- auto halfIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.5, fnValueLess);
- auto loIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.1, fnValueLess);
- auto hiIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.9, fnValueLess);
-
- auto fnCmp = [&keyColumnTypes, prefixSize] (const auto& bucket1, const auto& bucket2) {
- return CompareTypedCellVectors(bucket1.first.GetCells().data(), bucket2.first.GetCells().data(),
- keyColumnTypes.data(),
- std::min(bucket1.first.GetCells().size(), prefixSize), std::min(bucket2.first.GetCells().size(), prefixSize));
- };
-
-    // Check if half key is not equal to low and high keys
- if (fnCmp(*halfIt, *loIt) == 0)
- return TSerializedCellVec();
- if (fnCmp(*halfIt, *hiIt) == 0)
- return TSerializedCellVec();
-
- // Build split key by leaving the prefix and extending it with NULLs
- TVector<TCell> splitKey(halfIt->first.GetCells().begin(), halfIt->first.GetCells().end());
- splitKey.resize(prefixSize);
- splitKey.resize(keyColumnTypes.size());
-
- return TSerializedCellVec(TSerializedCellVec::Serialize(splitKey));
-}
-
-TSerializedCellVec ChooseSplitKeyByKeySample(const NKikimrTableStats::THistogram& keySample, const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes) {
- TVector<std::pair<TSerializedCellVec, ui64>> keysHist;
- for (const auto& s : keySample.GetBuckets()) {
- keysHist.emplace_back(std::make_pair(TSerializedCellVec(s.GetKey()), s.GetValue()));
- }
-
- auto fnCmp = [&keyColumnTypes] (const auto& key1, const auto& key2) {
- return CompareTypedCellVectors(key1.first.GetCells().data(), key2.first.GetCells().data(),
- keyColumnTypes.data(),
- key1.first.GetCells().size(), key2.first.GetCells().size());
- };
-
- Sort(keysHist, [&fnCmp] (const auto& key1, const auto& key2) { return fnCmp(key1, key2) < 0; });
-
- // The keys are now sorted. Next we convert the stats into a histogram by accumulating
- // stats for all previous keys at each key.
- size_t last = 0;
- for (size_t i = 1; i < keysHist.size(); ++i) {
- // Accumulate stats
- keysHist[i].second += keysHist[i-1].second;
-
- if (fnCmp(keysHist[i], keysHist[last]) == 0) {
- // Merge equal keys
- keysHist[last].second = keysHist[i].second;
- } else {
- ++last;
- if (last != i) {
- keysHist[last] = keysHist[i];
- }
- }
- }
- keysHist.resize(std::min(keysHist.size(), last + 1));
-
- if (keysHist.size() < 2)
- return TSerializedCellVec();
-
- // Find the median key with the shortest prefix
- size_t minPrefix = 0;
- size_t maxPrefix = keyColumnTypes.size();
-
- // Binary search for shortest prefix that can be used to split the load
- TSerializedCellVec splitKey;
- while (minPrefix + 1 < maxPrefix) {
- size_t prefixSize = (minPrefix + maxPrefix + 1) / 2;
- splitKey = DoFindSplitKey(keysHist, keyColumnTypes, prefixSize);
- if (splitKey.GetCells().empty()) {
- minPrefix = prefixSize;
- } else {
- maxPrefix = prefixSize;
- }
- }
- splitKey = DoFindSplitKey(keysHist, keyColumnTypes, maxPrefix);
-
- return splitKey;
-}
-
-enum struct ESplitReason {
- NO_SPLIT = 0,
- FAST_SPLIT_INDEX,
- SPLIT_BY_SIZE,
- SPLIT_BY_LOAD
-};
-
-const char* ToString(ESplitReason splitReason) {
- switch (splitReason) {
- case ESplitReason::NO_SPLIT:
- return "No split";
- case ESplitReason::FAST_SPLIT_INDEX:
- return "Fast split index table";
- case ESplitReason::SPLIT_BY_SIZE:
- return "Split by size";
- case ESplitReason::SPLIT_BY_LOAD:
- return "Split by load";
- default:
- Y_VERIFY_DEBUG(!"Unexpected enum value");
- return "Unexpected enum value";
- }
-}
-
+
+static bool IsIntegerType(NScheme::TTypeId typeId) {
+ switch (typeId) {
+ case NScheme::NTypeIds::Bool:
+
+ case NScheme::NTypeIds::Int8:
+ case NScheme::NTypeIds::Uint8:
+ case NScheme::NTypeIds::Int16:
+ case NScheme::NTypeIds::Uint16:
+ case NScheme::NTypeIds::Int32:
+ case NScheme::NTypeIds::Uint32:
+ case NScheme::NTypeIds::Int64:
+ case NScheme::NTypeIds::Uint64:
+
+ case NScheme::NTypeIds::Date:
+ case NScheme::NTypeIds::Datetime:
+ case NScheme::NTypeIds::Timestamp:
+ case NScheme::NTypeIds::Interval:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+TSerializedCellVec ChooseSplitKeyByHistogram(const NKikimrTableStats::THistogram& histogram, const TConstArrayRef<NScheme::TTypeId> &keyColumnTypes) {
+ ui64 bucketsCount = histogram.BucketsSize();
+ ui64 idxLo = bucketsCount * 0.33;
+ ui64 idxMed = bucketsCount * 0.5;
+ ui64 idxHi = bucketsCount * 0.66;
+
+ TSerializedCellVec keyLo(histogram.GetBuckets(idxLo).GetKey());
+ TSerializedCellVec keyMed(histogram.GetBuckets(idxMed).GetKey());
+ TSerializedCellVec keyHi(histogram.GetBuckets(idxHi).GetKey());
+
+ TVector<TCell> splitKey(keyMed.GetCells().size());
+
+ for (size_t i = 0; i < keyMed.GetCells().size(); ++i) {
+ NScheme::TTypeId columnType = keyColumnTypes[i];
+
+ if (0 == CompareTypedCells(keyLo.GetCells()[i], keyHi.GetCells()[i], columnType)) {
+ // lo == hi, so we add this value and proceed to the next column
+ splitKey[i] = keyLo.GetCells()[i];
+ continue;
+ }
+
+ if (0 != CompareTypedCells(keyLo.GetCells()[i], keyMed.GetCells()[i], columnType)) {
+ // med != lo
+ splitKey[i] = keyMed.GetCells()[i];
+ } else {
+ // med == lo and med != hi, so we want to find a value that is > med and <= hi
+ if (IsIntegerType(columnType) && !keyMed.GetCells()[i].IsNull()) {
+                // For integer types we can add 1 to med
+ ui64 val = 0;
+ size_t sz = keyMed.GetCells()[i].Size();
+ memcpy(&val, keyMed.GetCells()[i].Data(), sz);
+ val++;
+ splitKey[i] = TCell((const char*)&val, sz);
+ } else {
+ // For other types let's do binary search between med and hi to find smallest key > med
+
+ // Compares only i-th cell in keys
+ auto fnCmpCurrentCell = [i, columnType] (const auto& bucket1, const auto& bucket2) {
+ TSerializedCellVec key1(bucket1.GetKey());
+ TSerializedCellVec key2(bucket2.GetKey());
+ return CompareTypedCells(key1.GetCells()[i], key2.GetCells()[i], columnType) < 0;
+ };
+ const auto bucketsBegin = histogram.GetBuckets().begin();
+ const auto it = UpperBound(
+ bucketsBegin + idxMed,
+ bucketsBegin + idxHi,
+ histogram.GetBuckets(idxMed),
+ fnCmpCurrentCell);
+ TSerializedCellVec keyFound(it->GetKey());
+ splitKey[i] = keyFound.GetCells()[i];
+ }
+ }
+ break;
+ }
+
+ return TSerializedCellVec(TSerializedCellVec::Serialize(splitKey));
+}
+
+TSerializedCellVec DoFindSplitKey(const TVector<std::pair<TSerializedCellVec, ui64>>& keysHist,
+ const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes,
+ const size_t prefixSize)
+{
+ ui64 total = keysHist.back().second;
+
+ // Compares bucket value
+ auto fnValueLess = [] (ui64 val, const auto& bucket) {
+ return val < bucket.second;
+ };
+
+ // Find the position of total/2
+ auto halfIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.5, fnValueLess);
+ auto loIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.1, fnValueLess);
+ auto hiIt = std::upper_bound(keysHist.begin(), keysHist.end(), total*0.9, fnValueLess);
+
+ auto fnCmp = [&keyColumnTypes, prefixSize] (const auto& bucket1, const auto& bucket2) {
+ return CompareTypedCellVectors(bucket1.first.GetCells().data(), bucket2.first.GetCells().data(),
+ keyColumnTypes.data(),
+ std::min(bucket1.first.GetCells().size(), prefixSize), std::min(bucket2.first.GetCells().size(), prefixSize));
+ };
+
+    // Check if half key is not equal to low and high keys
+ if (fnCmp(*halfIt, *loIt) == 0)
+ return TSerializedCellVec();
+ if (fnCmp(*halfIt, *hiIt) == 0)
+ return TSerializedCellVec();
+
+ // Build split key by leaving the prefix and extending it with NULLs
+ TVector<TCell> splitKey(halfIt->first.GetCells().begin(), halfIt->first.GetCells().end());
+ splitKey.resize(prefixSize);
+ splitKey.resize(keyColumnTypes.size());
+
+ return TSerializedCellVec(TSerializedCellVec::Serialize(splitKey));
+}
+
+TSerializedCellVec ChooseSplitKeyByKeySample(const NKikimrTableStats::THistogram& keySample, const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes) {
+ TVector<std::pair<TSerializedCellVec, ui64>> keysHist;
+ for (const auto& s : keySample.GetBuckets()) {
+ keysHist.emplace_back(std::make_pair(TSerializedCellVec(s.GetKey()), s.GetValue()));
+ }
+
+ auto fnCmp = [&keyColumnTypes] (const auto& key1, const auto& key2) {
+ return CompareTypedCellVectors(key1.first.GetCells().data(), key2.first.GetCells().data(),
+ keyColumnTypes.data(),
+ key1.first.GetCells().size(), key2.first.GetCells().size());
+ };
+
+ Sort(keysHist, [&fnCmp] (const auto& key1, const auto& key2) { return fnCmp(key1, key2) < 0; });
+
+ // The keys are now sorted. Next we convert the stats into a histogram by accumulating
+ // stats for all previous keys at each key.
+ size_t last = 0;
+ for (size_t i = 1; i < keysHist.size(); ++i) {
+ // Accumulate stats
+ keysHist[i].second += keysHist[i-1].second;
+
+ if (fnCmp(keysHist[i], keysHist[last]) == 0) {
+ // Merge equal keys
+ keysHist[last].second = keysHist[i].second;
+ } else {
+ ++last;
+ if (last != i) {
+ keysHist[last] = keysHist[i];
+ }
+ }
+ }
+ keysHist.resize(std::min(keysHist.size(), last + 1));
+
+ if (keysHist.size() < 2)
+ return TSerializedCellVec();
+
+ // Find the median key with the shortest prefix
+ size_t minPrefix = 0;
+ size_t maxPrefix = keyColumnTypes.size();
+
+ // Binary search for shortest prefix that can be used to split the load
+ TSerializedCellVec splitKey;
+ while (minPrefix + 1 < maxPrefix) {
+ size_t prefixSize = (minPrefix + maxPrefix + 1) / 2;
+ splitKey = DoFindSplitKey(keysHist, keyColumnTypes, prefixSize);
+ if (splitKey.GetCells().empty()) {
+ minPrefix = prefixSize;
+ } else {
+ maxPrefix = prefixSize;
+ }
+ }
+ splitKey = DoFindSplitKey(keysHist, keyColumnTypes, maxPrefix);
+
+ return splitKey;
+}
+
+enum struct ESplitReason {
+ NO_SPLIT = 0,
+ FAST_SPLIT_INDEX,
+ SPLIT_BY_SIZE,
+ SPLIT_BY_LOAD
+};
+
+const char* ToString(ESplitReason splitReason) {
+ switch (splitReason) {
+ case ESplitReason::NO_SPLIT:
+ return "No split";
+ case ESplitReason::FAST_SPLIT_INDEX:
+ return "Fast split index table";
+ case ESplitReason::SPLIT_BY_SIZE:
+ return "Split by size";
+ case ESplitReason::SPLIT_BY_LOAD:
+ return "Split by load";
+ default:
+ Y_VERIFY_DEBUG(!"Unexpected enum value");
+ return "Unexpected enum value";
+ }
+}
+
class TTxPartitionHistogram: public NTabletFlatExecutor::TTransactionBase<TSchemeShard> {
TEvDataShard::TEvGetTableStatsResult::TPtr Ev;
@@ -227,8 +227,8 @@ public:
void TSchemeShard::Handle(TEvDataShard::TEvGetTableStatsResult::TPtr& ev, const TActorContext& ctx) {
- const auto& rec = ev->Get()->Record;
-
+ const auto& rec = ev->Get()->Record;
+
auto datashardId = TTabletId(rec.GetDatashardId());
ui64 dataSize = rec.GetTableStats().GetDataSize();
ui64 rowCount = rec.GetTableStats().GetRowCount();
@@ -272,9 +272,9 @@ THolder<TProposeRequest> SplitRequest(
bool TTxPartitionHistogram::Execute(TTransactionContext& txc, const TActorContext& ctx) {
const auto& rec = Ev->Get()->Record;
- if (!rec.GetFullStatsReady())
+ if (!rec.GetFullStatsReady())
return true;
-
+
auto datashardId = TTabletId(rec.GetDatashardId());
TPathId tableId = InvalidPathId;
if (rec.HasTableOwnerId()) {
@@ -283,10 +283,10 @@ bool TTxPartitionHistogram::Execute(TTransactionContext& txc, const TActorContex
} else {
tableId = Self->MakeLocalId(TLocalPathId(rec.GetTableLocalId()));
}
- ui64 dataSize = rec.GetTableStats().GetDataSize();
- ui64 rowCount = rec.GetTableStats().GetRowCount();
-
- LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ ui64 dataSize = rec.GetTableStats().GetDataSize();
+ ui64 rowCount = rec.GetTableStats().GetRowCount();
+
+ LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TTxPartitionHistogram::Execute partition histogran"
<< " at tablet " << Self->SelfTabletId()
<< " from datashard " << datashardId
@@ -294,87 +294,87 @@ bool TTxPartitionHistogram::Execute(TTransactionContext& txc, const TActorContex
<< " state '" << DatashardStateName(rec.GetShardState()).data() << "'"
<< " dataSize " << dataSize
<< " rowCount " << rowCount);
-
+
if (!Self->Tables.contains(tableId))
return true;
-
+
TTableInfo::TPtr table = Self->Tables[tableId];
-
+
if (!Self->TabletIdToShardIdx.contains(datashardId))
return true;
-
+
// Don't split/merge backup tables
if (table->IsBackup)
return true;
auto shardIdx = Self->TabletIdToShardIdx[datashardId];
-
- ESplitReason splitReason = ESplitReason::NO_SPLIT;
+
+ ESplitReason splitReason = ESplitReason::NO_SPLIT;
if (table->CheckFastSplitForPartition(Self->SplitSettings, shardIdx, dataSize, rowCount)) {
const TTableInfo* parentTable = Self->GetMainTableForIndex(tableId);
- if (parentTable && table->GetPartitions().size() < parentTable->GetPartitions().size()) {
- splitReason = ESplitReason::FAST_SPLIT_INDEX;
- }
- }
-
- if (splitReason == ESplitReason::NO_SPLIT && dataSize >= table->GetShardSizeToSplit()) {
- splitReason = ESplitReason::SPLIT_BY_SIZE;
- }
-
+ if (parentTable && table->GetPartitions().size() < parentTable->GetPartitions().size()) {
+ splitReason = ESplitReason::FAST_SPLIT_INDEX;
+ }
+ }
+
+ if (splitReason == ESplitReason::NO_SPLIT && dataSize >= table->GetShardSizeToSplit()) {
+ splitReason = ESplitReason::SPLIT_BY_SIZE;
+ }
+
if (splitReason == ESplitReason::NO_SPLIT && table->CheckSplitByLoad(Self->SplitSettings, shardIdx, dataSize, rowCount)) {
- splitReason = ESplitReason::SPLIT_BY_LOAD;
- }
-
+ splitReason = ESplitReason::SPLIT_BY_LOAD;
+ }
+
if (splitReason == ESplitReason::NO_SPLIT) {
return true;
}
-
-
+
+
if (table->GetPartitions().size() >= table->GetMaxPartitionsCount()) {
return true;
}
- TSmallVec<NScheme::TTypeId> keyColumnTypes(table->KeyColumnIds.size());
- for (size_t ki = 0; ki < table->KeyColumnIds.size(); ++ki) {
- keyColumnTypes[ki] = table->Columns.FindPtr(table->KeyColumnIds[ki])->PType;
- }
-
- TSerializedCellVec splitKey;
- if (splitReason == ESplitReason::SPLIT_BY_LOAD) {
- // TODO: choose split key based on access stats for split by load
- const auto& keySample = rec.GetTableStats().GetKeyAccessSample();
- splitKey = ChooseSplitKeyByKeySample(keySample, keyColumnTypes);
-
-        // TODO: check that the chosen key is valid
- } else {
- // Choose number of parts and split boundaries
- const auto& histogram = rec.GetTableStats().GetDataSizeHistogram();
+ TSmallVec<NScheme::TTypeId> keyColumnTypes(table->KeyColumnIds.size());
+ for (size_t ki = 0; ki < table->KeyColumnIds.size(); ++ki) {
+ keyColumnTypes[ki] = table->Columns.FindPtr(table->KeyColumnIds[ki])->PType;
+ }
+
+ TSerializedCellVec splitKey;
+ if (splitReason == ESplitReason::SPLIT_BY_LOAD) {
+ // TODO: choose split key based on access stats for split by load
+ const auto& keySample = rec.GetTableStats().GetKeyAccessSample();
+ splitKey = ChooseSplitKeyByKeySample(keySample, keyColumnTypes);
+
+        // TODO: check that the chosen key is valid
+ } else {
+ // Choose number of parts and split boundaries
+ const auto& histogram = rec.GetTableStats().GetDataSizeHistogram();
if (histogram.BucketsSize() < 2) {
return true;
}
-
- splitKey = ChooseSplitKeyByHistogram(histogram, keyColumnTypes);
-
- // Split key must not be less than the first key
- TSerializedCellVec lowestKey(histogram.GetBuckets(0).GetKey());
- if (0 < CompareTypedCellVectors(lowestKey.GetCells().data(), splitKey.GetCells().data(),
- keyColumnTypes.data(),
- lowestKey.GetCells().size(), splitKey.GetCells().size()))
- {
- LOG_WARN(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Failed to find proper split key for '%s' of datashard %" PRIu64,
- ToString(splitReason), datashardId);
+
+ splitKey = ChooseSplitKeyByHistogram(histogram, keyColumnTypes);
+
+ // Split key must not be less than the first key
+ TSerializedCellVec lowestKey(histogram.GetBuckets(0).GetKey());
+ if (0 < CompareTypedCellVectors(lowestKey.GetCells().data(), splitKey.GetCells().data(),
+ keyColumnTypes.data(),
+ lowestKey.GetCells().size(), splitKey.GetCells().size()))
+ {
+ LOG_WARN(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Failed to find proper split key for '%s' of datashard %" PRIu64,
+ ToString(splitReason), datashardId);
return true;
- }
- }
-
- if (splitKey.GetBuffer().empty()) {
- LOG_WARN(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Failed to find proper split key for '%s' of datashard %" PRIu64,
- ToString(splitReason), datashardId);
+ }
+ }
+
+ if (splitKey.GetBuffer().empty()) {
+ LOG_WARN(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Failed to find proper split key for '%s' of datashard %" PRIu64,
+ ToString(splitReason), datashardId);
return true;
- }
-
+ }
+
TTxId txId = Self->GetCachedTxId(ctx);
if (!txId) {
@@ -386,7 +386,7 @@ bool TTxPartitionHistogram::Execute(TTransactionContext& txc, const TActorContex
}
auto request = SplitRequest(Self, txId, tableId, datashardId, splitKey.GetBuffer());
-
+
TMemoryChanges memChanges;
TStorageChanges dbChanges;
TOperationContext context{Self, txc, ctx, SplitOpSideEffects, memChanges, dbChanges};
@@ -397,11 +397,11 @@ bool TTxPartitionHistogram::Execute(TTransactionContext& txc, const TActorContex
SplitOpSideEffects.ApplyOnExecute(Self, txc, ctx);
return true;
-}
-
+}
+
void TTxPartitionHistogram::Complete(const TActorContext& ctx) {
SplitOpSideEffects.ApplyOnComplete(Self, ctx);
}
-}}
+}}
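The functions restored above implement split-key selection. ChooseSplitKeyByHistogram samples the data-size histogram near the 33rd, 50th and 66th percentiles and walks the key columns: while the low and high samples agree it copies the value, and at the first column where they diverge it picks something strictly greater than the low sample's value in that column (the median itself, the median plus one for integer types, or an upper-bound search between the median and high buckets for other types), leaving the remaining columns as NULLs. ChooseSplitKeyByKeySample instead folds a key access sample into a cumulative histogram and binary-searches for the shortest key prefix whose median (by access count) differs from both the 10th- and 90th-percentile keys. A much simplified sketch of the histogram variant, assuming fixed-length integer keys already ordered by bucket:

    #include <cstdint>
    #include <vector>

    using TKeySketch = std::vector<int64_t>;  // stands in for a decoded TSerializedCellVec

    // Column-by-column split-key choice over an ordered list of bucket keys.
    TKeySketch ChooseSplitKeySketch(const std::vector<TKeySketch>& buckets) {
        if (buckets.size() < 2)
            return {};  // caller treats an empty key as "nothing to split on"
        const TKeySketch& lo  = buckets[buckets.size() * 33 / 100];
        const TKeySketch& med = buckets[buckets.size() / 2];
        const TKeySketch& hi  = buckets[buckets.size() * 66 / 100];

        TKeySketch splitKey(med.size(), 0);
        for (size_t i = 0; i < med.size(); ++i) {
            if (lo[i] == hi[i]) {          // no spread in this column yet: copy it, go deeper
                splitKey[i] = lo[i];
                continue;
            }
            if (med[i] != lo[i])
                splitKey[i] = med[i];      // the median already separates lo from hi here
            else
                splitKey[i] = med[i] + 1;  // integer case of the diff: bump the median;
                                           // other types binary-search the first bucket
                                           // whose i-th cell exceeds the median's
            break;                         // later columns stay at a default value
                                           // (the real code leaves them as NULL cells)
        }
        return splitKey;
    }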
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.cpp b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
index 910b4c51a5d..0715543a229 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
@@ -1,6 +1,6 @@
#include "schemeshard.h"
#include "schemeshard_impl.h"
-
+
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
#include <ydb/core/tablet/tablet_counters_aggregator.h>
#include <ydb/core/tablet/tablet_counters_protobuf.h>
@@ -10,10 +10,10 @@
#include <ydb/core/engine/mkql_proto.h>
#include <ydb/core/sys_view/partition_stats/partition_stats.h>
#include <ydb/library/yql/minikql/mkql_type_ops.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
+
const ui64 NEW_TABLE_ALTER_VERSION = 1;
namespace {
@@ -1186,27 +1186,27 @@ TShardIdx TSchemeShard::RegisterShardInfo(const TShardIdx& shardIdx, const TShar
}
const TTableInfo* TSchemeShard::GetMainTableForIndex(TPathId indexTableId) const {
- if (!Tables.contains(indexTableId))
- return nullptr;
-
- auto pathEl = PathsById.FindPtr(indexTableId);
- if (!pathEl)
- return nullptr;
-
- TPathId parentId = (*pathEl)->ParentPathId;
- auto parentEl = PathsById.FindPtr(parentId);
-
- if (!parentEl || !(*parentEl)->IsTableIndex())
- return nullptr;
-
- TPathId grandParentId = (*parentEl)->ParentPathId;
-
- if (!Tables.contains(grandParentId))
- return nullptr;
-
- return Tables.FindPtr(grandParentId)->Get();
-}
-
+ if (!Tables.contains(indexTableId))
+ return nullptr;
+
+ auto pathEl = PathsById.FindPtr(indexTableId);
+ if (!pathEl)
+ return nullptr;
+
+ TPathId parentId = (*pathEl)->ParentPathId;
+ auto parentEl = PathsById.FindPtr(parentId);
+
+ if (!parentEl || !(*parentEl)->IsTableIndex())
+ return nullptr;
+
+ TPathId grandParentId = (*parentEl)->ParentPathId;
+
+ if (!Tables.contains(grandParentId))
+ return nullptr;
+
+ return Tables.FindPtr(grandParentId)->Get();
+}
+
TPathElement::EPathState TSchemeShard::CalcPathState(TTxState::ETxType txType, TPathElement::EPathState oldState) {
// Do not change state if PathId is dropped. It can't become alive.
switch (oldState) {
@@ -1304,55 +1304,55 @@ TPathElement::EPathState TSchemeShard::CalcPathState(TTxState::ETxType txType, T
}
bool TSchemeShard::TRwTxBase::Execute(NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx) {
- THPTimer cpuTimer;
-
- // Transactions don't read anything from the DB, they all use in-mem structures and do writes to the DB
- // That's why transactions should never be retried
- txc.DB.NoMoreReadsForTx();
-
- try {
- DoExecute(txc, ctx);
+ THPTimer cpuTimer;
+
+ // Transactions don't read anything from the DB, they all use in-mem structures and do writes to the DB
+ // That's why transactions should never be retried
+ txc.DB.NoMoreReadsForTx();
+
+ try {
+ DoExecute(txc, ctx);
} catch (const std::exception& ex) {
Y_FAIL_S("there must be no leaked exceptions: " << ex.what() << ", at schemeshard: " << Self->TabletID());
- } catch (...) {
+ } catch (...) {
Y_FAIL_S("there must be no leaked exceptions, at schemeshard: " << Self->TabletID());
- }
-
- ExecuteDuration = TDuration::Seconds(cpuTimer.Passed());
- return true;
-}
-
+ }
+
+ ExecuteDuration = TDuration::Seconds(cpuTimer.Passed());
+ return true;
+}
+
void TSchemeShard::TRwTxBase::Complete(const TActorContext &ctx) {
- DoComplete(ctx);
-}
-
+ DoComplete(ctx);
+}
+
void TSchemeShard::PersistTableIndex(NIceDb::TNiceDb& db, const TPathId& pathId) {
Y_VERIFY(PathsById.contains(pathId));
TPathElement::TPtr elemnt = PathsById.at(pathId);
Y_VERIFY(Indexes.contains(pathId));
TTableIndexInfo::TPtr index = Indexes.at(pathId);
-
+
Y_VERIFY(IsLocalId(elemnt->PathId));
Y_VERIFY(elemnt->IsTableIndex());
TTableIndexInfo::TPtr alterData = index->AlterData;
Y_VERIFY(alterData);
Y_VERIFY(index->AlterVersion < alterData->AlterVersion);
-
+
db.Table<Schema::TableIndex>().Key(elemnt->PathId.LocalPathId).Update(
NIceDb::TUpdate<Schema::TableIndex::AlterVersion>(alterData->AlterVersion),
NIceDb::TUpdate<Schema::TableIndex::IndexType>(alterData->Type),
NIceDb::TUpdate<Schema::TableIndex::State>(alterData->State));
-
+
db.Table<Schema::TableIndexAlterData>().Key(elemnt->PathId.LocalPathId).Delete();
-
+
for (ui32 keyIdx = 0; keyIdx < alterData->IndexKeys.size(); ++keyIdx) {
db.Table<Schema::TableIndexKeys>().Key(elemnt->PathId.LocalPathId, keyIdx).Update(
NIceDb::TUpdate<Schema::TableIndexKeys::KeyName>(alterData->IndexKeys[keyIdx]));
-
+
db.Table<Schema::TableIndexKeysAlterData>().Key(elemnt->PathId.LocalPathId, keyIdx).Delete();
- }
+ }
for (ui32 dataColIdx = 0; dataColIdx < alterData->IndexDataColumns.size(); ++dataColIdx) {
db.Table<Schema::TableIndexDataColumns>().Key(elemnt->PathId.OwnerId, elemnt->PathId.LocalPathId, dataColIdx).Update(
@@ -1360,37 +1360,37 @@ void TSchemeShard::PersistTableIndex(NIceDb::TNiceDb& db, const TPathId& pathId)
db.Table<Schema::TableIndexDataColumnsAlterData>().Key(elemnt->PathId.OwnerId, elemnt->PathId.LocalPathId, dataColIdx).Delete();
}
-}
-
+}
+
void TSchemeShard::PersistTableIndexAlterData(NIceDb::TNiceDb& db, const TPathId& pathId) {
Y_VERIFY(PathsById.contains(pathId));
TPathElement::TPtr elemnt = PathsById.at(pathId);
Y_VERIFY(Indexes.contains(pathId));
TTableIndexInfo::TPtr index = Indexes.at(pathId);
-
+
Y_VERIFY(IsLocalId(pathId));
Y_VERIFY(elemnt->IsTableIndex());
TTableIndexInfo::TPtr alterData = index->AlterData;
Y_VERIFY(alterData);
-
+
db.Table<Schema::TableIndexAlterData>().Key(elemnt->PathId.LocalPathId).Update(
NIceDb::TUpdate<Schema::TableIndexAlterData::AlterVersion>(alterData->AlterVersion),
NIceDb::TUpdate<Schema::TableIndexAlterData::IndexType>(alterData->Type),
NIceDb::TUpdate<Schema::TableIndexAlterData::State>(alterData->State));
-
+
for (ui32 keyIdx = 0; keyIdx < alterData->IndexKeys.size(); ++keyIdx) {
db.Table<Schema::TableIndexKeysAlterData>().Key(elemnt->PathId.LocalPathId, keyIdx).Update(
NIceDb::TUpdate<Schema::TableIndexKeysAlterData::KeyName>(alterData->IndexKeys[keyIdx]));
- }
+ }
for (ui32 dataColIdx = 0; dataColIdx < alterData->IndexDataColumns.size(); ++dataColIdx) {
db.Table<Schema::TableIndexDataColumnsAlterData>().Key(elemnt->PathId.OwnerId, elemnt->PathId.LocalPathId, dataColIdx).Update(
NIceDb::TUpdate<Schema::TableIndexDataColumnsAlterData::DataColumnName>(alterData->IndexDataColumns[dataColIdx]));
}
-}
-
+}
+
void TSchemeShard::PersistCdcStream(NIceDb::TNiceDb& db, const TPathId& pathId) {
Y_VERIFY(PathsById.contains(pathId));
auto path = PathsById.at(pathId);
@@ -1586,8 +1586,8 @@ void TSchemeShard::PersistPath(NIceDb::TNiceDb& db, const TPathId& pathId) {
NIceDb::TUpdate<Schema::MigratedPaths::ACLVersion>(elemnt->ACLVersion)
);
}
-}
-
+}
+
void TSchemeShard::PersistRemovePath(NIceDb::TNiceDb& db, const TPathElement::TPtr path) {
Y_VERIFY(path->Dropped() && path->DbRefCount == 0);
@@ -1938,19 +1938,19 @@ void TSchemeShard::PersistTxState(NIceDb::TNiceDb& db, const TOperationId opId)
Y_VERIFY(txState.State != TTxState::Invalid);
TString extraData;
if (txState.TxType == TTxState::TxSplitTablePartition || txState.TxType == TTxState::TxMergeTablePartition) {
- Y_VERIFY(txState.SplitDescription, "Split Tx must have non-empty split description");
- bool serializeRes = txState.SplitDescription->SerializeToString(&extraData);
- Y_VERIFY(serializeRes);
- } else if (txState.TxType == TTxState::TxAlterTable) {
+ Y_VERIFY(txState.SplitDescription, "Split Tx must have non-empty split description");
+ bool serializeRes = txState.SplitDescription->SerializeToString(&extraData);
+ Y_VERIFY(serializeRes);
+ } else if (txState.TxType == TTxState::TxAlterTable) {
TPathId pathId = txState.TargetPathId;
-
+
Y_VERIFY_S(PathsById.contains(pathId), "Path id " << pathId << " doesn't exist");
Y_VERIFY_S(PathsById.at(pathId)->IsTable(), "Path id " << pathId << " is not a table");
        Y_VERIFY_S(Tables.FindPtr(pathId), "Table " << pathId << " doesn't exist");
-
+
TTableInfo::TPtr tableInfo = Tables.at(pathId);
- extraData = tableInfo->SerializeAlterExtraData();
- }
+ extraData = tableInfo->SerializeAlterExtraData();
+ }
db.Table<Schema::TxInFlightV2>().Key(opId.GetTxId(), opId.GetSubTxId()).Update(
NIceDb::TUpdate<Schema::TxInFlightV2::TxType>((ui8)txState.TxType),
NIceDb::TUpdate<Schema::TxInFlightV2::TargetPathId>(txState.TargetPathId.LocalPathId),
@@ -1962,13 +1962,13 @@ void TSchemeShard::PersistTxState(NIceDb::TNiceDb& db, const TOperationId opId)
NIceDb::TUpdate<Schema::TxInFlightV2::BuildIndexId>(txState.BuildIndexId),
NIceDb::TUpdate<Schema::TxInFlightV2::SourceLocalPathId>(txState.SourcePathId.LocalPathId),
NIceDb::TUpdate<Schema::TxInFlightV2::SourceOwnerId>(txState.SourcePathId.OwnerId)
- );
+ );
for (const auto& shardOp : txState.Shards) {
PersistUpdateTxShard(db, opId, shardOp.Idx, shardOp.Operation);
}
-}
-
+}
+
void TSchemeShard::PersistTxMinStep(NIceDb::TNiceDb& db, const TOperationId opId, TStepId minStep) {
db.Table<Schema::TxInFlightV2>().Key(opId.GetTxId(), opId.GetSubTxId()).Update(
NIceDb::TUpdate<Schema::TxInFlightV2::MinStep>(minStep)
@@ -1989,8 +1989,8 @@ void TSchemeShard::ChangeTxState(NIceDb::TNiceDb& db, const TOperationId opId, T
FindTx(opId)->State = newState;
db.Table<Schema::TxInFlightV2>().Key(opId.GetTxId(), opId.GetSubTxId()).Update(
NIceDb::TUpdate<Schema::TxInFlightV2::State>(newState));
-}
-
+}
+
void TSchemeShard::PersistCancelTx(NIceDb::TNiceDb &db, const TOperationId opId, const TTxState &txState) {
Y_VERIFY(txState.TxType == TTxState::TxBackup || txState.TxType == TTxState::TxRestore);
@@ -2015,9 +2015,9 @@ void TSchemeShard::PersistTable(NIceDb::TNiceDb& db, const TPathId tableId) {
const TTableInfo::TPtr tableInfo = Tables.at(tableId);
PersistTableAltered(db, tableId, tableInfo);
- PersistTablePartitioning(db, tableId, tableInfo);
-}
-
+ PersistTablePartitioning(db, tableId, tableInfo);
+}
+
void TSchemeShard::PersistChannelsBinding(NIceDb::TNiceDb& db, const TShardIdx shardId, const TChannelsBindings& bindedChannels) {
for (ui32 channelId = 0; channelId < bindedChannels.size(); ++channelId) {
const auto& bind = bindedChannels[channelId];
@@ -2034,8 +2034,8 @@ void TSchemeShard::PersistChannelsBinding(NIceDb::TNiceDb& db, const TShardIdx s
}
void TSchemeShard::PersistTablePartitioning(NIceDb::TNiceDb& db, const TPathId pathId, const TTableInfo::TPtr tableInfo) {
- for (ui64 pi = 0; pi < tableInfo->GetPartitions().size(); ++pi) {
- const auto& partition = tableInfo->GetPartitions()[pi];
+ for (ui64 pi = 0; pi < tableInfo->GetPartitions().size(); ++pi) {
+ const auto& partition = tableInfo->GetPartitions()[pi];
if (IsLocalId(pathId)) {
Y_VERIFY(IsLocalId(partition.ShardIdx));
db.Table<Schema::TablePartitions>().Key(pathId.LocalPathId, pi).Update(
@@ -2051,7 +2051,7 @@ void TSchemeShard::PersistTablePartitioning(NIceDb::TNiceDb& db, const TPathId p
NIceDb::TUpdate<Schema::MigratedTablePartitions::LastCondErase>(partition.LastCondErase.GetValue()),
NIceDb::TUpdate<Schema::MigratedTablePartitions::NextCondErase>(partition.NextCondErase.GetValue()));
}
- }
+ }
if (IsLocalId(pathId)) {
db.Table<Schema::Tables>().Key(pathId.LocalPathId).Update(
NIceDb::TUpdate<Schema::Tables::PartitioningVersion>(++tableInfo->PartitioningVersion));
@@ -2059,8 +2059,8 @@ void TSchemeShard::PersistTablePartitioning(NIceDb::TNiceDb& db, const TPathId p
db.Table<Schema::MigratedTables>().Key(pathId.OwnerId, pathId.LocalPathId).Update(
NIceDb::TUpdate<Schema::MigratedTables::PartitioningVersion>(++tableInfo->PartitioningVersion));
}
-}
-
+}
+
void TSchemeShard::DeleteTablePartitioning(NIceDb::TNiceDb& db, const TPathId pathId, const TTableInfo::TPtr tableInfo) {
const auto& partitions = tableInfo->GetPartitions();
for (ui64 pi = 0; pi < partitions.size(); ++pi) {
@@ -2071,9 +2071,9 @@ void TSchemeShard::DeleteTablePartitioning(NIceDb::TNiceDb& db, const TPathId pa
db.Table<Schema::TablePartitionStats>().Key(pathId.OwnerId, pathId.LocalPathId, pi).Delete();
CompactionQueue->Remove(TShardCompactionInfo(partitions[pi].ShardIdx));
- }
-}
-
+ }
+}
+
void TSchemeShard::PersistTablePartitionCondErase(NIceDb::TNiceDb& db, const TPathId& pathId, ui64 id, const TTableInfo::TPtr tableInfo) {
const auto& partition = tableInfo->GetPartitions()[id];
@@ -2118,7 +2118,7 @@ void TSchemeShard::PersistTablePartitionStats(NIceDb::TNiceDb& db, const TPathId
NIceDb::TUpdate<Schema::TablePartitionStats::RangeReads>(stats.RangeReads),
NIceDb::TUpdate<Schema::TablePartitionStats::RangeReadRows>(stats.RangeReadRows),
- NIceDb::TUpdate<Schema::TablePartitionStats::CPU>(stats.GetCurrentRawCpuUsage()),
+ NIceDb::TUpdate<Schema::TablePartitionStats::CPU>(stats.GetCurrentRawCpuUsage()),
NIceDb::TUpdate<Schema::TablePartitionStats::Memory>(stats.Memory),
NIceDb::TUpdate<Schema::TablePartitionStats::Network>(stats.Network),
NIceDb::TUpdate<Schema::TablePartitionStats::Storage>(stats.Storage),
@@ -2575,9 +2575,9 @@ void TSchemeShard::PersistShardsToDelete(NIceDb::TNiceDb& db, const THashSet<TSh
} else {
db.Table<Schema::MigratedShardsToDelete>().Key(shardIdx.GetOwnerId(), shardIdx.GetLocalId()).Update();
}
- }
-}
-
+ }
+}
+
void TSchemeShard::PersistShardDeleted(NIceDb::TNiceDb& db, TShardIdx shardIdx, const TChannelsBindings& bindedChannels) {
if (shardIdx.GetOwnerId() == TabletID()) {
db.Table<Schema::ShardsToDelete>().Key(shardIdx.GetLocalId()).Delete();
@@ -2594,8 +2594,8 @@ void TSchemeShard::PersistShardDeleted(NIceDb::TNiceDb& db, TShardIdx shardIdx,
db.Table<Schema::MigratedChannelsBinding>().Key(shardIdx.GetOwnerId(), shardIdx.GetLocalId(), channelId).Delete();
}
db.Table<Schema::MigratedTableShardPartitionConfigs>().Key(shardIdx.GetOwnerId(), shardIdx.GetLocalId()).Delete();
-}
-
+}
+
void TSchemeShard::PersistUnknownShardDeleted(NIceDb::TNiceDb& db, TShardIdx shardIdx) {
if (shardIdx.GetOwnerId() == TabletID()) {
db.Table<Schema::ShardsToDelete>().Key(shardIdx.GetLocalId()).Delete();
@@ -3340,27 +3340,27 @@ void TSchemeShard::PersistRemovePublishingPath(NIceDb::TNiceDb& db, TTxId txId,
}
TTabletId TSchemeShard::GetGlobalHive(const TActorContext& ctx) const {
- auto domainsInfo = AppData(ctx)->DomainsInfo;
-
- ui32 domainUid = domainsInfo->GetDomainUidByTabletId(TabletID());
- auto domain = domainsInfo->GetDomain(domainUid);
+ auto domainsInfo = AppData(ctx)->DomainsInfo;
+
+ ui32 domainUid = domainsInfo->GetDomainUidByTabletId(TabletID());
+ auto domain = domainsInfo->GetDomain(domainUid);
const ui32 hiveIdx = Max<ui32>();
- ui32 hiveUid = domain.GetHiveUidByIdx(hiveIdx);
+ ui32 hiveUid = domain.GetHiveUidByIdx(hiveIdx);
return TTabletId(domainsInfo->GetHive(hiveUid));
-}
-
+}
+
TShardIdx TSchemeShard::GetShardIdx(TTabletId tabletId) const {
- const auto* pIdx = TabletIdToShardIdx.FindPtr(tabletId);
+ const auto* pIdx = TabletIdToShardIdx.FindPtr(tabletId);
if (!pIdx) {
return InvalidShardIdx;
}
-
+
Y_VERIFY(*pIdx != InvalidShardIdx);
- return *pIdx;
-}
-
+ return *pIdx;
+}
+
TShardIdx TSchemeShard::MustGetShardIdx(TTabletId tabletId) const {
auto shardIdx = GetShardIdx(tabletId);
Y_VERIFY_S(shardIdx != InvalidShardIdx, "Cannot find shard idx for tablet " << tabletId);
@@ -3429,8 +3429,8 @@ void TSchemeShard::DoShardsDeletion(const THashSet<TShardIdx>& shardIdxs, const
const auto& shards = item.second;
ShardDeleter.SendDeleteRequests(hive, shards, ShardInfos, ctx);
}
-}
-
+}
+
NKikimrSchemeOp::TPathVersion TSchemeShard::GetPathVersion(const TPath& path) const {
NKikimrSchemeOp::TPathVersion result;
@@ -3625,25 +3625,25 @@ TActorId TSchemeShard::TPipeClientFactory::CreateClient(const TActorContext& ctx
}
TSchemeShard::TSchemeShard(const TActorId &tablet, TTabletStorageInfo *info)
- : TActor(&TThis::StateInit)
+ : TActor(&TThis::StateInit)
, TTabletExecutedFlat(info, tablet, new NMiniKQL::TMiniKQLFactory)
, AllowConditionalEraseOperations(1, 0, 1)
, AllowServerlessStorageBilling(0, 0, 1)
- , SplitSettings()
- , IsReadOnlyMode(false)
+ , SplitSettings()
+ , IsReadOnlyMode(false)
, ParentDomainLink(this)
, SubDomainsLinks(this)
, PipeClientCache(NTabletPipe::CreateBoundedClientCache(
new NTabletPipe::TBoundedClientCacheConfig(),
GetPipeClientConfig(),
new TPipeClientFactory(this)))
- , PipeTracker(*PipeClientCache)
+ , PipeTracker(*PipeClientCache)
, CompactionStarter(this)
- , ShardDeleter(info->TabletID)
+ , ShardDeleter(info->TabletID)
, AllowDataColumnForIndexTable(0, 0, 1)
, EnableAsyncIndexes(0, 0, 1)
, EnableSchemeTransactionsAtSchemeShard(0, 0, 1)
-{
+{
TabletCountersPtr.Reset(new TProtobufTabletCounters<
ESimpleCounters_descriptor,
ECumulativeCounters_descriptor,
@@ -3651,10 +3651,10 @@ TSchemeShard::TSchemeShard(const TActorId &tablet, TTabletStorageInfo *info)
ETxTypes_descriptor
>());
TabletCounters = TabletCountersPtr.Get();
-
+
SelfPinger = new TSelfPinger(SelfTabletId(), TabletCounters);
-}
-
+}
+
const TDomainsInfo::TDomain& TSchemeShard::GetDomainDescription(const TActorContext &ctx) const {
auto appdata = AppData(ctx);
Y_VERIFY(appdata);
@@ -3701,7 +3701,7 @@ void TSchemeShard::Die(const TActorContext &ctx) {
ctx.Send(TxAllocatorClient, new TEvents::TEvPoisonPill());
ctx.Send(SysPartitionStatsCollector, new TEvents::TEvPoisonPill());
- ShardDeleter.Shutdown(ctx);
+ ShardDeleter.Shutdown(ctx);
ParentDomainLink.Shutdown(ctx);
PipeClientCache->Detach(ctx);
@@ -3709,18 +3709,18 @@ void TSchemeShard::Die(const TActorContext &ctx) {
if (CompactionQueue)
CompactionQueue->Shutdown(ctx);
- return IActor::Die(ctx);
-}
-
+ return IActor::Die(ctx);
+}
+
void TSchemeShard::OnDetach(const TActorContext &ctx) {
- Die(ctx);
-}
-
+ Die(ctx);
+}
+
void TSchemeShard::OnTabletDead(TEvTablet::TEvTabletDead::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
- Die(ctx);
-}
-
+ Die(ctx);
+}
+
static TVector<ui64> CollectTxAllocators(const TAppData *appData) {
TVector<ui64> allocators;
for (auto it: appData->DomainsInfo->Domains) {
@@ -3775,50 +3775,50 @@ void TSchemeShard::OnActivateExecutor(const TActorContext &ctx) {
SysPartitionStatsCollector = Register(NSysView::CreatePartitionStatsCollector().Release());
- SplitSettings.Register(appData->Icb);
-
+ SplitSettings.Register(appData->Icb);
+
Executor()->RegisterExternalTabletCounters(TabletCountersPtr);
Execute(CreateTxInitSchema(), ctx);
SubscribeConsoleConfigs(ctx);
-}
-
-// This is overridden as a noop in order to activate the table only at the end of Init transaction
-// when all the in-mem state has been populated
+}
+
+// This is overridden as a noop in order to activate the table only at the end of Init transaction
+// when all the in-mem state has been populated
void TSchemeShard::DefaultSignalTabletActive(const TActorContext &ctx) {
Y_UNUSED(ctx);
-}
-
+}
+
void TSchemeShard::Cleanup(const TActorContext &ctx) {
Y_UNUSED(ctx);
-}
-
+}
+
void TSchemeShard::Enqueue(STFUNC_SIG) {
Y_UNUSED(ctx);
Y_FAIL_S("No enqueue method emplemented."
<< " unhandled event type: " << ev->GetTypeRewrite()
<< " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
-}
-
+}
+
void TSchemeShard::StateInit(STFUNC_SIG) {
TRACE_EVENT(NKikimrServices::FLAT_TX_SCHEMESHARD);
- switch (ev->GetTypeRewrite()) {
+ switch (ev->GetTypeRewrite()) {
HFuncTraced(TEvents::TEvPoisonPill, Handle);
//console configs
HFuncTraced(NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse, Handle);
HFunc(NConsole::TEvConsole::TEvConfigNotificationRequest, Handle);
- default:
- StateInitImpl(ev, ctx);
- }
-}
-
+ default:
+ StateInitImpl(ev, ctx);
+ }
+}
+
void TSchemeShard::StateConfigure(STFUNC_SIG) {
- SelfPinger->OnAnyEvent(ctx);
-
+ SelfPinger->OnAnyEvent(ctx);
+
TRACE_EVENT(NKikimrServices::FLAT_TX_SCHEMESHARD);
- switch (ev->GetTypeRewrite()) {
+ switch (ev->GetTypeRewrite()) {
HFuncTraced(TEvents::TEvPoisonPill, Handle);
HFuncTraced(TEvSchemeShard::TEvInitRootShard, Handle);
@@ -3887,11 +3887,11 @@ void TSchemeShard::StateWork(STFUNC_SIG) {
HFuncTraced(TEvDataShard::TEvProposeTransactionResult, Handle);
HFuncTraced(TEvDataShard::TEvSchemaChanged, Handle);
HFuncTraced(TEvDataShard::TEvStateChanged, Handle);
- HFuncTraced(TEvDataShard::TEvInitSplitMergeDestinationAck, Handle);
- HFuncTraced(TEvDataShard::TEvSplitAck, Handle);
- HFuncTraced(TEvDataShard::TEvSplitPartitioningChangedAck, Handle);
- HFuncTraced(TEvDataShard::TEvPeriodicTableStats, Handle);
- HFuncTraced(TEvDataShard::TEvGetTableStatsResult, Handle);
+ HFuncTraced(TEvDataShard::TEvInitSplitMergeDestinationAck, Handle);
+ HFuncTraced(TEvDataShard::TEvSplitAck, Handle);
+ HFuncTraced(TEvDataShard::TEvSplitPartitioningChangedAck, Handle);
+ HFuncTraced(TEvDataShard::TEvPeriodicTableStats, Handle);
+ HFuncTraced(TEvDataShard::TEvGetTableStatsResult, Handle);
//
HFuncTraced(TEvColumnShard::TEvProposeTransactionResult, Handle);
@@ -4004,12 +4004,12 @@ void TSchemeShard::StateWork(STFUNC_SIG) {
<< " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
}
break;
- }
-}
-
+ }
+}
+
void TSchemeShard::BrokenState(STFUNC_SIG) {
TRACE_EVENT(NKikimrServices::FLAT_TX_SCHEMESHARD);
- switch (ev->GetTypeRewrite()) {
+ switch (ev->GetTypeRewrite()) {
HFuncTraced(TEvTablet::TEvTabletDead, HandleTabletDead);
default:
if (!HandleDefaultEvents(ev, ctx)) {
@@ -4019,9 +4019,9 @@ void TSchemeShard::BrokenState(STFUNC_SIG) {
<< " event: " << (ev->HasEvent() ? ev->GetBase()->ToString().data() : "serialized?"));
}
break;
- }
-}
-
+ }
+}
+
void TSchemeShard::DeleteSplitOp(TOperationId operationId, TTxState& txState) {
Y_VERIFY(txState.ShardsInProgress.empty(), "All shards should have already completed their steps");
@@ -4412,7 +4412,7 @@ void TSchemeShard::Handle(TEvDataShard::TEvSchemaChanged::TPtr& ev, const TActor
THolder(new TEvDataShard::TEvSchemaChangedResult(ui64(txId)));
ctx.Send(ackTo, event.Release());
return;
- }
+ }
auto partId = Operations.at(txId)->FindRelatedPartByTabletId(tableId, ctx);
if (partId == InvalidSubTxId) {
@@ -4428,8 +4428,8 @@ void TSchemeShard::Handle(TEvDataShard::TEvSchemaChanged::TPtr& ev, const TActor
}
Execute(CreateTxOperationReply(TOperationId(txId, partId), ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvDataShard::TEvStateChanged::TPtr &ev, const TActorContext &ctx) {
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Handle TEvStateChanged"
@@ -4483,41 +4483,41 @@ void TSchemeShard::Handle(TEvDataShard::TEvSplitPartitioningChangedAck::TPtr& ev
}
void TSchemeShard::Handle(TEvSchemeShard::TEvDescribeScheme::TPtr &ev, const TActorContext &ctx) {
- Execute(CreateTxDescribeScheme(ev), ctx);
-}
-
+ Execute(CreateTxDescribeScheme(ev), ctx);
+}
+
void TSchemeShard::Handle(TEvSchemeShard::TEvNotifyTxCompletion::TPtr &ev, const TActorContext &ctx) {
- Execute(CreateTxNotifyTxCompletion(ev), ctx);
-}
-
+ Execute(CreateTxNotifyTxCompletion(ev), ctx);
+}
+
void TSchemeShard::Handle(TEvSchemeShard::TEvInitRootShard::TPtr &ev, const TActorContext &ctx) {
Execute(CreateTxInitRootCompatibility(ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvSchemeShard::TEvInitTenantSchemeShard::TPtr &ev, const TActorContext &ctx) {
Execute(CreateTxInitTenantSchemeShard(ev), ctx);
}
void TSchemeShard::Handle(TEvSchemeShard::TEvModifySchemeTransaction::TPtr &ev, const TActorContext &ctx) {
- if (IsReadOnlyMode) {
- ui64 txId = ev->Get()->Record.GetTxId();
- ui64 selfId = TabletID();
+ if (IsReadOnlyMode) {
+ ui64 txId = ev->Get()->Record.GetTxId();
+ ui64 selfId = TabletID();
THolder<TEvSchemeShard::TEvModifySchemeTransactionResult> result =
THolder(new TEvSchemeShard::TEvModifySchemeTransactionResult(
NKikimrScheme::StatusReadOnly, txId, selfId, "Schema is in ReadOnly mode"));
-
- ctx.Send(ev->Sender, result.Release());
-
+
+ ctx.Send(ev->Sender, result.Release());
+
LOG_WARN_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Schema modification rejected because of ReadOnly mode"
<< ", at tablet: " << selfId
<< " txid: " << txId);
- return;
- }
-
+ return;
+ }
+
Execute(CreateTxOperationPropose(ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvPrivate::TEvProgressOperation::TPtr &ev, const TActorContext &ctx) {
const auto txId = TTxId(ev->Get()->TxId);
if (!Operations.contains(txId)) {
@@ -4529,8 +4529,8 @@ void TSchemeShard::Handle(TEvPrivate::TEvProgressOperation::TPtr &ev, const TAct
Y_VERIFY(ev->Get()->TxPartId != InvalidSubTxId);
Execute(CreateTxOperationProgress(TOperationId(txId, ev->Get()->TxPartId)), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActorContext &ctx) {
const auto tabletId = TTabletId(ev->Get()->TabletId);
const TActorId clientId = ev->Get()->ClientId;
@@ -4545,8 +4545,8 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TAc
if (PipeClientCache->OnConnect(ev)) {
return; //all Ok
- }
-
+ }
+
if (IndexBuildPipes.Has(clientId)) {
Execute(CreatePipeRetry(IndexBuildPipes.GetOwnerId(clientId), IndexBuildPipes.GetTabletId(clientId)), ctx);
return;
@@ -4555,7 +4555,7 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TAc
if (ShardDeleter.Has(tabletId, clientId)) {
ShardDeleter.ResendDeleteRequests(TTabletId(ev->Get()->TabletId), ShardInfos, ctx);
return;
- }
+ }
if (ParentDomainLink.HasPipeTo(tabletId, clientId)) {
ParentDomainLink.AtPipeError(ctx);
@@ -4568,16 +4568,16 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TAc
<< ", at schemeshard: " << TabletID());
RestartPipeTx(tabletId, ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvTabletPipe::TEvServerConnected::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
Y_UNUSED(ctx);
LOG_TRACE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Pipe server connected"
<< ", at tablet: " << ev->Get()->TabletId);
-}
-
+}
+
void TSchemeShard::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TActorContext &ctx) {
const auto tabletId = TTabletId(ev->Get()->TabletId);
const TActorId clientId = ev->Get()->ClientId;
@@ -4587,7 +4587,7 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TAc
<< ", to tablet: " << tabletId
<< ", from:" << TabletID() << " is reset");
- PipeClientCache->OnDisconnect(ev);
+ PipeClientCache->OnDisconnect(ev);
if (IndexBuildPipes.Has(clientId)) {
Execute(CreatePipeRetry(IndexBuildPipes.GetOwnerId(clientId), IndexBuildPipes.GetTabletId(clientId)), ctx);
@@ -4597,7 +4597,7 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TAc
if (ShardDeleter.Has(tabletId, clientId)) {
ShardDeleter.ResendDeleteRequests(tabletId, ShardInfos, ctx);
return;
- }
+ }
if (ParentDomainLink.HasPipeTo(tabletId, clientId)) {
ParentDomainLink.AtPipeError(ctx);
@@ -4605,15 +4605,15 @@ void TSchemeShard::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TAc
}
RestartPipeTx(tabletId, ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvTabletPipe::TEvServerDisconnected::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
LOG_TRACE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Server pipe is reset"
<< ", at schemeshard: " << TabletID());
-}
-
+}
+
void TSchemeShard::Handle(TEvSchemeShard::TEvSyncTenantSchemeShard::TPtr& ev, const TActorContext& ctx) {
const auto& record = ev->Get()->Record;
LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -4671,8 +4671,8 @@ void TSchemeShard::Handle(TSchemeBoardEvents::TEvUpdateAck::TPtr& ev, const TAct
void TSchemeShard::Handle(TEvTxProcessing::TEvPlanStep::TPtr &ev, const TActorContext &ctx) {
Execute(CreateTxOperationPlanStep(ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvHive::TEvCreateTabletReply::TPtr &ev, const TActorContext &ctx) {
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Handle TEvCreateTabletReply"
@@ -4715,8 +4715,8 @@ void TSchemeShard::Handle(TEvHive::TEvCreateTabletReply::TPtr &ev, const TActorC
}
Execute(CreateTxOperationReply(TOperationId(txId, partId), ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvHive::TEvAdoptTabletReply::TPtr &ev, const TActorContext &ctx) {
auto shardIdx = MakeLocalId(TLocalShardIdx(ev->Get()->Record.GetOwnerIdx())); // internal id
@@ -4998,8 +4998,8 @@ void TSchemeShard::Handle(TEvDataShard::TEvProposeTransactionResult::TPtr &ev, c
return;
}
Execute(CreateTxOperationReply(TOperationId(txId, partId), ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvSubDomain::TEvConfigureStatus::TPtr &ev, const TActorContext &ctx) {
const auto& record = ev->Get()->Record;
auto tabletId = TTabletId(record.GetOnTabletId());
@@ -5025,7 +5025,7 @@ void TSchemeShard::Handle(TEvSubDomain::TEvConfigureStatus::TPtr &ev, const TAct
Execute(CreateTxOperationReply(opId, ev), ctx);
}
-
+
void TSchemeShard::Handle(TEvBlockStore::TEvUpdateVolumeConfigResponse::TPtr& ev, const TActorContext& ctx) {
const auto txId = TTxId(ev->Get()->Record.GetTxId());
if (!Operations.contains(txId)) {
@@ -5034,8 +5034,8 @@ void TSchemeShard::Handle(TEvBlockStore::TEvUpdateVolumeConfigResponse::TPtr& ev
<< " for unknown txId " << txId
<< " tabletId " << ev->Get()->Record.GetOrigin());
return;
- }
-
+ }
+
auto tabletId = TTabletId(ev->Get()->Record.GetOrigin());
auto partId = Operations.at(txId)->FindRelatedPartByTabletId(tabletId, ctx);
if (partId == InvalidSubTxId) {
@@ -5048,8 +5048,8 @@ void TSchemeShard::Handle(TEvBlockStore::TEvUpdateVolumeConfigResponse::TPtr& ev
}
Execute(CreateTxOperationReply(TOperationId(txId, partId), ev), ctx);
-}
-
+}
+
void TSchemeShard::Handle(TEvFileStore::TEvUpdateConfigResponse::TPtr& ev, const TActorContext& ctx) {
const auto txId = TTxId(ev->Get()->Record.GetTxId());
if (!Operations.contains(txId)) {
@@ -5163,17 +5163,17 @@ void TSchemeShard::Handle(NKesus::TEvKesus::TEvSetConfigResult::TPtr& ev, const
const auto& record = ev->Get()->Record;
auto tabletId = TTabletId(record.GetTabletId());
-
+
TOperationId opId = RouteIncomming(tabletId, ctx);
if (!opId) {
LOG_WARN_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Got NKesus::TEvKesus::TEvSetConfigResult"
<< " no route has found by tabletId " << tabletId
<< " message " << ev->Get()->Record.ShortDebugString());
- return;
+ return;
}
Y_VERIFY(opId.GetTxId());
-
+
if (opId.GetSubTxId() == InvalidSubTxId) {
LOG_WARN_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"Got NKesus::TEvKesus::TEvSetConfigResult but partId in unknown"
@@ -5184,8 +5184,8 @@ void TSchemeShard::Handle(NKesus::TEvKesus::TEvSetConfigResult::TPtr& ev, const
}
Execute(CreateTxOperationReply(opId, ev), ctx);
-}
-
+}
+
TOperationId TSchemeShard::RouteIncomming(TTabletId tabletId, const TActorContext& ctx) {
auto transactionIds = PipeTracker.FindTx(ui64(tabletId));
@@ -5239,7 +5239,7 @@ void TSchemeShard::RestartPipeTx(TTabletId tabletId, const TActorContext& ctx) {
<< ", at schemeshardId: " << TabletID());
continue;
}
-
+
for (auto& items: operation->PipeBindedMessages.at(tabletId)) {
TPipeMessageId msgCookie = items.first;
TOperation::TPreSerialisedMessage& preSerialisedMessages = items.second;
@@ -5254,19 +5254,19 @@ void TSchemeShard::RestartPipeTx(TTabletId tabletId, const TActorContext& ctx) {
PipeClientCache->Send(ctx, ui64(tabletId), preSerialisedMessages.first, preSerialisedMessages.second, msgCookie.second);
}
- }
- }
-}
-
+ }
+ }
+}
+
void TSchemeShard::Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx) {
Y_UNUSED(ev);
BreakTabletAndRestart(ctx);
-}
-
+}
+
void TSchemeShard::Handle(NMon::TEvRemoteHttpInfo::TPtr& ev, const TActorContext& ctx) {
- RenderHtmlPage(ev, ctx);
-}
-
+ RenderHtmlPage(ev, ctx);
+}
+
void TSchemeShard::Handle(TEvSchemeShard::TEvCancelTx::TPtr& ev, const TActorContext& ctx) {
if (IsReadOnlyMode) {
LOG_ERROR_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
@@ -5514,10 +5514,10 @@ void TSchemeShard::Handle(TEvIndexBuilder::TEvCancelResponse::TPtr& ev, const TA
}
void TSchemeShard::FillSeqNo(NKikimrTxDataShard::TFlatSchemeTransaction& tx, TMessageSeqNo seqNo) {
- tx.MutableSeqNo()->SetGeneration(seqNo.Generation);
- tx.MutableSeqNo()->SetRound(seqNo.Round);
-}
-
+ tx.MutableSeqNo()->SetGeneration(seqNo.Generation);
+ tx.MutableSeqNo()->SetRound(seqNo.Round);
+}
+
void TSchemeShard::FillSeqNo(NKikimrTxColumnShard::TSchemaTxBody& tx, TMessageSeqNo seqNo) {
tx.MutableSeqNo()->SetGeneration(seqNo.Generation);
tx.MutableSeqNo()->SetRound(seqNo.Round);
@@ -6090,4 +6090,4 @@ void TSchemeShard::Handle(TEvSchemeShard::TEvLogin::TPtr &ev, const TActorContex
}
} // namespace NSchemeShard
-} // namespace NKikimr
+} // namespace NKikimr
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.h b/ydb/core/tx/schemeshard/schemeshard_impl.h
index 9e872215303..e1a1d08b9a6 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.h
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.h
@@ -1,5 +1,5 @@
-#pragma once
-
+#pragma once
+
#include "schemeshard.h"
#include "schemeshard_export.h"
#include "schemeshard_import.h"
@@ -13,7 +13,7 @@
#include "schemeshard_tx_infly.h"
#include "schemeshard_utils.h"
#include "schemeshard_schema.h"
-
+
#include "schemeshard__operation.h"
#include "operation_queue_timer.h"
@@ -44,17 +44,17 @@
#include <ydb/core/tx/sequenceshard/public/events.h>
#include <ydb/core/tx/tx_processing.h>
#include <ydb/core/util/pb.h>
-
+
#include <ydb/core/blockstore/core/blockstore.h>
#include <ydb/core/filestore/core/filestore.h>
#include <ydb/library/login/login.h>
-#include <util/generic/ptr.h>
-
-namespace NKikimr {
+#include <util/generic/ptr.h>
+
+namespace NKikimr {
namespace NSchemeShard {
-
+
extern const ui64 NEW_TABLE_ALTER_VERSION;
class TSchemeShard
@@ -106,16 +106,16 @@ public:
static constexpr ui32 MaxPQWriteSpeedPerPartition = 50*1024*1024;
static constexpr ui32 MaxPQLifetimeSeconds = 31 * 86400;
static constexpr ui32 PublishChunkSize = 1000;
-
+
static const TSchemeLimits DefaultLimits;
TIntrusivePtr<TChannelProfiles> ChannelProfiles;
-
+
TControlWrapper AllowConditionalEraseOperations;
TControlWrapper AllowServerlessStorageBilling;
- TSplitSettings SplitSettings;
-
+ TSplitSettings SplitSettings;
+
struct TTenantInitState {
enum EInitState {
InvalidState = 0,
@@ -185,30 +185,30 @@ public:
THashMap<TShardIdx, TAdoptedShard> AdoptedShards;
THashMap<TTabletId, TShardIdx> TabletIdToShardIdx;
THashMap<TShardIdx, TVector<TActorId>> ShardDeletionSubscribers; // for tests
-
+
TActorId SchemeBoardPopulator;
static constexpr ui32 InitiateCachedTxIdsCount = 100;
TDeque<TTxId> CachedTxIds;
TActorId TxAllocatorClient;
- TAutoPtr<NTabletPipe::IClientCache> PipeClientCache;
- TPipeTracker PipeTracker;
-
+ TAutoPtr<NTabletPipe::IClientCache> PipeClientCache;
+ TPipeTracker PipeTracker;
+
TCompactionStarter CompactionStarter;
TCompactionQueue* CompactionQueue = nullptr;
bool EnableBackgroundCompaction = false;
bool EnableBackgroundCompactionServerless = false;
ui32 CompactionSearchHeightThreshold = 0;
- TShardDeleter ShardDeleter;
-
+ TShardDeleter ShardDeleter;
+
// Counter-strike stuff
TTabletCountersBase* TabletCounters = nullptr;
TAutoPtr<TTabletCountersBase> TabletCountersPtr;
-
- TAutoPtr<TSelfPinger> SelfPinger;
-
+
+ TAutoPtr<TSelfPinger> SelfPinger;
+
TActorId SysPartitionStatsCollector;
TSet<TPathId> CleanDroppedPathsCandidates;
@@ -278,7 +278,7 @@ public:
void Clear();
void BreakTabletAndRestart(const TActorContext& ctx);
-
+
bool IsShemeShardConfigured() const;
ui64 Generation() const;
@@ -360,8 +360,8 @@ public:
void ClearDescribePathCaches(const TPathElement::TPtr node);
TString PathToString(TPathElement::TPtr item);
NKikimrSchemeOp::TPathVersion GetPathVersion(const TPath& pathEl) const;
-
- const TTableInfo* GetMainTableForIndex(TPathId indexTableId) const;
+
+ const TTableInfo* GetMainTableForIndex(TPathId indexTableId) const;
TPathId ResolveDomainId(TPathId pathId) const;
TPathId ResolveDomainId(TPathElement::TPtr pathEl) const;
@@ -395,12 +395,12 @@ public:
TMaybe<NKikimrSchemeOp::TPartitionConfig> GetTablePartitionConfigWithAlterData(TPathId pathId) const;
void DeleteSplitOp(TOperationId txId, TTxState& txState);
bool ShardIsUnderSplitMergeOp(const TShardIdx& idx) const;
-
+
THashSet<TShardIdx> CollectAllShards(const THashSet<TPathId>& pathes) const;
void ExamineTreeVFS(TPathId nodeId, std::function<void(TPathElement::TPtr)> func, const TActorContext& ctx);
THashSet<TPathId> ListSubThee(TPathId subdomain_root, const TActorContext& ctx);
THashSet<TTxId> GetRelatedTransactions(const THashSet<TPathId>& pathes, const TActorContext &ctx);
-
+
void MarkAsDroping(TPathElement::TPtr node, TTxId txId, const TActorContext& ctx);
void MarkAsDroping(const THashSet<TPathId>& pathes, TTxId txId, const TActorContext& ctx);
@@ -409,9 +409,9 @@ public:
void DropNode(TPathElement::TPtr node, TStepId step, TTxId txId, NIceDb::TNiceDb& db, const TActorContext& ctx);
void DropPathes(const THashSet<TPathId>& pathes, TStepId step, TTxId txId, NIceDb::TNiceDb& db, const TActorContext& ctx);
-
+
void DoShardsDeletion(const THashSet<TShardIdx>& shardIdx, const TActorContext& ctx);
-
+
void SetPartitioning(TPathId pathId, TTableInfo::TPtr tableInfo, TVector<TTableShardInfo>&& newPartitioning);
auto BuildStatsForCollector(TPathId tableId, TShardIdx shardIdx, TTabletId datashardId,
TMaybe<ui32> nodeId, TMaybe<ui64> startTime, const TTableInfo::TPartitionStats& stats);
@@ -583,11 +583,11 @@ public:
TShardIdx GetShardIdx(TTabletId tabletId) const;
TShardIdx MustGetShardIdx(TTabletId tabletId) const;
TTabletTypes::EType GetTabletType(TTabletId tabletId) const;
-
+
struct TTxMonitoring;
//OnRenderAppHtmlPage
- struct TTxInit;
+ struct TTxInit;
NTabletFlatExecutor::ITransaction* CreateTxInit();
struct TTxInitRoot;
@@ -648,16 +648,16 @@ public:
struct TTxMigrate;
NTabletFlatExecutor::ITransaction* CreateTxMigrate(TEvSchemeShard::TEvMigrateSchemeShard::TPtr &ev);
- struct TTxDescribeScheme;
+ struct TTxDescribeScheme;
NTabletFlatExecutor::ITransaction* CreateTxDescribeScheme(TEvSchemeShard::TEvDescribeScheme::TPtr &ev);
- struct TTxNotifyCompletion;
+ struct TTxNotifyCompletion;
NTabletFlatExecutor::ITransaction* CreateTxNotifyTxCompletion(TEvSchemeShard::TEvNotifyTxCompletion::TPtr &ev);
struct TTxDeleteTabletReply;
NTabletFlatExecutor::ITransaction* CreateTxDeleteTabletReply(TEvHive::TEvDeleteTabletReply::TPtr& ev);
- struct TTxShardStateChanged;
+ struct TTxShardStateChanged;
NTabletFlatExecutor::ITransaction* CreateTxShardStateChanged(TEvDataShard::TEvStateChanged::TPtr& ev);
struct TTxRunConditionalErase;
@@ -670,7 +670,7 @@ public:
NTabletFlatExecutor::ITransaction* CreateTxSyncTenant(TPathId tabletId);
struct TTxUpdateTenant;
NTabletFlatExecutor::ITransaction* CreateTxUpdateTenant(TEvSchemeShard::TEvUpdateTenantSchemeShard::TPtr& ev);
-
+
struct TTxPublishToSchemeBoard;
NTabletFlatExecutor::ITransaction* CreateTxPublishToSchemeBoard(THashMap<TTxId, TDeque<TPathId>>&& paths);
struct TTxAckPublishToSchemeBoard;
@@ -724,16 +724,16 @@ public:
static bool FillSplitPartitioning(TVector<TString>& rangeEnds, const TConstArrayRef<NScheme::TTypeId>& keyColTypes,
const ::google::protobuf::RepeatedPtrField<NKikimrSchemeOp::TSplitBoundary>& boundaries,
TString& errStr);
-
+
TString FillAlterTableTxBody(TPathId tableId, TShardIdx shardIdx, TMessageSeqNo seqNo) const;
TString FillBackupTxBody(TPathId pathId, const NKikimrSchemeOp::TBackupTask& task, ui32 shardNum, TMessageSeqNo seqNo) const;
static void FillSeqNo(NKikimrTxDataShard::TFlatSchemeTransaction &tx, TMessageSeqNo seqNo);
static void FillSeqNo(NKikimrTxColumnShard::TSchemaTxBody &tx, TMessageSeqNo seqNo);
-
+
void FillAsyncIndexInfo(const TPathId& tableId, NKikimrTxDataShard::TFlatSchemeTransaction& tx);
- void DescribeTable(const TTableInfo::TPtr tableInfo, const NScheme::TTypeRegistry* typeRegistry,
+ void DescribeTable(const TTableInfo::TPtr tableInfo, const NScheme::TTypeRegistry* typeRegistry,
bool fillConfig, bool fillBoundaries, NKikimrSchemeOp::TTableDescription* entry) const;
void DescribeTableIndex(const TPathId& pathId, const TString& name, NKikimrSchemeOp::TIndexDescription& entry);
void DescribeTableIndex(const TPathId& pathId, const TString& name, TTableIndexInfo::TPtr indexInfo, NKikimrSchemeOp::TIndexDescription& entry);
@@ -744,24 +744,24 @@ public:
void DescribeReplication(const TPathId& pathId, const TString& name, NKikimrSchemeOp::TReplicationDescription& desc);
void DescribeReplication(const TPathId& pathId, const TString& name, TReplicationInfo::TPtr info, NKikimrSchemeOp::TReplicationDescription& desc);
static void FillTableBoundaries(const TTableInfo::TPtr tableInfo, google::protobuf::RepeatedPtrField<NKikimrSchemeOp::TSplitBoundary>& boundaries);
-
+
void Handle(TEvSchemeShard::TEvInitRootShard::TPtr &ev, const TActorContext &ctx);
void Handle(TEvSchemeShard::TEvInitTenantSchemeShard::TPtr &ev, const TActorContext &ctx);
void Handle(TEvSchemeShard::TEvModifySchemeTransaction::TPtr &ev, const TActorContext &ctx);
void Handle(TEvSchemeShard::TEvDescribeScheme::TPtr &ev, const TActorContext &ctx);
void Handle(TEvSchemeShard::TEvNotifyTxCompletion::TPtr &ev, const TActorContext &ctx);
-
+
void Handle(TEvSchemeShard::TEvCancelTx::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvPrivate::TEvProgressOperation::TPtr &ev, const TActorContext &ctx);
-
- void Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvTabletPipe::TEvServerConnected::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr &ev, const TActorContext &ctx);
-
- void Handle(TEvHive::TEvCreateTabletReply::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvPrivate::TEvProgressOperation::TPtr &ev, const TActorContext &ctx);
+
+ void Handle(TEvTabletPipe::TEvClientConnected::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvTabletPipe::TEvClientDestroyed::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvTabletPipe::TEvServerConnected::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvTabletPipe::TEvServerDisconnected::TPtr &ev, const TActorContext &ctx);
+
+ void Handle(TEvHive::TEvCreateTabletReply::TPtr &ev, const TActorContext &ctx);
void Handle(TEvHive::TEvAdoptTabletReply::TPtr &ev, const TActorContext &ctx);
void Handle(TEvHive::TEvDeleteTabletReply::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPrivate::TEvSubscribeToShardDeletion::TPtr &ev, const TActorContext &ctx);
@@ -777,9 +777,9 @@ public:
void Handle(NSequenceShard::TEvSequenceShard::TEvRedirectSequenceResult::TPtr &ev, const TActorContext &ctx);
void Handle(NReplication::TEvController::TEvCreateReplicationResult::TPtr &ev, const TActorContext &ctx);
void Handle(NReplication::TEvController::TEvDropReplicationResult::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvDataShard::TEvProposeTransactionResult::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvDataShard::TEvProposeTransactionResult::TPtr &ev, const TActorContext &ctx);
void Handle(TEvDataShard::TEvSchemaChanged::TPtr &ev, const TActorContext &ctx);
- void Handle(TEvDataShard::TEvStateChanged::TPtr &ev, const TActorContext &ctx);
+ void Handle(TEvDataShard::TEvStateChanged::TPtr &ev, const TActorContext &ctx);
void Handle(TEvPersQueue::TEvUpdateConfigResponse::TPtr &ev, const TActorContext &ctx);
void Handle(TEvSubDomain::TEvConfigureStatus::TPtr &ev, const TActorContext &ctx);
void Handle(TEvBlockStore::TEvUpdateVolumeConfigResponse::TPtr& ev, const TActorContext& ctx);
@@ -797,21 +797,21 @@ public:
void Handle(TEvSchemeShard::TEvSyncTenantSchemeShard::TPtr& ev, const TActorContext& ctx);
void Handle(TEvSchemeShard::TEvUpdateTenantSchemeShard::TPtr& ev, const TActorContext& ctx);
-
+
void Handle(TSchemeBoardEvents::TEvUpdateAck::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvTxProcessing::TEvPlanStep::TPtr &ev, const TActorContext &ctx);
-
+ void Handle(TEvTxProcessing::TEvPlanStep::TPtr &ev, const TActorContext &ctx);
+
void Handle(TEvents::TEvPoisonPill::TPtr& ev, const TActorContext& ctx);
- void Handle(NMon::TEvRemoteHttpInfo::TPtr& ev, const TActorContext& ctx);
-
- void Handle(TEvDataShard::TEvInitSplitMergeDestinationAck::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplitAck::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvSplitPartitioningChangedAck::TPtr& ev, const TActorContext& ctx);
-
- void Handle(TEvDataShard::TEvPeriodicTableStats::TPtr& ev, const TActorContext& ctx);
- void Handle(TEvDataShard::TEvGetTableStatsResult::TPtr& ev, const TActorContext& ctx);
-
+ void Handle(NMon::TEvRemoteHttpInfo::TPtr& ev, const TActorContext& ctx);
+
+ void Handle(TEvDataShard::TEvInitSplitMergeDestinationAck::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplitAck::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvSplitPartitioningChangedAck::TPtr& ev, const TActorContext& ctx);
+
+ void Handle(TEvDataShard::TEvPeriodicTableStats::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvDataShard::TEvGetTableStatsResult::TPtr& ev, const TActorContext& ctx);
+
void Handle(TEvSchemeShard::TEvFindTabletSubDomainPathId::TPtr& ev, const TActorContext& ctx);
void ScheduleConditionalEraseRun(const TActorContext& ctx);
@@ -829,7 +829,7 @@ public:
void Handle(TEvSchemeShard::TEvLogin::TPtr& ev, const TActorContext& ctx);
void RestartPipeTx(TTabletId tabletId, const TActorContext& ctx);
-
+
TOperationId RouteIncomming(TTabletId tabletId, const TActorContext& ctx);
// namespace NLongRunningCommon {
@@ -1062,15 +1062,15 @@ public:
NLogin::TLoginProvider LoginProvider;
-private:
- void OnDetach(const TActorContext &ctx) override;
- void OnTabletDead(TEvTablet::TEvTabletDead::TPtr &ev, const TActorContext &ctx) override;
- void OnActivateExecutor(const TActorContext &ctx) override;
- bool OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) override;
- void DefaultSignalTabletActive(const TActorContext &ctx) override;
- void Cleanup(const TActorContext &ctx);
- void Enqueue(STFUNC_SIG) override;
- void Die(const TActorContext &ctx) override;
+private:
+ void OnDetach(const TActorContext &ctx) override;
+ void OnTabletDead(TEvTablet::TEvTabletDead::TPtr &ev, const TActorContext &ctx) override;
+ void OnActivateExecutor(const TActorContext &ctx) override;
+ bool OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext &ctx) override;
+ void DefaultSignalTabletActive(const TActorContext &ctx) override;
+ void Cleanup(const TActorContext &ctx);
+ void Enqueue(STFUNC_SIG) override;
+ void Die(const TActorContext &ctx) override;
bool ReassignChannelsEnabled() const override {
return true;
@@ -1079,21 +1079,21 @@ private:
const TDomainsInfo::TDomain& GetDomainDescription(const TActorContext &ctx) const;
NKikimrSubDomains::TProcessingParams CreateRootProcessingParams(const TActorContext &ctx);
static NTabletPipe::TClientConfig GetPipeClientConfig();
-
-public:
+
+public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::FLAT_SCHEMESHARD_ACTOR;
}
TSchemeShard(const TActorId &tablet, TTabletStorageInfo *info);
-
+
//TTabletId TabletID() const { return TTabletId(ITablet::TabletID()); }
TTabletId SelfTabletId() const { return TTabletId(ITablet::TabletID()); }
- STFUNC(StateInit);
+ STFUNC(StateInit);
STFUNC(StateConfigure);
- STFUNC(StateWork);
- STFUNC(BrokenState);
+ STFUNC(StateWork);
+ STFUNC(BrokenState);
// A helper that enforces write-only access to the internal DB (reads must be done from the
// internal structures)
@@ -1113,7 +1113,7 @@ public:
virtual void DoExecute(NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx) = 0;
virtual void DoComplete(const TActorContext &ctx) = 0;
};
-};
-
+};
+
}
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
index 692be078858..fcb564b6065 100644
--- a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
@@ -211,7 +211,7 @@ TTableInfo::TAlterDataPtr TTableInfo::CreateAlterData(
const auto& ttl = op.GetTTLSettings();
if (!ValidateTtlSettings(ttl, source ? source->Columns : THashMap<ui32, TColumn>(), alterData->Columns, colName2Id, subDomain, errStr)) {
- return nullptr;
+ return nullptr;
}
alterData->TableDescriptionFull->MutableTTLSettings()->CopyFrom(ttl);
@@ -723,7 +723,7 @@ THashMap<ui32, size_t> TPartitionConfigMerger::DeduplicateStorageRoomsById(NKiki
return true;
});
}
-
+
NKikimrSchemeOp::TFamilyDescription &TPartitionConfigMerger::MutableColumnFamilyById(
NKikimrSchemeOp::TPartitionConfig &partitionConfig,
THashMap<ui32, size_t> &posById, ui32 familyId)
@@ -1230,7 +1230,7 @@ void TTableInfo::SetPartitioning(TVector<TTableShardInfo>&& newPartitioning) {
THashMap<TShardIdx, TPartitionStats> newPartitionStats;
TPartitionStats newAggregatedStats;
newAggregatedStats.PartCount = newPartitioning.size();
- ui64 cpuTotal = 0;
+ ui64 cpuTotal = 0;
for (const auto& np : newPartitioning) {
auto idx = np.ShardIdx;
auto& newStats(newPartitionStats[idx]);
@@ -1238,7 +1238,7 @@ void TTableInfo::SetPartitioning(TVector<TTableShardInfo>&& newPartitioning) {
newAggregatedStats.RowCount += newStats.RowCount;
newAggregatedStats.DataSize += newStats.DataSize;
newAggregatedStats.IndexSize += newStats.IndexSize;
- cpuTotal += newStats.GetCurrentRawCpuUsage();
+ cpuTotal += newStats.GetCurrentRawCpuUsage();
newAggregatedStats.Memory += newStats.Memory;
newAggregatedStats.Network += newStats.Network;
newAggregatedStats.Storage += newStats.Storage;
@@ -1247,7 +1247,7 @@ void TTableInfo::SetPartitioning(TVector<TTableShardInfo>&& newPartitioning) {
newAggregatedStats.ReadIops += newStats.ReadIops;
newAggregatedStats.WriteIops += newStats.WriteIops;
}
- newAggregatedStats.SetCurrentRawCpuUsage(cpuTotal, AppData()->TimeProvider->Now());
+ newAggregatedStats.SetCurrentRawCpuUsage(cpuTotal, AppData()->TimeProvider->Now());
newAggregatedStats.LastAccessTime = Stats.Aggregated.LastAccessTime;
newAggregatedStats.LastUpdateTime = Stats.Aggregated.LastUpdateTime;
@@ -1284,7 +1284,7 @@ void TTableInfo::SetPartitioning(TVector<TTableShardInfo>&& newPartitioning) {
}
}
-void TTableInfo::UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newStats) {
+void TTableInfo::UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newStats) {
// Ignore stats from unknown datashard (it could have been split)
if (!Stats.PartitionStats.contains(datashardIdx))
return;
@@ -1326,11 +1326,11 @@ void TTableInfo::UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newSt
Stats.Aggregated.RangeReads += (newStats.RangeReads - oldStats.RangeReads);
Stats.Aggregated.RangeReadRows += (newStats.RangeReadRows - oldStats.RangeReadRows);
- i64 cpuUsageDelta = newStats.GetCurrentRawCpuUsage() - oldStats.GetCurrentRawCpuUsage();
- i64 prevCpuUsage = Stats.Aggregated.GetCurrentRawCpuUsage();
- ui64 newAggregatedCpuUsage = std::max<i64>(0, prevCpuUsage + cpuUsageDelta);
- TInstant now = AppData()->TimeProvider->Now();
- Stats.Aggregated.SetCurrentRawCpuUsage(newAggregatedCpuUsage, now);
+ i64 cpuUsageDelta = newStats.GetCurrentRawCpuUsage() - oldStats.GetCurrentRawCpuUsage();
+ i64 prevCpuUsage = Stats.Aggregated.GetCurrentRawCpuUsage();
+ ui64 newAggregatedCpuUsage = std::max<i64>(0, prevCpuUsage + cpuUsageDelta);
+ TInstant now = AppData()->TimeProvider->Now();
+ Stats.Aggregated.SetCurrentRawCpuUsage(newAggregatedCpuUsage, now);
Stats.Aggregated.Memory += (newStats.Memory - oldStats.Memory);
Stats.Aggregated.Network += (newStats.Network - oldStats.Network);
Stats.Aggregated.Storage += (newStats.Storage - oldStats.Storage);
@@ -1339,7 +1339,7 @@ void TTableInfo::UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newSt
Stats.Aggregated.ReadIops += (newStats.ReadIops - oldStats.ReadIops);
Stats.Aggregated.WriteIops += (newStats.WriteIops - oldStats.WriteIops);
- newStats.SaveCpuUsageHistory(oldStats);
+ newStats.SaveCpuUsageHistory(oldStats);
oldStats = newStats;
Stats.PartitionStatsUpdated++;
@@ -1412,10 +1412,10 @@ void TTableInfo::FinishSplitMergeOp(TOperationId opId) {
}
}
-
-
-bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings,
- TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge,
+
+
+bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings,
+ TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge,
THashSet<TTabletId>& partOwners, ui64& totalSize, float& totalLoad) const
{
if (ExpectedPartitionCount + 1 - shardsToMerge.size() <= GetMinPartitionsCount()) {
@@ -1444,45 +1444,45 @@ bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings,
if (stats->HasBorrowed)
return false;
- bool canMerge = false;
-
- // Check if we can try merging by size
- if (IsMergeBySizeEnabled() && stats->DataSize + totalSize <= GetSizeToMerge()) {
- canMerge = true;
- }
-
- // Check if we can try merging by load
- TInstant now = AppData()->TimeProvider->Now();
- TDuration minUptime = TDuration::Seconds(splitSettings.MergeByLoadMinUptimeSec);
- if (!canMerge && IsMergeByLoadEnabled() && stats->StartTime && stats->StartTime + minUptime < now) {
- canMerge = true;
- }
-
- if (!canMerge)
+ bool canMerge = false;
+
+ // Check if we can try merging by size
+ if (IsMergeBySizeEnabled() && stats->DataSize + totalSize <= GetSizeToMerge()) {
+ canMerge = true;
+ }
+
+ // Check if we can try merging by load
+ TInstant now = AppData()->TimeProvider->Now();
+ TDuration minUptime = TDuration::Seconds(splitSettings.MergeByLoadMinUptimeSec);
+ if (!canMerge && IsMergeByLoadEnabled() && stats->StartTime && stats->StartTime + minUptime < now) {
+ canMerge = true;
+ }
+
+ if (!canMerge)
+ return false;
+
+ // Check that total size doesn't exceed the limits
+ if (IsSplitBySizeEnabled() && stats->DataSize + totalSize >= GetShardSizeToSplit()*0.9) {
return false;
+ }
- // Check that total size doesn't exceed the limits
- if (IsSplitBySizeEnabled() && stats->DataSize + totalSize >= GetShardSizeToSplit()*0.9) {
- return false;
- }
-
- // Check that total load doesn't exceed the limits
- float shardLoad = stats->GetCurrentRawCpuUsage() * 0.000001;
+ // Check that total load doesn't exceed the limits
+ float shardLoad = stats->GetCurrentRawCpuUsage() * 0.000001;
if (IsMergeByLoadEnabled()) {
- const auto& settings = PartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings();
- i64 cpuPercentage = settings.GetCpuPercentageThreshold();
- float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
-
- // Calculate shard load based on historical data
- TDuration loadDuration = TDuration::Seconds(splitSettings.MergeByLoadMinLowLoadDurationSec);
- shardLoad = 0.01 * stats->GetLatestMaxCpuUsagePercent(now - loadDuration);
-
- if (shardLoad + totalLoad > cpuUsageThreshold *0.7)
- return false;
- }
-
- // Merged shards must not have borrowed parts from the same original tablet
- // because this will break part ref-counting
+ const auto& settings = PartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings();
+ i64 cpuPercentage = settings.GetCpuPercentageThreshold();
+ float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
+
+ // Calculate shard load based on historical data
+ TDuration loadDuration = TDuration::Seconds(splitSettings.MergeByLoadMinLowLoadDurationSec);
+ shardLoad = 0.01 * stats->GetLatestMaxCpuUsagePercent(now - loadDuration);
+
+ if (shardLoad + totalLoad > cpuUsageThreshold *0.7)
+ return false;
+ }
+
+ // Merged shards must not have borrowed parts from the same original tablet
+ // because this will break part ref-counting
for (auto tabletId : stats->PartOwners) {
if (partOwners.contains(tabletId))
return false;
@@ -1490,13 +1490,13 @@ bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings,
shardsToMerge.push_back(shardIdx);
totalSize += stats->DataSize;
- totalLoad += shardLoad;
+ totalLoad += shardLoad;
partOwners.insert(stats->PartOwners.begin(), stats->PartOwners.end());
return true;
}
-bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge) const {
+bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge) const {
// Don't split/merge backup tables
if (IsBackup) {
return false;
@@ -1519,7 +1519,7 @@ bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, TS
shardsToMerge.clear();
ui64 totalSize = 0;
- float totalLoad = 0;
+ float totalLoad = 0;
THashSet<TTabletId> partOwners;
// Make sure we can actually merge current shard first
@@ -1549,88 +1549,88 @@ bool TTableInfo::CheckFastSplitForPartition(const TSplitSettings& splitSettings,
if (IsBackup)
return false;
- // Ignore stats from unknown datashard (it could have been split)
- if (!Stats.PartitionStats.contains(shardIdx))
- return false;
-
- if (!Shard2PartitionIdx.contains(shardIdx))
- return false;
-
- const ui64 MIN_ROWS_FOR_FAST_SPLIT = 1000;
- ui64 sizeThreshold = splitSettings.FastSplitSizeThreshold;
- ui64 rowCountThreshold = splitSettings.FastSplitRowCountThreshold;
- float cpuUsageThreshold = 0.01 * splitSettings.FastSplitCpuPercentageThreshold;
-
- const auto& partitionConfig = PartitionConfig();
-
- if (partitionConfig.GetPartitioningPolicy().HasFastSplitSettings()) {
- const auto& settings = partitionConfig.GetPartitioningPolicy().GetFastSplitSettings();
- sizeThreshold = settings.GetSizeThreshold();
- rowCountThreshold = settings.GetRowCountThreshold();
- cpuUsageThreshold = 0.01 * settings.GetCpuPercentageThreshold();
- }
-
- const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx);
- if ((dataSize < sizeThreshold && rowCount < rowCountThreshold) ||
- rowCount < MIN_ROWS_FOR_FAST_SPLIT ||
- stats.InFlightTxCount == 0 ||
- stats.GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000)
- {
- return false;
- }
-
- return true;
-}
-
-bool TTableInfo::CheckSplitByLoad(const TSplitSettings& splitSettings, TShardIdx shardIdx, ui64 dataSize, ui64 rowCount) const {
+ // Ignore stats from unknown datashard (it could have been split)
+ if (!Stats.PartitionStats.contains(shardIdx))
+ return false;
+
+ if (!Shard2PartitionIdx.contains(shardIdx))
+ return false;
+
+ const ui64 MIN_ROWS_FOR_FAST_SPLIT = 1000;
+ ui64 sizeThreshold = splitSettings.FastSplitSizeThreshold;
+ ui64 rowCountThreshold = splitSettings.FastSplitRowCountThreshold;
+ float cpuUsageThreshold = 0.01 * splitSettings.FastSplitCpuPercentageThreshold;
+
+ const auto& partitionConfig = PartitionConfig();
+
+ if (partitionConfig.GetPartitioningPolicy().HasFastSplitSettings()) {
+ const auto& settings = partitionConfig.GetPartitioningPolicy().GetFastSplitSettings();
+ sizeThreshold = settings.GetSizeThreshold();
+ rowCountThreshold = settings.GetRowCountThreshold();
+ cpuUsageThreshold = 0.01 * settings.GetCpuPercentageThreshold();
+ }
+
+ const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx);
+ if ((dataSize < sizeThreshold && rowCount < rowCountThreshold) ||
+ rowCount < MIN_ROWS_FOR_FAST_SPLIT ||
+ stats.InFlightTxCount == 0 ||
+ stats.GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool TTableInfo::CheckSplitByLoad(const TSplitSettings& splitSettings, TShardIdx shardIdx, ui64 dataSize, ui64 rowCount) const {
// Don't split/merge backup tables
if (IsBackup)
return false;
- if (!splitSettings.SplitByLoadEnabled)
- return false;
-
- // Ignore stats from unknown datashard (it could have been split)
- if (!Stats.PartitionStats.contains(shardIdx))
- return false;
- if (!Shard2PartitionIdx.contains(shardIdx))
- return false;
-
- // A shard can be overloaded by heavy reads of non-existing keys.
- // So we want to be able to split it even if it has no data.
- const ui64 MIN_ROWS_FOR_SPLIT_BY_LOAD = 0;
- const ui64 MIN_SIZE_FOR_SPLIT_BY_LOAD = 0;
-
- const auto& partitionConfig = PartitionConfig();
- const auto& policy = partitionConfig.GetPartitioningPolicy();
-
- ui64 maxShards = policy.GetMaxPartitionsCount();
- if (maxShards == 0) {
- // Don't want to trigger "too many shards" or "too many readsets" errors
- maxShards = splitSettings.SplitByLoadMaxShardsDefault;
- }
-
- if (!policy.HasSplitByLoadSettings() || !policy.GetSplitByLoadSettings().GetEnabled()) {
- return false;
- }
-
- const auto& settings = policy.GetSplitByLoadSettings();
- i64 cpuPercentage = settings.GetCpuPercentageThreshold();
-
- float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
-
- const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx);
- if (rowCount < MIN_ROWS_FOR_SPLIT_BY_LOAD ||
- dataSize < MIN_SIZE_FOR_SPLIT_BY_LOAD ||
- Stats.PartitionStats.size() >= maxShards ||
- stats.GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000)
- {
- return false;
- }
-
- return true;
-}
-
+ if (!splitSettings.SplitByLoadEnabled)
+ return false;
+
+ // Ignore stats from unknown datashard (it could have been split)
+ if (!Stats.PartitionStats.contains(shardIdx))
+ return false;
+ if (!Shard2PartitionIdx.contains(shardIdx))
+ return false;
+
+ // A shard can be overloaded by heavy reads of non-existing keys.
+ // So we want to be able to split it even if it has no data.
+ const ui64 MIN_ROWS_FOR_SPLIT_BY_LOAD = 0;
+ const ui64 MIN_SIZE_FOR_SPLIT_BY_LOAD = 0;
+
+ const auto& partitionConfig = PartitionConfig();
+ const auto& policy = partitionConfig.GetPartitioningPolicy();
+
+ ui64 maxShards = policy.GetMaxPartitionsCount();
+ if (maxShards == 0) {
+ // Don't want to trigger "too many shards" or "too many readsets" errors
+ maxShards = splitSettings.SplitByLoadMaxShardsDefault;
+ }
+
+ if (!policy.HasSplitByLoadSettings() || !policy.GetSplitByLoadSettings().GetEnabled()) {
+ return false;
+ }
+
+ const auto& settings = policy.GetSplitByLoadSettings();
+ i64 cpuPercentage = settings.GetCpuPercentageThreshold();
+
+ float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
+
+ const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx);
+ if (rowCount < MIN_ROWS_FOR_SPLIT_BY_LOAD ||
+ dataSize < MIN_SIZE_FOR_SPLIT_BY_LOAD ||
+ Stats.PartitionStats.size() >= maxShards ||
+ stats.GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000)
+ {
+ return false;
+ }
+
+ return true;
+}
+
TChannelsMapping GetPoolsMapping(const TChannelsBindings& bindings) {
TChannelsMapping mapping;
for (const auto& bind : bindings) {
@@ -2121,4 +2121,4 @@ bool TSequenceInfo::ValidateCreate(const NKikimrSchemeOp::TSequenceDescription&
} // namespace NSchemeShard
} // namespace NKikimr
-
+
diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.h b/ydb/core/tx/schemeshard/schemeshard_info_types.h
index 316bad7d8fd..2e5710c3d3e 100644
--- a/ydb/core/tx/schemeshard/schemeshard_info_types.h
+++ b/ydb/core/tx/schemeshard/schemeshard_info_types.h
@@ -32,40 +32,40 @@
namespace NKikimr {
namespace NSchemeShard {
-struct TSplitSettings {
- TControlWrapper SplitMergePartCountLimit;
- TControlWrapper FastSplitSizeThreshold;
- TControlWrapper FastSplitRowCountThreshold;
- TControlWrapper FastSplitCpuPercentageThreshold;
- TControlWrapper SplitByLoadEnabled;
- TControlWrapper SplitByLoadMaxShardsDefault;
- TControlWrapper MergeByLoadMinUptimeSec;
- TControlWrapper MergeByLoadMinLowLoadDurationSec;
-
- TSplitSettings()
- : SplitMergePartCountLimit(2000, -1, 1000000)
- , FastSplitSizeThreshold(4*1000*1000, 100*1000, 4ll*1000*1000*1000)
- , FastSplitRowCountThreshold(100*1000, 1000, 1ll*1000*1000*1000)
- , FastSplitCpuPercentageThreshold(50, 1, 146)
- , SplitByLoadEnabled(1, 0, 1)
- , SplitByLoadMaxShardsDefault(50, 0, 10000)
- , MergeByLoadMinUptimeSec(10*60, 0, 4ll*1000*1000*1000)
- , MergeByLoadMinLowLoadDurationSec(1*60*60, 0, 4ll*1000*1000*1000)
- {}
-
- void Register(TIntrusivePtr<NKikimr::TControlBoard>& icb) {
- icb->RegisterSharedControl(SplitMergePartCountLimit, "SchemeShard_SplitMergePartCountLimit");
- icb->RegisterSharedControl(FastSplitSizeThreshold, "SchemeShard_FastSplitSizeThreshold");
- icb->RegisterSharedControl(FastSplitRowCountThreshold, "SchemeShard_FastSplitRowCountThreshold");
- icb->RegisterSharedControl(FastSplitCpuPercentageThreshold, "SchemeShard_FastSplitCpuPercentageThreshold");
-
- icb->RegisterSharedControl(SplitByLoadEnabled, "SchemeShard_SplitByLoadEnabled");
- icb->RegisterSharedControl(SplitByLoadMaxShardsDefault, "SchemeShard_SplitByLoadMaxShardsDefault");
- icb->RegisterSharedControl(MergeByLoadMinUptimeSec, "SchemeShard_MergeByLoadMinUptimeSec");
- icb->RegisterSharedControl(MergeByLoadMinLowLoadDurationSec,"SchemeShard_MergeByLoadMinLowLoadDurationSec");
- }
-};
-
+struct TSplitSettings {
+ TControlWrapper SplitMergePartCountLimit;
+ TControlWrapper FastSplitSizeThreshold;
+ TControlWrapper FastSplitRowCountThreshold;
+ TControlWrapper FastSplitCpuPercentageThreshold;
+ TControlWrapper SplitByLoadEnabled;
+ TControlWrapper SplitByLoadMaxShardsDefault;
+ TControlWrapper MergeByLoadMinUptimeSec;
+ TControlWrapper MergeByLoadMinLowLoadDurationSec;
+
+ TSplitSettings()
+ : SplitMergePartCountLimit(2000, -1, 1000000)
+ , FastSplitSizeThreshold(4*1000*1000, 100*1000, 4ll*1000*1000*1000)
+ , FastSplitRowCountThreshold(100*1000, 1000, 1ll*1000*1000*1000)
+ , FastSplitCpuPercentageThreshold(50, 1, 146)
+ , SplitByLoadEnabled(1, 0, 1)
+ , SplitByLoadMaxShardsDefault(50, 0, 10000)
+ , MergeByLoadMinUptimeSec(10*60, 0, 4ll*1000*1000*1000)
+ , MergeByLoadMinLowLoadDurationSec(1*60*60, 0, 4ll*1000*1000*1000)
+ {}
+
+ void Register(TIntrusivePtr<NKikimr::TControlBoard>& icb) {
+ icb->RegisterSharedControl(SplitMergePartCountLimit, "SchemeShard_SplitMergePartCountLimit");
+ icb->RegisterSharedControl(FastSplitSizeThreshold, "SchemeShard_FastSplitSizeThreshold");
+ icb->RegisterSharedControl(FastSplitRowCountThreshold, "SchemeShard_FastSplitRowCountThreshold");
+ icb->RegisterSharedControl(FastSplitCpuPercentageThreshold, "SchemeShard_FastSplitCpuPercentageThreshold");
+
+ icb->RegisterSharedControl(SplitByLoadEnabled, "SchemeShard_SplitByLoadEnabled");
+ icb->RegisterSharedControl(SplitByLoadMaxShardsDefault, "SchemeShard_SplitByLoadMaxShardsDefault");
+ icb->RegisterSharedControl(MergeByLoadMinUptimeSec, "SchemeShard_MergeByLoadMinUptimeSec");
+ icb->RegisterSharedControl(MergeByLoadMinLowLoadDurationSec,"SchemeShard_MergeByLoadMinLowLoadDurationSec");
+ }
+};
+
struct TBindingsRoomsChange {
TChannelsBindings ChannelsBindings;
@@ -243,66 +243,66 @@ struct TTableInfo : public TSimpleRefCount<TTableInfo> {
ui64 PartCount = 0;
ui64 SearchHeight = 0;
ui32 ShardState = NKikimrTxDataShard::Unknown;
-
+
// True when PartOwners has parts from other tablets
bool HasBorrowed = false;
- // Tablet actor started at
- TInstant StartTime;
-
- void SetCurrentRawCpuUsage(ui64 rawCpuUsage, TInstant now) {
- CPU = rawCpuUsage;
- float percent = rawCpuUsage * 0.000001 * 100;
- if (percent >= 2)
- Last2PercentLoad = now;
- if (percent >= 5)
- Last5PercentLoad = now;
- if (percent >= 10)
- Last10PercentLoad = now;
- if (percent >= 20)
- Last20PercentLoad = now;
- if (percent >= 30)
- Last30PercentLoad = now;
- }
-
- void SaveCpuUsageHistory(const TPartitionStats& oldStats) {
- Last2PercentLoad = std::max(Last2PercentLoad, oldStats.Last2PercentLoad);
- Last5PercentLoad = std::max(Last5PercentLoad, oldStats.Last5PercentLoad);
- Last10PercentLoad = std::max(Last10PercentLoad, oldStats.Last10PercentLoad);
- Last20PercentLoad = std::max(Last20PercentLoad, oldStats.Last20PercentLoad);
- Last30PercentLoad = std::max(Last30PercentLoad, oldStats.Last30PercentLoad);
- }
-
- ui64 GetCurrentRawCpuUsage() const {
- return CPU;
- }
-
- float GetLatestMaxCpuUsagePercent(TInstant since) const {
- // TODO: fix the case when stats were not collected yet
-
- if (Last30PercentLoad > since)
- return 40;
- if (Last20PercentLoad > since)
- return 30;
- if (Last10PercentLoad > since)
- return 20;
- if (Last5PercentLoad > since)
- return 10;
- if (Last2PercentLoad > since)
- return 5;
-
- return 2;
- }
-
- private:
- ui64 CPU = 0;
-
- // Latest timestamps when CPU usage exceeded 2%, 5%, 10%, 20%, 30%
- TInstant Last2PercentLoad;
- TInstant Last5PercentLoad;
- TInstant Last10PercentLoad;
- TInstant Last20PercentLoad;
- TInstant Last30PercentLoad;
+ // Tablet actor started at
+ TInstant StartTime;
+
+ void SetCurrentRawCpuUsage(ui64 rawCpuUsage, TInstant now) {
+ CPU = rawCpuUsage;
+ float percent = rawCpuUsage * 0.000001 * 100;
+ if (percent >= 2)
+ Last2PercentLoad = now;
+ if (percent >= 5)
+ Last5PercentLoad = now;
+ if (percent >= 10)
+ Last10PercentLoad = now;
+ if (percent >= 20)
+ Last20PercentLoad = now;
+ if (percent >= 30)
+ Last30PercentLoad = now;
+ }
+
+ void SaveCpuUsageHistory(const TPartitionStats& oldStats) {
+ Last2PercentLoad = std::max(Last2PercentLoad, oldStats.Last2PercentLoad);
+ Last5PercentLoad = std::max(Last5PercentLoad, oldStats.Last5PercentLoad);
+ Last10PercentLoad = std::max(Last10PercentLoad, oldStats.Last10PercentLoad);
+ Last20PercentLoad = std::max(Last20PercentLoad, oldStats.Last20PercentLoad);
+ Last30PercentLoad = std::max(Last30PercentLoad, oldStats.Last30PercentLoad);
+ }
+
+ ui64 GetCurrentRawCpuUsage() const {
+ return CPU;
+ }
+
+ float GetLatestMaxCpuUsagePercent(TInstant since) const {
+ // TODO: fix the case when stats were not collected yet
+
+ if (Last30PercentLoad > since)
+ return 40;
+ if (Last20PercentLoad > since)
+ return 30;
+ if (Last10PercentLoad > since)
+ return 20;
+ if (Last5PercentLoad > since)
+ return 10;
+ if (Last2PercentLoad > since)
+ return 5;
+
+ return 2;
+ }
+
+ private:
+ ui64 CPU = 0;
+
+ // Latest timestamps when CPU usage exceeded 2%, 5%, 10%, 20%, 30%
+ TInstant Last2PercentLoad;
+ TInstant Last5PercentLoad;
+ TInstant Last10PercentLoad;
+ TInstant Last20PercentLoad;
+ TInstant Last30PercentLoad;
};
struct TStats {
@@ -530,7 +530,7 @@ public:
ShardsStatsDetached = true;
}
- void UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newStats);
+ void UpdateShardStats(TShardIdx datashardIdx, TPartitionStats& newStats);
void RegisterSplitMegreOp(TOperationId txId, const TTxState& txState);
@@ -542,38 +542,38 @@ public:
return SplitOpsInFlight;
}
- const THashMap<TShardIdx, ui64>& GetShard2PartitionIdx() const {
- return Shard2PartitionIdx;
- }
-
+ const THashMap<TShardIdx, ui64>& GetShard2PartitionIdx() const {
+ return Shard2PartitionIdx;
+ }
+
ui64 GetExpectedPartitionCount() const {
return ExpectedPartitionCount;
}
- bool TryAddShardToMerge(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge,
- THashSet<TTabletId>& partOwners, ui64& totalSize, float& totalLoad) const;
+ bool TryAddShardToMerge(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge,
+ THashSet<TTabletId>& partOwners, ui64& totalSize, float& totalLoad) const;
- bool CheckCanMergePartitions(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge) const;
+ bool CheckCanMergePartitions(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector<TShardIdx>& shardsToMerge) const;
bool CheckFastSplitForPartition(const TSplitSettings& splitSettings, TShardIdx shardIdx, ui64 dataSize, ui64 rowCount) const;
- bool CheckSplitByLoad(const TSplitSettings& splitSettings, TShardIdx shardIdx, ui64 dataSize, ui64 rowCount) const;
-
- bool IsSplitBySizeEnabled() const {
- return PartitionConfig().GetPartitioningPolicy().GetSizeToSplit() != 0;
- }
-
- bool IsMergeBySizeEnabled() const {
+ bool CheckSplitByLoad(const TSplitSettings& splitSettings, TShardIdx shardIdx, ui64 dataSize, ui64 rowCount) const;
+
+ bool IsSplitBySizeEnabled() const {
+ return PartitionConfig().GetPartitioningPolicy().GetSizeToSplit() != 0;
+ }
+
+ bool IsMergeBySizeEnabled() const {
return IsSplitBySizeEnabled() && PartitionConfig().GetPartitioningPolicy().GetMinPartitionsCount() != 0;
- }
-
- bool IsSplitByLoadEnabled() const {
- return PartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
- }
-
- bool IsMergeByLoadEnabled() const {
- return IsSplitByLoadEnabled();
- }
-
+ }
+
+ bool IsSplitByLoadEnabled() const {
+ return PartitionConfig().GetPartitioningPolicy().GetSplitByLoadSettings().GetEnabled();
+ }
+
+ bool IsMergeByLoadEnabled() const {
+ return IsSplitByLoadEnabled();
+ }
+
ui64 GetShardSizeToSplit() const {
ui64 threshold = PartitionConfig().GetPartitioningPolicy().GetSizeToSplit();
return threshold == 0 ?
@@ -582,7 +582,7 @@ public:
}
ui64 GetSizeToMerge() const {
- if (!IsMergeBySizeEnabled()) {
+ if (!IsMergeBySizeEnabled()) {
// Disable auto-merge by default
return 0;
} else {
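The TPartitionStats helpers above keep a coarse CPU-load history rather than a time series: SetCurrentRawCpuUsage stamps the timestamps of the 2/5/10/20/30% thresholds crossed by the current sample, SaveCpuUsageHistory merges those stamps when stats are replaced, and GetLatestMaxCpuUsagePercent reports the bucket one step above the highest threshold crossed since the given instant, falling back to 2 when none was. A short usage sketch against these declarations; the timestamps and the 250000 figure are illustrative inputs only:

    // Assumes the declarations from schemeshard_info_types.h shown above.
    TTableInfo::TPartitionStats stats;

    // 250000 us of CPU per second == 25% load: stamps the 2/5/10/20% buckets.
    stats.SetCurrentRawCpuUsage(250000, TInstant::Seconds(100));

    ui64 raw = stats.GetCurrentRawCpuUsage();                                    // 250000
    // Highest bucket crossed after t=50s is 20%, so the recent maximum is
    // reported as the next bucket up: 30.
    float recentMax = stats.GetLatestMaxCpuUsagePercent(TInstant::Seconds(50));  // 30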
diff --git a/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp b/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp
index 78e0a542451..3789d3af1b8 100644
--- a/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp
@@ -31,17 +31,17 @@ static void FillTableStats(NKikimrTableStats::TTableStats* stats, const TTableIn
stats->SetPartCount(tableStats.PartCount);
}
-static void FillTableMetrics(NKikimrTabletBase::TMetrics* metrics, const TTableInfo::TPartitionStats& tableStats) {
- metrics->SetCPU(tableStats.GetCurrentRawCpuUsage());
- metrics->SetMemory(tableStats.Memory);
- metrics->SetNetwork(tableStats.Network);
- metrics->SetStorage(tableStats.Storage);
- metrics->SetReadThroughput(tableStats.ReadThroughput);
- metrics->SetWriteThroughput(tableStats.WriteThroughput);
- metrics->SetReadIops(tableStats.ReadIops);
- metrics->SetWriteIops(tableStats.WriteIops);
-}
-
+static void FillTableMetrics(NKikimrTabletBase::TMetrics* metrics, const TTableInfo::TPartitionStats& tableStats) {
+ metrics->SetCPU(tableStats.GetCurrentRawCpuUsage());
+ metrics->SetMemory(tableStats.Memory);
+ metrics->SetNetwork(tableStats.Network);
+ metrics->SetStorage(tableStats.Storage);
+ metrics->SetReadThroughput(tableStats.ReadThroughput);
+ metrics->SetWriteThroughput(tableStats.WriteThroughput);
+ metrics->SetReadIops(tableStats.ReadIops);
+ metrics->SetWriteIops(tableStats.WriteIops);
+}
+
void TPathDescriber::FillPathDescr(NKikimrSchemeOp::TDirEntry* descr, TPathElement::TPtr pathEl, TPathElement::EPathSubType subType) {
FillChildDescr(descr, pathEl);
@@ -242,7 +242,7 @@ void TPathDescriber::DescribeTable(const TActorContext& ctx, TPathId pathId, TPa
{
auto* metrics = Result->Record.MutablePathDescription()->MutableTabletMetrics();
- FillTableMetrics(metrics, tableStats);
+ FillTableMetrics(metrics, tableStats);
}
if (returnPartitionStats) {
@@ -253,8 +253,8 @@ void TPathDescriber::DescribeTable(const TActorContext& ctx, TPathId pathId, TPa
Y_VERIFY(stats);
auto pbStats = pathDescription.AddTablePartitionStats();
FillTableStats(pbStats, *stats);
- auto pbMetrics = pathDescription.AddTablePartitionMetrics();
- FillTableMetrics(pbMetrics, *stats);
+ auto pbMetrics = pathDescription.AddTablePartitionMetrics();
+ FillTableMetrics(pbMetrics, *stats);
}
}
@@ -363,18 +363,18 @@ void TPathDescriber::DescribeOlapStore(TPathId pathId, TPathElement::TPtr pathEl
description->AddColumnShards(shardInfo->TabletID.GetValue());
}
}
-
+
void TPathDescriber::DescribeOlapTable(TPathId pathId, TPathElement::TPtr pathEl) {
const TOlapTableInfo::TPtr tableInfo = *Self->OlapTables.FindPtr(pathId);
Y_VERIFY(tableInfo, "OlapTable not found");
const TOlapStoreInfo::TPtr storeInfo = *Self->OlapStores.FindPtr(tableInfo->OlapStorePathId);
Y_VERIFY(storeInfo, "OlapStore not found");
Y_UNUSED(pathEl);
-
+
auto description = Result->Record.MutablePathDescription()->MutableColumnTableDescription();
description->CopyFrom(tableInfo->Description);
description->MutableSharding()->CopyFrom(tableInfo->Sharding);
-
+
if (!description->HasSchema() && description->HasSchemaPresetId()) {
auto& preset = storeInfo->SchemaPresets.at(description->GetSchemaPresetId());
auto& presetProto = storeInfo->Description.GetSchemaPresets(preset.ProtoIndex);
@@ -382,7 +382,7 @@ void TPathDescriber::DescribeOlapTable(TPathId pathId, TPathElement::TPtr pathEl
if (description->HasSchemaPresetVersionAdj()) {
description->MutableSchema()->SetVersion(description->GetSchema().GetVersion() + description->GetSchemaPresetVersionAdj());
}
- }
+ }
#if 0
if (!description->HasTtlSettings() && description->HasTtlSettingsPresetId()) {
auto& preset = storeInfo->TtlSettingsPresets.at(description->GetTtlSettingsPresetId());
@@ -820,7 +820,7 @@ THolder<TEvSchemeShard::TEvDescribeSchemeResultBuilder> TPathDescriber::Describe
DescribeTable(ctx, base->PathId, base);
break;
case NKikimrSchemeOp::EPathTypeColumnStore:
- DescribeDir(path);
+ DescribeDir(path);
DescribeOlapStore(base->PathId, base);
break;
case NKikimrSchemeOp::EPathTypeColumnTable:
diff --git a/ydb/core/tx/schemeshard/schemeshard_utils.h b/ydb/core/tx/schemeshard/schemeshard_utils.h
index d371463ad9d..de7d17f316f 100644
--- a/ydb/core/tx/schemeshard/schemeshard_utils.h
+++ b/ydb/core/tx/schemeshard/schemeshard_utils.h
@@ -51,9 +51,9 @@ inline NKikimrSchemeOp::TModifyScheme TransactionTemplate(const TString& working
return tx;
}
-TSerializedCellVec ChooseSplitKeyByHistogram(const NKikimrTableStats::THistogram& histogram,
- const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes);
-
+TSerializedCellVec ChooseSplitKeyByHistogram(const NKikimrTableStats::THistogram& histogram,
+ const TConstArrayRef<NScheme::TTypeId>& keyColumnTypes);
+
class TShardDeleter {
struct TPerHiveDeletions {
TActorId PipeToHive;
diff --git a/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp b/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp
index aebfc4ce171..58cc395905c 100644
--- a/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp
@@ -1,33 +1,33 @@
-#include "schemeshard_info_types.h"
+#include "schemeshard_info_types.h"
#include <ydb/core/protos/flat_scheme_op.pb.h>
-
-namespace NKikimr {
+
+namespace NKikimr {
namespace NSchemeShard {
-
-// Helper accessors for OLTP and OLAP tables that use different TColumn's
-namespace {
- inline
- bool IsDropped(const TOlapSchema::TColumn& col) {
- Y_UNUSED(col);
- return false;
- }
-
- inline
- ui32 GetType(const TOlapSchema::TColumn& col) {
- return col.TypeId;
- }
-
- inline
- bool IsDropped(const TTableInfo::TColumn& col) {
- return col.IsDropped();
- }
-
- inline
- ui32 GetType(const TTableInfo::TColumn& col) {
- return col.PType;
- }
-}
-
+
+// Helper accessors for OLTP and OLAP tables that use different TColumn's
+namespace {
+ inline
+ bool IsDropped(const TOlapSchema::TColumn& col) {
+ Y_UNUSED(col);
+ return false;
+ }
+
+ inline
+ ui32 GetType(const TOlapSchema::TColumn& col) {
+ return col.TypeId;
+ }
+
+ inline
+ bool IsDropped(const TTableInfo::TColumn& col) {
+ return col.IsDropped();
+ }
+
+ inline
+ ui32 GetType(const TTableInfo::TColumn& col) {
+ return col.PType;
+ }
+}
+
template <class TColumn>
bool ValidateUnit(const TColumn& column, NKikimrSchemeOp::TTTLSettings::EUnit unit, TString& errStr) {
switch (GetType(column)) {
@@ -59,39 +59,39 @@ bool ValidateTtlSettings(const NKikimrSchemeOp::TTTLSettings& ttl,
const THashMap<ui32, TTableInfo::TColumn>& alterColumns,
const THashMap<TString, ui32>& colName2Id,
const TSubDomainInfo& subDomain, TString& errStr)
-{
+{
using TTtlProto = NKikimrSchemeOp::TTTLSettings;
- switch (ttl.GetStatusCase()) {
- case TTtlProto::kEnabled: {
+ switch (ttl.GetStatusCase()) {
+ case TTtlProto::kEnabled: {
const auto& enabled = ttl.GetEnabled();
const TString colName = enabled.GetColumnName();
-
- auto it = colName2Id.find(colName);
- if (it == colName2Id.end()) {
- errStr = Sprintf("Cannot enable TTL on unknown column: '%s'", colName.data());
- return false;
- }
-
+
+ auto it = colName2Id.find(colName);
+ if (it == colName2Id.end()) {
+ errStr = Sprintf("Cannot enable TTL on unknown column: '%s'", colName.data());
+ return false;
+ }
+
const TTableInfo::TColumn* column = nullptr;
- const ui32 colId = it->second;
- if (alterColumns.contains(colId)) {
- column = &alterColumns.at(colId);
- } else if (sourceColumns.contains(colId)) {
- column = &sourceColumns.at(colId);
- } else {
-            Y_VERIFY(false, "Unknown column");
- }
-
- if (IsDropped(*column)) {
- errStr = Sprintf("Cannot enable TTL on dropped column: '%s'", colName.data());
- return false;
- }
-
+ const ui32 colId = it->second;
+ if (alterColumns.contains(colId)) {
+ column = &alterColumns.at(colId);
+ } else if (sourceColumns.contains(colId)) {
+ column = &sourceColumns.at(colId);
+ } else {
+            Y_VERIFY(false, "Unknown column");
+ }
+
+ if (IsDropped(*column)) {
+ errStr = Sprintf("Cannot enable TTL on dropped column: '%s'", colName.data());
+ return false;
+ }
+
const auto unit = enabled.GetColumnUnit();
if (!ValidateUnit(*column, unit, errStr)) {
- return false;
- }
+ return false;
+ }
if (enabled.HasSysSettings()) {
const auto& sys = enabled.GetSysSettings();
@@ -100,20 +100,20 @@ bool ValidateTtlSettings(const NKikimrSchemeOp::TTTLSettings& ttl,
return false;
}
}
- break;
- }
-
- case TTtlProto::kDisabled:
- break;
-
- default:
- errStr = "TTL status must be specified";
- return false;
- }
-
- return true;
-}
-
+ break;
+ }
+
+ case TTtlProto::kDisabled:
+ break;
+
+ default:
+ errStr = "TTL status must be specified";
+ return false;
+ }
+
+ return true;
+}
+
static bool ValidateColumnTableTtl(const NKikimrSchemeOp::TColumnDataLifeCycle::TTtl& ttl,
const THashMap<ui32, TOlapSchema::TColumn>& sourceColumns,
const THashMap<ui32, TOlapSchema::TColumn>& alterColumns,
@@ -238,4 +238,4 @@ bool ValidateTtlSettingsChange(
return true;
}
-}}
+}}
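ValidateTtlSettings above accepts a TTL specification only when the status case is set, the referenced column is known (looked up first among altered columns, then among source columns), the column is not dropped, and ValidateUnit agrees that its type matches the requested unit. A minimal protobuf-text sketch of the enabled branch, using only the fields the code reads (Enabled, ColumnName); the column name is illustrative and the optional fields are omitted:

    Enabled {
      ColumnName: "created_at"
    }

The column unit and SysSettings are optional here; when present they are checked further by ValidateUnit and the SysSettings validation above.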
diff --git a/ydb/core/tx/schemeshard/ut_base.cpp b/ydb/core/tx/schemeshard/ut_base.cpp
index b9e42fdd88d..a76dcf9d5bb 100644
--- a/ydb/core/tx/schemeshard/ut_base.cpp
+++ b/ydb/core/tx/schemeshard/ut_base.cpp
@@ -2,7 +2,7 @@
#include <ydb/core/tx/schemeshard/schemeshard_utils.h>
#include <ydb/core/base/compile_time_flags.h>
-
+
#include <util/generic/size_literals.h>
#include <util/string/cast.h>
@@ -664,9 +664,9 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(CreateTable) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime);
ui64 txId = 100;
-
+
AsyncMkDir(runtime, ++txId, "/MyRoot", "DirA");
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA", R"(
Name: "Table1"
@@ -676,15 +676,15 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
)");
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+ "Name: \"Table2\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
"Columns { Name: \"YaValue\" Type: \"Yson\"}"
"Columns { Name: \"MoreValue\" Type: \"Json\"}"
- "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
- );
+ "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
+ );
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
"Name: \"x/y\""
"Columns { Name: \"key\" Type: \"Uint32\"}"
@@ -747,25 +747,25 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
"Name: \"Table3\""
"Columns { Name: \"key\" Type: \"Uint32\"}");
-
+
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table3\""
- "Columns { Name: \"key\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"key\"]"
+ "Name: \"Table3\""
+ "Columns { Name: \"key\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"key\"]"
"PartitionConfig {ChannelProfileId: 42}");
-
+
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table3\""
- "Columns { Name: \"key\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"key\"]"
- "PartitionConfig {ChannelProfileId: 1}");
-
+ "Name: \"Table3\""
+ "Columns { Name: \"key\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"key\"]"
+ "PartitionConfig {ChannelProfileId: 1}");
+
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table4\""
- "Columns { Name: \"key\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"key\"]"
- "PartitionConfig {ChannelProfileId: 0}");
-
+ "Name: \"Table4\""
+ "Columns { Name: \"key\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"key\"]"
+ "PartitionConfig {ChannelProfileId: 0}");
+
TestModificationResult(runtime, txId-18, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-17, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-16, NKikimrScheme::StatusAccepted);
@@ -798,8 +798,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::PathExist});
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/Table2"),
{NLs::PathExist});
- }
-
+ }
+
Y_UNIT_TEST(CreateTableWithDate) { //+
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -2396,17 +2396,17 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(IgnoreUserColumnIds) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
- TestMkDir(runtime, ++txId, "/MyRoot", "DirA");
- TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"TableIgnoreIds\""
- "Columns { Name: \"key\" Type: \"Uint32\" Id: 42}"
- "Columns { Name: \"col3\" Type: \"Uint32\" Id: 100500}"
- "Columns { Name: \"col1\" Type: \"Uint32\" Id: 100}"
- "KeyColumnNames: [\"key\"]"
- "KeyColumnIds: 100500");
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
+ TestMkDir(runtime, ++txId, "/MyRoot", "DirA");
+ TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
+ "Name: \"TableIgnoreIds\""
+ "Columns { Name: \"key\" Type: \"Uint32\" Id: 42}"
+ "Columns { Name: \"col3\" Type: \"Uint32\" Id: 100500}"
+ "Columns { Name: \"col1\" Type: \"Uint32\" Id: 100}"
+ "KeyColumnNames: [\"key\"]"
+ "KeyColumnIds: 100500");
env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/TableIgnoreIds"),
@@ -2423,8 +2423,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
UNIT_ASSERT_VALUES_EQUAL(1, table.KeyColumnIdsSize());
UNIT_ASSERT_VALUES_EQUAL(1, table.GetKeyColumnIds(0));
}});
- }
-
+ }
+
#if 0 // KIKIMR-1452
Y_UNIT_TEST(CreateSameTable) {
TTestBasicRuntime runtime;
@@ -2483,19 +2483,19 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(CreateTableWithUniformPartitioning) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"PartitionedTable1\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+ "Name: \"PartitionedTable1\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"key3\" Type: \"Uint64\"}"
+ "Columns { Name: \"key3\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
- "UniformPartitionsCount: 10"
- );
- env.TestWaitNotification(runtime, txId);
+ "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
+ "UniformPartitionsCount: 10"
+ );
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/PartitionedTable1"),
{NLs::IsTable,
@@ -2515,48 +2515,48 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot/PartitionedTable2"),
{NLs::IsTable,
NLs::ShardsInsideDomain(20)});
- }
-
+ }
+
Y_UNIT_TEST(CreateTableWithSplitBounadaries) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
- ++txId;
- TestCreateTable(runtime, txId, "/MyRoot", R"(
- Name: "PartitionedTable1"
- Columns { Name: "key1" Type: "Uint32"}
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+ ++txId;
+ TestCreateTable(runtime, txId, "/MyRoot", R"(
+ Name: "PartitionedTable1"
+ Columns { Name: "key1" Type: "Uint32"}
Columns { Name: "key2" Type: "Utf8"}
- Columns { Name: "key3" Type: "Uint64"}
+ Columns { Name: "key3" Type: "Uint64"}
Columns { Name: "key4" Type: "Int32"}
Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key1", "key2", "key3", "key4"]
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint32 : 100 } }
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint32 : 200 } }
- Tuple { } # NULL
- Tuple { Optional { Uint64 : 100500 } }
+ KeyColumnNames: ["key1", "key2", "key3", "key4"]
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint32 : 100 } }
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint32 : 200 } }
+ Tuple { } # NULL
+ Tuple { Optional { Uint64 : 100500 } }
Tuple { Optional { Int32 : -100500 } }
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint32 : 200 } }
- Tuple { Optional { Text : "Tinky Winky" } }
- Tuple { Optional { Uint64 : 200 } }
- }}
- )");
- env.TestWaitNotification(runtime, txId);
-
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint32 : 200 } }
+ Tuple { Optional { Text : "Tinky Winky" } }
+ Tuple { Optional { Uint64 : 200 } }
+ }}
+ )");
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot//PartitionedTable1", true, true),
{NLs::IsTable,
NLs::CheckBoundaries});
- }
-
+ }
+
Y_UNIT_TEST(CreateTableWithConfig) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"___(
Name: "Table1"
Columns { Name: "key1" Type: "Uint32"}
@@ -2576,26 +2576,26 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
ForceSizeToCompact: 20000
CompactionBrokerQueue: 1
KeepInCache: true
- }
+ }
}
})___");
env.TestWaitNotification(runtime, txId);
-
+
auto t1 = DescribePath(runtime, "/MyRoot/Table1");
-
+
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
-
+
auto t2 = DescribePath(runtime, "/MyRoot/Table1");
-
+
UNIT_ASSERT_VALUES_EQUAL(t1.DebugString(), t2.DebugString());
- }
-
+ }
+
Y_UNIT_TEST(CreateTableWithNamedConfig) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"___(
Name: "Table2"
Columns { Name: "key1" Type: "Uint32"}
@@ -2606,22 +2606,22 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
NamedCompactionPolicy : "UserTableDefault"
})___");
env.TestWaitNotification(runtime, txId);
-
+
auto t1 = DescribePath(runtime, "/MyRoot/Table2");
-
+
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
-
+
auto t2 = DescribePath(runtime, "/MyRoot/Table2");
-
+
UNIT_ASSERT_VALUES_EQUAL(t1.DebugString(), t2.DebugString());
- }
-
+ }
+
Y_UNIT_TEST(CreateTableWithUnknownNamedConfig) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"___(
Name: "Table2"
Columns { Name: "key1" Type: "Uint32"}
@@ -2632,8 +2632,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
NamedCompactionPolicy : "Default"
})___",
{NKikimrScheme::StatusInvalidParameter});
- }
-
+ }
+
Y_UNIT_TEST(CreateAlterTableWithCodec) {
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -2713,22 +2713,22 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(DependentOps) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
- ++txId;
- AsyncMkDir(runtime, txId, "/MyRoot", "DirA");
- ++txId;
- AsyncMkDir(runtime, txId, "/MyRoot/DirA", "SubDirA");
- ++txId;
- AsyncMkDir(runtime, txId, "/MyRoot/DirA/SubDirA", "AAA");
- ++txId;
- AsyncMkDir(runtime, txId, "/MyRoot", "DirB");
- ++txId;
- AsyncMkDir(runtime, txId, "/MyRoot/DirA/SubDirA/AAA", "aaa");
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
+ ++txId;
+ AsyncMkDir(runtime, txId, "/MyRoot", "DirA");
+ ++txId;
+ AsyncMkDir(runtime, txId, "/MyRoot/DirA", "SubDirA");
+ ++txId;
+ AsyncMkDir(runtime, txId, "/MyRoot/DirA/SubDirA", "AAA");
+ ++txId;
+ AsyncMkDir(runtime, txId, "/MyRoot", "DirB");
+ ++txId;
+ AsyncMkDir(runtime, txId, "/MyRoot/DirA/SubDirA/AAA", "aaa");
+
env.TestWaitNotification(runtime, {txId, txId-1, txId-2, txId-3, txId-4});
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/"),
{NLs::Finished,
NLs::ChildrenCount(2)});
@@ -2752,36 +2752,36 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::Finished,
NLs::PathVersionEqual(3),
NLs::NoChildren});
- }
-
+ }
+
Y_UNIT_TEST(ParallelCreateTable) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
AsyncMkDir(runtime, ++txId, "/MyRoot", "DirA");
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Name: \"Table1\""
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]"
- );
+ "KeyColumnNames: [\"RowId\"]"
+ );
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+ "Name: \"Table2\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
- );
+ "KeyColumnNames: [\"RowId\", \"key1\", \"key2\"]"
+ );
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusAccepted);
-
+
env.TestWaitNotification(runtime, {txId, txId-1, txId-2});
-
- TestDescribe(runtime, "/MyRoot/DirA/Table1");
- TestDescribe(runtime, "/MyRoot/DirA/Table2");
+
+ TestDescribe(runtime, "/MyRoot/DirA/Table1");
+ TestDescribe(runtime, "/MyRoot/DirA/Table2");
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA"),
{NLs::PathVersionEqual(7)});
@@ -2789,8 +2789,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::PathVersionEqual(3)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/Table2"),
{NLs::PathVersionEqual(3)});
- }
-
+ }
+
Y_UNIT_TEST(ParallelCreateSameTable) { //+
using ESts = NKikimrScheme::EStatus;
@@ -2839,29 +2839,29 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(CopyTable) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"Table\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+ "Name: \"Table\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"key3\" Type: \"Uint64\"}"
+ "Columns { Name: \"key3\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
- "UniformPartitionsCount: 2"
- );
- env.TestWaitNotification(runtime, txId);
-
+ "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
+ "UniformPartitionsCount: 2"
+ );
+ env.TestWaitNotification(runtime, txId);
+
TestCopyTable(runtime, ++txId, "/MyRoot", "NewTable", "/MyRoot/Table");
- env.TestWaitNotification(runtime, txId);
-
- TestDescribe(runtime, "/MyRoot/NewTable");
-
- // Try to Copy over existing table
+ env.TestWaitNotification(runtime, txId);
+
+ TestDescribe(runtime, "/MyRoot/NewTable");
+
+ // Try to Copy over existing table
TestCopyTable(runtime, ++txId, "/MyRoot", "Table", "/MyRoot/NewTable", NKikimrScheme::StatusAlreadyExists);
-
-    // Try to Copy over existing dir
+
+    // Try to Copy over existing dir
AsyncMkDir(runtime, ++txId, "/MyRoot", "Dir");
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Dir", "/MyRoot/Table");
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
@@ -2876,46 +2876,46 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::Finished,
NLs::IsTable,
NLs::PathVersionEqual(3)});
- }
-
+ }
+
Y_UNIT_TEST(CopyTableTwiceSimultaneously) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"Table\""
- "Columns { Name: \"key\" Type: \"Uint32\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"key\"]"
- "UniformPartitionsCount: 2"
- );
- env.TestWaitNotification(runtime, txId);
-
- // Write some data to the shards in order to prevent their deletion right after merge
- auto fnWriteRow = [&] (ui64 tabletId, ui32 key) {
- TString writeQuery = Sprintf(R"(
- (
- (let key '( '('key (Uint32 '%u)) ) )
- (let value '('('Value (Utf8 'aaaaaaaa)) ) )
- (return (AsList (UpdateRow '__user__Table key value) ))
- )
- )", key);
- NKikimrMiniKQL::TResult result;
- TString err;
+ "Name: \"Table\""
+ "Columns { Name: \"key\" Type: \"Uint32\"}"
+ "Columns { Name: \"Value\" Type: \"Utf8\"}"
+ "KeyColumnNames: [\"key\"]"
+ "UniformPartitionsCount: 2"
+ );
+ env.TestWaitNotification(runtime, txId);
+
+ // Write some data to the shards in order to prevent their deletion right after merge
+ auto fnWriteRow = [&] (ui64 tabletId, ui32 key) {
+ TString writeQuery = Sprintf(R"(
+ (
+ (let key '( '('key (Uint32 '%u)) ) )
+ (let value '('('Value (Utf8 'aaaaaaaa)) ) )
+ (return (AsList (UpdateRow '__user__Table key value) ))
+ )
+ )", key);
+ NKikimrMiniKQL::TResult result;
+ TString err;
NKikimrProto::EReplyStatus status = LocalMiniKQL(runtime, tabletId, writeQuery, result, err);
- UNIT_ASSERT_VALUES_EQUAL(err, "");
+ UNIT_ASSERT_VALUES_EQUAL(err, "");
UNIT_ASSERT_VALUES_EQUAL(status, NKikimrProto::EReplyStatus::OK);
- };
+ };
fnWriteRow(TTestTxConfig::FakeHiveTablets, 0);
fnWriteRow(TTestTxConfig::FakeHiveTablets+1, 0x80000000u);
-
+
AsyncCopyTable(runtime, ++txId, "/MyRoot", "NewTable", "/MyRoot/Table");
AsyncCopyTable(runtime, ++txId, "/MyRoot", "NewTable2", "/MyRoot/NewTable");
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
env.TestWaitNotification(runtime, {txId, txId-1});
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::Finished,
NLs::PathVersionEqual(3)});
@@ -2927,77 +2927,77 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::Finished,
NLs::ChildrenCount(2)});
- }
-
+ }
+
Y_UNIT_TEST(CopyTableAndConcurrentChanges) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestCreateTable(runtime, ++txId, "/MyRoot", //124
- "Name: \"Table\""
- "Columns { Name: \"key1\" Type: \"Uint32\"}"
+ "Name: \"Table\""
+ "Columns { Name: \"key1\" Type: \"Uint32\"}"
"Columns { Name: \"key2\" Type: \"Utf8\"}"
- "Columns { Name: \"key3\" Type: \"Uint64\"}"
+ "Columns { Name: \"key3\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
- "UniformPartitionsCount: 2"
- );
- env.TestWaitNotification(runtime, txId);
-
+ "KeyColumnNames: [\"key1\", \"key2\", \"key3\"]"
+ "UniformPartitionsCount: 2"
+ );
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathVersionEqual(3)});
- // Copy & Drop
+ // Copy & Drop
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy1", "/MyRoot/Table"); //125
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table"); //126
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathVersionEqual(3)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Copy1"),
{NLs::PathVersionEqual(3)});
- // Copy & Alter
+ // Copy & Alter
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy2", "/MyRoot/Table"); //127
AsyncAlterTable(runtime, ++txId, "/MyRoot", //128
- "Name: \"Table\""
- "Columns { Name: \"add_1\" Type: \"Uint32\"}"
- "Columns { Name: \"add_2\" Type: \"Uint64\"}"
- );
+ "Name: \"Table\""
+ "Columns { Name: \"add_1\" Type: \"Uint32\"}"
+ "Columns { Name: \"add_2\" Type: \"Uint64\"}"
+ );
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathVersionEqual(3)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Copy2"),
{NLs::PathVersionEqual(3)});
- // Alter & Copy
+ // Alter & Copy
AsyncAlterTable(runtime, ++txId, "/MyRoot", //129
- "Name: \"Table\""
- "Columns { Name: \"add_1\" Type: \"Uint32\"}"
- "Columns { Name: \"add_2\" Type: \"Uint64\"}"
- );
+ "Name: \"Table\""
+ "Columns { Name: \"add_1\" Type: \"Uint32\"}"
+ "Columns { Name: \"add_2\" Type: \"Uint64\"}"
+ );
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy3", "/MyRoot/Table"); //130
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathVersionEqual(4)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Copy3"),
{NLs::PathNotExist});
- // Copy & Copy
+ // Copy & Copy
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy4", "/MyRoot/Table"); //131
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy5", "/MyRoot/Table"); //132
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathVersionEqual(4)});
@@ -3006,169 +3006,169 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot/Copy5"),
{NLs::PathNotExist});
- // Drop & Copy
+ // Drop & Copy
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table"); //133
AsyncCopyTable(runtime, ++txId, "/MyRoot", "Copy6", "/MyRoot/Table"); //134
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
- env.TestWaitNotification(runtime, {txId-1, txId});
+ env.TestWaitNotification(runtime, {txId-1, txId});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::PathNotExist});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Copy6"),
{NLs::PathNotExist});
- }
-
+ }
+
Y_UNIT_TEST(CopyTableAndConcurrentSplit) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key"]
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
UniformPartitionsCount: 2)");
- env.TestWaitNotification(runtime, txId);
-
+ env.TestWaitNotification(runtime, txId);
+
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437195
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 3000000000 } }
- }
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 3000000000 } }
+ }
})");
AsyncCopyTable(runtime, ++txId, "/MyRoot", "NewTable", "/MyRoot/Table");
- // New split must be rejected while CopyTable is in progress
+ // New split must be rejected while CopyTable is in progress
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437194
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 1000000000 } }
- }
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 1000000000 } }
+ }
})");
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
env.TestWaitNotification(runtime, {txId-1, txId-2});
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/NewTable", true),
{NLs::PartitionCount(3),
NLs::PathVersionEqual(4)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{NLs::PartitionCount(3),
NLs::PathVersionEqual(4)});
-
- // Delete all tables and wait for everything to be cleaned up
+
+ // Delete all tables and wait for everything to be cleaned up
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table");
AsyncDropTable(runtime, ++txId, "/MyRoot", "NewTable");
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusAccepted);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
env.TestWaitTabletDeletion(runtime, xrange(TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+10));
- }
-
+ }
+
Y_UNIT_TEST(CopyTableAndConcurrentMerge) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 3
- )");
- env.TestWaitNotification(runtime, txId);
-
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 3
+ )");
+ env.TestWaitNotification(runtime, txId);
+
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table",
- R"(
+ R"(
SourceTabletId: 9437195
SourceTabletId: 9437196
- )");
+ )");
AsyncCopyTable(runtime, ++txId, "/MyRoot", "NewTable", "/MyRoot/Table");
- // New split must be rejected while CopyTable is in progress
+ // New split must be rejected while CopyTable is in progress
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table",
- R"(
+ R"(
SourceTabletId: 9437197
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 300 } }
- }
- }
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 300 } }
+ }
+ }
)");
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
-
+
env.TestWaitNotification(runtime, {txId-1, txId-2});
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/NewTable", true),
{NLs::PartitionCount(2),
NLs::PathVersionEqual(4)});
-
- // Delete all tables and wait for everything to be cleaned up
+
+ // Delete all tables and wait for everything to be cleaned up
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table");
AsyncDropTable(runtime, ++txId, "/MyRoot", "NewTable");
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusAccepted);
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
env.TestWaitTabletDeletion(runtime, xrange(TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+10));
- }
-
+ }
+
Y_UNIT_TEST(CopyTableAndConcurrentSplitMerge) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint32 : 100 } }
- }}
- SplitBoundary { KeyPrefix {
- Tuple { Optional { Uint32 : 200 } }
- }}
- )");
- env.TestWaitNotification(runtime, txId);
-
- // Merge and split so that overall partition count stays the same but shard boundaries change
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint32 : 100 } }
+ }}
+ SplitBoundary { KeyPrefix {
+ Tuple { Optional { Uint32 : 200 } }
+ }}
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ // Merge and split so that overall partition count stays the same but shard boundaries change
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table",
- R"(
+ R"(
SourceTabletId: 9437194
SourceTabletId: 9437195
- )");
+ )");
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table",
- R"(
+ R"(
SourceTabletId: 9437196
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 4000 } }
- }
- }
- )");
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 4000 } }
+ }
+ }
+ )");
AsyncCopyTable(runtime, ++txId, "/MyRoot", "NewTable", "/MyRoot/Table"); //104
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusAccepted);
-
+
env.TestWaitNotification(runtime, {txId, txId-1, txId-2});
-
- auto fnCheckSingleColumnKey = [](TString keyBuf, ui32 val) {
- TSerializedCellVec cells(keyBuf);
+
+ auto fnCheckSingleColumnKey = [](TString keyBuf, ui32 val) {
+ TSerializedCellVec cells(keyBuf);
UNIT_ASSERT_VALUES_EQUAL(cells.GetCells().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(*(const ui32*)cells.GetCells()[0].Data(), val);
- };
-
+ UNIT_ASSERT_VALUES_EQUAL(*(const ui32*)cells.GetCells()[0].Data(), val);
+ };
+
TVector<ui32> expectedBoundaries = {200, 4000};
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/NewTable", true),
{NLs::PartitionCount(expectedBoundaries.size()+1),
NLs::PathVersionEqual(4),
@@ -3177,15 +3177,15 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
fnCheckSingleColumnKey(describeRec.GetPathDescription().GetTablePartitions(i).GetEndOfRangeKeyPrefix(), expectedBoundaries[i]);
}
}});
-
- // Delete all tables and wait for everything to be cleaned up
+
+ // Delete all tables and wait for everything to be cleaned up
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table");
AsyncDropTable(runtime, ++txId, "/MyRoot", "NewTable");
- env.TestWaitNotification(runtime, {txId-1, txId});
-
+ env.TestWaitNotification(runtime, {txId-1, txId});
+
env.TestWaitTabletDeletion(runtime, xrange(TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets + 10));
- }
-
+ }
+
Y_UNIT_TEST(CopyTableWithAlterConfig) { //+
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -3560,206 +3560,206 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(AlterTableAndConcurrentSplit) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 3
- PartitionConfig {
- CompactionPolicy {
- InMemSizeToSnapshot: 4194304
- InMemStepsToSnapshot: 300
- InMemForceStepsToSnapshot: 500
- InMemForceSizeToSnapshot: 16777216
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 67108864
- ReadAheadLoThreshold: 16777216
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 0
- CountToCompact: 8
- ForceCountToCompact: 8
- ForceSizeToCompact: 134217728
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 3
+ PartitionConfig {
+ CompactionPolicy {
+ InMemSizeToSnapshot: 4194304
+ InMemStepsToSnapshot: 300
+ InMemForceStepsToSnapshot: 500
+ InMemForceSizeToSnapshot: 16777216
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 67108864
+ ReadAheadLoThreshold: 16777216
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 0
+ CountToCompact: 8
+ ForceCountToCompact: 8
+ ForceSizeToCompact: 134217728
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
}}
- )");
- env.TestWaitNotification(runtime, txId);
-
- // Write some data to the shards in order to prevent their deletion right after merge
- auto fnWriteRow = [&] (ui64 tabletId, ui32 key) {
- TString writeQuery = Sprintf(R"(
- (
- (let key '( '('key (Uint32 '%u)) ) )
- (let value '('('Value (Utf8 'aaaaaaaa)) ) )
- (return (AsList (UpdateRow '__user__Table key value) ))
- )
- )", key);
- NKikimrMiniKQL::TResult result;
- TString err;
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ // Write some data to the shards in order to prevent their deletion right after merge
+ auto fnWriteRow = [&] (ui64 tabletId, ui32 key) {
+ TString writeQuery = Sprintf(R"(
+ (
+ (let key '( '('key (Uint32 '%u)) ) )
+ (let value '('('Value (Utf8 'aaaaaaaa)) ) )
+ (return (AsList (UpdateRow '__user__Table key value) ))
+ )
+ )", key);
+ NKikimrMiniKQL::TResult result;
+ TString err;
NKikimrProto::EReplyStatus status = LocalMiniKQL(runtime, tabletId, writeQuery, result, err);
- UNIT_ASSERT_VALUES_EQUAL(err, "");
+ UNIT_ASSERT_VALUES_EQUAL(err, "");
UNIT_ASSERT_VALUES_EQUAL(status, NKikimrProto::EReplyStatus::OK);
- };
+ };
fnWriteRow(TTestTxConfig::FakeHiveTablets, 0);
fnWriteRow(TTestTxConfig::FakeHiveTablets+1, 0x80000000u);
-
+
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437194
SourceTabletId: 9437195
- )");
+ )");
AsyncAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- ExecutorCacheSize: 12121212
- CompactionPolicy {
- InMemSizeToSnapshot: 4194304
- InMemStepsToSnapshot: 300
- InMemForceStepsToSnapshot: 500
- InMemForceSizeToSnapshot: 16777216
- InMemCompactionBrokerQueue: 0
- ReadAheadHiThreshold: 67108864
- ReadAheadLoThreshold: 16777216
- MinDataPageSize: 7168
- SnapBrokerQueue: 0
- Generation {
- GenerationId: 0
- SizeToCompact: 0
- CountToCompact: 8
- ForceCountToCompact: 8
- ForceSizeToCompact: 134217728
- CompactionBrokerQueue: 1
- KeepInCache: true
- }
- Generation {
- GenerationId: 1
- SizeToCompact: 41943040
- CountToCompact: 5
- ForceCountToCompact: 8
- ForceSizeToCompact: 536870912
- CompactionBrokerQueue: 2
- KeepInCache: false
- }
- }
- }
- )");
- // New split must be rejected while CopyTable is in progress
+ Name: "Table"
+ PartitionConfig {
+ ExecutorCacheSize: 12121212
+ CompactionPolicy {
+ InMemSizeToSnapshot: 4194304
+ InMemStepsToSnapshot: 300
+ InMemForceStepsToSnapshot: 500
+ InMemForceSizeToSnapshot: 16777216
+ InMemCompactionBrokerQueue: 0
+ ReadAheadHiThreshold: 67108864
+ ReadAheadLoThreshold: 16777216
+ MinDataPageSize: 7168
+ SnapBrokerQueue: 0
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 0
+ CountToCompact: 8
+ ForceCountToCompact: 8
+ ForceSizeToCompact: 134217728
+ CompactionBrokerQueue: 1
+ KeepInCache: true
+ }
+ Generation {
+ GenerationId: 1
+ SizeToCompact: 41943040
+ CountToCompact: 5
+ ForceCountToCompact: 8
+ ForceSizeToCompact: 536870912
+ CompactionBrokerQueue: 2
+ KeepInCache: false
+ }
+ }
+ }
+ )");
+ // New split must be rejected while CopyTable is in progress
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437196
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 300 } }
- }
- }
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 300 } }
+ }
+ }
)");
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
env.TestWaitNotification(runtime, {txId, txId-1, txId-2});
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{NLs::PartitionCount(2)});
-
- NTabletFlatScheme::TSchemeChanges scheme;
- TString errStr;
-
- // Check local scheme on datashards
+
+ NTabletFlatScheme::TSchemeChanges scheme;
+ TString errStr;
+
+ // Check local scheme on datashards
LocalSchemeTx(runtime, TTestTxConfig::FakeHiveTablets, "", true, scheme, errStr);
- UNIT_ASSERT_VALUES_EQUAL(errStr, "");
- UNIT_ASSERT_C(!ToString(scheme).Contains("ExecutorCacheSize: 12121212"), "Old shard must not participate in ALTER");
-
+ UNIT_ASSERT_VALUES_EQUAL(errStr, "");
+ UNIT_ASSERT_C(!ToString(scheme).Contains("ExecutorCacheSize: 12121212"), "Old shard must not participate in ALTER");
+
LocalSchemeTx(runtime, TTestTxConfig::FakeHiveTablets+1, "", true, scheme, errStr);
- UNIT_ASSERT_VALUES_EQUAL(errStr, "");
- UNIT_ASSERT_C(!ToString(scheme).Contains("ExecutorCacheSize: 12121212"), "Old shard must not participate in ALTER");
-
+ UNIT_ASSERT_VALUES_EQUAL(errStr, "");
+ UNIT_ASSERT_C(!ToString(scheme).Contains("ExecutorCacheSize: 12121212"), "Old shard must not participate in ALTER");
+
LocalSchemeTx(runtime, TTestTxConfig::FakeHiveTablets+2, "", true, scheme, errStr);
- UNIT_ASSERT_VALUES_EQUAL(errStr, "");
- UNIT_ASSERT_STRING_CONTAINS_C(ToString(scheme), "ExecutorCacheSize: 12121212", "New shard must participate in ALTER");
- {
- // Read user table schema from new shard;
- NKikimrMiniKQL::TResult result;
- TString err;
+ UNIT_ASSERT_VALUES_EQUAL(errStr, "");
+ UNIT_ASSERT_STRING_CONTAINS_C(ToString(scheme), "ExecutorCacheSize: 12121212", "New shard must participate in ALTER");
+ {
+ // Read user table schema from new shard;
+ NKikimrMiniKQL::TResult result;
+ TString err;
NKikimrProto::EReplyStatus status = LocalMiniKQL(runtime, TTestTxConfig::FakeHiveTablets+2, R"(
- (
- (let range '('('Tid (Uint64 '0) (Void))))
- (let select '('LocalTid 'Schema))
- (let options '('('ItemsLimit (Uint64 '1))) )
- (let result (SelectRange 'UserTables range select options))
- (return (AsList (SetResult 'TableInfo result) ))
- )
- )", result, err);
+ (
+ (let range '('('Tid (Uint64 '0) (Void))))
+ (let select '('LocalTid 'Schema))
+ (let options '('('ItemsLimit (Uint64 '1))) )
+ (let result (SelectRange 'UserTables range select options))
+ (return (AsList (SetResult 'TableInfo result) ))
+ )
+ )", result, err);
UNIT_ASSERT_VALUES_EQUAL(status, NKikimrProto::EReplyStatus::OK);
- UNIT_ASSERT_VALUES_EQUAL(err, "");
-// Cerr << result << Endl;
- // Value { Struct { Optional { Struct { List { Struct { Optional { Uint32: 1001 } } Struct { Optional { Bytes: ".." } } } } } } }
- ui32 localTid = result.GetValue().GetStruct(0).GetOptional().GetStruct(0).GetList(0).GetStruct(0).GetOptional().GetUint32();
- TString schemaStr = result.GetValue().GetStruct(0).GetOptional().GetStruct(0).GetList(0).GetStruct(1).GetOptional().GetBytes();
- UNIT_ASSERT_VALUES_EQUAL(localTid, 1001);
- UNIT_ASSERT(!schemaStr.empty());
+ UNIT_ASSERT_VALUES_EQUAL(err, "");
+// Cerr << result << Endl;
+ // Value { Struct { Optional { Struct { List { Struct { Optional { Uint32: 1001 } } Struct { Optional { Bytes: ".." } } } } } } }
+ ui32 localTid = result.GetValue().GetStruct(0).GetOptional().GetStruct(0).GetList(0).GetStruct(0).GetOptional().GetUint32();
+ TString schemaStr = result.GetValue().GetStruct(0).GetOptional().GetStruct(0).GetList(0).GetStruct(1).GetOptional().GetBytes();
+ UNIT_ASSERT_VALUES_EQUAL(localTid, 1001);
+ UNIT_ASSERT(!schemaStr.empty());
NKikimrSchemeOp::TTableDescription tableDescr;
bool ok = tableDescr.ParseFromArray(schemaStr.data(), schemaStr.size());
- UNIT_ASSERT(ok);
-// Cerr << tableDescr << Endl;
- UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetExecutorCacheSize(), 12121212);
- UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetCompactionPolicy().GenerationSize(), 2);
- UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetCompactionPolicy().GetGeneration(1).GetForceSizeToCompact(), 536870912);
- }
-
+ UNIT_ASSERT(ok);
+// Cerr << tableDescr << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetExecutorCacheSize(), 12121212);
+ UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetCompactionPolicy().GenerationSize(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(tableDescr.GetPartitionConfig().GetCompactionPolicy().GetGeneration(1).GetForceSizeToCompact(), 536870912);
+ }
+
LocalSchemeTx(runtime, TTestTxConfig::FakeHiveTablets+3, "", true, scheme, errStr);
- UNIT_ASSERT_VALUES_EQUAL(errStr, "");
- UNIT_ASSERT_STRING_CONTAINS_C(ToString(scheme), "ExecutorCacheSize: 12121212", "Non-splitted shard must participate in ALTER");
-
- // Drop the table and wait for everything to be cleaned up
+ UNIT_ASSERT_VALUES_EQUAL(errStr, "");
+ UNIT_ASSERT_STRING_CONTAINS_C(ToString(scheme), "ExecutorCacheSize: 12121212", "Non-splitted shard must participate in ALTER");
+
+ // Drop the table and wait for everything to be cleaned up
TestDropTable(runtime, ++txId, "/MyRoot", "Table");
- env.TestWaitNotification(runtime, txId);
-
+ env.TestWaitNotification(runtime, txId);
+
env.TestWaitTabletDeletion(runtime, xrange(TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+10));
- }
-
+ }
+
Y_UNIT_TEST(DropTableAndConcurrentSplit) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- UniformPartitionsCount: 3
- )");
- env.TestWaitNotification(runtime, txId);
-
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ UniformPartitionsCount: 3
+ )");
+ env.TestWaitNotification(runtime, txId);
+
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437195
SourceTabletId: 9437196
- )");
+ )");
AsyncDropTable(runtime, ++txId, "/MyRoot", "Table");
// New split must be rejected while DropTable is in progress
AsyncSplitTable(runtime, ++txId, "/MyRoot/Table", R"(
SourceTabletId: 9437197
- SplitBoundary {
- KeyPrefix {
- Tuple { Optional { Uint32: 300 } }
- }
- }
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 300 } }
+ }
+ }
)");
TestModificationResult(runtime, txId-2, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId-1, NKikimrScheme::StatusAccepted);
TestModificationResult(runtime, txId, NKikimrScheme::StatusMultipleModifications);
env.TestWaitNotification(runtime, {txId, txId-1, txId-2});
-
- // Wait for everything to be cleaned up
+
+ // Wait for everything to be cleaned up
env.TestWaitTabletDeletion(runtime, xrange(TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+10));
- }
-
+ }
+
Y_UNIT_TEST(AlterTable) { //+
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -3776,7 +3776,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Columns { Name: "value" Type: "Utf8"}
KeyColumnNames: ["key1", "key2"]
)");
- env.TestWaitNotification(runtime, txId);
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
@@ -3784,14 +3784,14 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Cdbg << "AlterTable: add column" << Endl;
cols.insert("add_1");
cols.insert("add_2");
- cols.insert("add_50");
+ cols.insert("add_50");
TestAlterTable(runtime, ++txId, "/MyRoot",
R"(Name: "Table"
Columns { Name: "add_1" Type: "Uint32"}
Columns { Name: "add_2" Type: "Uint64"}
- Columns { Name: "add_50" Type: "Utf8" Id: 100500}
+ Columns { Name: "add_50" Type: "Utf8" Id: 100500}
)");
- env.TestWaitNotification(runtime, txId);
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
@@ -3821,12 +3821,12 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
- Cdbg << "AlterTable: add column ignores id" << Endl;
- TestAlterTable(runtime, ++txId, "/MyRoot",
- R"(Name: "Table" Columns { Name: "add_100" Type: "Utf8" Id: 1 })");
- cols.insert("add_100");
- env.TestWaitNotification(runtime, txId);
-
+ Cdbg << "AlterTable: add column ignores id" << Endl;
+ TestAlterTable(runtime, ++txId, "/MyRoot",
+ R"(Name: "Table" Columns { Name: "add_100" Type: "Utf8" Id: 1 })");
+ cols.insert("add_100");
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
@@ -3837,17 +3837,17 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
dropCols.insert("add_2");
TestAlterTable(runtime, ++txId, "/MyRoot",
R"(Name: "Table" DropColumns { Name: "value" } DropColumns { Name: "add_2" })");
- env.TestWaitNotification(runtime, txId);
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
- Cdbg << "AlterTable: drop column ignores id" << Endl;
- dropCols.insert("add_100");
- TestAlterTable(runtime, ++txId, "/MyRoot",
- R"(Name: "Table" DropColumns { Name: "add_100" Id: 500100 })");
- env.TestWaitNotification(runtime, txId);
-
+ Cdbg << "AlterTable: drop column ignores id" << Endl;
+ dropCols.insert("add_100");
+ TestAlterTable(runtime, ++txId, "/MyRoot",
+ R"(Name: "Table" DropColumns { Name: "add_100" Id: 500100 })");
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
@@ -3861,11 +3861,11 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
R"(Name: "Table" DropColumns { Name: "key1" })",
{NKikimrScheme::StatusSchemeError, NKikimrScheme::StatusInvalidParameter});
- Cdbg << "AlterTable: drop without column name " << Endl;
- TestAlterTable(runtime, ++txId, "/MyRoot",
- R"(Name: "Table" DropColumns { Id: 3 })",
+ Cdbg << "AlterTable: drop without column name " << Endl;
+ TestAlterTable(runtime, ++txId, "/MyRoot",
+ R"(Name: "Table" DropColumns { Id: 3 })",
{NKikimrScheme::StatusSchemeError, NKikimrScheme::StatusInvalidParameter});
-
+
Cdbg << "AlterTable: add + drop different column" << Endl;
cols.insert("add_3");
dropCols.insert("add_1");
@@ -3874,7 +3874,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Columns { Name: "add_3" Type: "Uint32" }
DropColumns { Name: "add_1" }
)");
- env.TestWaitNotification(runtime, txId);
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::CheckColumns("Table", cols, dropCols, keyCol)});
@@ -4095,52 +4095,52 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(AlterTableConfig) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
- TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key2" Type: "Uint32"}
- Columns { Name: "key1" Type: "Uint64"}
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
+ TestCreateTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "key2" Type: "Uint32"}
+ Columns { Name: "key1" Type: "Uint64"}
Columns { Name: "value" Type: "Utf8"}
- KeyColumnNames: ["key1", "key2"]
- PartitionConfig {
- TxReadSizeLimit: 100
- ExecutorCacheSize: 42
+ KeyColumnNames: ["key1", "key2"]
+ PartitionConfig {
+ TxReadSizeLimit: 100
+ ExecutorCacheSize: 42
ChannelProfileId: 1
- }
- )");
- env.TestWaitNotification(runtime, txId);
-
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+
ui64 datashardTabletId = TTestTxConfig::FakeHiveTablets;
- UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 100);
- UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 42);
-
+ UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 100);
+ UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 42);
+
{ // ChannelProfileId
NKikimrSchemeOp::TTableDescription tableDescription = GetDatashardSchema(runtime, datashardTabletId, 2);
UNIT_ASSERT_VALUES_EQUAL(tableDescription.GetPartitionConfig().GetChannelProfileId(), 1);
}
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- TxReadSizeLimit: 2000
- }
- )");
- env.TestWaitNotification(runtime, txId);
- UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 2000);
- UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 42);
-
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "add_2" Type: "Uint64"}
- PartitionConfig {
- ExecutorCacheSize: 100500
- }
- )");
- env.TestWaitNotification(runtime, txId);
- UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 2000);
- UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 100500);
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
+ TxReadSizeLimit: 2000
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+ UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 2000);
+ UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 42);
+
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "add_2" Type: "Uint64"}
+ PartitionConfig {
+ ExecutorCacheSize: 100500
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+ UNIT_ASSERT_VALUES_EQUAL(GetTxReadSizeLimit(runtime, datashardTabletId), 2000);
+ UNIT_ASSERT_VALUES_EQUAL(GetExecutorCacheSize(runtime, datashardTabletId), 100500);
TestAlterTable(runtime, ++txId, "/MyRoot", R"(
Name: "Table"
@@ -4214,8 +4214,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table"),
{NLs::IsTable,
NLs::PathVersionEqual(11)});
- }
-
+ }
+
Y_UNIT_TEST(ConfigColumnFamily) {
using NKikimrSchemeOp::EColumnCodec;
using NKikimrSchemeOp::EColumnCache;
@@ -5131,80 +5131,80 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
NKikimr::NLocalDb::TCompactionPolicy realPolicy(describeRec.GetPathDescription().GetTable().GetPartitionConfig().GetCompactionPolicy());
UNIT_ASSERT(realPolicy == *expecedPolicy);
};
- }
-
+ }
+
Y_UNIT_TEST(AlterTableComapctionPolicy) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
const ui64 datashardTabletId = TTestTxConfig::FakeHiveTablets;
- NKikimr::NLocalDb::TCompactionPolicyPtr defaultUserTablePolicy = NKikimr::NLocalDb::CreateDefaultUserTablePolicy();
+ NKikimr::NLocalDb::TCompactionPolicyPtr defaultUserTablePolicy = NKikimr::NLocalDb::CreateDefaultUserTablePolicy();
NKikimr::NLocalDb::TCompactionPolicyPtr defaultSystemTablePolicy = NKikimr::NLocalDb::CreateDefaultTablePolicy();
-
- // Create table with 1-level compaction policy
- TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key2" Type: "Uint32"}
- Columns { Name: "key1" Type: "Uint64"}
+
+ // Create table with 1-level compaction policy
+ TestCreateTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "key2" Type: "Uint32"}
+ Columns { Name: "key1" Type: "Uint64"}
Columns { Name: "value" Type: "Utf8"}
- KeyColumnNames: ["key1", "key2"]
- PartitionConfig {
- NamedCompactionPolicy: "SystemTableDefault"
- }
- )");
- env.TestWaitNotification(runtime, txId);
-
- NKikimr::NLocalDb::TCompactionPolicyPtr policyInDatashard;
- policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
- UNIT_ASSERT(*policyInDatashard == *defaultSystemTablePolicy);
+ KeyColumnNames: ["key1", "key2"]
+ PartitionConfig {
+ NamedCompactionPolicy: "SystemTableDefault"
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ NKikimr::NLocalDb::TCompactionPolicyPtr policyInDatashard;
+ policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
+ UNIT_ASSERT(*policyInDatashard == *defaultSystemTablePolicy);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{CheckCompactionPolicy(policyInDatashard),
NLs::PathVersionEqual(3)});
-
- // Invalid compaction policy name - should fail
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "add_2" Type: "Uint64"}
- PartitionConfig {
- NamedCompactionPolicy: "Non-existing policy"
- }
+
+ // Invalid compaction policy name - should fail
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "add_2" Type: "Uint64"}
+ PartitionConfig {
+ NamedCompactionPolicy: "Non-existing policy"
+ }
)", {NKikimrScheme::StatusInvalidParameter});
- policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
- UNIT_ASSERT(*policyInDatashard == *defaultSystemTablePolicy);
+ policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
+ UNIT_ASSERT(*policyInDatashard == *defaultSystemTablePolicy);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{CheckCompactionPolicy(policyInDatashard),
NLs::PathVersionEqual(3)});
-
- // Valid policy with more levels - should succeed
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "add_2" Type: "Uint64"}
- PartitionConfig {
- NamedCompactionPolicy: "UserTableDefault"
- }
- )");
- env.TestWaitNotification(runtime, txId);
- policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
- UNIT_ASSERT(*policyInDatashard == *defaultUserTablePolicy);
+
+ // Valid policy with more levels - should succeed
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "add_2" Type: "Uint64"}
+ PartitionConfig {
+ NamedCompactionPolicy: "UserTableDefault"
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+ policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
+ UNIT_ASSERT(*policyInDatashard == *defaultUserTablePolicy);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{CheckCompactionPolicy(policyInDatashard),
NLs::PathVersionEqual(4)});
-
- // Try to switch back to fewer levels - should fail
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- NamedCompactionPolicy: "SystemTableDefault"
- }
+
+ // Try to switch back to fewer levels - should fail
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
+ NamedCompactionPolicy: "SystemTableDefault"
+ }
)", {NKikimrScheme::StatusInvalidParameter});
- policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
- UNIT_ASSERT(*policyInDatashard == *defaultUserTablePolicy);
+ policyInDatashard = GetCompactionPolicy(runtime, datashardTabletId, 1001);
+ UNIT_ASSERT(*policyInDatashard == *defaultUserTablePolicy);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{CheckCompactionPolicy(policyInDatashard),
NLs::PathVersionEqual(4)});
- }
-
+ }
+
Y_UNIT_TEST(AlterTableFollowers) { //+
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -5379,7 +5379,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
AllowFollowerPromotion: true
ExecutorCacheSize: 100500
}
- )");
+ )");
env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
@@ -5717,36 +5717,36 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
}
}
-
+
Y_UNIT_TEST(AlterTableSizeToSplit) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
- TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint64"}
- Columns { Name: "value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- )");
- env.TestWaitNotification(runtime, txId);
-
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
+ TestCreateTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint64"}
+ Columns { Name: "value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ )");
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{NLs::SizeToSplitEqual(0)});
-
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- PartitioningPolicy {
- SizeToSplit: 100500
- }
- }
- )");
- env.TestWaitNotification(runtime, txId);
-
+
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
+ PartitioningPolicy {
+ SizeToSplit: 100500
+ }
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
+
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{NLs::SizeToSplitEqual(100500)});
- }
+ }
Y_UNIT_TEST(AlterTableSplitSchema) { //+
TTestBasicRuntime runtime;
@@ -5813,70 +5813,70 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(AlterTableSettings) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 100;
-
- struct TCheckExecutorFastLogPolicy {
- bool ExpectedValue;
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
+ struct TCheckExecutorFastLogPolicy {
+ bool ExpectedValue;
void operator ()(const NKikimrScheme::TEvDescribeSchemeResult& describeRec) {
bool real = describeRec.GetPathDescription().GetTable().GetPartitionConfig().GetExecutorFastLogPolicy();
- UNIT_ASSERT_VALUES_EQUAL(ExpectedValue, real);
- }
- };
-
- struct TCheckEnableFilterByKey {
- bool ExpectedValue;
+ UNIT_ASSERT_VALUES_EQUAL(ExpectedValue, real);
+ }
+ };
+
+ struct TCheckEnableFilterByKey {
+ bool ExpectedValue;
void operator ()(const NKikimrScheme::TEvDescribeSchemeResult& describeRec) {
bool real = describeRec.GetPathDescription().GetTable().GetPartitionConfig().GetEnableFilterByKey();
- UNIT_ASSERT_VALUES_EQUAL(ExpectedValue, real);
- }
- };
-
- TestCreateTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- Columns { Name: "key" Type: "Uint64"}
- Columns { Name: "value" Type: "Utf8"}
- KeyColumnNames: ["key"]
- )");
- env.TestWaitNotification(runtime, txId);
+ UNIT_ASSERT_VALUES_EQUAL(ExpectedValue, real);
+ }
+ };
+
+ TestCreateTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ Columns { Name: "key" Type: "Uint64"}
+ Columns { Name: "value" Type: "Utf8"}
+ KeyColumnNames: ["key"]
+ )");
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{TCheckExecutorFastLogPolicy{true},
TCheckEnableFilterByKey{false}});
UNIT_ASSERT_VALUES_EQUAL_C(true, GetFastLogPolicy(runtime, TTestTxConfig::FakeHiveTablets), "FastLogPolicy must be enabled by default");
UNIT_ASSERT_VALUES_EQUAL_C(false, GetByKeyFilterEnabled(runtime, TTestTxConfig::FakeHiveTablets, 1001), "ByKeyFilter must be disabled by default");
-
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- ExecutorFastLogPolicy: false
- }
- )");
- env.TestWaitNotification(runtime, txId);
+
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
+ ExecutorFastLogPolicy: false
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{TCheckExecutorFastLogPolicy{false},
TCheckEnableFilterByKey{false}});
UNIT_ASSERT_VALUES_EQUAL(false, GetFastLogPolicy(runtime, TTestTxConfig::FakeHiveTablets));
UNIT_ASSERT_VALUES_EQUAL(false, GetByKeyFilterEnabled(runtime, TTestTxConfig::FakeHiveTablets, 1001));
-
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
- EnableFilterByKey: true
- }
- )");
- env.TestWaitNotification(runtime, txId);
+
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
+ EnableFilterByKey: true
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{TCheckExecutorFastLogPolicy{false},
TCheckEnableFilterByKey{true}});
UNIT_ASSERT_VALUES_EQUAL(false, GetFastLogPolicy(runtime, TTestTxConfig::FakeHiveTablets));
UNIT_ASSERT_VALUES_EQUAL(true, GetByKeyFilterEnabled(runtime, TTestTxConfig::FakeHiveTablets, 1001));
-
- TestAlterTable(runtime, ++txId, "/MyRoot", R"(
- Name: "Table"
- PartitionConfig {
+
+ TestAlterTable(runtime, ++txId, "/MyRoot", R"(
+ Name: "Table"
+ PartitionConfig {
EnableEraseCache: true
}
)");
@@ -5907,11 +5907,11 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestAlterTable(runtime, ++txId, "/MyRoot", R"(
Name: "Table"
PartitionConfig {
- ExecutorFastLogPolicy: true
- EnableFilterByKey: false
- }
- )");
- env.TestWaitNotification(runtime, txId);
+ ExecutorFastLogPolicy: true
+ EnableFilterByKey: false
+ }
+ )");
+ env.TestWaitNotification(runtime, txId);
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table", true),
{TCheckExecutorFastLogPolicy{true},
@@ -5919,11 +5919,11 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
UNIT_ASSERT_VALUES_EQUAL(true, GetFastLogPolicy(runtime, TTestTxConfig::FakeHiveTablets));
UNIT_ASSERT_VALUES_EQUAL(false, GetByKeyFilterEnabled(runtime, TTestTxConfig::FakeHiveTablets, 1001));
UNIT_ASSERT_VALUES_EQUAL(false, GetEraseCacheEnabled(runtime, TTestTxConfig::FakeHiveTablets, 1001));
- }
-
+ }
+
Y_UNIT_TEST(CreatePersQueueGroup) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime);
ui64 txId = 1000;
AsyncMkDir(runtime, ++txId, "/MyRoot", "DirA");
@@ -5952,7 +5952,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::CheckPartCount("PQGroup_2", 10, 10, 1, 10)});
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/PQGroup_3", true),
{NLs::CheckPartCount("PQGroup_3", 10, 3, 4, 10)});
-
+
TestCreatePQGroup(runtime, ++txId, "/MyRoot/DirA",
"Name: \"PQGroup_1\""
"TotalGroupCount: 100 "
@@ -6081,7 +6081,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
- env.TestWaitNotification(runtime, txId-1);
+ env.TestWaitNotification(runtime, txId-1);
TestDescribeResult(DescribePath(runtime, "/MyRoot/PQGroup", true),
{NLs::CheckPartCount("PQGroup", 400, 10, 40, 400),
NLs::PathsInsideDomain(1),
@@ -6810,23 +6810,23 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
Y_UNIT_TEST(Restart) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
AsyncMkDir(runtime, ++txId, "/MyRoot", "DirA");
AsyncMkDir(runtime, ++txId, "/MyRoot/DirA", "SubDirA");
AsyncCreateTable(runtime, ++txId, "/MyRoot/DirA",
"Name: \"Table1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]");
+ "KeyColumnNames: [\"RowId\"]");
AsyncMkDir(runtime, ++txId, "/MyRoot/DirA/SubDirA", "AAA");
AsyncMkDir(runtime, ++txId, "/MyRoot/DirA/SubDirA", "BBB");
AsyncMkDir(runtime, ++txId, "/MyRoot/DirA/SubDirA", "CCC");
-
+
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
-
+
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::PathExist});
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA"),
@@ -6839,7 +6839,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/SubDirA"),
{NLs::PathExist,
NLs::ChildrenCount(3)});
-
+
TestMkDir(runtime, ++txId, "/MyRoot/DirA/SubDirA", "DDD");
TestDescribeResult(DescribePath(runtime, "/MyRoot/DirA/SubDirA/DDD"),
@@ -6847,64 +6847,64 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
env.TestWaitNotification(runtime, xrange(txId-6, txId+1));
- }
-
+ }
+
Y_UNIT_TEST(ReadOnlyMode) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
AsyncMkDir(runtime, ++txId, "/MyRoot", "SubDirA");
AsyncCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"Table1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Name: \"Table1\""
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]");
- // Set ReadOnly
- SetSchemeshardReadOnlyMode(runtime, true);
+ "KeyColumnNames: [\"RowId\"]");
+ // Set ReadOnly
+ SetSchemeshardReadOnlyMode(runtime, true);
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
-
- // Verify that table creation successfully finished
- env.TestWaitNotification(runtime, txId);
-
- // Check that describe works
+
+ // Verify that table creation successfully finished
+ env.TestWaitNotification(runtime, txId);
+
+ // Check that describe works
TestDescribeResult(DescribePath(runtime, "/MyRoot/SubDirA"),
{NLs::Finished});
TestDescribeResult(DescribePath(runtime, "/MyRoot/Table1"),
{NLs::Finished,
NLs::IsTable});
-
- // Check that new modifications fail
+
+ // Check that new modifications fail
TestMkDir(runtime, ++txId, "/MyRoot", "SubDirBBBB", {NKikimrScheme::StatusReadOnly});
TestCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"Table1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
+ "Name: \"Table1\""
+ "Columns { Name: \"RowId\" Type: \"Uint64\"}"
"Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]",
+ "KeyColumnNames: [\"RowId\"]",
{NKikimrScheme::StatusReadOnly});
-
- // Disable ReadOnly
- SetSchemeshardReadOnlyMode(runtime, false);
- sender = runtime.AllocateEdgeActor();
+
+ // Disable ReadOnly
+ SetSchemeshardReadOnlyMode(runtime, false);
+ sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
-
- // Check that modifications now work again
+
+ // Check that modifications now work again
TestMkDir(runtime, ++txId, "/MyRoot", "SubDirBBBB");
- }
-
+ }
+
Y_UNIT_TEST(PathErrors) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
auto tableDesrc = [=] (const TString& name) {
return Sprintf(R"(Name: "%s"
Columns { Name: "RowId" Type: "Uint64" }
KeyColumnNames: ["RowId"]
)", name.c_str());
};
-
+
TestMkDir(runtime, ++txId, "/MyRoot", "DirA");
env.TestWaitNotification(runtime, txId);
@@ -6933,41 +6933,41 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestCreateTable(runtime, ++txId, "/MyRoot/DirA", tableDesrc("/WrongPath"), {NKikimrScheme::StatusSchemeError});
TestCreateTable(runtime, ++txId, "/MyRoot/DirA", tableDesrc("WrongPath/"), {NKikimrScheme::StatusSchemeError});
TestCreateTable(runtime, ++txId, "/MyRoot/DirA", tableDesrc("Table1/WrongPath"), {NKikimrScheme::StatusPathIsNotDirectory});
- }
-
+ }
+
Y_UNIT_TEST(SchemeErrors) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
- ui64 txId = 123;
-
+ TTestEnv env(runtime);
+ ui64 txId = 123;
+
TestMkDir(runtime, ++txId, "/MyRoot", "DirA");
env.TestWaitNotification(runtime, txId);
-
+
TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"RowId\" Type: \"BlaBlaType\"}"
- "KeyColumnNames: [\"RowId\"]",
+ "Name: \"Table2\""
+ "Columns { Name: \"RowId\" Type: \"BlaBlaType\"}"
+ "KeyColumnNames: [\"RowId\"]",
{NKikimrScheme::StatusSchemeError});
TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"RowId\" Type: \"Uint32\"}",
+ "Name: \"Table2\""
+ "Columns { Name: \"RowId\" Type: \"Uint32\"}",
{NKikimrScheme::StatusSchemeError});
TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"RowId\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"AAAA\"]",
+ "Name: \"Table2\""
+ "Columns { Name: \"RowId\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"AAAA\"]",
{NKikimrScheme::StatusSchemeError});
TestCreateTable(runtime, ++txId, "/MyRoot/DirA",
- "Name: \"Table2\""
- "Columns { Name: \"RowId\" Type: \"Uint32\"}"
- "KeyColumnNames: [\"RowId\", \"RowId\"]",
+ "Name: \"Table2\""
+ "Columns { Name: \"RowId\" Type: \"Uint32\"}"
+ "KeyColumnNames: [\"RowId\", \"RowId\"]",
{NKikimrScheme::StatusSchemeError});
- }
-
+ }
+
Y_UNIT_TEST(ManyDirs) { //+
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
-
+ TTestEnv env(runtime);
+
ui64 num = 500;
ui64 txId = 123;
TSet<ui64> ids;
@@ -6975,8 +6975,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
for (ui32 id = 0; id < num; ++id) {
AsyncMkDir(runtime, ++txId, "/MyRoot", Sprintf("Dir_%u", id));
ids.insert(txId);
- }
- env.TestWaitNotification(runtime, ids);
+ }
+ env.TestWaitNotification(runtime, ids);
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::PathsInsideDomain(num),
@@ -6992,8 +6992,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::PathsInsideDomain(0),
NLs::ChildrenCount(0)});
- }
-
+ }
+
Y_UNIT_TEST(NestedDirs) { //+
TTestBasicRuntime runtime;
TTestEnv env(runtime);
@@ -7018,50 +7018,50 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NLs::PathsInsideDomain(limits.MaxDepth - 1)});
}
- void VerifyEqualCells(const TCell& a, const TCell& b) {
- UNIT_ASSERT_VALUES_EQUAL_C(a.IsNull(), b.IsNull(), "NULL/not-NULL mismatch");
- UNIT_ASSERT_VALUES_EQUAL_C(a.Size(), b.Size(), "size mismatch");
- if (!a.IsNull()) {
+ void VerifyEqualCells(const TCell& a, const TCell& b) {
+ UNIT_ASSERT_VALUES_EQUAL_C(a.IsNull(), b.IsNull(), "NULL/not-NULL mismatch");
+ UNIT_ASSERT_VALUES_EQUAL_C(a.Size(), b.Size(), "size mismatch");
+ if (!a.IsNull()) {
TString aVal(a.Data(), a.Size());
TString bVal(b.Data(), b.Size());
// Cdbg << aVal << Endl;
- UNIT_ASSERT_VALUES_EQUAL_C(aVal, bVal, "data mismatch");
- }
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(aVal, bVal, "data mismatch");
+ }
+ }
+
void TestSerializedCellVec(TVector<TCell>& cells) {
TString serialized = TSerializedCellVec::Serialize(TArrayRef<const TCell>(cells));
UNIT_ASSERT_VALUES_EQUAL_C(cells.empty(), serialized.empty(), "Empty/non-empty mismatch");
- TSerializedCellVec deserialized(serialized);
+ TSerializedCellVec deserialized(serialized);
UNIT_ASSERT_VALUES_EQUAL_C(cells.size(), deserialized.GetCells().size(), "Sizes don't match");
- for (size_t i = 0; i < cells.size(); ++i) {
- VerifyEqualCells(cells[i], deserialized.GetCells()[i]);
- }
- }
-
+ for (size_t i = 0; i < cells.size(); ++i) {
+ VerifyEqualCells(cells[i], deserialized.GetCells()[i]);
+ }
+ }
+
Y_UNIT_TEST(SerializedCellVec) { //+
TVector<TCell> cells;
- TestSerializedCellVec(cells);
-
- for (size_t i = 0; i < 100; ++i) {
- cells.push_back(TCell());
- TestSerializedCellVec(cells);
- }
-
- cells.clear();
- char a[] = "1234728729hadjfhjvnaldjsagjkhsajklghslfkajshfgajklh";
- for (size_t i = 0; i < sizeof(a); ++i) {
- cells.push_back(TCell(a, i));
- TestSerializedCellVec(cells);
- }
-
- cells.clear();
- for (size_t i = 0; i < sizeof(a); ++i) {
- cells.push_back(TCell(a, i));
- cells.push_back(TCell());
- TestSerializedCellVec(cells);
- }
- }
+ TestSerializedCellVec(cells);
+
+ for (size_t i = 0; i < 100; ++i) {
+ cells.push_back(TCell());
+ TestSerializedCellVec(cells);
+ }
+
+ cells.clear();
+ char a[] = "1234728729hadjfhjvnaldjsagjkhsajklghslfkajshfgajklh";
+ for (size_t i = 0; i < sizeof(a); ++i) {
+ cells.push_back(TCell(a, i));
+ TestSerializedCellVec(cells);
+ }
+
+ cells.clear();
+ for (size_t i = 0; i < sizeof(a); ++i) {
+ cells.push_back(TCell(a, i));
+ cells.push_back(TCell());
+ TestSerializedCellVec(cells);
+ }
+ }
Y_UNIT_TEST(CreateFinishedInDescription) { //+
TTestBasicRuntime runtime;
@@ -9378,209 +9378,209 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
{NKikimrScheme::StatusInvalidParameter},
AlterUserAttrs({{"__document_api_version", "1"}}));
}
-
-
- class TSchemaHelpler {
- private:
- NScheme::TTypeRegistry TypeRegistry;
- const TVector<NKikimr::NScheme::TTypeId> KeyColumnTypes;
-
- public:
- explicit TSchemaHelpler(const TArrayRef<NKikimr::NScheme::TTypeId>& keyColumnTypes)
- : KeyColumnTypes(keyColumnTypes.begin(), keyColumnTypes.end())
- {}
-
- TString FindSplitKey(const TVector<TVector<TString>>& histogramKeys) const {
- NKikimrTableStats::THistogram histogram = FillHistogram(histogramKeys);
- TSerializedCellVec splitKey = ChooseSplitKeyByHistogram(histogram, KeyColumnTypes);
- return PrintKey(splitKey);
- }
-
- private:
- NKikimr::TSerializedCellVec MakeCells(const TVector<TString>& tuple) const {
- UNIT_ASSERT(tuple.size() <= KeyColumnTypes.size());
- TSmallVec<NKikimr::TCell> cells;
-
- for (size_t i = 0; i < tuple.size(); ++i) {
- if (tuple[i] == "NULL") {
- cells.push_back(NKikimr::TCell());
- } else {
- switch (KeyColumnTypes[i]) {
-#define ADD_CELL_FROM_STRING(ydbType, cppType) \
- case NKikimr::NScheme::NTypeIds::ydbType: { \
- cppType val = FromString<cppType>(tuple[i]); \
- cells.push_back(NKikimr::TCell((const char*)&val, sizeof(val))); \
- break; \
- }
-
- ADD_CELL_FROM_STRING(Bool, bool);
-
- ADD_CELL_FROM_STRING(Uint8, ui8);
- ADD_CELL_FROM_STRING(Int8, i8);
- ADD_CELL_FROM_STRING(Uint16, ui16);
- ADD_CELL_FROM_STRING(Int16, i16);
- ADD_CELL_FROM_STRING(Uint32, ui32);
- ADD_CELL_FROM_STRING(Int32, i32);
- ADD_CELL_FROM_STRING(Uint64, ui64);
- ADD_CELL_FROM_STRING(Int64, i64);
-
- ADD_CELL_FROM_STRING(Double, double);
- ADD_CELL_FROM_STRING(Float, float);
-
- case NKikimr::NScheme::NTypeIds::String:
- case NKikimr::NScheme::NTypeIds::Utf8: {
- cells.push_back(NKikimr::TCell(tuple[i].data(), tuple[i].size()));
- break;
- }
-#undef ADD_CELL_FROM_STRING
- default:
- UNIT_ASSERT_C(false, "Unexpected type");
- }
- }
- }
-
- return NKikimr::TSerializedCellVec(NKikimr::TSerializedCellVec::Serialize(cells));
- }
-
- NKikimrTableStats::THistogram FillHistogram(const TVector<TVector<TString>>& keys) const {
- NKikimrTableStats::THistogram histogram;
- for (const auto& k : keys) {
- TSerializedCellVec sk(MakeCells(k));
- histogram.AddBuckets()->SetKey(sk.GetBuffer());
- }
- return histogram;
- }
-
- TString PrintKey(const TSerializedCellVec& key) const {
- return PrintKey(key.GetCells());
- }
-
- TString PrintKey(const TConstArrayRef<TCell>& cells) const {
- return DbgPrintTuple(TDbTupleRef(KeyColumnTypes.data(), cells.data(), cells.size()), TypeRegistry);
- }
- };
-
- Y_UNIT_TEST(SplitKey) {
- TSmallVec<NScheme::TTypeId> keyColumnTypes = {
- NScheme::NTypeIds::Uint64,
- NScheme::NTypeIds::Utf8,
- NScheme::NTypeIds::Uint32
- };
-
- TSchemaHelpler schemaHelper(keyColumnTypes);
-
- {
- TString splitKey = schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "3", "bbbbbbbb", "42" },
- { "5", "cccccccccccccccccccccccc", "42" }
- });
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 3, Utf8 : NULL, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "1", "bbbbbbbb", "42" },
- { "1", "cccccccccccccccccccccccc", "42" }
- });
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 1, Utf8 : bbbbbbbb, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "1", "bb", "42" },
- { "1", "cc", "42" },
- { "2", "cd", "42" },
- { "2", "d", "42" },
- { "2", "e", "42" },
- { "2", "f", "42" },
- { "2", "g", "42" }
- });
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "1", "bb", "42" },
- { "1", "cc", "42" },
- { "1", "cd", "42" },
- { "1", "d", "42" },
- { "2", "e", "42" },
- { "2", "f", "42" },
- { "2", "g", "42" }
- });
- //TODO: FIX this case
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "1", "bb", "42" },
- { "1", "cc", "42" },
- { "1", "cd", "42" },
- { "1", "d", "42" },
- { "3", "e", "42" },
- { "3", "f", "42" },
- { "3", "g", "42" }
- });
- //TODO: FIX this case
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "1", "bb", "42" },
- { "1", "cc", "42" },
- { "1", "cd", "42" },
- { "2", "d", "42" },
- { "3", "e", "42" },
- { "3", "f", "42" },
- { "3", "g", "42" }
- });
- //TODO: FIX this case
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
- { "2", "a", "42" },
- { "2", "b", "42" },
- { "2", "c", "42" },
- { "2", "d", "42" },
- { "2", "e", "42" },
- { "2", "f", "42" },
- { "3", "cccccccccccccccccccccccc", "42" }
- });
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : d, Uint32 : NULL)");
- }
-
- {
- TString splitKey =
- schemaHelper.FindSplitKey({
- { "2", "aaa", "1" },
- { "2", "aaa", "2" },
- { "2", "aaa", "3" },
- { "2", "aaa", "4" },
- { "2", "aaa", "5" },
- { "2", "bbb", "1" },
- { "2", "bbb", "2" },
- { "3", "ccc", "42" }
- });
- UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : bbb, Uint32 : NULL)");
- }
- }
+
+
+ class TSchemaHelpler {
+ private:
+ NScheme::TTypeRegistry TypeRegistry;
+ const TVector<NKikimr::NScheme::TTypeId> KeyColumnTypes;
+
+ public:
+ explicit TSchemaHelpler(const TArrayRef<NKikimr::NScheme::TTypeId>& keyColumnTypes)
+ : KeyColumnTypes(keyColumnTypes.begin(), keyColumnTypes.end())
+ {}
+
+ TString FindSplitKey(const TVector<TVector<TString>>& histogramKeys) const {
+ NKikimrTableStats::THistogram histogram = FillHistogram(histogramKeys);
+ TSerializedCellVec splitKey = ChooseSplitKeyByHistogram(histogram, KeyColumnTypes);
+ return PrintKey(splitKey);
+ }
+
+ private:
+ NKikimr::TSerializedCellVec MakeCells(const TVector<TString>& tuple) const {
+ UNIT_ASSERT(tuple.size() <= KeyColumnTypes.size());
+ TSmallVec<NKikimr::TCell> cells;
+
+ for (size_t i = 0; i < tuple.size(); ++i) {
+ if (tuple[i] == "NULL") {
+ cells.push_back(NKikimr::TCell());
+ } else {
+ switch (KeyColumnTypes[i]) {
+#define ADD_CELL_FROM_STRING(ydbType, cppType) \
+ case NKikimr::NScheme::NTypeIds::ydbType: { \
+ cppType val = FromString<cppType>(tuple[i]); \
+ cells.push_back(NKikimr::TCell((const char*)&val, sizeof(val))); \
+ break; \
+ }
+
+ ADD_CELL_FROM_STRING(Bool, bool);
+
+ ADD_CELL_FROM_STRING(Uint8, ui8);
+ ADD_CELL_FROM_STRING(Int8, i8);
+ ADD_CELL_FROM_STRING(Uint16, ui16);
+ ADD_CELL_FROM_STRING(Int16, i16);
+ ADD_CELL_FROM_STRING(Uint32, ui32);
+ ADD_CELL_FROM_STRING(Int32, i32);
+ ADD_CELL_FROM_STRING(Uint64, ui64);
+ ADD_CELL_FROM_STRING(Int64, i64);
+
+ ADD_CELL_FROM_STRING(Double, double);
+ ADD_CELL_FROM_STRING(Float, float);
+
+ case NKikimr::NScheme::NTypeIds::String:
+ case NKikimr::NScheme::NTypeIds::Utf8: {
+ cells.push_back(NKikimr::TCell(tuple[i].data(), tuple[i].size()));
+ break;
+ }
+#undef ADD_CELL_FROM_STRING
+ default:
+ UNIT_ASSERT_C(false, "Unexpected type");
+ }
+ }
+ }
+
+ return NKikimr::TSerializedCellVec(NKikimr::TSerializedCellVec::Serialize(cells));
+ }
+
+ NKikimrTableStats::THistogram FillHistogram(const TVector<TVector<TString>>& keys) const {
+ NKikimrTableStats::THistogram histogram;
+ for (const auto& k : keys) {
+ TSerializedCellVec sk(MakeCells(k));
+ histogram.AddBuckets()->SetKey(sk.GetBuffer());
+ }
+ return histogram;
+ }
+
+ TString PrintKey(const TSerializedCellVec& key) const {
+ return PrintKey(key.GetCells());
+ }
+
+ TString PrintKey(const TConstArrayRef<TCell>& cells) const {
+ return DbgPrintTuple(TDbTupleRef(KeyColumnTypes.data(), cells.data(), cells.size()), TypeRegistry);
+ }
+ };
+
+ Y_UNIT_TEST(SplitKey) {
+ TSmallVec<NScheme::TTypeId> keyColumnTypes = {
+ NScheme::NTypeIds::Uint64,
+ NScheme::NTypeIds::Utf8,
+ NScheme::NTypeIds::Uint32
+ };
+
+ TSchemaHelpler schemaHelper(keyColumnTypes);
+
+ {
+ TString splitKey = schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "3", "bbbbbbbb", "42" },
+ { "5", "cccccccccccccccccccccccc", "42" }
+ });
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 3, Utf8 : NULL, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "1", "bbbbbbbb", "42" },
+ { "1", "cccccccccccccccccccccccc", "42" }
+ });
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 1, Utf8 : bbbbbbbb, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "1", "bb", "42" },
+ { "1", "cc", "42" },
+ { "2", "cd", "42" },
+ { "2", "d", "42" },
+ { "2", "e", "42" },
+ { "2", "f", "42" },
+ { "2", "g", "42" }
+ });
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "1", "bb", "42" },
+ { "1", "cc", "42" },
+ { "1", "cd", "42" },
+ { "1", "d", "42" },
+ { "2", "e", "42" },
+ { "2", "f", "42" },
+ { "2", "g", "42" }
+ });
+ //TODO: FIX this case
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "1", "bb", "42" },
+ { "1", "cc", "42" },
+ { "1", "cd", "42" },
+ { "1", "d", "42" },
+ { "3", "e", "42" },
+ { "3", "f", "42" },
+ { "3", "g", "42" }
+ });
+ //TODO: FIX this case
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "1", "bb", "42" },
+ { "1", "cc", "42" },
+ { "1", "cd", "42" },
+ { "2", "d", "42" },
+ { "3", "e", "42" },
+ { "3", "f", "42" },
+ { "3", "g", "42" }
+ });
+ //TODO: FIX this case
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : NULL, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "1", "aaaaaaaaaaaaaaaaaaaaaa", "42" },
+ { "2", "a", "42" },
+ { "2", "b", "42" },
+ { "2", "c", "42" },
+ { "2", "d", "42" },
+ { "2", "e", "42" },
+ { "2", "f", "42" },
+ { "3", "cccccccccccccccccccccccc", "42" }
+ });
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : d, Uint32 : NULL)");
+ }
+
+ {
+ TString splitKey =
+ schemaHelper.FindSplitKey({
+ { "2", "aaa", "1" },
+ { "2", "aaa", "2" },
+ { "2", "aaa", "3" },
+ { "2", "aaa", "4" },
+ { "2", "aaa", "5" },
+ { "2", "bbb", "1" },
+ { "2", "bbb", "2" },
+ { "3", "ccc", "42" }
+ });
+ UNIT_ASSERT_VALUES_EQUAL(splitKey, "(Uint64 : 2, Utf8 : bbb, Uint32 : NULL)");
+ }
+ }
Y_UNIT_TEST(ListNotCreatedDirCase) {
TTestBasicRuntime runtime;
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
index 62189c6bb76..6f999b268bb 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
@@ -10,7 +10,7 @@
#include <ydb/library/yql/public/issue/yql_issue_message.h>
#include <ydb/core/util/pb.h>
-
+
#include <library/cpp/testing/unittest/registar.h>
#include <util/generic/maybe.h>
@@ -107,7 +107,7 @@ namespace NSchemeShardUT_Private {
}
void CheckExpected(const TVector<TEvSchemeShard::EStatus>& expected, TEvSchemeShard::EStatus result, const TString& reason)
- {
+ {
for (TEvSchemeShard::EStatus exp : expected) {
if (result == exp) {
return;
@@ -117,7 +117,7 @@ namespace NSchemeShardUT_Private {
UNIT_FAIL("Unexpected status: " << NKikimrScheme::EStatus_Name(result) << ": " << reason);
}
- void SkipModificationReply(TTestActorRuntime& runtime, ui32 num) {
+ void SkipModificationReply(TTestActorRuntime& runtime, ui32 num) {
TAutoPtr<IEventHandle> handle;
for (ui32 i = 0; i < num; ++i)
runtime.GrabEdgeEvent<TEvSchemeShard::TEvModifySchemeTransactionResult>(handle);
@@ -125,19 +125,19 @@ namespace NSchemeShardUT_Private {
void TestModificationResult(TTestActorRuntime& runtime, ui64 txId,
TEvSchemeShard::EStatus expectedResult) {
- TestModificationResults(runtime, txId, {expectedResult});
+ TestModificationResults(runtime, txId, {expectedResult});
}
- ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId,
+ ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId,
const TVector<TEvSchemeShard::EStatus>& expectedResults) {
TAutoPtr<IEventHandle> handle;
TEvSchemeShard::TEvModifySchemeTransactionResult* event;
- do {
+ do {
Cerr << "TestModificationResults wait txId: " << txId << "\n";
event = runtime.GrabEdgeEvent<TEvSchemeShard::TEvModifySchemeTransactionResult>(handle);
- UNIT_ASSERT(event);
+ UNIT_ASSERT(event);
Cerr << "TestModificationResult got TxId: " << event->Record.GetTxId() << ", wait until txId: " << txId << "\n";
- } while(event->Record.GetTxId() < txId);
+ } while(event->Record.GetTxId() < txId);
UNIT_ASSERT_VALUES_EQUAL(event->Record.GetTxId(), txId);
CheckExpected(expectedResults, event->Record.GetStatus(), event->Record.GetReason());
@@ -199,13 +199,13 @@ namespace NSchemeShardUT_Private {
auto evLs = new TEvSchemeShard::TEvDescribeScheme(path);
evLs->Record.MutableOptions()->CopyFrom(opts);
ForwardToTablet(runtime, schemeShard, sender, evLs);
- TAutoPtr<IEventHandle> handle;
+ TAutoPtr<IEventHandle> handle;
auto event = runtime.GrabEdgeEvent<TEvSchemeShard::TEvDescribeSchemeResult>(handle);
- UNIT_ASSERT(event);
+ UNIT_ASSERT(event);
return event->GetRecord();
- }
-
+ }
+
NKikimrScheme::TEvDescribeSchemeResult DescribePathId(TTestActorRuntime& runtime, ui64 schemeShard, ui64 pathId, const NKikimrSchemeOp::TDescribeOptions& opts = { }) {
TActorId sender = runtime.AllocateEdgeActor();
auto evLs = new TEvSchemeShard::TEvDescribeScheme(schemeShard, pathId);
@@ -301,13 +301,13 @@ namespace NSchemeShardUT_Private {
transaction->SetWorkingDir(dstPath);
auto op = transaction->MutableCreateTable();
- op->SetName(dstName);
- op->SetCopyFromTable(srcFullName);
+ op->SetName(dstName);
+ op->SetCopyFromTable(srcFullName);
SetApplyIf(*transaction, applyIf);
return evTx;
- }
-
+ }
+
void AsyncCopyTable(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId,
const TString& dstPath, const TString& dstName, const TString& srcFullName) {
TActorId sender = runtime.AllocateEdgeActor();
@@ -323,9 +323,9 @@ namespace NSchemeShardUT_Private {
const TString& dstPath, const TString& dstName, const TString& srcFullName,
TEvSchemeShard::EStatus expectedResult) {
AsyncCopyTable(runtime, schemeShardId, txId, dstPath, dstName, srcFullName);
- TestModificationResult(runtime, txId, expectedResult);
- }
-
+ TestModificationResult(runtime, txId, expectedResult);
+ }
+
void TestCopyTable(TTestActorRuntime& runtime, ui64 txId,
const TString& dstPath, const TString& dstName, const TString& srcFullName,
TEvSchemeShard::EStatus expectedResult) {
@@ -333,9 +333,9 @@ namespace NSchemeShardUT_Private {
}
TString TestDescribe(TTestActorRuntime& runtime, const TString& path) {
- return TestLs(runtime, path, true);
- }
-
+ return TestLs(runtime, path, true);
+ }
+
TEvSchemeShard::TEvModifySchemeTransaction* MoveTableRequest(ui64 txId, const TString& srcPath, const TString& dstPath, ui64 schemeShard, const TApplyIf& applyIf) {
THolder<TEvSchemeShard::TEvModifySchemeTransaction> evTx = MakeHolder<TEvSchemeShard::TEvModifySchemeTransaction>(txId, schemeShard);
auto transaction = evTx->Record.AddTransaction();
@@ -1093,29 +1093,29 @@ namespace NSchemeShardUT_Private {
NKikimrProto::EReplyStatus LocalMiniKQL(TTestActorRuntime& runtime, ui64 tabletId, const TString& query, NKikimrMiniKQL::TResult& result, TString& err) {
TActorId sender = runtime.AllocateEdgeActor();
-
- auto evTx = new TEvTablet::TEvLocalMKQL;
- auto *mkql = evTx->Record.MutableProgram();
- mkql->MutableProgram()->SetText(query);
-
+
+ auto evTx = new TEvTablet::TEvLocalMKQL;
+ auto *mkql = evTx->Record.MutableProgram();
+ mkql->MutableProgram()->SetText(query);
+
ForwardToTablet(runtime, tabletId, sender, evTx);
-
- TAutoPtr<IEventHandle> handle;
- auto event = runtime.GrabEdgeEvent<TEvTablet::TEvLocalMKQLResponse>(handle);
- UNIT_ASSERT(event);
-
- NYql::TIssues programErrors;
- NYql::TIssues paramsErrors;
- NYql::IssuesFromMessage(event->Record.GetCompileResults().GetProgramCompileErrors(), programErrors);
- NYql::IssuesFromMessage(event->Record.GetCompileResults().GetParamsCompileErrors(), paramsErrors);
- err = programErrors.ToString() + paramsErrors.ToString() + event->Record.GetMiniKQLErrors();
-
- result.CopyFrom(event->Record.GetExecutionEngineEvaluatedResponse());
-
+
+ TAutoPtr<IEventHandle> handle;
+ auto event = runtime.GrabEdgeEvent<TEvTablet::TEvLocalMKQLResponse>(handle);
+ UNIT_ASSERT(event);
+
+ NYql::TIssues programErrors;
+ NYql::TIssues paramsErrors;
+ NYql::IssuesFromMessage(event->Record.GetCompileResults().GetProgramCompileErrors(), programErrors);
+ NYql::IssuesFromMessage(event->Record.GetCompileResults().GetParamsCompileErrors(), paramsErrors);
+ err = programErrors.ToString() + paramsErrors.ToString() + event->Record.GetMiniKQLErrors();
+
+ result.CopyFrom(event->Record.GetExecutionEngineEvaluatedResponse());
+
// emulate enum behavior from proto3
return static_cast<NKikimrProto::EReplyStatus>(event->Record.GetStatus());
- }
-
+ }
+
NKikimrMiniKQL::TResult LocalMiniKQL(TTestActorRuntime& runtime, ui64 tabletId, const TString& query) {
NKikimrMiniKQL::TResult result;
TString error;
@@ -1148,43 +1148,43 @@ namespace NSchemeShardUT_Private {
NKikimrProto::EReplyStatus LocalSchemeTx(TTestActorRuntime& runtime, ui64 tabletId, const TString& schemeChangesStr, bool dryRun,
NTabletFlatScheme::TSchemeChanges& scheme, TString& err) {
TActorId sender = runtime.AllocateEdgeActor();
-
- auto evTx = new TEvTablet::TEvLocalSchemeTx;
- evTx->Record.SetDryRun(dryRun);
- auto schemeChanges = evTx->Record.MutableSchemeChanges();
+
+ auto evTx = new TEvTablet::TEvLocalSchemeTx;
+ evTx->Record.SetDryRun(dryRun);
+ auto schemeChanges = evTx->Record.MutableSchemeChanges();
bool parseResult = ::google::protobuf::TextFormat::ParseFromString(schemeChangesStr, schemeChanges);
UNIT_ASSERT_C(parseResult, "protobuf parsing failed");
-
+
ForwardToTablet(runtime, tabletId, sender, evTx);
-
- TAutoPtr<IEventHandle> handle;
- auto event = runtime.GrabEdgeEvent<TEvTablet::TEvLocalSchemeTxResponse>(handle);
- UNIT_ASSERT(event);
-
- err = event->Record.GetErrorReason();
- scheme.CopyFrom(event->Record.GetFullScheme());
-
+
+ TAutoPtr<IEventHandle> handle;
+ auto event = runtime.GrabEdgeEvent<TEvTablet::TEvLocalSchemeTxResponse>(handle);
+ UNIT_ASSERT(event);
+
+ err = event->Record.GetErrorReason();
+ scheme.CopyFrom(event->Record.GetFullScheme());
+
// emulate enum behavior from proto3
return static_cast<NKikimrProto::EReplyStatus>(event->Record.GetStatus());
- }
-
- ui64 GetDatashardState(TTestActorRuntime& runtime, ui64 tabletId) {
- NKikimrMiniKQL::TResult result;
- TString err;
+ }
+
+ ui64 GetDatashardState(TTestActorRuntime& runtime, ui64 tabletId) {
+ NKikimrMiniKQL::TResult result;
+ TString err;
NKikimrProto::EReplyStatus status = LocalMiniKQL(runtime, tabletId, R"(
- (
- (let row '('('Id (Uint64 '2)))) # Sys_State
- (let select '('Uint64))
- (let ret(AsList(SetResult 'State (SelectRow 'Sys row select))))
- (return ret)
- )
- )", result, err);
- // Cdbg << result << "\n";
+ (
+ (let row '('('Id (Uint64 '2)))) # Sys_State
+ (let select '('Uint64))
+ (let ret(AsList(SetResult 'State (SelectRow 'Sys row select))))
+ (return ret)
+ )
+ )", result, err);
+ // Cdbg << result << "\n";
UNIT_ASSERT_VALUES_EQUAL(status, NKikimrProto::EReplyStatus::OK);
- // Value { Struct { Optional { Optional { Struct { Optional { Uint64: 100 } } } } } } }
- return result.GetValue().GetStruct(0).GetOptional().GetOptional().GetStruct(0).GetOptional().GetUint64();
- }
-
+ // Value { Struct { Optional { Optional { Struct { Optional { Uint64: 100 } } } } } } }
+ return result.GetValue().GetStruct(0).GetOptional().GetOptional().GetStruct(0).GetOptional().GetUint64();
+ }
+
NLs::TCheckFunc ShardsIsReady(TTestActorRuntime& runtime) {
return [&] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
TVector<ui64> datashards;
@@ -1195,14 +1195,14 @@ namespace NSchemeShardUT_Private {
};
}
- TString SetAllowLogBatching(TTestActorRuntime& runtime, ui64 tabletId, bool v) {
- NTabletFlatScheme::TSchemeChanges scheme;
- TString errStr;
- LocalSchemeTx(runtime, tabletId,
- Sprintf("Delta { DeltaType: UpdateExecutorInfo ExecutorAllowLogBatching: %s }", v ? "true" : "false"),
- false, scheme, errStr);
- return errStr;
- }
+ TString SetAllowLogBatching(TTestActorRuntime& runtime, ui64 tabletId, bool v) {
+ NTabletFlatScheme::TSchemeChanges scheme;
+ TString errStr;
+ LocalSchemeTx(runtime, tabletId,
+ Sprintf("Delta { DeltaType: UpdateExecutorInfo ExecutorAllowLogBatching: %s }", v ? "true" : "false"),
+ false, scheme, errStr);
+ return errStr;
+ }
ui64 GetDatashardSysTableValue(TTestActorRuntime& runtime, ui64 tabletId, ui64 sysKey) {
NKikimrMiniKQL::TResult result;
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.h b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
index 94c9e435610..6b12c25534a 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
@@ -2,7 +2,7 @@
#include "ls_checks.h"
#include "test_env.h"
-
+
#include <library/cpp/testing/unittest/registar.h>
#include <ydb/core/engine/mkql_engine_flat.h>
@@ -17,7 +17,7 @@
#include <ydb/library/yql/minikql/mkql_node_serialization.h>
#include <util/stream/null.h>
-
+
#include <functional>
#undef Cdbg
@@ -29,7 +29,7 @@
namespace NSchemeShardUT_Private {
using namespace NKikimr;
-
+
using TEvTx = TEvSchemeShard::TEvModifySchemeTransaction;
////////// tablet
@@ -79,7 +79,7 @@ namespace NSchemeShardUT_Private {
const TString& parentPath, const TString& scheme
#define UT_PARAMS_BY_PATH_ID \
ui64 pathId
-
+
#define DEFINE_HELPERS(name, params, ...) \
TEvTx* name##Request(ui64 schemeShardId, ui64 txId, params, __VA_ARGS__); \
TEvTx* name##Request(ui64 txId, params, __VA_ARGS__); \
@@ -182,7 +182,7 @@ namespace NSchemeShardUT_Private {
GENERIC_HELPERS(AlterKesus);
GENERIC_HELPERS(DropKesus);
DROP_BY_PATH_ID_HELPERS(DropKesus);
-
+
// filestore
GENERIC_HELPERS(CreateFileStore);
GENERIC_HELPERS(AlterFileStore);
@@ -330,13 +330,13 @@ namespace NSchemeShardUT_Private {
Ydb::StatusIds::StatusCode expectedStatus = Ydb::StatusIds::SUCCESS);
////////// datashard
- ui64 GetDatashardState(TTestActorRuntime& runtime, ui64 tabletId);
- TString SetAllowLogBatching(TTestActorRuntime& runtime, ui64 tabletId, bool v);
-
+ ui64 GetDatashardState(TTestActorRuntime& runtime, ui64 tabletId);
+ TString SetAllowLogBatching(TTestActorRuntime& runtime, ui64 tabletId, bool v);
+
ui64 GetDatashardSysTableValue(TTestActorRuntime& runtime, ui64 tabletId, ui64 sysKey);
ui64 GetTxReadSizeLimit(TTestActorRuntime& runtime, ui64 tabletId);
ui64 GetStatDisabled(TTestActorRuntime& runtime, ui64 tabletId);
-
+
ui64 GetExecutorCacheSize(TTestActorRuntime& runtime, ui64 tabletId);
bool GetFastLogPolicy(TTestActorRuntime& runtime, ui64 tabletId);
bool GetByKeyFilterEnabled(TTestActorRuntime& runtime, ui64 tabletId, ui32 table);
@@ -349,7 +349,7 @@ namespace NSchemeShardUT_Private {
void SetSchemeshardDatabaseQuotas(TTestActorRuntime& runtime, Ydb::Cms::DatabaseQuotas databaseQuotas, ui64 dimainId, ui64 schemeShard);
NKikimrSchemeOp::TTableDescription GetDatashardSchema(TTestActorRuntime& runtime, ui64 tabletId, ui64 tid);
-
+
NLs::TCheckFunc ShardsIsReady(TTestActorRuntime& runtime);
template <typename TCreateFunc>
diff --git a/ydb/core/tx/schemeshard/ut_olap.cpp b/ydb/core/tx/schemeshard/ut_olap.cpp
index ca32623b8f6..b3aea0b90f9 100644
--- a/ydb/core/tx/schemeshard/ut_olap.cpp
+++ b/ydb/core/tx/schemeshard/ut_olap.cpp
@@ -73,25 +73,25 @@ Y_UNIT_TEST_SUITE(TOlap) {
TestLs(runtime, "/MyRoot/DirA/DirB/OlapStore", false, NLs::PathExist);
}
- Y_UNIT_TEST(CreateTable) {
- TTestBasicRuntime runtime;
+ Y_UNIT_TEST(CreateTable) {
+ TTestBasicRuntime runtime;
TTestEnv env(runtime, TTestEnvOptions().EnableOlapSchemaOperations(true));
- ui64 txId = 100;
-
- TString olapSchema = R"(
- Name: "OlapStore"
- ColumnShardCount: 1
- SchemaPresets {
- Name: "default"
- Schema {
- Columns { Name: "timestamp" Type: "Timestamp" }
- Columns { Name: "data" Type: "Utf8" }
- KeyColumnNames: "timestamp"
+ ui64 txId = 100;
+
+ TString olapSchema = R"(
+ Name: "OlapStore"
+ ColumnShardCount: 1
+ SchemaPresets {
+ Name: "default"
+ Schema {
+ Columns { Name: "timestamp" Type: "Timestamp" }
+ Columns { Name: "data" Type: "Utf8" }
+ KeyColumnNames: "timestamp"
Engine: COLUMN_ENGINE_REPLACING_TIMESERIES
- }
- }
- )";
-
+ }
+ }
+ )";
+
TestCreateOlapStore(runtime, ++txId, "/MyRoot", olapSchema);
env.TestWaitNotification(runtime, txId);
diff --git a/ydb/core/tx/schemeshard/ut_olap_reboots.cpp b/ydb/core/tx/schemeshard/ut_olap_reboots.cpp
index faed0831099..c7369d4a4e4 100644
--- a/ydb/core/tx/schemeshard/ut_olap_reboots.cpp
+++ b/ydb/core/tx/schemeshard/ut_olap_reboots.cpp
@@ -384,7 +384,7 @@ Y_UNIT_TEST_SUITE(TOlapReboots) {
TestLs(runtime, "/MyRoot/OlapStore/OlapTable", false, NLs::All(
NLs::HasOlapTableTtlSettingsVersion(2),
- NLs::HasOlapTableTtlSettingsEnabled("timestamp", TDuration::Seconds(300))));
+ NLs::HasOlapTableTtlSettingsEnabled("timestamp", TDuration::Seconds(300))));
}
});
}
diff --git a/ydb/core/tx/schemeshard/ut_reboots.cpp b/ydb/core/tx/schemeshard/ut_reboots.cpp
index b467ff6817c..dce2c127122 100644
--- a/ydb/core/tx/schemeshard/ut_reboots.cpp
+++ b/ydb/core/tx/schemeshard/ut_reboots.cpp
@@ -252,10 +252,10 @@ Y_UNIT_TEST_SUITE(TConsistentOpsWithReboots) {
}
});
}
-
+
Y_UNIT_TEST(DropWithData) {
- TTestWithReboots t;
- t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
+ TTestWithReboots t;
+ t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
{
TInactiveZone inactive(activeZone);
TestMkDir(runtime, ++t.TxId, "/MyRoot", "DirB");
diff --git a/ydb/core/tx/schemeshard/ut_split_merge.cpp b/ydb/core/tx/schemeshard/ut_split_merge.cpp
index bec6b131418..7d91e7ae317 100644
--- a/ydb/core/tx/schemeshard/ut_split_merge.cpp
+++ b/ydb/core/tx/schemeshard/ut_split_merge.cpp
@@ -1,9 +1,9 @@
#include <ydb/core/tx/schemeshard/ut_helpers/helpers.h>
#include <ydb/library/yql/minikql/mkql_node.h>
-
+
using namespace NKikimr;
-using namespace NKikimr::NMiniKQL;
+using namespace NKikimr::NMiniKQL;
using namespace NSchemeShard;
using namespace NSchemeShardUT_Private;
@@ -252,9 +252,9 @@ Y_UNIT_TEST_SUITE(TSchemeShardSplitTest) {
PartitioningPolicy {
MinPartitionsCount: 1
SizeToSplit: 100500
- }
- }
- )");
+ }
+ }
+ )");
t.TestEnv->TestWaitNotification(runtime, t.TxId);
}
@@ -269,6 +269,6 @@ Y_UNIT_TEST_SUITE(TSchemeShardSplitTest) {
{NLs::PartitionKeys({""})});
}, true);
- }
-
-}
+ }
+
+}
diff --git a/ydb/core/tx/schemeshard/ya.make b/ydb/core/tx/schemeshard/ya.make
index c3b888f76d9..ceb87ce3883 100644
--- a/ydb/core/tx/schemeshard/ya.make
+++ b/ydb/core/tx/schemeshard/ya.make
@@ -73,7 +73,7 @@ SRCS(
schemeshard__serverless_storage_billing.cpp
schemeshard__sync_update_tenants.cpp
schemeshard__login.cpp
- schemeshard__monitoring.cpp
+ schemeshard__monitoring.cpp
schemeshard__notify.cpp
schemeshard__operation.cpp
schemeshard__operation.h
@@ -155,7 +155,7 @@ SRCS(
schemeshard__operation_drop_cdc_stream.cpp
schemeshard__publish_to_scheme_board.cpp
schemeshard__state_changed_reply.cpp
- schemeshard__table_stats.cpp
+ schemeshard__table_stats.cpp
schemeshard__table_stats_histogram.cpp
schemeshard__upgrade_schema.cpp
schemeshard__upgrade_access_database.cpp
diff --git a/ydb/core/tx/tx_proxy/datareq.cpp b/ydb/core/tx/tx_proxy/datareq.cpp
index fc819273b99..af48b42d60f 100644
--- a/ydb/core/tx/tx_proxy/datareq.cpp
+++ b/ydb/core/tx/tx_proxy/datareq.cpp
@@ -51,7 +51,7 @@ struct TFlatMKQLRequest : public TThrRefBase {
ui64 LockTxId;
bool NeedDiagnostics;
bool LlvmRuntime;
- bool CollectStats;
+ bool CollectStats;
bool ReadOnlyProgram;
TMaybe<ui64> PerShardKeysSizeLimitBytes;
NKikimrTxUserProxy::TMiniKQLTransaction::TLimits Limits;
@@ -68,7 +68,7 @@ struct TFlatMKQLRequest : public TThrRefBase {
: LockTxId(0)
, NeedDiagnostics(false)
, LlvmRuntime(false)
- , CollectStats(false)
+ , CollectStats(false)
, ReadOnlyProgram(false)
, EngineResultStatusCode(NMiniKQL::IEngineFlat::EResult::Unknown)
, EngineResponseStatus(NMiniKQL::IEngineFlat::EStatus::Unknown)
@@ -150,8 +150,8 @@ struct TReadTableRequest : public TThrRefBase {
TKeySpace KeySpace;
THashMap<ui64, TActorId> ClearanceSenders;
THashMap<ui64, TActorId> StreamingShards;
- TSerializedCellVec FromValues;
- TSerializedCellVec ToValues;
+ TSerializedCellVec FromValues;
+ TSerializedCellVec ToValues;
THolder<TKeyDesc> KeyDesc;
bool RowsLimited;
ui64 RowsRemain;
@@ -269,7 +269,7 @@ public:
bool Restarting = false;
size_t RestartCount = 0;
TTableId TableId;
- THolder<NKikimrQueryStats::TTxStats> Stats;
+ THolder<NKikimrQueryStats::TTxStats> Stats;
TReattachState ReattachState;
};
private:
@@ -327,10 +327,10 @@ private:
ui64 PlanStep;
ECoordinatorStatus CoordinatorStatus = ECoordinatorStatus::Unknown;
- ui64 ResultsReceivedCount;
- ui64 ResultsReceivedSize;
-
- TRequestControls RequestControls;
+ ui64 ResultsReceivedCount;
+ ui64 ResultsReceivedSize;
+
+ TRequestControls RequestControls;
TInstant WallClockAccepted;
TInstant WallClockResolveStarted;
@@ -378,10 +378,10 @@ private:
TActor::Die(ctx);
}
- static TInstant Now() {
- return AppData()->TimeProvider->Now();
- }
-
+ static TInstant Now() {
+ return AppData()->TimeProvider->Now();
+ }
+
void ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus status, NKikimrIssues::TStatusIds::EStatusCode code, bool reportIssues, const TActorContext &ctx);
void MarkShardError(ui64 tabletId, TDataReq::TPerTablet &perTablet, bool invalidateDistCache, const TActorContext &ctx);
void TryToInvalidateTable(TTableId tableId, const TActorContext &ctx);
@@ -440,7 +440,7 @@ private:
bool ParseRangeKey(const NKikimrMiniKQL::TParams &proto,
TConstArrayRef<NScheme::TTypeId> keyType,
- TSerializedCellVec &buf,
+ TSerializedCellVec &buf,
EParseRangeKeyExp exp);
bool CheckDomainLocality(NSchemeCache::TSchemeCacheRequest &cacheRequest);
@@ -452,8 +452,8 @@ public:
return NKikimrServices::TActivity::TX_REQ_PROXY;
}
- TDataReq(const TTxProxyServices &services, ui64 txid, const TIntrusivePtr<TTxProxyMon> mon,
- const TRequestControls& requestControls)
+ TDataReq(const TTxProxyServices &services, ui64 txid, const TIntrusivePtr<TTxProxyMon> mon,
+ const TRequestControls& requestControls)
: TActor(&TThis::StateWaitInit)
, Services(services)
, TxId(txid)
@@ -466,9 +466,9 @@ public:
, AggrMinStep(0)
, AggrMaxStep(Max<ui64>())
, PlanStep(0)
- , ResultsReceivedCount(0)
- , ResultsReceivedSize(0)
- , RequestControls(requestControls)
+ , ResultsReceivedCount(0)
+ , ResultsReceivedSize(0)
+ , RequestControls(requestControls)
, WallClockAccepted(TInstant::MicroSeconds(0))
, WallClockResolveStarted(TInstant::MicroSeconds(0))
, WallClockResolved(TInstant::MicroSeconds(0))
@@ -731,7 +731,7 @@ void TDataReq::ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus
x->Record.SetExecutionEngineResponseStatus((ui32)FlatMKQLRequest->EngineResponseStatus);
if (!FlatMKQLRequest->EngineResponse.empty())
x->Record.SetExecutionEngineResponse(FlatMKQLRequest->EngineResponse);
- x->Record.MutableExecutionEngineEvaluatedResponse()->Swap(&FlatMKQLRequest->EngineEvaluatedResponse);
+ x->Record.MutableExecutionEngineEvaluatedResponse()->Swap(&FlatMKQLRequest->EngineEvaluatedResponse);
if (FlatMKQLRequest->Engine) {
auto errors = FlatMKQLRequest->Engine->GetErrors();
if (!errors.empty()) {
@@ -773,30 +773,30 @@ void TDataReq::ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus
timings->SetElapsedPrepareExec(ElapsedExecExec.MicroSeconds());
if (ElapsedPrepareComplete.GetValue())
timings->SetElapsedPrepareComplete(ElapsedExecComplete.MicroSeconds());
- timings->SetWallClockNow(Now().MicroSeconds());
+ timings->SetWallClockNow(Now().MicroSeconds());
}
- TInstant wallClockEnd = Now();
+ TInstant wallClockEnd = Now();
TDuration prepareTime = WallClockPrepared.GetValue() ? WallClockPrepared - WallClockAccepted : TDuration::Zero();
TDuration executeTime = WallClockPrepared.GetValue() ? wallClockEnd - WallClockPrepared : wallClockEnd - WallClockAccepted;
- TDuration totalTime = wallClockEnd - WallClockAccepted;
-
- (*TxProxyMon->ResultsReceivedCount) += ResultsReceivedCount;
- (*TxProxyMon->ResultsReceivedSize) += ResultsReceivedSize;
-
- auto fnGetTableIdByShard = [&](ui64 shard) -> TString {
- const auto* e = PerTablet.FindPtr(shard);
- if (!e)
- return "";
+ TDuration totalTime = wallClockEnd - WallClockAccepted;
+
+ (*TxProxyMon->ResultsReceivedCount) += ResultsReceivedCount;
+ (*TxProxyMon->ResultsReceivedSize) += ResultsReceivedSize;
+
+ auto fnGetTableIdByShard = [&](ui64 shard) -> TString {
+ const auto* e = PerTablet.FindPtr(shard);
+ if (!e)
+ return "";
return Sprintf("%" PRIu64 "/%" PRIu64, e->TableId.PathId.OwnerId, e->TableId.PathId.LocalPathId);
- };
-
- if (FlatMKQLRequest && FlatMKQLRequest->CollectStats) {
- auto* stats = x->Record.MutableTxStats();
- BuildTxStats(*stats);
- stats->SetDurationUs(totalTime.MicroSeconds());
- }
-
+ };
+
+ if (FlatMKQLRequest && FlatMKQLRequest->CollectStats) {
+ auto* stats = x->Record.MutableTxStats();
+ BuildTxStats(*stats);
+ stats->SetDurationUs(totalTime.MicroSeconds());
+ }
+
switch (status) {
case TEvTxUserProxy::TResultStatus::ProxyAccepted:
case TEvTxUserProxy::TResultStatus::ProxyResolved:
@@ -832,11 +832,11 @@ void TDataReq::ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus
break;
case TEvTxUserProxy::TResultStatus::ProxyShardTryLater:
case TEvTxUserProxy::TResultStatus::ProxyShardOverloaded:
- LOG_LOG_S_SAMPLED_BY(ctx, NActors::NLog::PRI_NOTICE, NKikimrServices::TX_PROXY, TxId,
+ LOG_LOG_S_SAMPLED_BY(ctx, NActors::NLog::PRI_NOTICE, NKikimrServices::TX_PROXY, TxId,
"Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
<< " RESPONSE Status# " << TEvTxUserProxy::TResultStatus::Str(status)
<< " shard: " << ComplainingDatashards.front()
- << " table: " << fnGetTableIdByShard(ComplainingDatashards.front())
+ << " table: " << fnGetTableIdByShard(ComplainingDatashards.front())
<< " marker# P13a");
TxProxyMon->ReportStatusNotOK->Inc();
break;
@@ -868,67 +868,67 @@ void TDataReq::ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus
ctx.Send(RequestSource, x); // todo: error tracking
}
-void Aggregate(NKikimrQueryStats::TReadOpStats& aggr, const NKikimrQueryStats::TReadOpStats& stats) {
- aggr.SetCount(aggr.GetCount() + stats.GetCount());
- aggr.SetRows(aggr.GetRows() + stats.GetRows());
- aggr.SetBytes(aggr.GetBytes() + stats.GetBytes());
-}
-
-void Aggregate(NKikimrQueryStats::TWriteOpStats& aggr, const NKikimrQueryStats::TWriteOpStats& stats) {
- aggr.SetCount(aggr.GetCount() + stats.GetCount());
- aggr.SetRows(aggr.GetRows() + stats.GetRows());
- aggr.SetBytes(aggr.GetBytes() + stats.GetBytes());
-}
-
-void Aggregate(NKikimrQueryStats::TTableAccessStats& aggr, const NKikimrQueryStats::TTableAccessStats& stats) {
- if (stats.GetSelectRow().GetCount()) {
- Aggregate(*aggr.MutableSelectRow(), stats.GetSelectRow());
- }
- if (stats.GetSelectRange().GetCount()) {
- Aggregate(*aggr.MutableSelectRange(), stats.GetSelectRange());
- }
- if (stats.GetUpdateRow().GetCount()) {
- Aggregate(*aggr.MutableUpdateRow(), stats.GetUpdateRow());
- }
- if (stats.GetEraseRow().GetCount()) {
- Aggregate(*aggr.MutableEraseRow(), stats.GetEraseRow());
- }
-}
-
-void TDataReq::BuildTxStats(NKikimrQueryStats::TTxStats& stats) {
+void Aggregate(NKikimrQueryStats::TReadOpStats& aggr, const NKikimrQueryStats::TReadOpStats& stats) {
+ aggr.SetCount(aggr.GetCount() + stats.GetCount());
+ aggr.SetRows(aggr.GetRows() + stats.GetRows());
+ aggr.SetBytes(aggr.GetBytes() + stats.GetBytes());
+}
+
+void Aggregate(NKikimrQueryStats::TWriteOpStats& aggr, const NKikimrQueryStats::TWriteOpStats& stats) {
+ aggr.SetCount(aggr.GetCount() + stats.GetCount());
+ aggr.SetRows(aggr.GetRows() + stats.GetRows());
+ aggr.SetBytes(aggr.GetBytes() + stats.GetBytes());
+}
+
+void Aggregate(NKikimrQueryStats::TTableAccessStats& aggr, const NKikimrQueryStats::TTableAccessStats& stats) {
+ if (stats.GetSelectRow().GetCount()) {
+ Aggregate(*aggr.MutableSelectRow(), stats.GetSelectRow());
+ }
+ if (stats.GetSelectRange().GetCount()) {
+ Aggregate(*aggr.MutableSelectRange(), stats.GetSelectRange());
+ }
+ if (stats.GetUpdateRow().GetCount()) {
+ Aggregate(*aggr.MutableUpdateRow(), stats.GetUpdateRow());
+ }
+ if (stats.GetEraseRow().GetCount()) {
+ Aggregate(*aggr.MutableEraseRow(), stats.GetEraseRow());
+ }
+}
+
+void TDataReq::BuildTxStats(NKikimrQueryStats::TTxStats& stats) {
TTablePathHashMap<NKikimrQueryStats::TTableAccessStats> byTable;
-
- for (const auto& shard : PerTablet) {
- if (!shard.second.Stats)
- continue;
-
- for (const auto& table : shard.second.Stats->GetTableAccessStats()) {
- TTableId tableId(table.GetTableInfo().GetSchemeshardId(), table.GetTableInfo().GetPathId());
- auto& tableStats = byTable[tableId];
- if (!tableStats.HasTableInfo()) {
+
+ for (const auto& shard : PerTablet) {
+ if (!shard.second.Stats)
+ continue;
+
+ for (const auto& table : shard.second.Stats->GetTableAccessStats()) {
+ TTableId tableId(table.GetTableInfo().GetSchemeshardId(), table.GetTableInfo().GetPathId());
+ auto& tableStats = byTable[tableId];
+ if (!tableStats.HasTableInfo()) {
tableStats.MutableTableInfo()->SetSchemeshardId(tableId.PathId.OwnerId);
tableStats.MutableTableInfo()->SetPathId(tableId.PathId.LocalPathId);
- tableStats.MutableTableInfo()->SetName(table.GetTableInfo().GetName());
- }
+ tableStats.MutableTableInfo()->SetName(table.GetTableInfo().GetName());
+ }
Aggregate(tableStats, table);
tableStats.SetShardCount(tableStats.GetShardCount() + 1);
- }
- if (shard.second.Stats->PerShardStatsSize() == 1) {
+ }
+ if (shard.second.Stats->PerShardStatsSize() == 1) {
auto shardStats = stats.AddPerShardStats();
shardStats->CopyFrom(shard.second.Stats->GetPerShardStats(0));
shardStats->SetOutgoingReadSetsCount(shard.second.OutgoingReadSetsSize);
shardStats->SetProgramSize(shard.second.ProgramSize);
shardStats->SetReplySize(shard.second.ReplySize);
- }
- }
-
- for (auto& tableStats : byTable) {
- stats.AddTableAccessStats()->Swap(&tableStats.second);
- }
+ }
+ }
+
+ for (auto& tableStats : byTable) {
+ stats.AddTableAccessStats()->Swap(&tableStats.second);
+ }
stats.SetComputeCpuTimeUsec(CpuTime.MicroSeconds());
-}
-
+}
+
void TDataReq::ProcessFlatMKQLResolve(NSchemeCache::TSchemeCacheRequest *cacheRequest, const TActorContext &ctx) {
NMiniKQL::IEngineFlat &engine = *FlatMKQLRequest->Engine;
@@ -939,8 +939,8 @@ void TDataReq::ProcessFlatMKQLResolve(NSchemeCache::TSchemeCacheRequest *cacheRe
keyDescriptions[index] = std::move(cacheRequest->ResultSet[index].KeyDescription);
}
- auto beforeBuild = Now();
- NMiniKQL::IEngineFlat::TShardLimits shardLimits(RequestControls.MaxShardCount, RequestControls.MaxReadSetCount);
+ auto beforeBuild = Now();
+ NMiniKQL::IEngineFlat::TShardLimits shardLimits(RequestControls.MaxShardCount, RequestControls.MaxReadSetCount);
if (FlatMKQLRequest->Limits.GetAffectedShardsLimit()) {
shardLimits.ShardCount = std::min(shardLimits.ShardCount, FlatMKQLRequest->Limits.GetAffectedShardsLimit());
}
@@ -948,7 +948,7 @@ void TDataReq::ProcessFlatMKQLResolve(NSchemeCache::TSchemeCacheRequest *cacheRe
shardLimits.RSCount = std::min(shardLimits.RSCount, FlatMKQLRequest->Limits.GetReadsetCountLimit());
}
FlatMKQLRequest->EngineResultStatusCode = engine.PrepareShardPrograms(shardLimits);
- auto afterBuild = Now();
+ auto afterBuild = Now();
if (FlatMKQLRequest->EngineResultStatusCode != NMiniKQL::IEngineFlat::EResult::Ok) {
IssueManager.RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::ENGINE_ERROR));
@@ -985,7 +985,7 @@ void TDataReq::ProcessFlatMKQLResolve(NSchemeCache::TSchemeCacheRequest *cacheRe
dataTransaction.SetReadOnly(FlatMKQLRequest->ReadOnlyProgram);
dataTransaction.SetCancelAfterMs(shardCancelAfter.MilliSeconds());
dataTransaction.SetCancelDeadlineMs(shardCancelDeadline.MilliSeconds());
- dataTransaction.SetCollectStats(FlatMKQLRequest->CollectStats);
+ dataTransaction.SetCollectStats(FlatMKQLRequest->CollectStats);
if (FlatMKQLRequest->LockTxId)
dataTransaction.SetLockTxId(FlatMKQLRequest->LockTxId);
if (FlatMKQLRequest->NeedDiagnostics)
@@ -1062,7 +1062,7 @@ void TDataReq::ProcessFlatMKQLResolve(NSchemeCache::TSchemeCacheRequest *cacheRe
}
engine.AfterShardProgramsExtracted();
- TxProxyMon->TxPrepareSendShardProgramsHgram->Collect((Now() - afterBuild).MicroSeconds());
+ TxProxyMon->TxPrepareSendShardProgramsHgram->Collect((Now() - afterBuild).MicroSeconds());
Become(&TThis::StateWaitPrepare);
}
@@ -1143,29 +1143,29 @@ TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> TDataReq::PrepareFlatMKQLReque
*TxProxyMon->MiniKQLParamsSize += miniKQLParams.size();
*TxProxyMon->MiniKQLProgramSize += miniKQLProgram.size();
- auto beforeSetProgram = Now();
+ auto beforeSetProgram = Now();
FlatMKQLRequest->EngineResultStatusCode = FlatMKQLRequest->Engine->SetProgram(miniKQLProgram, miniKQLParams);
- TxProxyMon->TxPrepareSetProgramHgram->Collect((Now() - beforeSetProgram).MicroSeconds());
+ TxProxyMon->TxPrepareSetProgramHgram->Collect((Now() - beforeSetProgram).MicroSeconds());
if (FlatMKQLRequest->EngineResultStatusCode != NMiniKQL::IEngineFlat::EResult::Ok)
return nullptr;
- WallClockResolveStarted = Now();
+ WallClockResolveStarted = Now();
auto &keyDescriptions = FlatMKQLRequest->Engine->GetDbKeys();
// check keys and set use follower flag
CanUseFollower = true;
request->ResultSet.reserve(keyDescriptions.size());
- for (auto &keyd : keyDescriptions) {
+ for (auto &keyd : keyDescriptions) {
if (keyd->RowOperation != TKeyDesc::ERowOperation::Read || keyd->ReadTarget.GetMode() != TReadTarget::EMode::Follower) {
CanUseFollower = false;
LOG_DEBUG_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
"Actor " << ctx.SelfID.ToString() << " txid " << TxId
<< " disallow followers cause of operation " << (ui32)keyd->RowOperation
<< " read target mode " << (ui32)keyd->ReadTarget.GetMode());
- }
+ }
request->ResultSet.emplace_back(std::move(keyd));
- }
+ }
return new TEvTxProxySchemeCache::TEvResolveKeySet(request);
}
@@ -1198,7 +1198,7 @@ void TDataReq::MarkShardError(ui64 shardId, TDataReq::TPerTablet &perTablet, boo
}
void TDataReq::Handle(TEvTxProxyReq::TEvMakeRequest::TPtr &ev, const TActorContext &ctx) {
- RequestControls.Reqister(ctx);
+ RequestControls.Reqister(ctx);
TEvTxProxyReq::TEvMakeRequest *msg = ev->Get();
const NKikimrTxUserProxy::TEvProposeTransaction &record = msg->Ev->Get()->Record;
@@ -1207,7 +1207,7 @@ void TDataReq::Handle(TEvTxProxyReq::TEvMakeRequest::TPtr &ev, const TActorConte
ProxyFlags = record.HasProxyFlags() ? record.GetProxyFlags() : 0;
ExecTimeoutPeriod = record.HasExecTimeoutPeriod()
? TDuration::MilliSeconds(record.GetExecTimeoutPeriod())
- : TDuration::MilliSeconds(RequestControls.DefaultTimeoutMs);
+ : TDuration::MilliSeconds(RequestControls.DefaultTimeoutMs);
if (ExecTimeoutPeriod.Minutes() > 60) {
LOG_WARN_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
"Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
@@ -1225,13 +1225,13 @@ void TDataReq::Handle(TEvTxProxyReq::TEvMakeRequest::TPtr &ev, const TActorConte
CancelAfter = {};
}
- WallClockAccepted = Now();
+ WallClockAccepted = Now();
ctx.Schedule(TDuration::MilliSeconds(KIKIMR_DATAREQ_WATCHDOG_PERIOD), new TEvPrivate::TEvProxyDataReqOngoingTransactionWatchdog());
// Schedule execution timeout
{
- TAutoPtr<IEventHandle> wakeupEv(new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
- ExecTimeoutCookieHolder.Reset(ISchedulerCookie::Make2Way());
+ TAutoPtr<IEventHandle> wakeupEv(new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
+ ExecTimeoutCookieHolder.Reset(ISchedulerCookie::Make2Way());
CreateLongTimer(ctx, ExecTimeoutPeriod, wakeupEv, AppData(ctx)->SystemPoolId, ExecTimeoutCookieHolder.Get());
}
@@ -1282,7 +1282,7 @@ void TDataReq::Handle(TEvTxProxyReq::TEvMakeRequest::TPtr &ev, const TActorConte
if (mkqlTxBody.GetFlatMKQL()) {
FlatMKQLRequest = new TFlatMKQLRequest;
FlatMKQLRequest->LlvmRuntime = mkqlTxBody.GetLlvmRuntime();
- FlatMKQLRequest->CollectStats = mkqlTxBody.GetCollectStats();
+ FlatMKQLRequest->CollectStats = mkqlTxBody.GetCollectStats();
if (mkqlTxBody.HasPerShardKeysSizeLimitBytes()) {
FlatMKQLRequest->PerShardKeysSizeLimitBytes = mkqlTxBody.GetPerShardKeysSizeLimitBytes();
}
@@ -1292,8 +1292,8 @@ void TDataReq::Handle(TEvTxProxyReq::TEvMakeRequest::TPtr &ev, const TActorConte
if (mkqlTxBody.HasSnapshotStep() && mkqlTxBody.HasSnapshotTxId())
FlatMKQLRequest->Snapshot = TRowVersion(mkqlTxBody.GetSnapshotStep(), mkqlTxBody.GetSnapshotTxId());
NMiniKQL::TEngineFlatSettings settings(NMiniKQL::IEngineFlat::EProtocol::V1, functionRegistry,
- *TAppData::RandomProvider, *TAppData::TimeProvider,
- nullptr, TxProxyMon->AllocPoolCounters);
+ *TAppData::RandomProvider, *TAppData::TimeProvider,
+ nullptr, TxProxyMon->AllocPoolCounters);
settings.EvaluateResultType = mkqlTxBody.GetEvaluateResultType();
settings.EvaluateResultValue = mkqlTxBody.GetEvaluateResultValue();
if (FlatMKQLRequest->LlvmRuntime) {
@@ -1496,9 +1496,9 @@ void TDataReq::Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr &ev,
return Die(ctx);
}
- TTableRange range(ReadTableRequest->FromValues.GetCells(),
+ TTableRange range(ReadTableRequest->FromValues.GetCells(),
fromInclusive,
- ReadTableRequest->ToValues.GetCells(),
+ ReadTableRequest->ToValues.GetCells(),
toInclusive);
if (range.IsEmptyRange({keyTypes.begin(), keyTypes.end()})) {
@@ -1531,8 +1531,8 @@ void TDataReq::Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, c
"Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
<< " HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# " << request->ErrorCount);
- TxProxyMon->CacheRequestLatency->Collect((Now() - WallClockAccepted).MilliSeconds());
- WallClockResolved = Now();
+ TxProxyMon->CacheRequestLatency->Collect((Now() - WallClockAccepted).MilliSeconds());
+ WallClockResolved = Now();
if (request->ErrorCount > 0) {
bool gotHardResolveError = false;
@@ -1686,7 +1686,7 @@ void TDataReq::HandlePrepare(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const T
if (notPrepared) {
TStringStream explanation;
- explanation << "could not deliver program to shard " << msg->TabletId << " with txid# " << TxId;
+ explanation << "could not deliver program to shard " << msg->TabletId << " with txid# " << TxId;
IssueManager.RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::SHARD_NOT_AVAILABLE, explanation.Str()));
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardNotAvailable, NKikimrIssues::TStatusIds::REJECTED, true, ctx);
} else if (wasRestarting) {
@@ -1700,7 +1700,7 @@ void TDataReq::HandlePrepare(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const T
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardNotAvailable, NKikimrIssues::TStatusIds::REJECTED, true, ctx);
} else {
TStringStream explanation;
- explanation << "tx state unknown for shard " << msg->TabletId << " with txid# " << TxId;
+ explanation << "tx state unknown for shard " << msg->TabletId << " with txid# " << TxId;
IssueManager.RaiseIssue(MakeIssue(NKikimrIssues::TIssuesIds::TX_STATE_UNKNOWN, explanation.Str()));
auto status = IsReadOnlyRequest()
? TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyShardNotAvailable
@@ -1780,11 +1780,11 @@ void TDataReq::HandlePrepare(TEvDataShard::TEvProposeTransactionResult::TPtr &ev
<< " HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# " << perTablet->TabletStatus
<< " GetStatus# " << msg->GetStatus()
<< " shard id " << tabletId
- << " read size " << record.GetReadSize()
- << " out readset size " << record.OutgoingReadSetInfoSize()
+ << " read size " << record.GetReadSize()
+ << " out readset size " << record.OutgoingReadSetInfoSize()
<< " marker# P6");
- WallClockLastPrepareReply = Now();
+ WallClockLastPrepareReply = Now();
const TInstant reportedArriveTime = TInstant::MicroSeconds(record.GetPrepareArriveTime());
const TDuration completionDelta = WallClockLastPrepareReply - reportedArriveTime;
WallClockMinPrepareArrive = WallClockMinPrepareArrive.GetValue() ? Min(WallClockMinPrepareArrive, reportedArriveTime) : reportedArriveTime;
@@ -1806,17 +1806,17 @@ void TDataReq::HandlePrepare(TEvDataShard::TEvProposeTransactionResult::TPtr &ev
perTablet->TabletStatus = TPerTablet::ETabletStatus::StatusPrepared;
perTablet->MinStep = record.GetMinStep();
perTablet->MaxStep = record.GetMaxStep();
- perTablet->ReadSize = record.GetReadSize();
- perTablet->ReplySize = record.GetReplySize();
+ perTablet->ReadSize = record.GetReadSize();
+ perTablet->ReplySize = record.GetReplySize();
perTablet->OutgoingReadSetsSize = record.OutgoingReadSetInfoSize();
- for (size_t i = 0; i < record.OutgoingReadSetInfoSize(); ++i) {
- auto& rs = record.GetOutgoingReadSetInfo(i);
- ui64 targetTabletId = rs.GetShardId();
- ui64 size = rs.GetSize();
- TPerTablet* targetTablet = PerTablet.FindPtr(targetTabletId);
- Y_VERIFY(targetTablet);
- targetTablet->IncomingReadSetsSize += size;
- }
+ for (size_t i = 0; i < record.OutgoingReadSetInfoSize(); ++i) {
+ auto& rs = record.GetOutgoingReadSetInfo(i);
+ ui64 targetTabletId = rs.GetShardId();
+ ui64 size = rs.GetSize();
+ TPerTablet* targetTablet = PerTablet.FindPtr(targetTabletId);
+ Y_VERIFY(targetTablet);
+ targetTablet->IncomingReadSetsSize += size;
+ }
AggrMaxStep = Min(AggrMaxStep, perTablet->MaxStep);
AggrMinStep = Max(AggrMinStep, perTablet->MinStep);
@@ -1897,10 +1897,10 @@ void TDataReq::HandlePrepare(TEvDataShard::TEvProposeTransactionResult::TPtr &ev
TxProxyMon->TxResultError->Inc();
return HandlePrepareErrors(ev, ctx);
}
- case NKikimrTxDataShard::TEvProposeTransactionResult::ABORTED:
+ case NKikimrTxDataShard::TEvProposeTransactionResult::ABORTED:
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecAborted, NKikimrIssues::TStatusIds::SUCCESS, true, ctx);
TxProxyMon->TxResultAborted->Inc();
- return Die(ctx);
+ return Die(ctx);
case NKikimrTxDataShard::TEvProposeTransactionResult::TRY_LATER:
ExtractDatashardErrors(record);
CancelProposal(tabletId);
@@ -2045,7 +2045,7 @@ void TDataReq::Handle(TEvTxProxy::TEvProposeTransactionStatus::TPtr &ev, const T
LOG_DEBUG_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
"Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
<< " HANDLE TEvProposeTransactionStatus TDataReq marker# P10 Status# " << msg->GetStatus());
- WallClockPlanned = Now();
+ WallClockPlanned = Now();
if (ProxyFlags & TEvTxUserProxy::TEvProposeTransaction::ProxyReportPlanned)
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::CoordinatorPlanned, NKikimrIssues::TStatusIds::TRANSIENT, false, ctx);
break;
@@ -2498,25 +2498,25 @@ void TDataReq::HandleExecTimeout(const TActorContext &ctx) {
}
void TDataReq::MergeResult(TEvDataShard::TEvProposeTransactionResult::TPtr &ev, const TActorContext &ctx) {
- NKikimrTxDataShard::TEvProposeTransactionResult &record = ev->Get()->Record;
+ NKikimrTxDataShard::TEvProposeTransactionResult &record = ev->Get()->Record;
- ResultsReceivedCount++;
+ ResultsReceivedCount++;
ResultsReceivedSize += record.GetTxResult().size();
-
- WallClockLastExecReply = Now();
+
+ WallClockLastExecReply = Now();
if (WallClockFirstExecReply.GetValue() == 0)
WallClockFirstExecReply = WallClockLastExecReply;
- const ui64 tabletId = record.GetOrigin();
- TPerTablet *perTablet = PerTablet.FindPtr(tabletId);
-
- if (FlatMKQLRequest && FlatMKQLRequest->CollectStats) {
- perTablet->Stats.Reset(new NKikimrQueryStats::TTxStats);
- perTablet->Stats->Swap(record.MutableTxStats());
- LOG_DEBUG_S(ctx, NKikimrServices::TX_PROXY,
- "Got stats for txid: " << TxId << " datashard: " << tabletId << " " << *perTablet->Stats);
- }
-
+ const ui64 tabletId = record.GetOrigin();
+ TPerTablet *perTablet = PerTablet.FindPtr(tabletId);
+
+ if (FlatMKQLRequest && FlatMKQLRequest->CollectStats) {
+ perTablet->Stats.Reset(new NKikimrQueryStats::TTxStats);
+ perTablet->Stats->Swap(record.MutableTxStats());
+ LOG_DEBUG_S(ctx, NKikimrServices::TX_PROXY,
+ "Got stats for txid: " << TxId << " datashard: " << tabletId << " " << *perTablet->Stats);
+ }
+
if (StreamResponse) {
return FinishShardStream(ev, ctx);
}
@@ -2742,17 +2742,17 @@ ui64 TDataReq::SelectCoordinator(NSchemeCache::TSchemeCacheRequest &cacheRequest
}
void TDataReq::FailProposedRequest(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus status, TString errMsg, const TActorContext &ctx) {
- LOG_ERROR_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
- "Actor# " << ctx.SelfID.ToString() << " txid# " << TxId << " FailProposedRequest: " << errMsg << " Status# " << status);
-
- DatashardErrors = errMsg;
- // Cancel the Tx on all shards (so we pass invalid tablet id)
+ LOG_ERROR_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
+ "Actor# " << ctx.SelfID.ToString() << " txid# " << TxId << " FailProposedRequest: " << errMsg << " Status# " << status);
+
+ DatashardErrors = errMsg;
+ // Cancel the Tx on all shards (so we pass invalid tablet id)
CancelProposal(0);
ReportStatus(status, NKikimrIssues::TStatusIds::ERROR, true, ctx);
- Become(&TThis::StatePrepareErrors, ctx, TDuration::MilliSeconds(500), new TEvents::TEvWakeup);
- TxProxyMon->TxResultError->Inc();
-}
-
+ Become(&TThis::StatePrepareErrors, ctx, TDuration::MilliSeconds(500), new TEvents::TEvWakeup);
+ TxProxyMon->TxResultError->Inc();
+}
+
bool TDataReq::CheckDomainLocality(NSchemeCache::TSchemeCacheRequest &cacheRequest) {
NSchemeCache::TDomainInfo::TPtr domainInfo;
for (const auto& entry :cacheRequest.ResultSet) {
@@ -2776,54 +2776,54 @@ bool TDataReq::CheckDomainLocality(NSchemeCache::TSchemeCacheRequest &cacheReque
}
void TDataReq::RegisterPlan(const TActorContext &ctx) {
- WallClockPrepared = Now();
+ WallClockPrepared = Now();
TDomainsInfo *domainsInfo = AppData(ctx)->DomainsInfo.Get();
Y_VERIFY(domainsInfo);
- ui64 totalReadSize = 0;
+ ui64 totalReadSize = 0;
TSet<ui32> affectedDomains;
for (const auto &xp : PerTablet) {
const ui32 tabletDomain = domainsInfo->GetDomainUidByTabletId(xp.first);
Y_VERIFY(tabletDomain != Max<ui32>());
affectedDomains.insert(tabletDomain);
- totalReadSize += xp.second.ReadSize;
+ totalReadSize += xp.second.ReadSize;
}
- // Check reply size
- ui64 sizeLimit = RequestControls.PerRequestDataSizeLimit;
+ // Check reply size
+ ui64 sizeLimit = RequestControls.PerRequestDataSizeLimit;
if (FlatMKQLRequest && FlatMKQLRequest->Limits.GetTotalReadSizeLimitBytes()) {
sizeLimit = sizeLimit
? std::min(sizeLimit, FlatMKQLRequest->Limits.GetTotalReadSizeLimitBytes())
: FlatMKQLRequest->Limits.GetTotalReadSizeLimitBytes();
}
- if (totalReadSize > sizeLimit) {
- FailProposedRequest(
- TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError,
- Sprintf("Transaction total read size %" PRIu64 " exceeded limit %" PRIu64, totalReadSize, sizeLimit),
- ctx);
- return;
- }
-
- // Check per tablet incoming read set size
- sizeLimit = RequestControls.PerShardIncomingReadSetSizeLimit;
- for (const auto &xp : PerTablet) {
- ui64 targetTabletId = xp.first;
- ui64 rsSize = xp.second.IncomingReadSetsSize;
- if (rsSize > sizeLimit) {
- ComplainingDatashards.push_back(targetTabletId);
- FailProposedRequest(
- TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError,
- Sprintf("Transaction incoming read set size %" PRIu64 " for tablet %" PRIu64 " exceeded limit %" PRIu64,
- rsSize, targetTabletId, sizeLimit),
- ctx);
- return;
- }
- }
-
- if (ProxyFlags & TEvTxUserProxy::TEvProposeTransaction::ProxyReportPrepared)
+ if (totalReadSize > sizeLimit) {
+ FailProposedRequest(
+ TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError,
+ Sprintf("Transaction total read size %" PRIu64 " exceeded limit %" PRIu64, totalReadSize, sizeLimit),
+ ctx);
+ return;
+ }
+
+ // Check per tablet incoming read set size
+ sizeLimit = RequestControls.PerShardIncomingReadSetSizeLimit;
+ for (const auto &xp : PerTablet) {
+ ui64 targetTabletId = xp.first;
+ ui64 rsSize = xp.second.IncomingReadSetsSize;
+ if (rsSize > sizeLimit) {
+ ComplainingDatashards.push_back(targetTabletId);
+ FailProposedRequest(
+ TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError,
+ Sprintf("Transaction incoming read set size %" PRIu64 " for tablet %" PRIu64 " exceeded limit %" PRIu64,
+ rsSize, targetTabletId, sizeLimit),
+ ctx);
+ return;
+ }
+ }
+
+ if (ProxyFlags & TEvTxUserProxy::TEvProposeTransaction::ProxyReportPrepared)
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ProxyPrepared, NKikimrIssues::TStatusIds::TRANSIENT, false, ctx);
-
+
Y_VERIFY(SelectedCoordinator, "shouldn't be run with null SelectedCoordinator");
TAutoPtr<TEvTxProxy::TEvProposeTransaction> req(new TEvTxProxy::TEvProposeTransaction(SelectedCoordinator, TxId, 0,
AggrMinStep, AggrMaxStep));
@@ -2859,7 +2859,7 @@ void TDataReq::Handle(TEvents::TEvUndelivered::TPtr &, const TActorContext &ctx)
}
void TDataReq::HandleWatchdog(const TActorContext &ctx) {
- const TDuration fromStart = Now() - this->WallClockAccepted;
+ const TDuration fromStart = Now() - this->WallClockAccepted;
LOG_LOG_S_SAMPLED_BY(ctx, NActors::NLog::PRI_INFO, NKikimrServices::TX_PROXY, TxId,
"Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
<< " Transactions still running for " << fromStart);
@@ -2934,23 +2934,23 @@ void TDataReq::ProcessStreamClearance(bool cleared, const TActorContext &ctx)
bool TDataReq::ParseRangeKey(const NKikimrMiniKQL::TParams &proto,
TConstArrayRef<NScheme::TTypeId> keyType,
- TSerializedCellVec &buf,
+ TSerializedCellVec &buf,
EParseRangeKeyExp exp)
{
TVector<TCell> key;
- if (proto.HasValue()) {
- if (!proto.HasType()) {
- UnresolvedKeys.push_back("No type was specified in the range key tuple");
- return false;
- }
-
- auto& value = proto.GetValue();
- auto& type = proto.GetType();
- TString errStr;
- bool res = NMiniKQL::CellsFromTuple(&type, value, keyType, true, key, errStr);
- if (!res) {
- UnresolvedKeys.push_back("Failed to parse range key tuple: " + errStr);
- return false;
+ if (proto.HasValue()) {
+ if (!proto.HasType()) {
+ UnresolvedKeys.push_back("No type was specified in the range key tuple");
+ return false;
+ }
+
+ auto& value = proto.GetValue();
+ auto& type = proto.GetType();
+ TString errStr;
+ bool res = NMiniKQL::CellsFromTuple(&type, value, keyType, true, key, errStr);
+ if (!res) {
+ UnresolvedKeys.push_back("Failed to parse range key tuple: " + errStr);
+ return false;
}
}
@@ -2960,9 +2960,9 @@ bool TDataReq::ParseRangeKey(const NKikimrMiniKQL::TParams &proto,
break;
case EParseRangeKeyExp::NONE:
break;
- }
-
- buf.Parse(TSerializedCellVec::Serialize(key));
+ }
+
+ buf.Parse(TSerializedCellVec::Serialize(key));
return true;
}
@@ -2976,9 +2976,9 @@ bool TDataReq::IsReadOnlyRequest() const {
Y_FAIL("No request");
}
-IActor* CreateTxProxyDataReq(const TTxProxyServices &services, const ui64 txid, const TIntrusivePtr<NKikimr::NTxProxy::TTxProxyMon>& mon,
- const TRequestControls& requestControls) {
- return new NTxProxy::TDataReq(services, txid, mon, requestControls);
+IActor* CreateTxProxyDataReq(const TTxProxyServices &services, const ui64 txid, const TIntrusivePtr<NKikimr::NTxProxy::TTxProxyMon>& mon,
+ const TRequestControls& requestControls) {
+ return new NTxProxy::TDataReq(services, txid, mon, requestControls);
}
}}
diff --git a/ydb/core/tx/tx_proxy/describe.cpp b/ydb/core/tx/tx_proxy/describe.cpp
index b8838f65c14..deb38ca2b7d 100644
--- a/ydb/core/tx/tx_proxy/describe.cpp
+++ b/ydb/core/tx/tx_proxy/describe.cpp
@@ -56,15 +56,15 @@ class TDescribeReq : public TActor<TDescribeReq> {
void FillRootDescr(NKikimrSchemeOp::TDirEntry* descr, const TString& name, ui64 schemeRootId) {
descr->SetPathId(NSchemeShard::RootPathId);
- descr->SetName(name);
- descr->SetSchemeshardId(schemeRootId);
+ descr->SetName(name);
+ descr->SetSchemeshardId(schemeRootId);
descr->SetPathType(NKikimrSchemeOp::EPathType::EPathTypeDir);
- descr->SetCreateFinished(true);
+ descr->SetCreateFinished(true);
// TODO(xenoxeno): ?
//descr->SetCreateTxId(0);
//descr->SetCreateStep(0);
//descr->SetOwner(BUILTIN_ACL_ROOT);
- }
+ }
void FillSystemViewDescr(NKikimrSchemeOp::TDirEntry* descr, ui64 schemeShardId) {
descr->SetSchemeshardId(schemeShardId);
@@ -224,26 +224,26 @@ void TDescribeReq::Handle(TEvTxProxyReq::TEvNavigateScheme::TPtr &ev, const TAct
Source = msg->Ev->Sender;
SourceCookie = msg->Ev->Cookie;
- if (record.GetDescribePath().HasPath()) {
- TDomainsInfo *domainsInfo = AppData(ctx)->DomainsInfo.Get();
- Y_VERIFY(!domainsInfo->Domains.empty());
-
- if (record.GetDescribePath().GetPath() == "/") {
- // Special handling for enumerating roots
+ if (record.GetDescribePath().HasPath()) {
+ TDomainsInfo *domainsInfo = AppData(ctx)->DomainsInfo.Get();
+ Y_VERIFY(!domainsInfo->Domains.empty());
+
+ if (record.GetDescribePath().GetPath() == "/") {
+ // Special handling for enumerating roots
TAutoPtr<NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResultBuilder> result =
new NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResultBuilder("/", NSchemeShard::RootSchemeShardId, TPathId(NSchemeShard::RootSchemeShardId, NSchemeShard::RootPathId));
- auto descr = result->Record.MutablePathDescription();
+ auto descr = result->Record.MutablePathDescription();
FillRootDescr(descr->MutableSelf(), "/", NSchemeShard::RootSchemeShardId);
- for (const auto& domain : domainsInfo->Domains) {
- auto entry = result->Record.MutablePathDescription()->AddChildren();
- FillRootDescr(entry, domain.second->Name, domain.second->SchemeRoot);
- }
+ for (const auto& domain : domainsInfo->Domains) {
+ auto entry = result->Record.MutablePathDescription()->AddChildren();
+ FillRootDescr(entry, domain.second->Name, domain.second->SchemeRoot);
+ }
ctx.Send(Source, result.Release(), 0, SourceCookie);
- return Die(ctx);
- }
+ return Die(ctx);
+ }
}
-
+
if (!record.GetUserToken().empty()) {
UserToken = new NACLib::TUserToken(record.GetUserToken());
}
@@ -289,11 +289,11 @@ void TDescribeReq::Handle(TEvTxProxyReq::TEvNavigateScheme::TPtr &ev, const TAct
SchemeRequest = ev->Release();
Become(&TThis::StateWaitResolve);
}
-
+
void TDescribeReq::Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr &ev, const TActorContext &ctx) {
TEvTxProxySchemeCache::TEvNavigateKeySetResult *msg = ev->Get();
NSchemeCache::TSchemeCacheNavigate *navigate = msg->Request.Get();
-
+
TxProxyMon->CacheRequestLatency->Collect((ctx.Now() - WallClockStarted).MilliSeconds());
Y_VERIFY(navigate->ResultSet.size() == 1);
@@ -303,7 +303,7 @@ void TDescribeReq::Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr &
NKikimrServices::TX_PROXY,
"Actor# " << ctx.SelfID.ToString()
<< " HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# " << navigate->ErrorCount);
-
+
if (navigate->ErrorCount > 0) {
switch (entry.Status) {
case NSchemeCache::TSchemeCacheNavigate::EStatus::PathErrorUnknown:
@@ -334,7 +334,7 @@ void TDescribeReq::Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr &
TxProxyMon->ResolveKeySetFail->Inc();
break;
}
-
+
return Die(ctx);
}
@@ -346,10 +346,10 @@ void TDescribeReq::Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr &
<< " with access " << NACLib::AccessRightsToString(access)
<< " to path " << JoinPath(entry.Path));
ReportError(NKikimrScheme::StatusAccessDenied, "Access denied", ctx);
- return Die(ctx);
- }
- }
-
+ return Die(ctx);
+ }
+ }
+
const auto& describePath = SchemeRequest->Ev->Get()->Record.GetDescribePath();
if (entry.TableId.IsSystemView()) {
@@ -397,7 +397,7 @@ void TDescribeReq::Handle(NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult:
if (self.GetPathType() == NKikimrSchemeOp::EPathType::EPathTypeSubDomain ||
self.GetPathType() == NKikimrSchemeOp::EPathType::EPathTypeColumnStore ||
self.GetPathType() == NKikimrSchemeOp::EPathType::EPathTypeColumnTable)
- {
+ {
needSysFolder = true;
} else if (self.GetPathId() == NSchemeShard::RootPathId) {
for (const auto& [_, domain] : domainsInfo->Domains) {
diff --git a/ydb/core/tx/tx_proxy/mon.cpp b/ydb/core/tx/tx_proxy/mon.cpp
index 7a6811b666f..14f5d47064e 100644
--- a/ydb/core/tx/tx_proxy/mon.cpp
+++ b/ydb/core/tx/tx_proxy/mon.cpp
@@ -9,7 +9,7 @@ TTxProxyMon::TTxProxyMon(const TIntrusivePtr<NMonitoring::TDynamicCounters>& cou
: Counters(counters)
, TxGroup(GetServiceCounters(counters, "proxy")->GetSubgroup("subsystem", "tx"))
, DataReqGroup(GetServiceCounters(counters, "proxy")->GetSubgroup("subsystem", "datareq"))
- , AllocPoolCounters(counters, "tx_proxy")
+ , AllocPoolCounters(counters, "tx_proxy")
{
CacheRequestLatency = TxGroup->GetHistogram("CacheRequest/LatencyMs", NMonitoring::ExponentialHistogram(10, 4, 1));
@@ -124,9 +124,9 @@ TTxProxyMon::TTxProxyMon(const TIntrusivePtr<NMonitoring::TDynamicCounters>& cou
MergeResultMiniKQLExecError = DataReqGroup->GetCounter("MergeResult/MiniKQLExecError", true);
MergeResultMiniKQLExecComplete = DataReqGroup->GetCounter("MergeResult/MiniKQLExecComplete", true);
MergeResultMiniKQLUnknownStatus = DataReqGroup->GetCounter("MergeResult/MiniKQLUnknownStatus", true);
-
- ResultsReceivedCount = DataReqGroup->GetCounter("ResultsReceived/Count", true);
- ResultsReceivedSize = DataReqGroup->GetCounter("ResultsReceived/Size", true);
+
+ ResultsReceivedCount = DataReqGroup->GetCounter("ResultsReceived/Count", true);
+ ResultsReceivedSize = DataReqGroup->GetCounter("ResultsReceived/Size", true);
}
}}
diff --git a/ydb/core/tx/tx_proxy/mon.h b/ydb/core/tx/tx_proxy/mon.h
index e74fbefa352..d747ab4f998 100644
--- a/ydb/core/tx/tx_proxy/mon.h
+++ b/ydb/core/tx/tx_proxy/mon.h
@@ -123,11 +123,11 @@ namespace NTxProxy {
NMonitoring::TDynamicCounters::TCounterPtr MergeResultMiniKQLExecComplete;
NMonitoring::TDynamicCounters::TCounterPtr MergeResultMiniKQLUnknownStatus;
- NMonitoring::TDynamicCounters::TCounterPtr ResultsReceivedCount;
- NMonitoring::TDynamicCounters::TCounterPtr ResultsReceivedSize;
-
- TAlignedPagePoolCounters AllocPoolCounters;
-
+ NMonitoring::TDynamicCounters::TCounterPtr ResultsReceivedCount;
+ NMonitoring::TDynamicCounters::TCounterPtr ResultsReceivedSize;
+
+ TAlignedPagePoolCounters AllocPoolCounters;
+
TTxProxyMon(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters);
};
diff --git a/ydb/core/tx/tx_proxy/proxy.h b/ydb/core/tx/tx_proxy/proxy.h
index 3e355ac6732..86d4f1ac79e 100644
--- a/ydb/core/tx/tx_proxy/proxy.h
+++ b/ydb/core/tx/tx_proxy/proxy.h
@@ -263,47 +263,47 @@ namespace NTxProxy {
struct TSchemeCacheConfig;
- struct TRequestControls {
- private:
- bool Registered;
-
- public:
- TControlWrapper PerRequestDataSizeLimit;
- TControlWrapper PerShardIncomingReadSetSizeLimit;
- TControlWrapper DefaultTimeoutMs;
- TControlWrapper MaxShardCount;
- TControlWrapper MaxReadSetCount;
-
- TRequestControls()
- : Registered(false)
- , PerRequestDataSizeLimit(53687091200, 0, Max<i64>())
- , PerShardIncomingReadSetSizeLimit(209715200, 0, 5368709120)
- , DefaultTimeoutMs(600000, 0, 3600000)
- , MaxShardCount(10000, 0, 1000000)
+ struct TRequestControls {
+ private:
+ bool Registered;
+
+ public:
+ TControlWrapper PerRequestDataSizeLimit;
+ TControlWrapper PerShardIncomingReadSetSizeLimit;
+ TControlWrapper DefaultTimeoutMs;
+ TControlWrapper MaxShardCount;
+ TControlWrapper MaxReadSetCount;
+
+ TRequestControls()
+ : Registered(false)
+ , PerRequestDataSizeLimit(53687091200, 0, Max<i64>())
+ , PerShardIncomingReadSetSizeLimit(209715200, 0, 5368709120)
+ , DefaultTimeoutMs(600000, 0, 3600000)
+ , MaxShardCount(10000, 0, 1000000)
, MaxReadSetCount(1000000, 0, 100000000)
- {}
-
- void Reqister(const TActorContext &ctx) {
- if (Registered) {
- return;
- }
-
- AppData(ctx)->Icb->RegisterSharedControl(PerRequestDataSizeLimit,
- "TxLimitControls.PerRequestDataSizeLimit");
- AppData(ctx)->Icb->RegisterSharedControl(PerShardIncomingReadSetSizeLimit,
- "TxLimitControls.PerShardIncomingReadSetSizeLimit");
- AppData(ctx)->Icb->RegisterSharedControl(DefaultTimeoutMs,
- "TxLimitControls.DefaultTimeoutMs");
- AppData(ctx)->Icb->RegisterSharedControl(MaxShardCount,
- "TxLimitControls.MaxShardCount");
- AppData(ctx)->Icb->RegisterSharedControl(MaxReadSetCount,
- "TxLimitControls.MaxReadSetCount");
-
- Registered = true;
- }
- };
-
- IActor* CreateTxProxyDataReq(const TTxProxyServices &services, const ui64 txid, const TIntrusivePtr<TTxProxyMon>& txProxyMon, const TRequestControls& requestControls);
+ {}
+
+ void Reqister(const TActorContext &ctx) {
+ if (Registered) {
+ return;
+ }
+
+ AppData(ctx)->Icb->RegisterSharedControl(PerRequestDataSizeLimit,
+ "TxLimitControls.PerRequestDataSizeLimit");
+ AppData(ctx)->Icb->RegisterSharedControl(PerShardIncomingReadSetSizeLimit,
+ "TxLimitControls.PerShardIncomingReadSetSizeLimit");
+ AppData(ctx)->Icb->RegisterSharedControl(DefaultTimeoutMs,
+ "TxLimitControls.DefaultTimeoutMs");
+ AppData(ctx)->Icb->RegisterSharedControl(MaxShardCount,
+ "TxLimitControls.MaxShardCount");
+ AppData(ctx)->Icb->RegisterSharedControl(MaxReadSetCount,
+ "TxLimitControls.MaxReadSetCount");
+
+ Registered = true;
+ }
+ };
+
+ IActor* CreateTxProxyDataReq(const TTxProxyServices &services, const ui64 txid, const TIntrusivePtr<TTxProxyMon>& txProxyMon, const TRequestControls& requestControls);
IActor* CreateTxProxyFlatSchemeReq(const TTxProxyServices &services, const ui64 txid, TAutoPtr<TEvTxProxyReq::TEvSchemeRequest> request, const TIntrusivePtr<TTxProxyMon>& txProxyMon);
IActor* CreateTxProxyDescribeFlatSchemeReq(const TTxProxyServices &services, const TIntrusivePtr<TTxProxyMon>& txProxyMon);
IActor* CreateTxProxySnapshotReq(const TTxProxyServices &services, const ui64 txid, TEvTxUserProxy::TEvProposeTransaction::TPtr&& ev, const TIntrusivePtr<TTxProxyMon>& mon);
diff --git a/ydb/core/tx/tx_proxy/proxy_impl.cpp b/ydb/core/tx/tx_proxy/proxy_impl.cpp
index e23eae986cd..20dc8cce176 100644
--- a/ydb/core/tx/tx_proxy/proxy_impl.cpp
+++ b/ydb/core/tx/tx_proxy/proxy_impl.cpp
@@ -59,7 +59,7 @@ class TTxProxy : public TActorBootstrapped<TTxProxy> {
TIntrusivePtr<NMonitoring::TDynamicCounters> CacheCounters;
TIntrusivePtr<TTxProxyMon> TxProxyMon;
- TRequestControls RequestControls;
+ TRequestControls RequestControls;
void Die(const TActorContext &ctx) override {
ctx.Send(Services.SchemeCache, new TEvents::TEvPoisonPill());
@@ -237,8 +237,8 @@ class TTxProxy : public TActorBootstrapped<TTxProxy> {
" TxId# " << txid <<
" ProcessProposeTransaction");
- RequestControls.Reqister(ctx);
-
+ RequestControls.Reqister(ctx);
+
// process scheme transactions
const NKikimrTxUserProxy::TTransaction &tx = ev->Get()->Record.GetTransaction();
if (ev->Get()->HasSchemeProposal()) {
@@ -262,7 +262,7 @@ class TTxProxy : public TActorBootstrapped<TTxProxy> {
if (ev->Get()->HasMakeProposal()) {
// todo: in-fly and shutdown
Y_VERIFY_DEBUG(txid != 0);
- const TActorId reqId = ctx.ExecutorThread.RegisterActor(CreateTxProxyDataReq(Services, txid, TxProxyMon, RequestControls));
+ const TActorId reqId = ctx.ExecutorThread.RegisterActor(CreateTxProxyDataReq(Services, txid, TxProxyMon, RequestControls));
TxProxyMon->MakeRequest->Inc();
LOG_DEBUG_S(ctx, NKikimrServices::TX_PROXY,
"actor# " << SelfId() <<
diff --git a/ydb/core/tx/tx_proxy/schemereq.cpp b/ydb/core/tx/tx_proxy/schemereq.cpp
index 776fbd1b050..923fe381ae6 100644
--- a/ydb/core/tx/tx_proxy/schemereq.cpp
+++ b/ydb/core/tx/tx_proxy/schemereq.cpp
@@ -159,19 +159,19 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
case NKikimrSchemeOp::ESchemeOpBackup:
return *modifyScheme.MutableBackup()->MutableTableName();
-
+
case NKikimrSchemeOp::ESchemeOpCreateSubDomain:
case NKikimrSchemeOp::ESchemeOpAlterSubDomain:
case NKikimrSchemeOp::ESchemeOpCreateExtSubDomain:
case NKikimrSchemeOp::ESchemeOpAlterExtSubDomain:
return *modifyScheme.MutableSubDomain()->MutableName();
-
+
case NKikimrSchemeOp::ESchemeOpCreateRtmrVolume:
return *modifyScheme.MutableCreateRtmrVolume()->MutableName();
-
+
case NKikimrSchemeOp::ESchemeOpCreateBlockStoreVolume:
return *modifyScheme.MutableCreateBlockStoreVolume()->MutableName();
-
+
case NKikimrSchemeOp::ESchemeOpAlterBlockStoreVolume:
return *modifyScheme.MutableAlterBlockStoreVolume()->MutableName();
@@ -224,16 +224,16 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
case NKikimrSchemeOp::ESchemeOpAlterSolomonVolume:
return *modifyScheme.MutableAlterSolomonVolume()->MutableName();
-
+
case NKikimrSchemeOp::ESchemeOpDropLock:
Y_FAIL("no implementation for ESchemeOpDropLock");
-
+
case NKikimrSchemeOp::ESchemeOpFinalizeBuildIndexImplTable:
Y_FAIL("no implementation for ESchemeOpFinalizeBuildIndexImplTable");
-
+
case NKikimrSchemeOp::ESchemeOpInitiateBuildIndexImplTable:
Y_FAIL("no implementation for ESchemeOpInitiateBuildIndexImplTable");
-
+
case NKikimrSchemeOp::ESchemeOpDropIndex:
return *modifyScheme.MutableDropIndex()->MutableTableName();
@@ -308,8 +308,8 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
case NKikimrSchemeOp::ESchemeOpAlterReplication:
return *modifyScheme.MutableReplication()->MutableName();
}
- }
-
+ }
+
static bool IsCreateRequest(const NKikimrSchemeOp::TModifyScheme& modifyScheme) {
switch (modifyScheme.GetOperationType()) {
// Tenants are always created using cluster's root as working dir, skip it
diff --git a/ydb/core/tx/tx_proxy/snapshotreq.cpp b/ydb/core/tx/tx_proxy/snapshotreq.cpp
index 21c7bae75df..4b2d7fe2756 100644
--- a/ydb/core/tx/tx_proxy/snapshotreq.cpp
+++ b/ydb/core/tx/tx_proxy/snapshotreq.cpp
@@ -77,21 +77,21 @@ public:
, Request(ev->Release())
, TxProxyMon(mon)
, DefaultTimeoutMs(60000, 0, 360000)
- , SnapshotTxId(txid)
+ , SnapshotTxId(txid)
{ }
static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
return NKikimrServices::TActivity::TX_REQ_PROXY;
}
- STFUNC(StateWaitLongTxSnaphost) {
- TRACE_EVENT(NKikimrServices::TX_PROXY);
- switch (ev->GetTypeRewrite()) {
- HFuncTraced(NLongTxService::TEvLongTxService::TEvAcquireReadSnapshotResult, HandleLongTxSnaphot);
- CFunc(TEvents::TSystem::Wakeup, HandleLongTxSnaphotTimeout);
- }
- }
-
+ STFUNC(StateWaitLongTxSnaphost) {
+ TRACE_EVENT(NKikimrServices::TX_PROXY);
+ switch (ev->GetTypeRewrite()) {
+ HFuncTraced(NLongTxService::TEvLongTxService::TEvAcquireReadSnapshotResult, HandleLongTxSnaphot);
+ CFunc(TEvents::TSystem::Wakeup, HandleLongTxSnaphotTimeout);
+ }
+ }
+
STFUNC(StateWaitResolve) {
TRACE_EVENT(NKikimrServices::TX_PROXY);
switch (ev->GetTypeRewrite()) {
@@ -213,14 +213,14 @@ public:
return Die(ctx);
}
- void HandleLongTxSnaphotTimeout(const TActorContext& ctx) {
- LOG_ERROR_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
- "Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
- << " HANDLE LongTxSnaphotTimeout TCreateSnapshotReq");
- ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecTimeout, NKikimrIssues::TStatusIds::TIMEOUT, true, ctx);
- return Die(ctx);
- }
-
+ void HandleLongTxSnaphotTimeout(const TActorContext& ctx) {
+ LOG_ERROR_S_SAMPLED_BY(ctx, NKikimrServices::TX_PROXY, TxId,
+ "Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
+ << " HANDLE LongTxSnaphotTimeout TCreateSnapshotReq");
+ ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecTimeout, NKikimrIssues::TStatusIds::TIMEOUT, true, ctx);
+ return Die(ctx);
+ }
+
void HandlePrepareErrorTimeout(const TActorContext& ctx) {
TxProxyMon->PrepareErrorTimeout->Inc();
return Die(ctx);
@@ -269,7 +269,7 @@ public:
TxProxyMon->TxPrepareResolveHgram->Collect((WallClockResolved - WallClockResolveStarted).MicroSeconds());
- bool hasOlapTable = false;
+ bool hasOlapTable = false;
for (const auto& entry : msg->Tables) {
// N.B. we create all keys as a read operation
ui32 access = 0;
@@ -291,12 +291,12 @@ public:
continue;
}
- if (entry.IsOlapTable) {
- // OLAP tables don't create snapshots explicitly
- hasOlapTable = true;
- continue;
- }
-
+ if (entry.IsOlapTable) {
+ // OLAP tables don't create snapshots explicitly
+ hasOlapTable = true;
+ continue;
+ }
+
if (entry.KeyDescription->TableId.IsSystemView() ||
TSysTables::IsSystemTable(entry.KeyDescription->TableId))
{
@@ -335,22 +335,22 @@ public:
}
if (PerShardStates.empty()) {
- if (!hasOlapTable) {
- // No real (OLTP or OLAP) tables in the request so we can use current time as a fake PlanStep
- PlanStep = ctx.Now().MilliSeconds();
+ if (!hasOlapTable) {
+ // No real (OLTP or OLAP) tables in the request so we can use current time as a fake PlanStep
+ PlanStep = ctx.Now().MilliSeconds();
+
+ // We don't have any shards to snapshot, report fake success
+ ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete, NKikimrIssues::TStatusIds::SUCCESS, true, ctx);
- // We don't have any shards to snapshot, report fake success
- ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete, NKikimrIssues::TStatusIds::SUCCESS, true, ctx);
+ return Die(ctx);
+ } else {
+ // The request includes only OLAP tables, need to get the snapshot from Long Tx service
+ auto longTxService = NLongTxService::MakeLongTxServiceID(SelfId().NodeId());
+ TString database = Request->Record.GetDatabaseName();
+ Send(longTxService, new NLongTxService::TEvLongTxService::TEvAcquireReadSnapshot(database));
- return Die(ctx);
- } else {
- // The request includes only OLAP tables, need to get the snapshot from Long Tx service
- auto longTxService = NLongTxService::MakeLongTxServiceID(SelfId().NodeId());
- TString database = Request->Record.GetDatabaseName();
- Send(longTxService, new NLongTxService::TEvLongTxService::TEvAcquireReadSnapshot(database));
-
- return Become(&TThis::StateWaitLongTxSnaphost);
- }
+ return Become(&TThis::StateWaitLongTxSnaphost);
+ }
}
if (!msg->CheckDomainLocality()) {
@@ -408,35 +408,35 @@ public:
Become(&TThis::StateWaitPrepare);
}
- void HandleLongTxSnaphot(NLongTxService::TEvLongTxService::TEvAcquireReadSnapshotResult::TPtr& ev, const TActorContext& ctx) {
- const auto& record = ev->Get()->Record;
- if (record.GetStatus() == Ydb::StatusIds::SUCCESS) {
- PlanStep = record.GetSnapshotStep();
- SnapshotTxId = record.GetSnapshotTxId();
- ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete, NKikimrIssues::TStatusIds::SUCCESS, true, ctx);
- } else {
- NYql::TIssues issues;
- NYql::IssuesFromMessage(record.GetIssues(), issues);
- IssueManager.RaiseIssues(issues);
- NKikimrIssues::TStatusIds::EStatusCode statusCode = NKikimrIssues::TStatusIds::ERROR;
- switch (record.GetStatus()) {
- case Ydb::StatusIds::SCHEME_ERROR:
- statusCode = NKikimrIssues::TStatusIds::SCHEME_ERROR;
- break;
- case Ydb::StatusIds::UNAVAILABLE:
- statusCode = NKikimrIssues::TStatusIds::NOTREADY;
- break;
- case Ydb::StatusIds::INTERNAL_ERROR:
- statusCode = NKikimrIssues::TStatusIds::INTERNAL_ERROR;
- break;
- default:
- break;
- }
- ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError, statusCode, true, ctx);
- }
- return Die(ctx);
- }
-
+ void HandleLongTxSnaphot(NLongTxService::TEvLongTxService::TEvAcquireReadSnapshotResult::TPtr& ev, const TActorContext& ctx) {
+ const auto& record = ev->Get()->Record;
+ if (record.GetStatus() == Ydb::StatusIds::SUCCESS) {
+ PlanStep = record.GetSnapshotStep();
+ SnapshotTxId = record.GetSnapshotTxId();
+ ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecComplete, NKikimrIssues::TStatusIds::SUCCESS, true, ctx);
+ } else {
+ NYql::TIssues issues;
+ NYql::IssuesFromMessage(record.GetIssues(), issues);
+ IssueManager.RaiseIssues(issues);
+ NKikimrIssues::TStatusIds::EStatusCode statusCode = NKikimrIssues::TStatusIds::ERROR;
+ switch (record.GetStatus()) {
+ case Ydb::StatusIds::SCHEME_ERROR:
+ statusCode = NKikimrIssues::TStatusIds::SCHEME_ERROR;
+ break;
+ case Ydb::StatusIds::UNAVAILABLE:
+ statusCode = NKikimrIssues::TStatusIds::NOTREADY;
+ break;
+ case Ydb::StatusIds::INTERNAL_ERROR:
+ statusCode = NKikimrIssues::TStatusIds::INTERNAL_ERROR;
+ break;
+ default:
+ break;
+ }
+ ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::ExecError, statusCode, true, ctx);
+ }
+ return Die(ctx);
+ }
+
void HandlePrepare(TEvDataShard::TEvProposeTransactionResult::TPtr& ev, const TActorContext& ctx) {
const auto* msg = ev->Get();
const auto& record = msg->Record;
@@ -976,7 +976,7 @@ public:
void ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus status, NKikimrIssues::TStatusIds::EStatusCode code, bool reportIssues, const TActorContext& ctx) {
auto x = MakeHolder<TEvTxUserProxy::TEvProposeTransactionStatus>(status);
- x->Record.SetTxId(SnapshotTxId);
+ x->Record.SetTxId(SnapshotTxId);
if (reportIssues && IssueManager.GetIssues()) {
IssuesToMessage(IssueManager.GetIssues(), x->Record.MutableIssues());
@@ -1071,7 +1071,7 @@ private:
size_t ResultsReceivedCount = 0;
ui64 PlanStep = 0;
- ui64 SnapshotTxId = 0; // SnapshotTxId overrides TxId when using AcquireReadSnapshot
+ ui64 SnapshotTxId = 0; // SnapshotTxId overrides TxId when using AcquireReadSnapshot
ui64 AggrMinStep = 0;
ui64 AggrMaxStep = Max<ui64>();
@@ -1315,11 +1315,11 @@ public:
continue;
}
- if (entry.IsOlapTable) {
- // OLAP tables don't create snapshots explicitly
- continue;
- }
-
+ if (entry.IsOlapTable) {
+ // OLAP tables don't create snapshots explicitly
+ continue;
+ }
+
if (entry.KeyDescription->TableId.IsSystemView() ||
TSysTables::IsSystemTable(entry.KeyDescription->TableId))
{
diff --git a/ydb/core/tx/tx_proxy/upload_rows.cpp b/ydb/core/tx/tx_proxy/upload_rows.cpp
index d5766dc5381..3c4580a8a86 100644
--- a/ydb/core/tx/tx_proxy/upload_rows.cpp
+++ b/ydb/core/tx/tx_proxy/upload_rows.cpp
@@ -6,7 +6,7 @@
namespace NKikimr {
namespace NTxProxy {
-class TUploadRowsInternal : public TUploadRowsBase<NKikimrServices::TActivity::UPLOAD_ROWS_INTERNAL> {
+class TUploadRowsInternal : public TUploadRowsBase<NKikimrServices::TActivity::UPLOAD_ROWS_INTERNAL> {
public:
TUploadRowsInternal(
TActorId sender,
@@ -33,10 +33,10 @@ public:
}
private:
- TString GetDatabase()override {
- return TString();
- }
-
+ TString GetDatabase()override {
+ return TString();
+ }
+
const TString& GetTable() override {
return Table;
}
diff --git a/ydb/core/tx/tx_proxy/upload_rows_common_impl.h b/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
index e10c48bdbd6..0aaee7e7f59 100644
--- a/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
+++ b/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
@@ -1,11 +1,11 @@
#pragma once
-
+
#include <ydb/core/actorlib_impl/long_timer.h>
-
+
#include <ydb/core/tx/long_tx_service/public/events.h>
#include <ydb/core/grpc_services/local_rpc/local_rpc.h>
#include <ydb/core/grpc_services/rpc_calls.h>
-#include <ydb/core/grpc_services/rpc_long_tx.h>
+#include <ydb/core/grpc_services/rpc_long_tx.h>
#include <ydb/core/formats/arrow_batch_builder.h>
#include <ydb/core/io_formats/csv.h>
#include <ydb/core/base/tablet_pipecache.h>
@@ -18,20 +18,20 @@
#include <ydb/public/api/protos/ydb_status_codes.pb.h>
#include <ydb/public/api/protos/ydb_value.pb.h>
-#define INCLUDE_YDB_INTERNAL_H
+#define INCLUDE_YDB_INTERNAL_H
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h>
-#undef INCLUDE_YDB_INTERNAL_H
-
+#undef INCLUDE_YDB_INTERNAL_H
+
#include <library/cpp/actors/core/actor_bootstrapped.h>
#include <util/string/join.h>
#include <util/string/vector.h>
#include <util/generic/size_literals.h>
-namespace NKikimr {
-
-using namespace NActors;
-
+namespace NKikimr {
+
+using namespace NActors;
+
struct TUpsertCost {
static constexpr float OneRowCost(ui64 sz) {
constexpr ui64 unitSize = 1_KB;
@@ -90,39 +90,39 @@ private:
}
namespace NTxProxy {
-
-template <NKikimrServices::TActivity::EType DerivedActivityType>
-class TUploadRowsBase : public TActorBootstrapped<TUploadRowsBase<DerivedActivityType>> {
- using TBase = TActorBootstrapped<TUploadRowsBase<DerivedActivityType>>;
- using TThis = typename TBase::TThis;
-
-private:
- using TTabletId = ui64;
-
- static constexpr TDuration DEFAULT_TIMEOUT = TDuration::Seconds(5*60);
-
+
+template <NKikimrServices::TActivity::EType DerivedActivityType>
+class TUploadRowsBase : public TActorBootstrapped<TUploadRowsBase<DerivedActivityType>> {
+ using TBase = TActorBootstrapped<TUploadRowsBase<DerivedActivityType>>;
+ using TThis = typename TBase::TThis;
+
+private:
+ using TTabletId = ui64;
+
+ static constexpr TDuration DEFAULT_TIMEOUT = TDuration::Seconds(5*60);
+
TActorId SchemeCache;
TActorId LeaderPipeCache;
- TDuration Timeout;
- TInstant Deadline;
+ TDuration Timeout;
+ TInstant Deadline;
TActorId TimeoutTimerActorId;
- bool WaitingResolveReply;
- bool Finished;
+ bool WaitingResolveReply;
+ bool Finished;
TAutoPtr<NSchemeCache::TSchemeCacheRequest> ResolvePartitionsResult;
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
+ TAutoPtr<NSchemeCache::TSchemeCacheNavigate> ResolveNamesResult;
TSerializedCellVec MinKey;
TSerializedCellVec MaxKey;
- TVector<NScheme::TTypeId> KeyColumnTypes;
- TVector<NScheme::TTypeId> ValueColumnTypes;
- NSchemeCache::TSchemeCacheNavigate::EKind TableKind = NSchemeCache::TSchemeCacheNavigate::KindUnknown;
- THashSet<TTabletId> ShardRepliesLeft;
- Ydb::StatusIds::StatusCode Status;
- TString ErrorMessage;
- NYql::TIssues Issues;
- NLongTxService::TLongTxId LongTxId;
- NThreading::TFuture<Ydb::LongTx::WriteResponse> WriteBatchResult;
-
+ TVector<NScheme::TTypeId> KeyColumnTypes;
+ TVector<NScheme::TTypeId> ValueColumnTypes;
+ NSchemeCache::TSchemeCacheNavigate::EKind TableKind = NSchemeCache::TSchemeCacheNavigate::KindUnknown;
+ THashSet<TTabletId> ShardRepliesLeft;
+ Ydb::StatusIds::StatusCode Status;
+ TString ErrorMessage;
+ NYql::TIssues Issues;
+ NLongTxService::TLongTxId LongTxId;
+ NThreading::TFuture<Ydb::LongTx::WriteResponse> WriteBatchResult;
+
protected:
enum class EUploadSource {
ProtoValues = 0,
@@ -130,19 +130,19 @@ protected:
CSV = 2,
};
- // Positions of key and value fields in the request proto struct
- struct TFieldDescription {
- ui32 ColId;
+ // Positions of key and value fields in the request proto struct
+ struct TFieldDescription {
+ ui32 ColId;
TString ColName;
- ui32 PositionInStruct;
- NScheme::TTypeId Type;
+ ui32 PositionInStruct;
+ NScheme::TTypeId Type;
bool NotNull = false;
- };
- TVector<TString> KeyColumnNames;
- TVector<TFieldDescription> KeyColumnPositions;
- TVector<TString> ValueColumnNames;
- TVector<TFieldDescription> ValueColumnPositions;
-
+ };
+ TVector<TString> KeyColumnNames;
+ TVector<TFieldDescription> KeyColumnPositions;
+ TVector<TString> ValueColumnNames;
+ TVector<TFieldDescription> ValueColumnPositions;
+
// Additional schema info (for OLAP dst or source format)
TVector<std::pair<TString, NScheme::TTypeId>> SrcColumns; // source columns in CSV could have any order
TVector<std::pair<TString, NScheme::TTypeId>> YdbSchema;
@@ -154,41 +154,41 @@ protected:
std::shared_ptr<arrow::RecordBatch> Batch;
float RuCost = 0.0;
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return DerivedActivityType;
- }
-
- explicit TUploadRowsBase(TDuration timeout = TDuration::Max())
- : TBase()
- , SchemeCache(MakeSchemeCacheID())
+public:
+ static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
+ return DerivedActivityType;
+ }
+
+ explicit TUploadRowsBase(TDuration timeout = TDuration::Max())
+ : TBase()
+ , SchemeCache(MakeSchemeCacheID())
, LeaderPipeCache(MakePipePeNodeCacheID(false))
- , Timeout((timeout && timeout <= DEFAULT_TIMEOUT) ? timeout : DEFAULT_TIMEOUT)
- , WaitingResolveReply(false)
- , Finished(false)
- , Status(Ydb::StatusIds::SUCCESS)
- {}
-
- void Bootstrap(const NActors::TActorContext& ctx) {
- Deadline = AppData(ctx)->TimeProvider->Now() + Timeout;
+ , Timeout((timeout && timeout <= DEFAULT_TIMEOUT) ? timeout : DEFAULT_TIMEOUT)
+ , WaitingResolveReply(false)
+ , Finished(false)
+ , Status(Ydb::StatusIds::SUCCESS)
+ {}
+
+ void Bootstrap(const NActors::TActorContext& ctx) {
+ Deadline = AppData(ctx)->TimeProvider->Now() + Timeout;
ResolveTable(GetTable(), ctx);
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- Y_VERIFY(Finished);
- Y_VERIFY(!WaitingResolveReply);
+ }
+
+ void Die(const NActors::TActorContext& ctx) override {
+ Y_VERIFY(Finished);
+ Y_VERIFY(!WaitingResolveReply);
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(0));
- if (TimeoutTimerActorId) {
- ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
- }
- TBase::Die(ctx);
- }
+ if (TimeoutTimerActorId) {
+ ctx.Send(TimeoutTimerActorId, new TEvents::TEvPoisonPill());
+ }
+ TBase::Die(ctx);
+ }
protected:
- const NSchemeCache::TSchemeCacheNavigate* GetResolveNameResult() const {
- return ResolveNamesResult.Get();
+ const NSchemeCache::TSchemeCacheNavigate* GetResolveNameResult() const {
+ return ResolveNamesResult.Get();
}
-
+
const TKeyDesc* GetKeyRange() const {
Y_VERIFY(ResolvePartitionsResult->ResultSet.size() == 1);
return ResolvePartitionsResult->ResultSet[0].KeyDescription.Get();
@@ -211,7 +211,7 @@ protected:
batchBuilder.AddRow(key.GetCells(), value.GetCells());
}
- return batchBuilder.FlushBatch(false);
+ return batchBuilder.FlushBatch(false);
}
TVector<std::pair<TSerializedCellVec, TString>> BatchToRows(const std::shared_ptr<arrow::RecordBatch>& batch,
@@ -231,8 +231,8 @@ protected:
return out;
}
-private:
- virtual TString GetDatabase() = 0;
+private:
+ virtual TString GetDatabase() = 0;
virtual const TString& GetTable() = 0;
virtual const TVector<std::pair<TSerializedCellVec, TString>>& GetRows() const = 0;
virtual bool CheckAccess(TString& errorMessage) = 0;
@@ -257,16 +257,16 @@ private:
}
private:
- STFUNC(StateWaitResolveTable) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
+ STFUNC(StateWaitResolveTable) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
static bool SameDstType(NScheme::TTypeId type1, NScheme::TTypeId type2, bool allowConvert) {
bool res = (type1 == type2);
if (!res && allowConvert) {
@@ -276,10 +276,10 @@ private:
}
bool BuildSchema(const NActors::TActorContext& ctx, TString& errorMessage, bool makeYqbSchema) {
- Y_UNUSED(ctx);
-
- auto& entry = ResolveNamesResult->ResultSet.front();
-
+ Y_UNUSED(ctx);
+
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
for (const auto& index : entry.Indexes) {
switch (index.GetType()) {
case NKikimrSchemeOp::EIndexTypeGlobalAsync:
@@ -293,11 +293,11 @@ private:
errorMessage = "Only async-indexed tables are supported by BulkUpsert";
return false;
}
- }
-
- TVector<ui32> keyColumnIds;
- THashMap<TString, ui32> columnByName;
- THashSet<TString> keyColumnsLeft;
+ }
+
+ TVector<ui32> keyColumnIds;
+ THashMap<TString, ui32> columnByName;
+ THashSet<TString> keyColumnsLeft;
THashSet<TString> notNullColumnsLeft = entry.NotNullColumns;
SrcColumns.reserve(entry.Columns.size());
@@ -309,19 +309,19 @@ private:
columnByName[name] = id;
i32 keyOrder = colInfo.KeyOrder;
- if (keyOrder != -1) {
- Y_VERIFY(keyOrder >= 0);
- KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
+ if (keyOrder != -1) {
+ Y_VERIFY(keyOrder >= 0);
+ KeyColumnTypes.resize(Max<size_t>(KeyColumnTypes.size(), keyOrder + 1));
KeyColumnTypes[keyOrder] = type;
- keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
+ keyColumnIds.resize(Max<size_t>(keyColumnIds.size(), keyOrder + 1));
keyColumnIds[keyOrder] = id;
keyColumnsLeft.insert(name);
- }
- }
-
- KeyColumnPositions.resize(KeyColumnTypes.size());
- KeyColumnNames.resize(KeyColumnTypes.size());
-
+ }
+ }
+
+ KeyColumnPositions.resize(KeyColumnTypes.size());
+ KeyColumnNames.resize(KeyColumnTypes.size());
+
auto reqColumns = GetRequestColumns(errorMessage);
if (!errorMessage.empty()) {
return false;
@@ -335,59 +335,59 @@ private:
for (size_t pos = 0; pos < reqColumns.size(); ++pos) {
auto& name = reqColumns[pos].first;
- const auto* cp = columnByName.FindPtr(name);
- if (!cp) {
- errorMessage = Sprintf("Unknown column: %s", name.c_str());
- return false;
- }
- ui32 colId = *cp;
- auto& ci = *entry.Columns.FindPtr(colId);
-
+ const auto* cp = columnByName.FindPtr(name);
+ if (!cp) {
+ errorMessage = Sprintf("Unknown column: %s", name.c_str());
+ return false;
+ }
+ ui32 colId = *cp;
+ auto& ci = *entry.Columns.FindPtr(colId);
+
const auto& typeInProto = reqColumns[pos].second;
-
- if (typeInProto.type_id()) {
- NScheme::TTypeId typeInRequest = typeInProto.type_id();
+
+ if (typeInProto.type_id()) {
+ NScheme::TTypeId typeInRequest = typeInProto.type_id();
bool ok = SameDstType(typeInRequest, ci.PType, GetSourceType() != EUploadSource::ProtoValues);
if (!ok) {
- errorMessage = Sprintf("Type mismatch for column %s: expected %s, got %s",
- name.c_str(), NScheme::TypeName(ci.PType),
- NScheme::TypeName(typeInRequest));
- return false;
- }
- } else if (typeInProto.has_decimal_type() && ci.PType == NScheme::NTypeIds::Decimal) {
- int precision = typeInProto.decimal_type().precision();
- int scale = typeInProto.decimal_type().scale();
- if (precision != NScheme::DECIMAL_PRECISION || scale != NScheme::DECIMAL_SCALE) {
- errorMessage = Sprintf("Unsupported Decimal(%d,%d) for column %s: expected Decimal(%d,%d)",
- precision, scale,
- name.c_str(),
- NScheme::DECIMAL_PRECISION, NScheme::DECIMAL_SCALE);
-
- return false;
- }
- } else {
- errorMessage = Sprintf("Unexected type for column %s: expected %s",
- name.c_str(), NScheme::TypeName(ci.PType));
- return false;
- }
-
+ errorMessage = Sprintf("Type mismatch for column %s: expected %s, got %s",
+ name.c_str(), NScheme::TypeName(ci.PType),
+ NScheme::TypeName(typeInRequest));
+ return false;
+ }
+ } else if (typeInProto.has_decimal_type() && ci.PType == NScheme::NTypeIds::Decimal) {
+ int precision = typeInProto.decimal_type().precision();
+ int scale = typeInProto.decimal_type().scale();
+ if (precision != NScheme::DECIMAL_PRECISION || scale != NScheme::DECIMAL_SCALE) {
+ errorMessage = Sprintf("Unsupported Decimal(%d,%d) for column %s: expected Decimal(%d,%d)",
+ precision, scale,
+ name.c_str(),
+ NScheme::DECIMAL_PRECISION, NScheme::DECIMAL_SCALE);
+
+ return false;
+ }
+ } else {
+ errorMessage = Sprintf("Unexected type for column %s: expected %s",
+ name.c_str(), NScheme::TypeName(ci.PType));
+ return false;
+ }
+
bool notNull = entry.NotNullColumns.contains(ci.Name);
if (notNull) {
notNullColumnsLeft.erase(ci.Name);
}
NScheme::TTypeId typeId = (NScheme::TTypeId)ci.PType;
- if (ci.KeyOrder != -1) {
+ if (ci.KeyOrder != -1) {
KeyColumnPositions[ci.KeyOrder] = TFieldDescription{ci.Id, ci.Name, (ui32)pos, typeId, notNull};
- keyColumnsLeft.erase(ci.Name);
- KeyColumnNames[ci.KeyOrder] = ci.Name;
- } else {
+ keyColumnsLeft.erase(ci.Name);
+ KeyColumnNames[ci.KeyOrder] = ci.Name;
+ } else {
ValueColumnPositions.emplace_back(TFieldDescription{ci.Id, ci.Name, (ui32)pos, typeId, notNull});
- ValueColumnNames.emplace_back(ci.Name);
- ValueColumnTypes.emplace_back(ci.PType);
- }
- }
-
+ ValueColumnNames.emplace_back(ci.Name);
+ ValueColumnTypes.emplace_back(ci.PType);
+ }
+ }
+
if (makeYqbSchema) {
Id2Position.clear();
YdbSchema.resize(KeyColumnTypes.size() + ValueColumnTypes.size());
@@ -405,53 +405,53 @@ private:
}
}
- if (!keyColumnsLeft.empty()) {
- errorMessage = Sprintf("Missing key columns: %s", JoinSeq(", ", keyColumnsLeft).c_str());
- return false;
- }
-
+ if (!keyColumnsLeft.empty()) {
+ errorMessage = Sprintf("Missing key columns: %s", JoinSeq(", ", keyColumnsLeft).c_str());
+ return false;
+ }
+
if (!notNullColumnsLeft.empty()) {
errorMessage = Sprintf("Missing not null columns: %s", JoinSeq(", ", notNullColumnsLeft).c_str());
return false;
}
- return true;
- }
-
+ return true;
+ }
+
void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
// TODO: check all params;
// Cerr << *Request->GetProtoRequest() << Endl;
-
+
TAutoPtr<NSchemeCache::TSchemeCacheNavigate> request(new NSchemeCache::TSchemeCacheNavigate());
NSchemeCache::TSchemeCacheNavigate::TEntry entry;
entry.Path = ::NKikimr::SplitPath(table);
if (entry.Path.empty()) {
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Invalid table path specified", ctx);
- }
+ }
entry.Operation = NSchemeCache::TSchemeCacheNavigate::OpTable;
entry.SyncVersion = true;
entry.ShowPrivatePath = AllowWriteToPrivateTable;
request->ResultSet.emplace_back(entry);
ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvNavigateKeySet(request));
-
+
TimeoutTimerActorId = CreateLongTimer(ctx, Timeout,
new IEventHandle(ctx.SelfID, ctx.SelfID, new TEvents::TEvWakeup()));
-
+
TBase::Become(&TThis::StateWaitResolveTable);
WaitingResolveReply = true;
}
-
+
void HandleTimeout(const TActorContext& ctx) {
- ShardRepliesLeft.clear();
+ ShardRepliesLeft.clear();
return ReplyWithError(Ydb::StatusIds::TIMEOUT, "Request timed out", ctx);
- }
-
+ }
+
void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev, const TActorContext& ctx) {
WaitingResolveReply = false;
if (Finished) {
return Die(ctx);
}
-
+
const NSchemeCache::TSchemeCacheNavigate& request = *ev->Get()->Request;
Y_VERIFY(request.ResultSet.size() == 1);
@@ -460,28 +460,28 @@ private:
break;
case NSchemeCache::TSchemeCacheNavigate::EStatus::LookupError:
case NSchemeCache::TSchemeCacheNavigate::EStatus::RedirectLookupError:
- return ReplyWithError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Table '%s' unavailable", GetTable().c_str()), ctx);
+ return ReplyWithError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Table '%s' unavailable", GetTable().c_str()), ctx);
case NSchemeCache::TSchemeCacheNavigate::EStatus::PathNotTable:
case NSchemeCache::TSchemeCacheNavigate::EStatus::PathNotPath:
case NSchemeCache::TSchemeCacheNavigate::EStatus::TableCreationNotComplete:
case NSchemeCache::TSchemeCacheNavigate::EStatus::PathErrorUnknown:
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table '%s'", GetTable().c_str()), ctx);
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table '%s'", GetTable().c_str()), ctx);
case NSchemeCache::TSchemeCacheNavigate::EStatus::RootUnknown:
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown database for table '%s'", GetTable().c_str()), ctx);
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown database for table '%s'", GetTable().c_str()), ctx);
case NSchemeCache::TSchemeCacheNavigate::EStatus::Unknown:
- return ReplyWithError(Ydb::StatusIds::GENERIC_ERROR, Sprintf("Unknown error on table '%s'", GetTable().c_str()), ctx);
- }
+ return ReplyWithError(Ydb::StatusIds::GENERIC_ERROR, Sprintf("Unknown error on table '%s'", GetTable().c_str()), ctx);
+ }
- TableKind = request.ResultSet.front().Kind;
+ TableKind = request.ResultSet.front().Kind;
bool isOlapTable = (TableKind == NSchemeCache::TSchemeCacheNavigate::KindOlapTable);
-
+
if (request.ResultSet.front().TableId.IsSystemView()) {
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- Sprintf("Table '%s' is a system view. Bulk upsert is not supported.", GetTable().c_str()), ctx);
+ Sprintf("Table '%s' is a system view. Bulk upsert is not supported.", GetTable().c_str()), ctx);
}
ResolveNamesResult = ev->Get()->Request;
-
+
bool makeYdbSchema = isOlapTable || (GetSourceType() != EUploadSource::ProtoValues);
TString errorMessage;
if (!BuildSchema(ctx, errorMessage, makeYdbSchema)) {
@@ -494,7 +494,7 @@ private:
if (!ExtractRows(errorMessage)) {
return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, errorMessage, ctx);
}
-
+
if (isOlapTable && !ExtractBatch(errorMessage)) {
return ReplyWithError(Ydb::StatusIds::BAD_REQUEST, errorMessage, ctx);
} else {
@@ -527,57 +527,57 @@ private:
break;
}
}
-
- if (TableKind == NSchemeCache::TSchemeCacheNavigate::KindTable) {
- ResolveShards(ctx);
+
+ if (TableKind == NSchemeCache::TSchemeCacheNavigate::KindTable) {
+ ResolveShards(ctx);
} else if (isOlapTable) {
- WriteToOlapTable(ctx);
- } else {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- Sprintf("Table '%s': Bulk upsert is not supported for this table kind.", GetTable().c_str()), ctx);
- }
- }
-
- void WriteToOlapTable(const NActors::TActorContext& ctx) {
- TString accessCheckError;
- if (!CheckAccess(accessCheckError)) {
- return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, accessCheckError, ctx);
- }
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
- << " startint LongTx");
-
- // Begin Long Tx for writing a batch into OLAP table
- TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
- NKikimrLongTxService::TEvBeginTx::EMode mode = NKikimrLongTxService::TEvBeginTx::MODE_WRITE_ONLY;
- ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvBeginTx(GetDatabase(), mode));
- TBase::Become(&TThis::StateWaitBeginLongTx);
- }
-
- STFUNC(StateWaitBeginLongTx) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NLongTxService::TEvLongTxService::TEvBeginTxResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
- }
- }
-
- void Handle(NLongTxService::TEvLongTxService::TEvBeginTxResult::TPtr& ev, const TActorContext& ctx) {
- const auto* msg = ev->Get();
-
- if (msg->Record.GetStatus() != Ydb::StatusIds::SUCCESS) {
- NYql::TIssues issues;
- NYql::IssuesFromMessage(msg->Record.GetIssues(), issues);
- for (const auto& issue: issues) {
- RaiseIssue(issue);
- }
- return ReplyWithResult(msg->Record.GetStatus(), ctx);
- }
-
- LongTxId = msg->GetLongTxId();
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
- << " started LongTx " << LongTxId.ToString());
-
+ WriteToOlapTable(ctx);
+ } else {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ Sprintf("Table '%s': Bulk upsert is not supported for this table kind.", GetTable().c_str()), ctx);
+ }
+ }
+
+ void WriteToOlapTable(const NActors::TActorContext& ctx) {
+ TString accessCheckError;
+ if (!CheckAccess(accessCheckError)) {
+ return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, accessCheckError, ctx);
+ }
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
+ << " startint LongTx");
+
+ // Begin Long Tx for writing a batch into OLAP table
+ TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
+ NKikimrLongTxService::TEvBeginTx::EMode mode = NKikimrLongTxService::TEvBeginTx::MODE_WRITE_ONLY;
+ ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvBeginTx(GetDatabase(), mode));
+ TBase::Become(&TThis::StateWaitBeginLongTx);
+ }
+
+ STFUNC(StateWaitBeginLongTx) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(NLongTxService::TEvLongTxService::TEvBeginTxResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+ }
+ }
+
+ void Handle(NLongTxService::TEvLongTxService::TEvBeginTxResult::TPtr& ev, const TActorContext& ctx) {
+ const auto* msg = ev->Get();
+
+ if (msg->Record.GetStatus() != Ydb::StatusIds::SUCCESS) {
+ NYql::TIssues issues;
+ NYql::IssuesFromMessage(msg->Record.GetIssues(), issues);
+ for (const auto& issue: issues) {
+ RaiseIssue(issue);
+ }
+ return ReplyWithResult(msg->Record.GetStatus(), ctx);
+ }
+
+ LongTxId = msg->GetLongTxId();
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
+ << " started LongTx " << LongTxId.ToString());
+
auto outputColumns = GetOutputColumns(ctx);
if (!outputColumns.empty()) {
std::shared_ptr<arrow::RecordBatch> batch = Batch;
@@ -619,45 +619,45 @@ private:
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Bad batch in bulk upsert data", ctx);
}
#endif
-
- Batch = batch;
+
+ Batch = batch;
}
- WriteBatchInLongTx(ctx);
- }
-
+ WriteBatchInLongTx(ctx);
+ }
+
std::vector<TString> GetOutputColumns(const NActors::TActorContext& ctx) {
- if (ResolveNamesResult->ErrorCount > 0) {
- ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Failed to get table schema", ctx);
- return {};
- }
-
- auto& entry = ResolveNamesResult->ResultSet[0];
-
- if (entry.Kind != NSchemeCache::TSchemeCacheNavigate::KindOlapTable) {
- ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "The specified path is not an olap table", ctx);
- return {};
- }
-
- if (!entry.OlapTableInfo || !entry.OlapTableInfo->Description.HasSchema()) {
- ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Olap table expected", ctx);
- return {};
- }
-
- const auto& description = entry.OlapTableInfo->Description;
- const auto& schema = description.GetSchema();
-
+ if (ResolveNamesResult->ErrorCount > 0) {
+ ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Failed to get table schema", ctx);
+ return {};
+ }
+
+ auto& entry = ResolveNamesResult->ResultSet[0];
+
+ if (entry.Kind != NSchemeCache::TSchemeCacheNavigate::KindOlapTable) {
+ ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "The specified path is not an olap table", ctx);
+ return {};
+ }
+
+ if (!entry.OlapTableInfo || !entry.OlapTableInfo->Description.HasSchema()) {
+ ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "Olap table expected", ctx);
+ return {};
+ }
+
+ const auto& description = entry.OlapTableInfo->Description;
+ const auto& schema = description.GetSchema();
+
#if 1 // TODO: do we need this restriction?
- if ((size_t)schema.GetColumns().size() != KeyColumnPositions.size() + ValueColumnPositions.size()) {
- ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
- "Column count in the request doesn't match column count in the schema", ctx);
- return {};
- }
+ if ((size_t)schema.GetColumns().size() != KeyColumnPositions.size() + ValueColumnPositions.size()) {
+ ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
+ "Column count in the request doesn't match column count in the schema", ctx);
+ return {};
+ }
#endif
std::vector<TString> outColumns;
outColumns.reserve(YdbSchema.size());
-
- for (size_t i = 0; i < (size_t)schema.GetColumns().size(); ++i) {
+
+ for (size_t i = 0; i < (size_t)schema.GetColumns().size(); ++i) {
auto columnId = schema.GetColumns(i).GetId();
if (!Id2Position.count(columnId)) {
ReplyWithError(Ydb::StatusIds::SCHEME_ERROR,
@@ -666,356 +666,356 @@ private:
}
size_t position = Id2Position[columnId];
outColumns.push_back(YdbSchema[position].first);
- }
-
+ }
+
Y_VERIFY(!outColumns.empty());
return outColumns;
- }
-
- void WriteBatchInLongTx(const TActorContext& ctx) {
- Y_VERIFY(Batch);
- TBase::Become(&TThis::StateWaitWriteBatchResult);
- TString dedupId = LongTxId.ToString(); // TODO: is this a proper dedup_id?
- NGRpcService::DoLongTxWriteSameMailbox(ctx, ctx.SelfID, LongTxId, dedupId,
- GetDatabase(), GetTable(), *ResolveNamesResult, Batch, Issues);
- }
-
- void RollbackLongTx(const TActorContext& ctx) {
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
- << " rolling back LongTx " << LongTxId.ToString());
-
- TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
- ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvRollbackTx(LongTxId));
- }
-
- STFUNC(StateWaitWriteBatchResult) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvents::TEvCompleted, HandleWriteBatchResult);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
- }
- }
-
- void HandleWriteBatchResult(TEvents::TEvCompleted::TPtr& ev, const TActorContext& ctx) {
- Ydb::StatusIds::StatusCode status = (Ydb::StatusIds::StatusCode)ev->Get()->Status;
- if (status != Ydb::StatusIds::SUCCESS) {
- for (const auto& issue: Issues) {
- RaiseIssue(issue);
- }
- Finished = true;
- return ReplyWithResult(status, ctx);
- }
-
- CommitLongTx(ctx);
- }
-
- void CommitLongTx(const TActorContext& ctx) {
- TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
- ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvCommitTx(LongTxId));
- TBase::Become(&TThis::StateWaitCommitLongTx);
- }
-
- STFUNC(StateWaitCommitLongTx) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NLongTxService::TEvLongTxService::TEvCommitTxResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
- }
- }
-
- void Handle(NLongTxService::TEvLongTxService::TEvCommitTxResult::TPtr& ev, const NActors::TActorContext& ctx) {
- const auto* msg = ev->Get();
-
- Finished = true;
-
- if (msg->Record.GetStatus() == Ydb::StatusIds::SUCCESS) {
- // We are done with the transaction, forget it
- LongTxId = NLongTxService::TLongTxId();
- }
-
- NYql::TIssues issues;
- NYql::IssuesFromMessage(msg->Record.GetIssues(), issues);
- for (const auto& issue: issues) {
- RaiseIssue(issue);
- }
- return ReplyWithResult(msg->Record.GetStatus(), ctx);
- }
-
+ }
+
+ void WriteBatchInLongTx(const TActorContext& ctx) {
+ Y_VERIFY(Batch);
+ TBase::Become(&TThis::StateWaitWriteBatchResult);
+ TString dedupId = LongTxId.ToString(); // TODO: is this a proper dedup_id?
+ NGRpcService::DoLongTxWriteSameMailbox(ctx, ctx.SelfID, LongTxId, dedupId,
+ GetDatabase(), GetTable(), *ResolveNamesResult, Batch, Issues);
+ }
+
+ void RollbackLongTx(const TActorContext& ctx) {
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
+ << " rolling back LongTx " << LongTxId.ToString());
+
+ TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
+ ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvRollbackTx(LongTxId));
+ }
+
+ STFUNC(StateWaitWriteBatchResult) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvents::TEvCompleted, HandleWriteBatchResult);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+ }
+ }
+
+ void HandleWriteBatchResult(TEvents::TEvCompleted::TPtr& ev, const TActorContext& ctx) {
+ Ydb::StatusIds::StatusCode status = (Ydb::StatusIds::StatusCode)ev->Get()->Status;
+ if (status != Ydb::StatusIds::SUCCESS) {
+ for (const auto& issue: Issues) {
+ RaiseIssue(issue);
+ }
+ Finished = true;
+ return ReplyWithResult(status, ctx);
+ }
+
+ CommitLongTx(ctx);
+ }
+
+ void CommitLongTx(const TActorContext& ctx) {
+ TActorId longTxServiceId = NLongTxService::MakeLongTxServiceID(ctx.SelfID.NodeId());
+ ctx.Send(longTxServiceId, new NLongTxService::TEvLongTxService::TEvCommitTx(LongTxId));
+ TBase::Become(&TThis::StateWaitCommitLongTx);
+ }
+
+ STFUNC(StateWaitCommitLongTx) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(NLongTxService::TEvLongTxService::TEvCommitTxResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+ }
+ }
+
+ void Handle(NLongTxService::TEvLongTxService::TEvCommitTxResult::TPtr& ev, const NActors::TActorContext& ctx) {
+ const auto* msg = ev->Get();
+
+ Finished = true;
+
+ if (msg->Record.GetStatus() == Ydb::StatusIds::SUCCESS) {
+ // We are done with the transaction, forget it
+ LongTxId = NLongTxService::TLongTxId();
+ }
+
+ NYql::TIssues issues;
+ NYql::IssuesFromMessage(msg->Record.GetIssues(), issues);
+ for (const auto& issue: issues) {
+ RaiseIssue(issue);
+ }
+ return ReplyWithResult(msg->Record.GetStatus(), ctx);
+ }
+
void FindMinMaxKeys() {
-
+
for (const auto& pair : GetRows()) {
const auto& serializedKey = pair.first;
-
- if (MinKey.GetCells().empty()) {
- // Only for the first key
- MinKey = serializedKey;
- MaxKey = serializedKey;
- } else {
- // For all subsequent keys
- if (CompareTypedCellVectors(serializedKey.GetCells().data(), MinKey.GetCells().data(),
- KeyColumnTypes.data(),
- serializedKey.GetCells().size(), MinKey.GetCells().size()) < 0)
- {
- MinKey = serializedKey;
- } else if (CompareTypedCellVectors(serializedKey.GetCells().data(), MaxKey.GetCells().data(),
- KeyColumnTypes.data(),
- serializedKey.GetCells().size(), MaxKey.GetCells().size()) > 0)
- {
- MaxKey = serializedKey;
- }
- }
- }
- }
-
- void ResolveShards(const NActors::TActorContext& ctx) {
+
+ if (MinKey.GetCells().empty()) {
+ // Only for the first key
+ MinKey = serializedKey;
+ MaxKey = serializedKey;
+ } else {
+ // For all subsequent keys
+ if (CompareTypedCellVectors(serializedKey.GetCells().data(), MinKey.GetCells().data(),
+ KeyColumnTypes.data(),
+ serializedKey.GetCells().size(), MinKey.GetCells().size()) < 0)
+ {
+ MinKey = serializedKey;
+ } else if (CompareTypedCellVectors(serializedKey.GetCells().data(), MaxKey.GetCells().data(),
+ KeyColumnTypes.data(),
+ serializedKey.GetCells().size(), MaxKey.GetCells().size()) > 0)
+ {
+ MaxKey = serializedKey;
+ }
+ }
+ }
+ }
+
+ void ResolveShards(const NActors::TActorContext& ctx) {
Y_VERIFY(!GetRows().empty());
- auto& entry = ResolveNamesResult->ResultSet.front();
-
- // We are going to set all columns
- TVector<TKeyDesc::TColumnOp> columns;
- for (const auto& ci : entry.Columns) {
+ auto& entry = ResolveNamesResult->ResultSet.front();
+
+ // We are going to set all columns
+ TVector<TKeyDesc::TColumnOp> columns;
+ for (const auto& ci : entry.Columns) {
TKeyDesc::TColumnOp op = { ci.second.Id, TKeyDesc::EColumnOperation::Set, ci.second.PType, 0, 0 };
- columns.push_back(op);
- }
-
- TTableRange range(MinKey.GetCells(), true, MaxKey.GetCells(), true, false);
+ columns.push_back(op);
+ }
+
+ TTableRange range(MinKey.GetCells(), true, MaxKey.GetCells(), true, false);
auto keyRange = MakeHolder<TKeyDesc>(entry.TableId, range, TKeyDesc::ERowOperation::Update, KeyColumnTypes, columns);
-
- TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
-
+
+ TAutoPtr<NSchemeCache::TSchemeCacheRequest> request(new NSchemeCache::TSchemeCacheRequest());
+
request->ResultSet.emplace_back(std::move(keyRange));
-
- TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
- ctx.Send(SchemeCache, resolveReq.Release());
-
- TBase::Become(&TThis::StateWaitResolveShards);
- WaitingResolveReply = true;
- }
-
- STFUNC(StateWaitResolveShards) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
- WaitingResolveReply = false;
- if (Finished) {
- return Die(ctx);
- }
-
- TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
- ResolvePartitionsResult = msg->Request;
-
- if (ResolvePartitionsResult->ErrorCount > 0) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table '%s'", GetTable().c_str()), ctx);
- }
-
- TString accessCheckError;
- if (!CheckAccess(accessCheckError)) {
- return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, accessCheckError, ctx);
- }
-
- auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
- TVector<ui64> shards;
- shards.reserve(partitions.size());
- for (auto& partition : partitions) {
- shards.push_back(partition.ShardId);
- }
-
- return JoinVectorIntoString(shards, ", ");
- };
-
+
+ TAutoPtr<TEvTxProxySchemeCache::TEvResolveKeySet> resolveReq(new TEvTxProxySchemeCache::TEvResolveKeySet(request));
+ ctx.Send(SchemeCache, resolveReq.Release());
+
+ TBase::Become(&TThis::StateWaitResolveShards);
+ WaitingResolveReply = true;
+ }
+
+ STFUNC(StateWaitResolveShards) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvTxProxySchemeCache::TEvResolveKeySetResult, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void Handle(TEvTxProxySchemeCache::TEvResolveKeySetResult::TPtr &ev, const TActorContext &ctx) {
+ WaitingResolveReply = false;
+ if (Finished) {
+ return Die(ctx);
+ }
+
+ TEvTxProxySchemeCache::TEvResolveKeySetResult *msg = ev->Get();
+ ResolvePartitionsResult = msg->Request;
+
+ if (ResolvePartitionsResult->ErrorCount > 0) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, Sprintf("Unknown table '%s'", GetTable().c_str()), ctx);
+ }
+
+ TString accessCheckError;
+ if (!CheckAccess(accessCheckError)) {
+ return ReplyWithError(Ydb::StatusIds::UNAUTHORIZED, accessCheckError, ctx);
+ }
+
+ auto getShardsString = [] (const TVector<TKeyDesc::TPartitionInfo>& partitions) {
+ TVector<ui64> shards;
+ shards.reserve(partitions.size());
+ for (auto& partition : partitions) {
+ shards.push_back(partition.ShardId);
+ }
+
+ return JoinVectorIntoString(shards, ", ");
+ };
+
LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Range shards: " << getShardsString(GetKeyRange()->Partitions));
-
- MakeShardRequests(ctx);
- }
-
- void MakeShardRequests(const NActors::TActorContext& ctx) {
+
+ MakeShardRequests(ctx);
+ }
+
+ void MakeShardRequests(const NActors::TActorContext& ctx) {
const auto* keyRange = GetKeyRange();
-
+
Y_VERIFY(!keyRange->Partitions.empty());
- // Group rows by shard id
+ // Group rows by shard id
TVector<std::unique_ptr<TEvDataShard::TEvUploadRowsRequest>> shardRequests(keyRange->Partitions.size());
for (const auto& keyValue : GetRows()) {
- // Find partition for the key
+ // Find partition for the key
auto it = std::lower_bound(keyRange->Partitions.begin(), keyRange->Partitions.end(), keyValue.first.GetCells(),
- [this](const auto &partition, const auto& key) {
- const auto& range = *partition.Range;
- const int cmp = CompareBorders<true, false>(range.EndKeyPrefix.GetCells(), key,
- range.IsInclusive || range.IsPoint, true, KeyColumnTypes);
-
- return (cmp < 0);
- });
-
+ [this](const auto &partition, const auto& key) {
+ const auto& range = *partition.Range;
+ const int cmp = CompareBorders<true, false>(range.EndKeyPrefix.GetCells(), key,
+ range.IsInclusive || range.IsPoint, true, KeyColumnTypes);
+
+ return (cmp < 0);
+ });
+
size_t shardIdx = it - keyRange->Partitions.begin();
-
- TEvDataShard::TEvUploadRowsRequest* ev = shardRequests[shardIdx].get();
- if (!ev) {
- shardRequests[shardIdx].reset(new TEvDataShard::TEvUploadRowsRequest());
- ev = shardRequests[shardIdx].get();
- ev->Record.SetCancelDeadlineMs(Deadline.MilliSeconds());
-
+
+ TEvDataShard::TEvUploadRowsRequest* ev = shardRequests[shardIdx].get();
+ if (!ev) {
+ shardRequests[shardIdx].reset(new TEvDataShard::TEvUploadRowsRequest());
+ ev = shardRequests[shardIdx].get();
+ ev->Record.SetCancelDeadlineMs(Deadline.MilliSeconds());
+
ev->Record.SetTableId(keyRange->TableId.PathId.LocalPathId);
- for (const auto& fd : KeyColumnPositions) {
- ev->Record.MutableRowScheme()->AddKeyColumnIds(fd.ColId);
- }
- for (const auto& fd : ValueColumnPositions) {
- ev->Record.MutableRowScheme()->AddValueColumnIds(fd.ColId);
- }
+ for (const auto& fd : KeyColumnPositions) {
+ ev->Record.MutableRowScheme()->AddKeyColumnIds(fd.ColId);
+ }
+ for (const auto& fd : ValueColumnPositions) {
+ ev->Record.MutableRowScheme()->AddValueColumnIds(fd.ColId);
+ }
if (WriteToTableShadow) {
ev->Record.SetWriteToTableShadow(true);
}
- }
-
- auto* row = ev->Record.AddRows();
- row->SetKeyColumns(keyValue.first.GetBuffer());
- row->SetValueColumns(keyValue.second);
- }
-
- // Send requests to the shards
- for (size_t idx = 0; idx < shardRequests.size(); ++idx) {
- if (!shardRequests[idx])
- continue;
-
+ }
+
+ auto* row = ev->Record.AddRows();
+ row->SetKeyColumns(keyValue.first.GetBuffer());
+ row->SetValueColumns(keyValue.second);
+ }
+
+ // Send requests to the shards
+ for (size_t idx = 0; idx < shardRequests.size(); ++idx) {
+ if (!shardRequests[idx])
+ continue;
+
TTabletId shardId = keyRange->Partitions[idx].ShardId;
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shards " << shardId);
-
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Sending request to shards " << shardId);
+
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvForward(shardRequests[idx].release(), shardId, true), IEventHandle::FlagTrackDelivery);
-
- auto res = ShardRepliesLeft.insert(shardId);
- if (!res.second) {
- LOG_CRIT_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: shard " << shardId << " has already been added!");
- }
- }
-
- TBase::Become(&TThis::StateWaitResults);
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
- Y_UNUSED(ev);
- SetError(Ydb::StatusIds::INTERNAL_ERROR, "Internal error: pipe cache is not available, the cluster might not be configured properly");
-
- ShardRepliesLeft.clear();
-
- ReplyIfDone(ctx);
- }
-
- void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
+
+ auto res = ShardRepliesLeft.insert(shardId);
+ if (!res.second) {
+ LOG_CRIT_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: shard " << shardId << " has already been added!");
+ }
+ }
+
+ TBase::Become(&TThis::StateWaitResults);
+ }
+
+ void Handle(TEvents::TEvUndelivered::TPtr &ev, const TActorContext &ctx) {
+ Y_UNUSED(ev);
+ SetError(Ydb::StatusIds::INTERNAL_ERROR, "Internal error: pipe cache is not available, the cluster might not be configured properly");
+
+ ShardRepliesLeft.clear();
+
+ ReplyIfDone(ctx);
+ }
+
+ void Handle(TEvPipeCache::TEvDeliveryProblem::TPtr &ev, const TActorContext &ctx) {
ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvInvalidateTable(GetKeyRange()->TableId, TActorId()));
-
- SetError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Failed to connect to shard %" PRIu64, ev->Get()->TabletId));
- ShardRepliesLeft.erase(ev->Get()->TabletId);
-
- ReplyIfDone(ctx);
- }
-
- STFUNC(StateWaitResults) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvDataShard::TEvUploadRowsResponse, Handle);
- HFunc(TEvents::TEvUndelivered, Handle);
- HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
- CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
-
- default:
- break;
- }
- }
-
- void Handle(TEvDataShard::TEvUploadRowsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
- const auto& shardResponse = ev->Get()->Record;
-
- // Notify the cache that we are done with the pipe
+
+ SetError(Ydb::StatusIds::UNAVAILABLE, Sprintf("Failed to connect to shard %" PRIu64, ev->Get()->TabletId));
+ ShardRepliesLeft.erase(ev->Get()->TabletId);
+
+ ReplyIfDone(ctx);
+ }
+
+ STFUNC(StateWaitResults) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvDataShard::TEvUploadRowsResponse, Handle);
+ HFunc(TEvents::TEvUndelivered, Handle);
+ HFunc(TEvPipeCache::TEvDeliveryProblem, Handle);
+ CFunc(TEvents::TSystem::Wakeup, HandleTimeout);
+
+ default:
+ break;
+ }
+ }
+
+ void Handle(TEvDataShard::TEvUploadRowsResponse::TPtr& ev, const NActors::TActorContext& ctx) {
+ const auto& shardResponse = ev->Get()->Record;
+
+ // Notify the cache that we are done with the pipe
ctx.Send(LeaderPipeCache, new TEvPipeCache::TEvUnlink(shardResponse.GetTabletID()));
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: got "
- << NKikimrTxDataShard::TError::EKind_Name((NKikimrTxDataShard::TError::EKind)shardResponse.GetStatus())
- << " from shard " << shardResponse.GetTabletID());
-
- if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
- ::Ydb::StatusIds::StatusCode status = Ydb::StatusIds::GENERIC_ERROR;
-
- switch (shardResponse.GetStatus()) {
- case NKikimrTxDataShard::TError::WRONG_SHARD_STATE:
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: got "
+ << NKikimrTxDataShard::TError::EKind_Name((NKikimrTxDataShard::TError::EKind)shardResponse.GetStatus())
+ << " from shard " << shardResponse.GetTabletID());
+
+ if (shardResponse.GetStatus() != NKikimrTxDataShard::TError::OK) {
+ ::Ydb::StatusIds::StatusCode status = Ydb::StatusIds::GENERIC_ERROR;
+
+ switch (shardResponse.GetStatus()) {
+ case NKikimrTxDataShard::TError::WRONG_SHARD_STATE:
ctx.Send(SchemeCache, new TEvTxProxySchemeCache::TEvInvalidateTable(GetKeyRange()->TableId, TActorId()));
- status = Ydb::StatusIds::OVERLOADED;
- break;
+ status = Ydb::StatusIds::OVERLOADED;
+ break;
case NKikimrTxDataShard::TError::OUT_OF_SPACE:
status = Ydb::StatusIds::UNAVAILABLE;
break;
- case NKikimrTxDataShard::TError::SCHEME_ERROR:
- status = Ydb::StatusIds::SCHEME_ERROR;
- break;
- case NKikimrTxDataShard::TError::BAD_ARGUMENT:
- status = Ydb::StatusIds::BAD_REQUEST;
- break;
- case NKikimrTxDataShard::TError::EXECUTION_CANCELLED:
- status = Ydb::StatusIds::TIMEOUT;
- break;
- };
-
- SetError(status, shardResponse.GetErrorDescription());
- }
-
- ShardRepliesLeft.erase(shardResponse.GetTabletID());
-
- ReplyIfDone(ctx);
- }
-
- void SetError(::Ydb::StatusIds::StatusCode status, const TString& message) {
- if (Status != ::Ydb::StatusIds::SUCCESS) {
- return;
- }
-
- Status = status;
- ErrorMessage = message;
- }
-
- void ReplyIfDone(const NActors::TActorContext& ctx) {
- if (!ShardRepliesLeft.empty()) {
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: waiting for " << ShardRepliesLeft.size() << " shards replies");
- return;
- }
-
- Finished = true;
-
- if (!ErrorMessage.empty()) {
- RaiseIssue(NYql::TIssue(ErrorMessage));
- }
-
- ReplyWithResult(Status, ctx);
- }
-
- void ReplyWithError(::Ydb::StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
- SetError(status, message);
-
- Y_VERIFY_DEBUG(ShardRepliesLeft.empty());
- ReplyIfDone(ctx);
- }
-
- void ReplyWithResult(::Ydb::StatusIds::StatusCode status, const TActorContext& ctx) {
+ case NKikimrTxDataShard::TError::SCHEME_ERROR:
+ status = Ydb::StatusIds::SCHEME_ERROR;
+ break;
+ case NKikimrTxDataShard::TError::BAD_ARGUMENT:
+ status = Ydb::StatusIds::BAD_REQUEST;
+ break;
+ case NKikimrTxDataShard::TError::EXECUTION_CANCELLED:
+ status = Ydb::StatusIds::TIMEOUT;
+ break;
+ };
+
+ SetError(status, shardResponse.GetErrorDescription());
+ }
+
+ ShardRepliesLeft.erase(shardResponse.GetTabletID());
+
+ ReplyIfDone(ctx);
+ }
+
+ void SetError(::Ydb::StatusIds::StatusCode status, const TString& message) {
+ if (Status != ::Ydb::StatusIds::SUCCESS) {
+ return;
+ }
+
+ Status = status;
+ ErrorMessage = message;
+ }
+
+ void ReplyIfDone(const NActors::TActorContext& ctx) {
+ if (!ShardRepliesLeft.empty()) {
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Upload rows: waiting for " << ShardRepliesLeft.size() << " shards replies");
+ return;
+ }
+
+ Finished = true;
+
+ if (!ErrorMessage.empty()) {
+ RaiseIssue(NYql::TIssue(ErrorMessage));
+ }
+
+ ReplyWithResult(Status, ctx);
+ }
+
+ void ReplyWithError(::Ydb::StatusIds::StatusCode status, const TString& message, const TActorContext& ctx) {
+ SetError(status, message);
+
+ Y_VERIFY_DEBUG(ShardRepliesLeft.empty());
+ ReplyIfDone(ctx);
+ }
+
+ void ReplyWithResult(::Ydb::StatusIds::StatusCode status, const TActorContext& ctx) {
SendResult(ctx, status);
-
- LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
- << " completed with status " << status);
-
- if (LongTxId != NLongTxService::TLongTxId()) {
- // LongTxId is reset after successful commit
- // If it is still there, it means we need to roll back
- Y_VERIFY_DEBUG(status != ::Ydb::StatusIds::SUCCESS);
- RollbackLongTx(ctx);
- }
-
- if (!WaitingResolveReply) {
- Die(ctx);
- }
- }
-};
-
+
+ LOG_DEBUG_S(ctx, NKikimrServices::MSGBUS_REQUEST, "Bulk upsert to table " << GetTable()
+ << " completed with status " << status);
+
+ if (LongTxId != NLongTxService::TLongTxId()) {
+ // LongTxId is reset after successful commit
+ // If it is still there, it means we need to roll back
+ Y_VERIFY_DEBUG(status != ::Ydb::StatusIds::SUCCESS);
+ RollbackLongTx(ctx);
+ }
+
+ if (!WaitingResolveReply) {
+ Die(ctx);
+ }
+ }
+};
+
} // namespace NTxProxy
-} // namespace NKikimr
+} // namespace NKikimr
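
A note on the shard-routing step in MakeShardRequests above: each row key is mapped to the first partition whose end-key border does not compare less than the key, via std::lower_bound over keyRange->Partitions, with CompareBorders handling inclusive and point ranges over typed cells. The sketch below reproduces only that idea with plain integer keys; TPartition, FindPartition and the sample shard ids are assumptions for illustration and are not YDB types.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <map>
    #include <vector>

    struct TPartition {
        uint64_t ShardId;
        int64_t EndKey;  // inclusive upper bound of the partition's key range
    };

    // Returns the index of the partition owning `key`. Partitions are sorted by
    // EndKey and the last one is unbounded, so the result is always in range.
    static size_t FindPartition(const std::vector<TPartition>& partitions, int64_t key) {
        auto it = std::lower_bound(partitions.begin(), partitions.end(), key,
            [](const TPartition& partition, int64_t k) {
                // Keep searching while this partition ends strictly before the key.
                return partition.EndKey < k;
            });
        return static_cast<size_t>(it - partitions.begin());
    }

    int main() {
        std::vector<TPartition> partitions = {
            {1001, 10}, {1002, 20}, {1003, std::numeric_limits<int64_t>::max()},
        };
        std::vector<int64_t> keys = {3, 10, 11, 25};

        // Group keys by target shard, mirroring the shardRequests[] grouping above.
        std::map<uint64_t, std::vector<int64_t>> perShard;
        for (int64_t key : keys) {
            perShard[partitions[FindPartition(partitions, key)].ShardId].push_back(key);
        }
        for (const auto& [shard, shardKeys] : perShard) {
            std::cout << "shard " << shard << ": " << shardKeys.size() << " rows\n";
        }
    }

The real code appears to rely on the last partition covering the tail of the key space, so the iterator returned by lower_bound is never end() and shardRequests[shardIdx] needs no bounds check.
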
diff --git a/ydb/core/tx/ya.make b/ydb/core/tx/ya.make
index dd6fa55686a..5bfe4b3b861 100644
--- a/ydb/core/tx/ya.make
+++ b/ydb/core/tx/ya.make
@@ -7,7 +7,7 @@ OWNER(
SRCS(
defs.h
- message_seqno.h
+ message_seqno.h
tx.h
tx.cpp
tx_processing.h
diff --git a/ydb/core/util/cache_cache.h b/ydb/core/util/cache_cache.h
index 88ad55b410f..a8ab10977dd 100644
--- a/ydb/core/util/cache_cache.h
+++ b/ydb/core/util/cache_cache.h
@@ -19,8 +19,8 @@ struct TCacheCacheConfig : public TAtomicRefCount<TCacheCacheConfig> {
CacheGenWarm,
};
- ui64 Limit;
-
+ ui64 Limit;
+
ui64 FreshLimit;
ui64 StagingLimit;
ui64 WarmLimit;
@@ -30,25 +30,25 @@ struct TCacheCacheConfig : public TAtomicRefCount<TCacheCacheConfig> {
TCounterPtr ReportedWarm;
TCacheCacheConfig(ui64 limit, const TCounterPtr &reportedFresh, const TCounterPtr &reportedStaging, const TCounterPtr &reportedWarm)
- : Limit(0)
- , FreshLimit(0)
- , StagingLimit(0)
- , WarmLimit(0)
+ : Limit(0)
+ , FreshLimit(0)
+ , StagingLimit(0)
+ , WarmLimit(0)
, ReportedFresh(reportedFresh)
, ReportedStaging(reportedStaging)
, ReportedWarm(reportedWarm)
- {
- SetLimit(limit);
- }
-
- void SetLimit(ui64 limit) {
- Limit = limit;
-
- FreshLimit = Limit / 3;
- StagingLimit = FreshLimit;
- WarmLimit = FreshLimit;
- }
-
+ {
+ SetLimit(limit);
+ }
+
+ void SetLimit(ui64 limit) {
+ Limit = limit;
+
+ FreshLimit = Limit / 3;
+ StagingLimit = FreshLimit;
+ WarmLimit = FreshLimit;
+ }
+
template<typename TItem>
struct TDefaultWeight {
static ui64 Get(TItem *) {
@@ -164,7 +164,7 @@ public:
if (cacheSize == 0)
cacheSize = Max<ui64>();
- Config.SetLimit(cacheSize);
+ Config.SetLimit(cacheSize);
}
private:
void Unlink(TItem *item, ui64 &weight) {
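
On the TCacheCacheConfig change above: SetLimit splits the total cache limit evenly across the three generations, so FreshLimit, StagingLimit and WarmLimit each become Limit / 3 with integer division. For example, SetLimit(90) yields 30/30/30, while SetLimit(100) yields 33/33/33 and leaves one unit of the limit unassigned, so the per-generation limits may sum to slightly less than Limit.
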
diff --git a/ydb/core/util/pb.h b/ydb/core/util/pb.h
index 56195ed01e4..81bf964f214 100644
--- a/ydb/core/util/pb.h
+++ b/ydb/core/util/pb.h
@@ -28,14 +28,14 @@ bool ParseBinPBFromFile(const TString &path, T *pb) {
return ok;
}
-// Deserialize persisted protobuf without checking size limit (size should have been checked before saving)
-template <class TProto>
+// Deserialize persisted protobuf without checking size limit (size should have been checked before saving)
+template <class TProto>
bool ParseFromStringNoSizeLimit(TProto& proto, TArrayRef<const char> str) {
google::protobuf::io::CodedInputStream input(reinterpret_cast<const ui8*>(str.data()), str.size());
input.SetTotalBytesLimit(str.size());
- return proto.ParseFromCodedStream(&input) && input.ConsumedEntireMessage();
+ return proto.ParseFromCodedStream(&input) && input.ConsumedEntireMessage();
}
-
+
template<typename TProto>
struct TProtoBox : public TProto {
TProtoBox(TArrayRef<const char> plain) {
@@ -50,7 +50,7 @@ bool MergeFromStringNoSizeLimit(TProto& proto, TArrayRef<const char> str) {
google::protobuf::io::CodedInputStream input(reinterpret_cast<const ui8*>(str.data()), str.size());
input.SetTotalBytesLimit(str.size());
return proto.MergeFromCodedStream(&input) && input.ConsumedEntireMessage();
-}
+}
inline TString SingleLineProto(const NProtoBuf::Message& message) {
NProtoBuf::TextFormat::Printer p;
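
On ParseFromStringNoSizeLimit and MergeFromStringNoSizeLimit above: constructing a CodedInputStream directly and calling SetTotalBytesLimit(str.size()) replaces protobuf's default parsing limit with the exact buffer size, so large messages persisted earlier still parse while reads can never run past the buffer, and ConsumedEntireMessage() additionally rejects trailing garbage. A brief usage sketch, assuming a hypothetical generated message type TBigSnapshot and accessor GetSerializedSnapshot() that are not part of this diff:

    // Parse a previously persisted blob whose size was already validated at save time.
    TBigSnapshot snapshot;                               // hypothetical proto message
    const TString& blob = row.GetSerializedSnapshot();   // hypothetical accessor
    if (!ParseFromStringNoSizeLimit(snapshot, TArrayRef<const char>(blob.data(), blob.size()))) {
        return false;  // truncated, corrupted, or followed by extra bytes
    }
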
diff --git a/ydb/core/viewer/browse_db.h b/ydb/core/viewer/browse_db.h
index c0cedc95610..fe7b5d31408 100644
--- a/ydb/core/viewer/browse_db.h
+++ b/ydb/core/viewer/browse_db.h
@@ -168,16 +168,16 @@ public:
pbCommon.SetRowCount(pbTableStats.GetRowCount());
pbCommon.SetAccessTime(pbTableStats.GetLastAccessTime());
pbCommon.SetUpdateTime(pbTableStats.GetLastUpdateTime());
- pbCommon.SetImmediateTxCompleted(pbTableStats.GetImmediateTxCompleted());
- pbCommon.SetPlannedTxCompleted(pbTableStats.GetPlannedTxCompleted());
- pbCommon.SetTxRejectedByOverload(pbTableStats.GetTxRejectedByOverload());
- pbCommon.SetTxRejectedBySpace(pbTableStats.GetTxRejectedBySpace());
-
- pbCommon.SetRowUpdates(pbTableStats.GetRowUpdates());
- pbCommon.SetRowDeletes(pbTableStats.GetRowDeletes());
- pbCommon.SetRowReads(pbTableStats.GetRowReads());
- pbCommon.SetRangeReads(pbTableStats.GetRangeReads());
- pbCommon.SetRangeReadRows(pbTableStats.GetRangeReadRows());
+ pbCommon.SetImmediateTxCompleted(pbTableStats.GetImmediateTxCompleted());
+ pbCommon.SetPlannedTxCompleted(pbTableStats.GetPlannedTxCompleted());
+ pbCommon.SetTxRejectedByOverload(pbTableStats.GetTxRejectedByOverload());
+ pbCommon.SetTxRejectedBySpace(pbTableStats.GetTxRejectedBySpace());
+
+ pbCommon.SetRowUpdates(pbTableStats.GetRowUpdates());
+ pbCommon.SetRowDeletes(pbTableStats.GetRowDeletes());
+ pbCommon.SetRowReads(pbTableStats.GetRowReads());
+ pbCommon.SetRangeReads(pbTableStats.GetRangeReads());
+ pbCommon.SetRangeReadRows(pbTableStats.GetRangeReadRows());
}
if (pbPathDescription.HasTabletMetrics()) {
const auto& pbTabletMetrics(pbPathDescription.GetTabletMetrics());
diff --git a/ydb/core/viewer/content/viewer.js b/ydb/core/viewer/content/viewer.js
index 4fd20763731..1108f42278c 100644
--- a/ydb/core/viewer/content/viewer.js
+++ b/ydb/core/viewer/content/viewer.js
@@ -1498,10 +1498,10 @@ function tabletTypeToSymbol(type) {
return "TB";
case "Kesus":
return "K";
- case "OlapShard":
- return "OS";
- case "ColumnShard":
- return "CS";
+ case "OlapShard":
+ return "OS";
+ case "ColumnShard":
+ return "CS";
case "SequenceShard":
return "S";
case "ReplicationController":
@@ -1902,10 +1902,10 @@ function getTabletTypeFromHiveTabletType(type) {
return "KeyValue";
case 20:
return "PersQueue";
- case 34:
- return "OlapShard";
- case 35:
- return "ColumnShard";
+ case 34:
+ return "OlapShard";
+ case 35:
+ return "ColumnShard";
}
return type;
}
@@ -2457,29 +2457,29 @@ function onTreeDataComplete(result, obj, cb) {
child.parent = result.Path;
child.text = result.PathDescription.Children[i].Name;
switch (result.PathDescription.Children[i].PathType) {
- case 1: // Directory
+ case 1: // Directory
child.children = true;
child.icon = "glyphicon glyphicon-folder-close schema-good";
break;
- case 2: // Table
+ case 2: // Table
child.children = true;
child.icon = "glyphicon glyphicon-list schema-good";
break;
- case 3: // PQ Group
+ case 3: // PQ Group
child.icon = "glyphicon glyphicon-sort-by-alphabet schema-good";
break;
- case 4: // SubDomain
+ case 4: // SubDomain
child.children = true;
child.icon = "glyphicon glyphicon-tasks schema-good";
break;
- case 10: // ExtSubDomain
+ case 10: // ExtSubDomain
child.children = true;
child.icon = "glyphicon glyphicon-asterisk schema-good";
break;
- case 12: // OlapStore
- child.children = true;
- child.icon = "glyphicon glyphicon-list schema-good";
- break;
+ case 12: // OlapStore
+ child.children = true;
+ child.icon = "glyphicon glyphicon-list schema-good";
+ break;
}
child.data = function(obj1, cb1) { onTreeData(obj1, cb1); };
children.push(child);
@@ -2822,25 +2822,25 @@ function onTreeNodeComplete(result, obj) {
}
}
}
- if (result.PathDescription.Self.PathType === 12) {
- var olapStore = result.PathDescription.OlapStoreDescription;
- if (olapStore !== undefined) {
- if (olapStore.OlapShards !== undefined) {
- tabLen = olapStore.OlapShards.length;
- for (var k = 0; k < tabLen; k++) {
- tablet = String(olapStore.OlapShards[k]);
- SchemaTabletElements[tablet] = null;
- }
- }
- if (olapStore.ColumnShards !== undefined) {
- tabLen = olapStore.ColumnShards.length;
- for (var k = 0; k < tabLen; k++) {
- tablet = String(olapStore.ColumnShards[k]);
- SchemaTabletElements[tablet] = null;
- }
- }
- }
- }
+ if (result.PathDescription.Self.PathType === 12) {
+ var olapStore = result.PathDescription.OlapStoreDescription;
+ if (olapStore !== undefined) {
+ if (olapStore.OlapShards !== undefined) {
+ tabLen = olapStore.OlapShards.length;
+ for (var k = 0; k < tabLen; k++) {
+ tablet = String(olapStore.OlapShards[k]);
+ SchemaTabletElements[tablet] = null;
+ }
+ }
+ if (olapStore.ColumnShards !== undefined) {
+ tabLen = olapStore.ColumnShards.length;
+ for (var k = 0; k < tabLen; k++) {
+ tablet = String(olapStore.ColumnShards[k]);
+ SchemaTabletElements[tablet] = null;
+ }
+ }
+ }
+ }
}
if (result.PathDescription.Self.PathType === 7) {
@@ -2901,8 +2901,8 @@ function onTreeNodeComplete(result, obj) {
onTreeNodeTabletsComplete();
}
- if (result.PathDescription !== undefined && result.PathDescription.Children !== undefined
- && result.PathDescription.Self.PathType !== 4 && result.PathDescription.Self.PathType !== 12) {
+ if (result.PathDescription !== undefined && result.PathDescription.Children !== undefined
+ && result.PathDescription.Self.PathType !== 4 && result.PathDescription.Self.PathType !== 12) {
onTreeNodeChildrenComplete(result.Path, result.PathDescription.Children);
}
@@ -2982,62 +2982,62 @@ function onTreeNodeComplete(result, obj) {
row.insertCell(-1).innerHTML = "Last Update";
row.insertCell(-1).innerHTML = (new Date(Number(stats.LastUpdateTime))).toISOString();
}
- if (stats.ImmediateTxCompleted && Number(stats.ImmediateTxCompleted)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Immediate Tx Completed";
- row.insertCell(-1).innerHTML = valueToNumber(stats.ImmediateTxCompleted);
- }
- if (stats.PlannedTxCompleted && Number(stats.PlannedTxCompleted)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Planned Tx Completed";
- row.insertCell(-1).innerHTML = valueToNumber(stats.PlannedTxCompleted);
- }
- if (stats.TxRejectedByOverload && Number(stats.TxRejectedByOverload)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Tx Rejected by Overload";
- row.insertCell(-1).innerHTML = valueToNumber(stats.TxRejectedByOverload);
- }
- if (stats.TxRejectedBySpace && Number(stats.TxRejectedBySpace)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Tx Rejected by Space";
- row.insertCell(-1).innerHTML = valueToNumber(stats.TxRejectedBySpace);
- }
- if (stats.TxCompleteLagMsec && Number(stats.TxCompleteLagMsec)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Tx completion lag (msec)";
- row.insertCell(-1).innerHTML = valueToNumber(stats.TxCompleteLagMsec);
- }
- if (stats.InFlightTxCount && Number(stats.InFlightTxCount)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "In flight Tx count";
- row.insertCell(-1).innerHTML = valueToNumber(stats.InFlightTxCount);
- }
-
- if (stats.RowUpdates && Number(stats.RowUpdates)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Row Updates";
- row.insertCell(-1).innerHTML = valueToNumber(stats.RowUpdates);
- }
- if (stats.RowDeletes && Number(stats.RowDeletes)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Row Deletes";
- row.insertCell(-1).innerHTML = valueToNumber(stats.RowDeletes);
- }
- if (stats.RowReads && Number(stats.RowReads)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Row Reads";
- row.insertCell(-1).innerHTML = valueToNumber(stats.RowReads);
- }
- if (stats.RangeReads && Number(stats.RangeReads)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Range Reads";
- row.insertCell(-1).innerHTML = valueToNumber(stats.RangeReads);
- }
- if (stats.RangeReadRows && Number(stats.RangeReadRows)) {
- row = tab.insertRow(-1);
- row.insertCell(-1).innerHTML = "Range Read Rows";
- row.insertCell(-1).innerHTML = valueToNumber(stats.RangeReadRows);
- }
+ if (stats.ImmediateTxCompleted && Number(stats.ImmediateTxCompleted)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Immediate Tx Completed";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.ImmediateTxCompleted);
+ }
+ if (stats.PlannedTxCompleted && Number(stats.PlannedTxCompleted)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Planned Tx Completed";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.PlannedTxCompleted);
+ }
+ if (stats.TxRejectedByOverload && Number(stats.TxRejectedByOverload)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Tx Rejected by Overload";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.TxRejectedByOverload);
+ }
+ if (stats.TxRejectedBySpace && Number(stats.TxRejectedBySpace)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Tx Rejected by Space";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.TxRejectedBySpace);
+ }
+ if (stats.TxCompleteLagMsec && Number(stats.TxCompleteLagMsec)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Tx completion lag (msec)";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.TxCompleteLagMsec);
+ }
+ if (stats.InFlightTxCount && Number(stats.InFlightTxCount)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "In flight Tx count";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.InFlightTxCount);
+ }
+
+ if (stats.RowUpdates && Number(stats.RowUpdates)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Row Updates";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.RowUpdates);
+ }
+ if (stats.RowDeletes && Number(stats.RowDeletes)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Row Deletes";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.RowDeletes);
+ }
+ if (stats.RowReads && Number(stats.RowReads)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Row Reads";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.RowReads);
+ }
+ if (stats.RangeReads && Number(stats.RangeReads)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Range Reads";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.RangeReads);
+ }
+ if (stats.RangeReadRows && Number(stats.RangeReadRows)) {
+ row = tab.insertRow(-1);
+ row.insertCell(-1).innerHTML = "Range Read Rows";
+ row.insertCell(-1).innerHTML = valueToNumber(stats.RangeReadRows);
+ }
}
if (result.PathDescription.PersQueueGroup !== undefined) {
diff --git a/ydb/core/viewer/json_counters.h b/ydb/core/viewer/json_counters.h
index e3705973cbf..cac1e0885c4 100644
--- a/ydb/core/viewer/json_counters.h
+++ b/ydb/core/viewer/json_counters.h
@@ -427,7 +427,7 @@ public:
void Timeout(const TActorContext& ctx) {
ctx.Send(Initiator, new NMon::TEvHttpInfoRes(Viewer->GetHTTPGATEWAYTIMEOUT(), 0, NMon::IEvHttpInfoRes::EContentType::Custom));
- Die(ctx);
+ Die(ctx);
}
};
diff --git a/ydb/core/viewer/protos/viewer.proto b/ydb/core/viewer/protos/viewer.proto
index 87812a8bef1..9f6c822a89f 100644
--- a/ydb/core/viewer/protos/viewer.proto
+++ b/ydb/core/viewer/protos/viewer.proto
@@ -96,18 +96,18 @@ message TMetaCommonInfo {
string MinSubKey = 25;
string MaxSubKey = 26;
uint64 MinTimestamp = 27; // timestamp of the oldest entry in the table, milliseconds since epoch
-
- // Datashard-specific stats
- uint64 ImmediateTxCompleted = 30;
- uint64 PlannedTxCompleted = 31;
- uint64 TxRejectedByOverload = 32;
- uint64 TxRejectedBySpace = 33;
-
- uint64 RowUpdates = 34;
- uint64 RowDeletes = 35;
- uint64 RowReads = 36;
- uint64 RangeReads = 37;
- uint64 RangeReadRows = 38;
+
+ // Datashard-specific stats
+ uint64 ImmediateTxCompleted = 30;
+ uint64 PlannedTxCompleted = 31;
+ uint64 TxRejectedByOverload = 32;
+ uint64 TxRejectedBySpace = 33;
+
+ uint64 RowUpdates = 34;
+ uint64 RowDeletes = 35;
+ uint64 RowReads = 36;
+ uint64 RangeReads = 37;
+ uint64 RangeReadRows = 38;
}
message TMetaColumnInfo {
diff --git a/ydb/core/ydb_convert/table_description.cpp b/ydb/core/ydb_convert/table_description.cpp
index 29b2664e8dd..0d10bb8450b 100644
--- a/ydb/core/ydb_convert/table_description.cpp
+++ b/ydb/core/ydb_convert/table_description.cpp
@@ -23,15 +23,15 @@ template <typename TYdbProto>
void FillColumnDescriptionImpl(TYdbProto& out,
NKikimrMiniKQL::TType& splitKeyType, const NKikimrSchemeOp::TTableDescription& in) {
- splitKeyType.SetKind(NKikimrMiniKQL::ETypeKind::Tuple);
- splitKeyType.MutableTuple()->MutableElement()->Reserve(in.KeyColumnIdsSize());
- THashMap<ui32, size_t> columnIdToKeyPos;
- for (size_t keyPos = 0; keyPos < in.KeyColumnIdsSize(); ++keyPos) {
- ui32 colId = in.GetKeyColumnIds(keyPos);
- columnIdToKeyPos[colId] = keyPos;
- splitKeyType.MutableTuple()->AddElement();
- }
-
+ splitKeyType.SetKind(NKikimrMiniKQL::ETypeKind::Tuple);
+ splitKeyType.MutableTuple()->MutableElement()->Reserve(in.KeyColumnIdsSize());
+ THashMap<ui32, size_t> columnIdToKeyPos;
+ for (size_t keyPos = 0; keyPos < in.KeyColumnIdsSize(); ++keyPos) {
+ ui32 colId = in.GetKeyColumnIds(keyPos);
+ columnIdToKeyPos[colId] = keyPos;
+ splitKeyType.MutableTuple()->AddElement();
+ }
+
for (const auto& column : in.GetColumns()) {
NYql::NProto::TypeIds protoType;
if (!NYql::NProto::TypeIds_Parse(column.GetType(), &protoType)) {
@@ -50,14 +50,14 @@ void FillColumnDescriptionImpl(TYdbProto& out,
} else {
NMiniKQL::ExportPrimitiveTypeToProto(protoType, item);
}
-
- if (columnIdToKeyPos.count(column.GetId())) {
- size_t keyPos = columnIdToKeyPos[column.GetId()];
- auto tupleElement = splitKeyType.MutableTuple()->MutableElement(keyPos);
- tupleElement->SetKind(NKikimrMiniKQL::ETypeKind::Optional);
- ConvertYdbTypeToMiniKQLType(item, *tupleElement->MutableOptional()->MutableItem());
+
+ if (columnIdToKeyPos.count(column.GetId())) {
+ size_t keyPos = columnIdToKeyPos[column.GetId()];
+ auto tupleElement = splitKeyType.MutableTuple()->MutableElement(keyPos);
+ tupleElement->SetKind(NKikimrMiniKQL::ETypeKind::Optional);
+ ConvertYdbTypeToMiniKQLType(item, *tupleElement->MutableOptional()->MutableItem());
}
-
+
if (column.HasFamilyName()) {
newColumn->set_family(column.GetFamilyName());
}
@@ -101,51 +101,51 @@ void FillColumnDescription(Ydb::Table::CreateTableRequest& out,
FillColumnDescriptionImpl(out, splitKeyType, in);
}
-bool ExtractColumnTypeId(ui32& outTypeId, const Ydb::Type& inType, Ydb::StatusIds::StatusCode& status, TString& error) {
- ui32 typeId;
+bool ExtractColumnTypeId(ui32& outTypeId, const Ydb::Type& inType, Ydb::StatusIds::StatusCode& status, TString& error) {
+ ui32 typeId;
auto itemType = inType.has_optional_type() ? inType.optional_type().item() : inType;
- switch (itemType.type_case()) {
- case Ydb::Type::kTypeId:
- typeId = (ui32)itemType.type_id();
- break;
- case Ydb::Type::kDecimalType: {
- if (itemType.decimal_type().precision() != NScheme::DECIMAL_PRECISION) {
- status = Ydb::StatusIds::BAD_REQUEST;
- error = Sprintf("Bad decimal precision. Only Decimal(%" PRIu32
- ",%" PRIu32 ") is supported for table columns",
- NScheme::DECIMAL_PRECISION,
- NScheme::DECIMAL_SCALE);
- return false;
- }
- if (itemType.decimal_type().scale() != NScheme::DECIMAL_SCALE) {
- status = Ydb::StatusIds::BAD_REQUEST;
- error = Sprintf("Bad decimal scale. Only Decimal(%" PRIu32
- ",%" PRIu32 ") is supported for table columns",
- NScheme::DECIMAL_PRECISION,
- NScheme::DECIMAL_SCALE);
- return false;
- }
- typeId = NYql::NProto::TypeIds::Decimal;
- break;
- }
-
- default: {
- status = Ydb::StatusIds::BAD_REQUEST;
- error = "Only optional of data types are supported for table columns";
- return false;
- }
- }
-
- if (!NYql::NProto::TypeIds_IsValid((int)typeId)) {
- status = Ydb::StatusIds::BAD_REQUEST;
- error = TStringBuilder() << "Got invalid typeId: " << (int)typeId;
- return false;
- }
-
- outTypeId = typeId;
- return true;
-}
-
+ switch (itemType.type_case()) {
+ case Ydb::Type::kTypeId:
+ typeId = (ui32)itemType.type_id();
+ break;
+ case Ydb::Type::kDecimalType: {
+ if (itemType.decimal_type().precision() != NScheme::DECIMAL_PRECISION) {
+ status = Ydb::StatusIds::BAD_REQUEST;
+ error = Sprintf("Bad decimal precision. Only Decimal(%" PRIu32
+ ",%" PRIu32 ") is supported for table columns",
+ NScheme::DECIMAL_PRECISION,
+ NScheme::DECIMAL_SCALE);
+ return false;
+ }
+ if (itemType.decimal_type().scale() != NScheme::DECIMAL_SCALE) {
+ status = Ydb::StatusIds::BAD_REQUEST;
+ error = Sprintf("Bad decimal scale. Only Decimal(%" PRIu32
+ ",%" PRIu32 ") is supported for table columns",
+ NScheme::DECIMAL_PRECISION,
+ NScheme::DECIMAL_SCALE);
+ return false;
+ }
+ typeId = NYql::NProto::TypeIds::Decimal;
+ break;
+ }
+
+ default: {
+ status = Ydb::StatusIds::BAD_REQUEST;
+ error = "Only optional of data types are supported for table columns";
+ return false;
+ }
+ }
+
+ if (!NYql::NProto::TypeIds_IsValid((int)typeId)) {
+ status = Ydb::StatusIds::BAD_REQUEST;
+ error = TStringBuilder() << "Got invalid typeId: " << (int)typeId;
+ return false;
+ }
+
+ outTypeId = typeId;
+ return true;
+}
+
bool FillColumnDescription(NKikimrSchemeOp::TTableDescription& out,
const google::protobuf::RepeatedPtrField<Ydb::Table::ColumnMeta>& in, Ydb::StatusIds::StatusCode& status, TString& error) {
@@ -163,7 +163,7 @@ bool FillColumnDescription(NKikimrSchemeOp::TTableDescription& out,
}
ui32 typeId;
- if (!ExtractColumnTypeId(typeId, column.type(), status, error)) {
+ if (!ExtractColumnTypeId(typeId, column.type(), status, error)) {
return false;
}
cd->SetType(NYql::NProto::TypeIds_Name(NYql::NProto::TypeIds(typeId)));
@@ -581,14 +581,14 @@ void FillPartitioningSettingsImpl(TYdbProto& out,
outPartSettings.set_partitioning_by_size(Ydb::FeatureFlag::DISABLED);
}
- if (inPartPolicy.HasSplitByLoadSettings()) {
- bool enabled = inPartPolicy.GetSplitByLoadSettings().GetEnabled();
- outPartSettings.set_partitioning_by_load(enabled ? Ydb::FeatureFlag::ENABLED : Ydb::FeatureFlag::DISABLED);
+ if (inPartPolicy.HasSplitByLoadSettings()) {
+ bool enabled = inPartPolicy.GetSplitByLoadSettings().GetEnabled();
+ outPartSettings.set_partitioning_by_load(enabled ? Ydb::FeatureFlag::ENABLED : Ydb::FeatureFlag::DISABLED);
} else {
// (!) We assume that partitioning by load is disabled by default. But we don't know it for sure.
outPartSettings.set_partitioning_by_load(Ydb::FeatureFlag::DISABLED);
- }
-
+ }
+
if (inPartPolicy.HasMinPartitionsCount() && inPartPolicy.GetMinPartitionsCount()) {
outPartSettings.set_min_partitions_count(inPartPolicy.GetMinPartitionsCount());
}
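The ExtractColumnTypeId hunk restored above accepts only one decimal shape and rejects everything else with BAD_REQUEST. A minimal standalone sketch of that precision/scale check (plain C++, with illustrative stand-in constants rather than NScheme::DECIMAL_PRECISION/SCALE) could look like:

    // Sketch: validate a requested decimal type against the single supported
    // Decimal(precision, scale) combination. Constants are assumed stand-ins.
    #include <cstdint>
    #include <optional>
    #include <sstream>
    #include <string>

    namespace sketch {

    constexpr uint32_t kSupportedPrecision = 22; // assumed stand-in value
    constexpr uint32_t kSupportedScale = 9;      // assumed stand-in value

    struct DecimalParams {
        uint32_t precision;
        uint32_t scale;
    };

    // Returns an error message when unsupported, std::nullopt when accepted.
    std::optional<std::string> ValidateDecimal(const DecimalParams& p) {
        if (p.precision != kSupportedPrecision || p.scale != kSupportedScale) {
            std::ostringstream err;
            err << "Only Decimal(" << kSupportedPrecision << "," << kSupportedScale
                << ") is supported for table columns";
            return err.str();
        }
        return std::nullopt;
    }

    } // namespace sketch

The real helper additionally distinguishes bad precision from bad scale so the error points at the offending parameter; the sketch collapses both into one message for brevity.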
diff --git a/ydb/core/ydb_convert/table_description.h b/ydb/core/ydb_convert/table_description.h
index 1125f8b9238..af77a93776a 100644
--- a/ydb/core/ydb_convert/table_description.h
+++ b/ydb/core/ydb_convert/table_description.h
@@ -14,7 +14,7 @@ void FillColumnDescription(Ydb::Table::CreateTableRequest& out,
// in
bool FillColumnDescription(NKikimrSchemeOp::TTableDescription& out,
const google::protobuf::RepeatedPtrField<Ydb::Table::ColumnMeta>& in, Ydb::StatusIds::StatusCode& status, TString& error);
-bool ExtractColumnTypeId(ui32& outTypeId, const Ydb::Type& inType, Ydb::StatusIds::StatusCode& status, TString& error);
+bool ExtractColumnTypeId(ui32& outTypeId, const Ydb::Type& inType, Ydb::StatusIds::StatusCode& status, TString& error);
// out
void FillTableBoundary(Ydb::Table::DescribeTableResult& out,
diff --git a/ydb/core/ydb_convert/table_settings.cpp b/ydb/core/ydb_convert/table_settings.cpp
index f9999a6d7fb..3f3547f1f5e 100644
--- a/ydb/core/ydb_convert/table_settings.cpp
+++ b/ydb/core/ydb_convert/table_settings.cpp
@@ -83,33 +83,33 @@ bool FillCreateTableSettingsDesc(NKikimrSchemeOp::TTableDescription& tableDesc,
return false;
}
- switch (partitioningSettings.partitioning_by_load()) {
- case Ydb::FeatureFlag::STATUS_UNSPECIFIED:
- {
- break;
- }
- case Ydb::FeatureFlag::ENABLED:
- {
- auto &policy = *partitionConfig.MutablePartitioningPolicy();
- policy.MutableSplitByLoadSettings()->SetEnabled(true);
+ switch (partitioningSettings.partitioning_by_load()) {
+ case Ydb::FeatureFlag::STATUS_UNSPECIFIED:
+ {
+ break;
+ }
+ case Ydb::FeatureFlag::ENABLED:
+ {
+ auto &policy = *partitionConfig.MutablePartitioningPolicy();
+ policy.MutableSplitByLoadSettings()->SetEnabled(true);
if (!partitioningSettings.min_partitions_count()) {
policy.SetMinPartitionsCount(CalculateDefaultMinPartitions(proto));
}
- break;
- }
- case Ydb::FeatureFlag::DISABLED:
- {
- auto &policy = *partitionConfig.MutablePartitioningPolicy();
- policy.MutableSplitByLoadSettings()->SetEnabled(false);
- break;
- }
- default:
- code = Ydb::StatusIds::BAD_REQUEST;
- error = TStringBuilder() << "Unknown auto partitioning by load feature flag status: '"
- << (ui32)partitioningSettings.partitioning_by_load() << "'";
- return false;
- }
-
+ break;
+ }
+ case Ydb::FeatureFlag::DISABLED:
+ {
+ auto &policy = *partitionConfig.MutablePartitioningPolicy();
+ policy.MutableSplitByLoadSettings()->SetEnabled(false);
+ break;
+ }
+ default:
+ code = Ydb::StatusIds::BAD_REQUEST;
+ error = TStringBuilder() << "Unknown auto partitioning by load feature flag status: '"
+ << (ui32)partitioningSettings.partitioning_by_load() << "'";
+ return false;
+ }
+
if (partitioningSettings.min_partitions_count()) {
auto &policy = *partitionConfig.MutablePartitioningPolicy();
policy.SetMinPartitionsCount(partitioningSettings.min_partitions_count());
@@ -251,36 +251,36 @@ bool FillAlterTableSettingsDesc(NKikimrSchemeOp::TTableDescription& tableDesc,
return false;
}
- switch (alterSettings.partitioning_by_load()) {
- case Ydb::FeatureFlag::STATUS_UNSPECIFIED:
- {
- changed = true;
- break;
- }
- case Ydb::FeatureFlag::ENABLED:
- {
- auto &policy = *partitionConfig.MutablePartitioningPolicy();
- policy.MutableSplitByLoadSettings()->SetEnabled(true);
+ switch (alterSettings.partitioning_by_load()) {
+ case Ydb::FeatureFlag::STATUS_UNSPECIFIED:
+ {
+ changed = true;
+ break;
+ }
+ case Ydb::FeatureFlag::ENABLED:
+ {
+ auto &policy = *partitionConfig.MutablePartitioningPolicy();
+ policy.MutableSplitByLoadSettings()->SetEnabled(true);
if (!alterSettings.min_partitions_count()) {
policy.SetMinPartitionsCount(defaultMinPartitions);
}
- changed = true;
- break;
- }
- case Ydb::FeatureFlag::DISABLED:
- {
- auto &policy = *partitionConfig.MutablePartitioningPolicy();
- policy.MutableSplitByLoadSettings()->SetEnabled(false);
- changed = true;
- break;
- }
- default:
- code = Ydb::StatusIds::BAD_REQUEST;
- error = TStringBuilder() << "Unknown auto partitioning by load feature flag status: '"
- << (ui32)alterSettings.partitioning_by_load() << "'";
- return false;
- }
-
+ changed = true;
+ break;
+ }
+ case Ydb::FeatureFlag::DISABLED:
+ {
+ auto &policy = *partitionConfig.MutablePartitioningPolicy();
+ policy.MutableSplitByLoadSettings()->SetEnabled(false);
+ changed = true;
+ break;
+ }
+ default:
+ code = Ydb::StatusIds::BAD_REQUEST;
+ error = TStringBuilder() << "Unknown auto partitioning by load feature flag status: '"
+ << (ui32)alterSettings.partitioning_by_load() << "'";
+ return false;
+ }
+
if (alterSettings.min_partitions_count()) {
auto &policy = *partitionConfig.MutablePartitioningPolicy();
policy.SetMinPartitionsCount(alterSettings.min_partitions_count());
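Both restored switches over partitioning_by_load follow the same tri-state pattern: UNSPECIFIED leaves the policy alone, ENABLED/DISABLED set it explicitly, and any other wire value is a request error. A minimal sketch of that pattern with hypothetical stand-in types (not the Ydb protos) is:

    // Sketch: map a tri-state feature flag onto an explicit policy setting.
    #include <cstdint>
    #include <string>

    namespace sketch {

    enum class FeatureFlag : uint32_t { Unspecified = 0, Enabled = 1, Disabled = 2 };

    struct PartitioningPolicy {
        bool splitByLoadEnabled = false;
        bool splitByLoadSet = false; // whether the caller set the flag at all
    };

    bool ApplySplitByLoad(FeatureFlag flag, PartitioningPolicy& policy, std::string& error) {
        switch (flag) {
        case FeatureFlag::Unspecified:
            return true; // keep the server-side default
        case FeatureFlag::Enabled:
            policy.splitByLoadEnabled = true;
            policy.splitByLoadSet = true;
            return true;
        case FeatureFlag::Disabled:
            policy.splitByLoadEnabled = false;
            policy.splitByLoadSet = true;
            return true;
        default:
            // Unknown values can arrive from newer clients over the wire.
            error = "Unknown auto partitioning by load feature flag status: '" +
                    std::to_string(static_cast<uint32_t>(flag)) + "'";
            return false;
        }
    }

    } // namespace sketch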
diff --git a/ydb/core/ydb_convert/table_settings.h b/ydb/core/ydb_convert/table_settings.h
index 658d35c709c..cc9b4fc488a 100644
--- a/ydb/core/ydb_convert/table_settings.h
+++ b/ydb/core/ydb_convert/table_settings.h
@@ -4,8 +4,8 @@
#include <ydb/public/api/protos/ydb_table.pb.h>
#include <util/datetime/base.h>
-#include <util/string/builder.h>
-
+#include <util/string/builder.h>
+
namespace NKikimr {
void MEWarning(const TString& settingName, TList<TString>& warnings);
@@ -18,55 +18,55 @@ bool FillAlterTableSettingsDesc(NKikimrSchemeOp::TTableDescription& out,
const Ydb::Table::AlterTableRequest& in,
Ydb::StatusIds::StatusCode& code, TString& error, bool changed);
-template <class TTtlSettingsEnabled>
-bool FillTtlSettings(TTtlSettingsEnabled& out, const Ydb::Table::TtlSettings& in,
- Ydb::StatusIds::StatusCode& code, TString& error)
-{
- auto unsupported = [&code, &error](const TString& message) -> bool {
- code = Ydb::StatusIds::UNSUPPORTED;
- error = message;
- return false;
- };
-
- switch (in.mode_case()) {
- case Ydb::Table::TtlSettings::kDateTypeColumn:
- out.SetColumnName(in.date_type_column().column_name());
- out.SetExpireAfterSeconds(in.date_type_column().expire_after_seconds());
- break;
-
- case Ydb::Table::TtlSettings::kValueSinceUnixEpoch:
- out.SetColumnName(in.value_since_unix_epoch().column_name());
- out.SetExpireAfterSeconds(in.value_since_unix_epoch().expire_after_seconds());
-
- #define CASE_UNIT(type) \
- case Ydb::Table::ValueSinceUnixEpochModeSettings::type: \
+template <class TTtlSettingsEnabled>
+bool FillTtlSettings(TTtlSettingsEnabled& out, const Ydb::Table::TtlSettings& in,
+ Ydb::StatusIds::StatusCode& code, TString& error)
+{
+ auto unsupported = [&code, &error](const TString& message) -> bool {
+ code = Ydb::StatusIds::UNSUPPORTED;
+ error = message;
+ return false;
+ };
+
+ switch (in.mode_case()) {
+ case Ydb::Table::TtlSettings::kDateTypeColumn:
+ out.SetColumnName(in.date_type_column().column_name());
+ out.SetExpireAfterSeconds(in.date_type_column().expire_after_seconds());
+ break;
+
+ case Ydb::Table::TtlSettings::kValueSinceUnixEpoch:
+ out.SetColumnName(in.value_since_unix_epoch().column_name());
+ out.SetExpireAfterSeconds(in.value_since_unix_epoch().expire_after_seconds());
+
+ #define CASE_UNIT(type) \
+ case Ydb::Table::ValueSinceUnixEpochModeSettings::type: \
out.SetColumnUnit(NKikimrSchemeOp::TTTLSettings::type); \
- break
-
- switch (in.value_since_unix_epoch().column_unit()) {
- CASE_UNIT(UNIT_SECONDS);
- CASE_UNIT(UNIT_MILLISECONDS);
- CASE_UNIT(UNIT_MICROSECONDS);
- CASE_UNIT(UNIT_NANOSECONDS);
- default:
- return unsupported(TStringBuilder() << "Unsupported unit: "
- << static_cast<ui32>(in.value_since_unix_epoch().column_unit()));
- }
-
- #undef CASE_UNIT
- break;
-
- default:
- return unsupported("Unsupported ttl settings");
- }
-
+ break
+
+ switch (in.value_since_unix_epoch().column_unit()) {
+ CASE_UNIT(UNIT_SECONDS);
+ CASE_UNIT(UNIT_MILLISECONDS);
+ CASE_UNIT(UNIT_MICROSECONDS);
+ CASE_UNIT(UNIT_NANOSECONDS);
+ default:
+ return unsupported(TStringBuilder() << "Unsupported unit: "
+ << static_cast<ui32>(in.value_since_unix_epoch().column_unit()));
+ }
+
+ #undef CASE_UNIT
+ break;
+
+ default:
+ return unsupported("Unsupported ttl settings");
+ }
+
if constexpr (std::is_same_v<TTtlSettingsEnabled, NKikimrSchemeOp::TTTLSettings>) {
if (in.run_interval_seconds()) {
out.MutableSysSettings()->SetRunInterval(TDuration::Seconds(in.run_interval_seconds()).GetValue());
}
}
- return true;
-}
-
+ return true;
+}
+
} // namespace NKikimr
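The FillTtlSettings template above stamps out its per-unit cases with a local CASE_UNIT macro that is #undef'd right after the switch. A stripped-down illustration of that macro pattern, using hypothetical enums instead of the Ydb/NKikimrSchemeOp types, is:

    // Sketch: macro-generated cases that copy a unit enum between two types.
    #include <optional>

    namespace sketch {

    enum class RequestUnit { Seconds, Milliseconds, Microseconds, Nanoseconds, Unknown };
    enum class InternalUnit { Seconds, Milliseconds, Microseconds, Nanoseconds };

    std::optional<InternalUnit> MapUnit(RequestUnit in) {
    #define CASE_UNIT(name) \
        case RequestUnit::name: \
            return InternalUnit::name

        switch (in) {
        CASE_UNIT(Seconds);
        CASE_UNIT(Milliseconds);
        CASE_UNIT(Microseconds);
        CASE_UNIT(Nanoseconds);
        default:
            return std::nullopt; // unsupported unit; caller reports UNSUPPORTED
        }
    #undef CASE_UNIT
    }

    } // namespace sketch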
diff --git a/ydb/core/ymq/http/http.cpp b/ydb/core/ymq/http/http.cpp
index 7dc1cb698ed..5d0963bfb2e 100644
--- a/ydb/core/ymq/http/http.cpp
+++ b/ydb/core/ymq/http/http.cpp
@@ -1010,15 +1010,15 @@ void TAsyncHttpServer::Initialize(
nullptr, TOTAL_COUNTER_LABEL, nullptr, true
);
AggregatedUserCounters_->ShowDetailedCounters(TInstant::Max());
- PoolId_ = poolId;
+ PoolId_ = poolId;
+}
+
+void TAsyncHttpServer::Start() {
+ if (!THttpServer::Start()) {
+ Y_FAIL("Unable to start http server for SQS service on port %" PRIu16, Options().Port);
+ }
}
-void TAsyncHttpServer::Start() {
- if (!THttpServer::Start()) {
- Y_FAIL("Unable to start http server for SQS service on port %" PRIu16, Options().Port);
- }
-}
-
TClientRequest* TAsyncHttpServer::CreateClient() {
return new THttpRequest(this);
}
diff --git a/ydb/core/ymq/http/http.h b/ydb/core/ymq/http/http.h
index 9973d80c9ae..01a2b0edc4f 100644
--- a/ydb/core/ymq/http/http.h
+++ b/ydb/core/ymq/http/http.h
@@ -151,8 +151,8 @@ public:
TIntrusivePtr<NMonitoring::TDynamicCounters> ymqCounters,
ui32 poolId);
- void Start();
-
+ void Start();
+
NActors::TActorSystem* GetActorSystem() const {
return ActorSystem_;
}
diff --git a/ydb/core/yql_testlib/yql_testlib.cpp b/ydb/core/yql_testlib/yql_testlib.cpp
index 07917ffa4d8..35838c40f6a 100644
--- a/ydb/core/yql_testlib/yql_testlib.cpp
+++ b/ydb/core/yql_testlib/yql_testlib.cpp
@@ -180,11 +180,11 @@ void TYqlServer::Initialize() {
CreateBootstrapTablets();
SetupStorage();
-
+
for (ui32 nodeIdx = 0; nodeIdx < GetSettings().NodeCount; ++nodeIdx) {
SetupDomainLocalService(nodeIdx);
SetupProxies(nodeIdx);
- }
+ }
SetupLogging();
}
diff --git a/ydb/library/yql/core/type_ann/type_ann_core.cpp b/ydb/library/yql/core/type_ann/type_ann_core.cpp
index 0fd8b072906..5846e6cb108 100644
--- a/ydb/library/yql/core/type_ann/type_ann_core.cpp
+++ b/ydb/library/yql/core/type_ann/type_ann_core.cpp
@@ -3518,32 +3518,32 @@ namespace NTypeAnnImpl {
}
IGraphTransformer::TStatus FromBytesWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
- Y_UNUSED(output);
+ Y_UNUSED(output);
if (!EnsureMinArgsCount(*input, 2, ctx.Expr)) {
- return IGraphTransformer::TStatus::Error;
- }
+ return IGraphTransformer::TStatus::Error;
+ }
bool isOptional;
const TDataExprType* dataType;
if (!EnsureDataOrOptionalOfData(input->Head(), isOptional, dataType, ctx.Expr)) {
- return IGraphTransformer::TStatus::Error;
- }
-
+ return IGraphTransformer::TStatus::Error;
+ }
+
if (!EnsureSpecificDataType(input->Head().Pos(), *dataType, EDataSlot::String, ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
}
if (!EnsureAtom(*input->Child(1), ctx.Expr)) {
- return IGraphTransformer::TStatus::Error;
- }
-
+ return IGraphTransformer::TStatus::Error;
+ }
+
auto dataTypeName = input->Child(1)->Content();
auto slot = NKikimr::NUdf::FindDataSlot(dataTypeName);
if (!slot) {
ctx.Expr.AddError(TIssue(ctx.Expr.GetPosition(input->Child(1)->Pos()), TStringBuilder() << "Unknown datatype: " << dataTypeName));
- return IGraphTransformer::TStatus::Error;
- }
-
+ return IGraphTransformer::TStatus::Error;
+ }
+
const bool isDecimal = IsDataTypeDecimal(*slot);
if (!EnsureArgsCount(*input, isDecimal ? 4 : 2, ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
@@ -3556,9 +3556,9 @@ namespace NTypeAnnImpl {
if (isDecimal && !input->GetTypeAnn()->Cast<TDataExprParamsType>()->Validate(input->Pos(), ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
}
- return IGraphTransformer::TStatus::Ok;
- }
-
+ return IGraphTransformer::TStatus::Ok;
+ }
+
bool CanConvert(EDataSlot sourceType, EDataSlot targetType) {
bool canConvert = false;
if ((IsDataTypeIntegral(sourceType) || sourceType == EDataSlot::Bool) &&
@@ -4935,25 +4935,25 @@ template <NKikimr::NUdf::EDataSlot DataSlot>
}
IGraphTransformer::TStatus ToBytesWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
- Y_UNUSED(output);
+ Y_UNUSED(output);
if (!EnsureArgsCount(*input, 1, ctx.Expr)) {
- return IGraphTransformer::TStatus::Error;
- }
-
+ return IGraphTransformer::TStatus::Error;
+ }
+
bool isOptional;
const TDataExprType* dataType;
if (!EnsureDataOrOptionalOfData(input->Head(), isOptional, dataType, ctx.Expr)) {
- return IGraphTransformer::TStatus::Error;
- }
-
+ return IGraphTransformer::TStatus::Error;
+ }
+
input->SetTypeAnn(ctx.Expr.MakeType<TDataExprType>(EDataSlot::String));
if (isOptional) {
input->SetTypeAnn(ctx.Expr.MakeType<TOptionalExprType>(input->GetTypeAnn()));
}
-
- return IGraphTransformer::TStatus::Ok;
- }
-
+
+ return IGraphTransformer::TStatus::Ok;
+ }
+
IGraphTransformer::TStatus DictWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
Y_UNUSED(output);
if (!EnsureMinArgsCount(*input, 1, ctx.Expr)) {
@@ -12876,7 +12876,7 @@ template <NKikimr::NUdf::EDataSlot DataSlot>
Functions["SessionWindowTraits"] = &SessionWindowTraitsWrapper;
Functions["FromString"] = &FromStringWrapper;
Functions["StrictFromString"] = &StrictFromStringWrapper;
- Functions["FromBytes"] = &FromBytesWrapper;
+ Functions["FromBytes"] = &FromBytesWrapper;
Functions["Convert"] = &ConvertWrapper;
Functions["AlterTo"] = &AlterToWrapper;
Functions["ToIntegral"] = &ToIntegralWrapper;
@@ -12899,7 +12899,7 @@ template <NKikimr::NUdf::EDataSlot DataSlot>
Functions["Optional"] = &OptionalWrapper;
Functions["OptionalIf"] = &OptionalIfWrapper;
Functions["ToString"] = &ToStringWrapper;
- Functions["ToBytes"] = &ToBytesWrapper;
+ Functions["ToBytes"] = &ToBytesWrapper;
Functions["GroupByKey"] = &GroupByKeyWrapper;
Functions["PartitionByKey"] = &PartitionByKeyWrapper;
Functions["PartitionsByKeys"] = &PartitionsByKeysWrapper;
diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
index 04f0a569074..deaadefb42c 100644
--- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
+++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
@@ -250,7 +250,7 @@ protected:
ProfileStats->MkqlMaxUsedMemory = std::max(ProfileStats->MkqlMaxUsedMemory, alloc->GetPeakAllocated());
CA_LOG_D("Peak memory usage: " << ProfileStats->MkqlMaxUsedMemory);
}
-
+
ReportStats(now);
}
diff --git a/ydb/library/yql/minikql/aligned_page_pool.cpp b/ydb/library/yql/minikql/aligned_page_pool.cpp
index 58ab4c9d883..e015c255c81 100644
--- a/ydb/library/yql/minikql/aligned_page_pool.cpp
+++ b/ydb/library/yql/minikql/aligned_page_pool.cpp
@@ -7,7 +7,7 @@
#include <util/system/compiler.h>
#include <util/system/info.h>
#include <util/system/error.h>
-#include <util/thread/lfstack.h>
+#include <util/thread/lfstack.h>
#if defined(_win_)
#include <util/system/winint.h>
@@ -33,8 +33,8 @@ public:
{}
~TGlobalPagePool() {
- void* addr = nullptr;
- while (Pages.Dequeue(&addr)) {
+ void* addr = nullptr;
+ while (Pages.Dequeue(&addr)) {
#ifdef _win_
Y_VERIFY_DEBUG(::VirtualFree(addr, 0, MEM_RELEASE), "VirtualFree failed: %s", LastSystemErrorText());
#else
@@ -43,25 +43,25 @@ public:
}
}
- void* GetPage() {
- void *page = nullptr;
- if (Pages.Dequeue(&page)) {
- AtomicDecrement(Count);
- return page;
- }
-
- return nullptr;
+ void* GetPage() {
+ void *page = nullptr;
+ if (Pages.Dequeue(&page)) {
+ AtomicDecrement(Count);
+ return page;
+ }
+
+ return nullptr;
+ }
+
+ void PushPage(void* addr) {
+ AtomicIncrement(Count);
+ Pages.Enqueue(addr);
}
- void PushPage(void* addr) {
- AtomicIncrement(Count);
- Pages.Enqueue(addr);
+ ui64 GetPageCount() const {
+ return RelaxedLoad(&Count);
}
- ui64 GetPageCount() const {
- return RelaxedLoad(&Count);
- }
-
size_t GetPageSize() const {
return PageSize;
}
@@ -72,8 +72,8 @@ public:
private:
const size_t PageSize;
- TAtomic Count = 0;
- TLockFreeStack<void*> Pages;
+ TAtomic Count = 0;
+ TLockFreeStack<void*> Pages;
};
class TGlobalPools {
@@ -100,51 +100,51 @@ private:
} // unnamed
-TAlignedPagePoolCounters::TAlignedPagePoolCounters(NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
- if (!countersRoot || name.empty())
- return;
- NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
- TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
- AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
- PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
- LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
-}
-
+TAlignedPagePoolCounters::TAlignedPagePoolCounters(NMonitoring::TDynamicCounterPtr countersRoot, const TString& name) {
+ if (!countersRoot || name.empty())
+ return;
+ NMonitoring::TDynamicCounterPtr subGroup = countersRoot->GetSubgroup("counters", "utils")->GetSubgroup("subsystem", "mkqlalloc");
+ TotalBytesAllocatedCntr = subGroup->GetCounter(name + "/TotalBytesAllocated");
+ AllocationsCntr = subGroup->GetCounter(name + "/Allocations", true);
+ PoolsCntr = subGroup->GetCounter(name + "/Pools", true);
+ LostPagesBytesFreeCntr = subGroup->GetCounter(name + "/LostPagesBytesFreed", true);
+}
+
TAlignedPagePool::~TAlignedPagePool() {
- if (CheckLostMem && !UncaughtException()) {
+ if (CheckLostMem && !UncaughtException()) {
Y_VERIFY_DEBUG(TotalAllocated == FreePages.size() * POOL_PAGE_SIZE,
"Expected %ld, actual %ld (%ld page(s), %ld offloaded)", TotalAllocated,
FreePages.size() * POOL_PAGE_SIZE, FreePages.size(), OffloadedActiveBytes);
Y_VERIFY_DEBUG(OffloadedActiveBytes == 0, "offloaded: %ld", OffloadedActiveBytes);
- }
+ }
- size_t activeBlocksSize = 0;
- for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
- activeBlocksSize += it->second;
- Free(it->first, it->second);
+ size_t activeBlocksSize = 0;
+ for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
+ activeBlocksSize += it->second;
+ Free(it->first, it->second);
}
if (activeBlocksSize > 0 || FreePages.size() != AllPages.size() || OffloadedActiveBytes) {
- if (Counters.LostPagesBytesFreeCntr) {
+ if (Counters.LostPagesBytesFreeCntr) {
(*Counters.LostPagesBytesFreeCntr) += OffloadedActiveBytes + activeBlocksSize + (AllPages.size() - FreePages.size()) * POOL_PAGE_SIZE;
- }
- }
-
+ }
+ }
+
Y_VERIFY_DEBUG(TotalAllocated == AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes,
- "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
+ "Expected %ld, actual %ld (%ld page(s))", TotalAllocated,
AllPages.size() * POOL_PAGE_SIZE + OffloadedActiveBytes, AllPages.size());
-
- for (auto &ptr : AllPages) {
- TGlobalPools::Instance().Get(0).PushPage(ptr);
- }
-
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
- }
- if (Counters.PoolsCntr) {
- --(*Counters.PoolsCntr);
- }
- TotalAllocated = 0;
+
+ for (auto &ptr : AllPages) {
+ TGlobalPools::Instance().Get(0).PushPage(ptr);
+ }
+
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) -= TotalAllocated;
+ }
+ if (Counters.PoolsCntr) {
+ --(*Counters.PoolsCntr);
+ }
+ TotalAllocated = 0;
}
void TAlignedPagePool::ReleaseFreePages() {
@@ -215,9 +215,9 @@ void* TAlignedPagePool::GetPage() {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += POOL_PAGE_SIZE;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += POOL_PAGE_SIZE;
+ }
++PageGlobalHitCount;
AllPages.emplace(ptr);
@@ -281,9 +281,9 @@ void* TAlignedPagePool::Alloc(size_t size) {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += size;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += size;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += size;
+ }
++PageGlobalHitCount;
} else {
++PageMissCount;
@@ -301,9 +301,9 @@ void* TAlignedPagePool::Alloc(size_t size) {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += size;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += size;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += size;
+ }
#else
void* mem = ::mmap(nullptr, size + POOL_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
if (Y_UNLIKELY(MAP_FAILED == mem)) {
@@ -322,9 +322,9 @@ void* TAlignedPagePool::Alloc(size_t size) {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += size + POOL_PAGE_SIZE;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += size + POOL_PAGE_SIZE;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += size + POOL_PAGE_SIZE;
+ }
} else {
// Return extra space to the system
if (Y_UNLIKELY(0 != ::munmap(reinterpret_cast<ui8*>(mem) + size, POOL_PAGE_SIZE))) {
@@ -335,9 +335,9 @@ void* TAlignedPagePool::Alloc(size_t size) {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += size;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += size;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += size;
+ }
}
} else {
res = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(mem) & PAGE_ADDR_MASK) + POOL_PAGE_SIZE);
@@ -355,16 +355,16 @@ void* TAlignedPagePool::Alloc(size_t size) {
if (AllocNotifyCallback) {
AllocNotifyCurrentBytes += size;
}
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) += size;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) += size;
+ }
}
#endif
}
- if (Counters.AllocationsCntr) {
- ++(*Counters.AllocationsCntr);
- }
+ if (Counters.AllocationsCntr) {
+ ++(*Counters.AllocationsCntr);
+ }
++AllocCount;
UpdatePeaks();
return res;
@@ -388,9 +388,9 @@ void TAlignedPagePool::Free(void* ptr, size_t size) noexcept {
Y_VERIFY_DEBUG(TotalAllocated >= size);
TotalAllocated -= size;
- if (Counters.TotalBytesAllocatedCntr) {
- (*Counters.TotalBytesAllocatedCntr) -= size;
- }
+ if (Counters.TotalBytesAllocatedCntr) {
+ (*Counters.TotalBytesAllocatedCntr) -= size;
+ }
}
bool TAlignedPagePool::TryIncreaseLimit(ui64 required) {
@@ -401,14 +401,14 @@ bool TAlignedPagePool::TryIncreaseLimit(ui64 required) {
return Limit >= required;
}
-ui64 TAlignedPagePool::GetGlobalPagePoolSize() {
- ui64 size = 0;
- for (size_t level = 0; level <= MidLevels; ++level) {
+ui64 TAlignedPagePool::GetGlobalPagePoolSize() {
+ ui64 size = 0;
+ for (size_t level = 0; level <= MidLevels; ++level) {
size += TGlobalPools::Instance().Get(level).GetSize();
- }
- return size;
-}
-
+ }
+ return size;
+}
+
void TAlignedPagePool::PrintStat(size_t usedPages, IOutputStream& out) const {
usedPages += GetFreePageCount();
out << "Count of free pages: " << GetFreePageCount() << Endl;
diff --git a/ydb/library/yql/minikql/aligned_page_pool.h b/ydb/library/yql/minikql/aligned_page_pool.h
index fe18f5a370e..2ef28786554 100644
--- a/ydb/library/yql/minikql/aligned_page_pool.h
+++ b/ydb/library/yql/minikql/aligned_page_pool.h
@@ -6,7 +6,7 @@
#include <util/system/defaults.h>
#include <library/cpp/monlib/dynamic_counters/counters.h>
-
+
#include <type_traits>
#include <stack>
#include <vector>
@@ -15,22 +15,22 @@
namespace NKikimr {
-struct TAlignedPagePoolCounters {
- explicit TAlignedPagePoolCounters(NMonitoring::TDynamicCounterPtr countersRoot = nullptr, const TString& name = TString());
-
- NMonitoring::TDynamicCounters::TCounterPtr TotalBytesAllocatedCntr;
- NMonitoring::TDynamicCounters::TCounterPtr AllocationsCntr;
- NMonitoring::TDynamicCounters::TCounterPtr PoolsCntr;
- NMonitoring::TDynamicCounters::TCounterPtr LostPagesBytesFreeCntr;
-
- void Swap(TAlignedPagePoolCounters& other) {
- DoSwap(TotalBytesAllocatedCntr, other.TotalBytesAllocatedCntr);
- DoSwap(AllocationsCntr, other.AllocationsCntr);
- DoSwap(PoolsCntr, other.PoolsCntr);
- DoSwap(LostPagesBytesFreeCntr, other.LostPagesBytesFreeCntr);
- }
-};
-
+struct TAlignedPagePoolCounters {
+ explicit TAlignedPagePoolCounters(NMonitoring::TDynamicCounterPtr countersRoot = nullptr, const TString& name = TString());
+
+ NMonitoring::TDynamicCounters::TCounterPtr TotalBytesAllocatedCntr;
+ NMonitoring::TDynamicCounters::TCounterPtr AllocationsCntr;
+ NMonitoring::TDynamicCounters::TCounterPtr PoolsCntr;
+ NMonitoring::TDynamicCounters::TCounterPtr LostPagesBytesFreeCntr;
+
+ void Swap(TAlignedPagePoolCounters& other) {
+ DoSwap(TotalBytesAllocatedCntr, other.TotalBytesAllocatedCntr);
+ DoSwap(AllocationsCntr, other.AllocationsCntr);
+ DoSwap(PoolsCntr, other.PoolsCntr);
+ DoSwap(LostPagesBytesFreeCntr, other.LostPagesBytesFreeCntr);
+ }
+};
+
// NOTE: We intentionally avoid inheritance from std::exception here to make it harder
// to catch this exception in UDFs code, so we can handle it in the host.
class TMemoryLimitExceededException {};
@@ -40,20 +40,20 @@ public:
static constexpr ui64 POOL_PAGE_SIZE = 1ULL << 16; // 64k
static constexpr ui64 PAGE_ADDR_MASK = ~(POOL_PAGE_SIZE - 1);
- explicit TAlignedPagePool(const TAlignedPagePoolCounters& counters = TAlignedPagePoolCounters())
- : Counters(counters)
- {
- if (Counters.PoolsCntr) {
- ++(*Counters.PoolsCntr);
- }
- }
-
+ explicit TAlignedPagePool(const TAlignedPagePoolCounters& counters = TAlignedPagePoolCounters())
+ : Counters(counters)
+ {
+ if (Counters.PoolsCntr) {
+ ++(*Counters.PoolsCntr);
+ }
+ }
+
TAlignedPagePool(const TAlignedPagePool&) = delete;
- TAlignedPagePool(TAlignedPagePool&& other) = delete;
-
- TAlignedPagePool& operator = (const TAlignedPagePool&) = delete;
- TAlignedPagePool& operator = (TAlignedPagePool&& other) = delete;
-
+ TAlignedPagePool(TAlignedPagePool&& other) = delete;
+
+ TAlignedPagePool& operator = (const TAlignedPagePool&) = delete;
+ TAlignedPagePool& operator = (TAlignedPagePool&& other) = delete;
+
~TAlignedPagePool();
inline size_t GetAllocated() const noexcept {
@@ -82,12 +82,12 @@ public:
void Swap(TAlignedPagePool& other) {
DoSwap(FreePages, other.FreePages);
- DoSwap(AllPages, other.AllPages);
- DoSwap(ActiveBlocks, other.ActiveBlocks);
+ DoSwap(AllPages, other.AllPages);
+ DoSwap(ActiveBlocks, other.ActiveBlocks);
DoSwap(TotalAllocated, other.TotalAllocated);
DoSwap(PeakAllocated, other.PeakAllocated);
DoSwap(PeakUsed, other.PeakUsed);
- DoSwap(Limit, other.Limit);
+ DoSwap(Limit, other.Limit);
DoSwap(AllocCount, other.AllocCount);
DoSwap(PageAllocCount, other.PageAllocCount);
DoSwap(PageHitCount, other.PageHitCount);
@@ -96,8 +96,8 @@ public:
DoSwap(OffloadedAllocCount, other.OffloadedAllocCount);
DoSwap(OffloadedBytes, other.OffloadedBytes);
DoSwap(OffloadedActiveBytes, other.OffloadedActiveBytes);
- DoSwap(Counters, other.Counters);
- DoSwap(CheckLostMem, other.CheckLostMem);
+ DoSwap(Counters, other.Counters);
+ DoSwap(CheckLostMem, other.CheckLostMem);
DoSwap(AllocNotifyCallback, other.AllocNotifyCallback);
DoSwap(IncreaseMemoryLimitCallback, other.IncreaseMemoryLimitCallback);
}
@@ -147,8 +147,8 @@ public:
void OffloadAlloc(ui64 size);
void OffloadFree(ui64 size) noexcept;
- static ui64 GetGlobalPagePoolSize();
-
+ static ui64 GetGlobalPagePoolSize();
+
ui64 GetLimit() const noexcept {
return Limit;
}
@@ -160,9 +160,9 @@ public:
void ReleaseFreePages();
void DisableStrictAllocationCheck() noexcept {
- CheckLostMem = false;
- }
-
+ CheckLostMem = false;
+ }
+
using TAllocNotifyCallback = std::function<void()>;
void SetAllocNotifyCallback(TAllocNotifyCallback&& callback, ui64 notifyBytes = 0) {
AllocNotifyCallback = std::move(callback);
@@ -206,8 +206,8 @@ protected:
ui64 OffloadedBytes = 0;
ui64 OffloadedActiveBytes = 0;
- TAlignedPagePoolCounters Counters;
- bool CheckLostMem = true;
+ TAlignedPagePoolCounters Counters;
+ bool CheckLostMem = true;
TAllocNotifyCallback AllocNotifyCallback;
ui64 AllocNotifyBytes = 0;
diff --git a/ydb/library/yql/minikql/mkql_alloc.cpp b/ydb/library/yql/minikql/mkql_alloc.cpp
index 039178f7b04..9b77147a26c 100644
--- a/ydb/library/yql/minikql/mkql_alloc.cpp
+++ b/ydb/library/yql/minikql/mkql_alloc.cpp
@@ -23,7 +23,7 @@ void TAllocState::TListEntry::Unlink() noexcept {
}
TAllocState::TAllocState(const NKikimr::TAlignedPagePoolCounters &counters, bool supportsSizedAllocators)
- : TAlignedPagePool(counters)
+ : TAlignedPagePool(counters)
, SupportsSizedAllocators(supportsSizedAllocators)
{
GetRoot()->InitLinks();
diff --git a/ydb/library/yql/minikql/mkql_alloc.h b/ydb/library/yql/minikql/mkql_alloc.h
index 1df4c8a5e2a..5e864cdc4c8 100644
--- a/ydb/library/yql/minikql/mkql_alloc.h
+++ b/ydb/library/yql/minikql/mkql_alloc.h
@@ -114,7 +114,7 @@ public:
size_t GetLimit() const { return MyState_.GetLimit(); }
void SetLimit(size_t limit) { MyState_.SetLimit(limit); }
- void DisableStrictAllocationCheck() { MyState_.DisableStrictAllocationCheck(); }
+ void DisableStrictAllocationCheck() { MyState_.DisableStrictAllocationCheck(); }
void ReleaseFreePages() { MyState_.ReleaseFreePages(); }
void InvalidateMemInfo() { MyState_.InvalidateMemInfo(); }
diff --git a/ydb/library/yql/minikql/mkql_mem_info.h b/ydb/library/yql/minikql/mkql_mem_info.h
index 0632f662f2f..8e880d1db30 100644
--- a/ydb/library/yql/minikql/mkql_mem_info.h
+++ b/ydb/library/yql/minikql/mkql_mem_info.h
@@ -20,7 +20,7 @@
Y_UNUSED(MemInfo); Y_UNUSED(Mem); Y_UNUSED(Size);
# define MKQL_MEM_RETURN(MemInfo, Mem, Size) \
Y_UNUSED(MemInfo); Y_UNUSED(Mem); Y_UNUSED(Size);
-# define MKQL_MEM_RETURN_PTR(MemInfo, Mem) \
+# define MKQL_MEM_RETURN_PTR(MemInfo, Mem) \
Y_UNUSED(MemInfo); Y_UNUSED(Mem);
#endif
diff --git a/ydb/library/yql/minikql/mkql_node.cpp b/ydb/library/yql/minikql/mkql_node.cpp
index 30379585acc..4c6f3e45ad4 100644
--- a/ydb/library/yql/minikql/mkql_node.cpp
+++ b/ydb/library/yql/minikql/mkql_node.cpp
@@ -2,7 +2,7 @@
#include "mkql_node_builder.h"
#include "mkql_node_cast.h"
#include "mkql_node_visitor.h"
-#include "mkql_node_printer.h"
+#include "mkql_node_printer.h"
#include <util/stream/str.h>
#include <util/string/join.h>
@@ -18,10 +18,10 @@ using namespace NDetail;
TTypeEnvironment::TTypeEnvironment(TScopedAlloc& alloc)
: Alloc(alloc)
, Arena(&Alloc.Ref())
- , EmptyStruct(nullptr)
- , EmptyTuple(nullptr)
+ , EmptyStruct(nullptr)
+ , EmptyTuple(nullptr)
{
- NamesPool.reserve(64);
+ NamesPool.reserve(64);
TypeOfType = TTypeType::Create(*this);
TypeOfType->Type = TypeOfType;
TypeOfVoid = TVoidType::Create(TypeOfType, *this);
@@ -462,7 +462,7 @@ bool TDataLiteral::Equals(const TDataLiteral& nodeToCompare) const {
}
}
-TStructType::TStructType(ui32 membersCount, std::pair<TInternName, TType*>* members, const TTypeEnvironment& env,
+TStructType::TStructType(ui32 membersCount, std::pair<TInternName, TType*>* members, const TTypeEnvironment& env,
bool validate)
: TType(EKind::Struct, env.GetTypeOfType())
, MembersCount(membersCount)
@@ -471,22 +471,22 @@ TStructType::TStructType(ui32 membersCount, std::pair<TInternName, TType*>* memb
if (!validate)
return;
- TInternName lastMemberName;
+ TInternName lastMemberName;
for (size_t index = 0; index < membersCount; ++index) {
const auto& name = Members[index].first;
MKQL_ENSURE(!name.Str().empty(), "Empty member name is not allowed");
- MKQL_ENSURE(name.Str() > lastMemberName.Str(), "Member names are not sorted: "
- << name.Str() << " <= " << lastMemberName.Str());
+ MKQL_ENSURE(name.Str() > lastMemberName.Str(), "Member names are not sorted: "
+ << name.Str() << " <= " << lastMemberName.Str());
lastMemberName = name;
}
}
TStructType* TStructType::Create(const std::pair<TString, TType*>* members, ui32 membersCount, const TTypeEnvironment& env) {
- std::pair<TInternName, TType*>* allocatedMembers = nullptr;
+ std::pair<TInternName, TType*>* allocatedMembers = nullptr;
if (membersCount) {
- allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(membersCount * sizeof(*allocatedMembers)));
+ allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(membersCount * sizeof(*allocatedMembers)));
for (ui32 i = 0; i < membersCount; ++i) {
allocatedMembers[i] = std::make_pair(env.InternName(members[i].first), members[i].second);
}
@@ -496,9 +496,9 @@ TStructType* TStructType::Create(const std::pair<TString, TType*>* members, ui32
}
TStructType* TStructType::Create(ui32 membersCount, const TStructMember* members, const TTypeEnvironment& env) {
- std::pair<TInternName, TType*>* allocatedMembers = nullptr;
+ std::pair<TInternName, TType*>* allocatedMembers = nullptr;
if (membersCount) {
- allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(membersCount * sizeof(*allocatedMembers)));
+ allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(membersCount * sizeof(*allocatedMembers)));
for (ui32 i = 0; i < membersCount; ++i) {
allocatedMembers[i] = std::make_pair(env.InternName(members[i].Name), members[i].Type);
}
@@ -565,9 +565,9 @@ TNode* TStructType::DoCloneOnCallableWrite(const TTypeEnvironment& env) const {
if (!needClone)
return const_cast<TStructType*>(this);
- std::pair<TInternName, TType*>* allocatedMembers = nullptr;
+ std::pair<TInternName, TType*>* allocatedMembers = nullptr;
if (MembersCount) {
- allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(MembersCount * sizeof(*allocatedMembers)));
+ allocatedMembers = static_cast<std::pair<TInternName, TType*>*>(env.AllocateBuffer(MembersCount * sizeof(*allocatedMembers)));
for (ui32 i = 0; i < MembersCount; ++i) {
allocatedMembers[i].first = Members[i].first;
auto newNode = (TNode*)Members[i].second->GetCookie();
@@ -594,7 +594,7 @@ ui32 TStructType::GetMemberIndex(const TStringBuf& name) const {
TStringStream ss;
for (ui32 i = 0; i < MembersCount; ++i) {
- ss << " " << Members[i].first.Str();
+ ss << " " << Members[i].first.Str();
}
THROW yexception() << "Member with name '" << name << "' not found; "
<< " known members: " << ss.Str() << ".";
@@ -602,7 +602,7 @@ ui32 TStructType::GetMemberIndex(const TStringBuf& name) const {
TMaybe<ui32> TStructType::FindMemberIndex(const TStringBuf& name) const {
for (ui32 i = 0; i < MembersCount; ++i) {
- if (Members[i].first == name)
+ if (Members[i].first == name)
return i;
}
@@ -640,9 +640,9 @@ TStructLiteral* TStructLiteral::Create(ui32 valuesCount, const TRuntimeNode* val
for (ui32 i = 0; i < valuesCount; ++i) {
allocatedValues[i] = values[i];
}
- } else if (env.GetEmptyStruct()) {
- // if EmptyStruct has already been initialized
- return env.GetEmptyStruct();
+ } else if (env.GetEmptyStruct()) {
+ // if EmptyStruct has already been initialized
+ return env.GetEmptyStruct();
}
return ::new(env.Allocate<TStructLiteral>()) TStructLiteral(allocatedValues, type);
@@ -1319,7 +1319,7 @@ bool TDictLiteral::Equals(const TDictLiteral& nodeToCompare) const {
return true;
}
-TCallableType::TCallableType(const TInternName &name, TType* returnType, ui32 argumentsCount,
+TCallableType::TCallableType(const TInternName &name, TType* returnType, ui32 argumentsCount,
TType **arguments, TNode* payload, const TTypeEnvironment& env)
: TType(EKind::Callable, env.GetTypeOfType())
, IsMergeDisabled0(false)
@@ -1536,7 +1536,7 @@ TCallable::TCallable(TRuntimeNode result, TCallableType* type, bool validate)
}
MKQL_ENSURE(result.GetStaticType()->IsSameType(*type->GetReturnType()), "incorrect result type for callable: "
- << GetType()->GetName());
+ << GetType()->GetName());
Result.Freeze();
}
@@ -1658,7 +1658,7 @@ bool TCallable::Equals(const TCallable& nodeToCompare) const {
return true;
}
-void TCallable::SetResult(TRuntimeNode result, const TTypeEnvironment& env) {
+void TCallable::SetResult(TRuntimeNode result, const TTypeEnvironment& env) {
Y_UNUSED(env);
MKQL_ENSURE(!Result.GetNode(), "result is already set");
@@ -1917,9 +1917,9 @@ TTupleLiteral* TTupleLiteral::Create(ui32 valuesCount, const TRuntimeNode* value
for (ui32 i = 0; i < valuesCount; ++i) {
allocatedValues[i] = values[i];
}
- } else if (env.GetEmptyTuple()) {
- // if EmptyTuple has already been initialized
- return env.GetEmptyTuple();
+ } else if (env.GetEmptyTuple()) {
+ // if EmptyTuple has already been initialized
+ return env.GetEmptyTuple();
}
return ::new(env.Allocate<TTupleLiteral>()) TTupleLiteral(allocatedValues, type);
diff --git a/ydb/library/yql/minikql/mkql_node.h b/ydb/library/yql/minikql/mkql_node.h
index 0a64f367fa3..ee74455ea69 100644
--- a/ydb/library/yql/minikql/mkql_node.h
+++ b/ydb/library/yql/minikql/mkql_node.h
@@ -280,82 +280,82 @@ class TTupleLiteral;
class TResourceType;
class TDataType;
-
-// A non-owning reference to internalized string
-// Created only by TTypeEnvironment::InternName
-class TInternName {
-public:
- TInternName()
- {}
-
- TInternName(const TInternName& other)
- : StrBuf(other.StrBuf)
- {}
-
- const TInternName& operator = (const TInternName& other) {
- StrBuf = other.StrBuf;
- return *this;
- }
-
- size_t Hash() const {
- return (size_t)StrBuf.data();
- }
-
- operator bool() const {
- return (bool)StrBuf;
- }
-
- const TStringBuf& Str() const {
- return StrBuf;
- }
-
- // Optimized comparison (only by pointer)
- bool operator == (const TInternName& other) const {
- Y_VERIFY_DEBUG(StrBuf.data() != other.StrBuf.data() || StrBuf.size() == other.StrBuf.size(),
- "Lengths must be equal if pointers are equal");
- return StrBuf.data() == other.StrBuf.data();
- }
-
- bool operator != (const TInternName& other) const {
- return !this->operator ==(other);
- }
-
- // Regular comparison (by content)
- bool operator == (const TStringBuf& other) const {
- return StrBuf == other;
- }
-
- bool operator != (const TStringBuf& other) const {
- return !this->operator ==(other);
- }
-
-private:
- friend class TTypeEnvironment;
-
- explicit TInternName(const TStringBuf& strBuf)
- : StrBuf(strBuf)
- {}
-
-private:
- TStringBuf StrBuf;
-};
-
-}} // namespaces
-
-template <>
-struct THash<NKikimr::NMiniKQL::TInternName> {
- size_t operator ()(const NKikimr::NMiniKQL::TInternName& val) const {
- return val.Hash();
- }
-};
-
-namespace NKikimr {
-namespace NMiniKQL {
-
+
+// A non-owning reference to internalized string
+// Created only by TTypeEnvironment::InternName
+class TInternName {
+public:
+ TInternName()
+ {}
+
+ TInternName(const TInternName& other)
+ : StrBuf(other.StrBuf)
+ {}
+
+ const TInternName& operator = (const TInternName& other) {
+ StrBuf = other.StrBuf;
+ return *this;
+ }
+
+ size_t Hash() const {
+ return (size_t)StrBuf.data();
+ }
+
+ operator bool() const {
+ return (bool)StrBuf;
+ }
+
+ const TStringBuf& Str() const {
+ return StrBuf;
+ }
+
+ // Optimized comparison (only by pointer)
+ bool operator == (const TInternName& other) const {
+ Y_VERIFY_DEBUG(StrBuf.data() != other.StrBuf.data() || StrBuf.size() == other.StrBuf.size(),
+ "Lengths must be equal if pointers are equal");
+ return StrBuf.data() == other.StrBuf.data();
+ }
+
+ bool operator != (const TInternName& other) const {
+ return !this->operator ==(other);
+ }
+
+ // Regular comparison (by content)
+ bool operator == (const TStringBuf& other) const {
+ return StrBuf == other;
+ }
+
+ bool operator != (const TStringBuf& other) const {
+ return !this->operator ==(other);
+ }
+
+private:
+ friend class TTypeEnvironment;
+
+ explicit TInternName(const TStringBuf& strBuf)
+ : StrBuf(strBuf)
+ {}
+
+private:
+ TStringBuf StrBuf;
+};
+
+}} // namespaces
+
+template <>
+struct THash<NKikimr::NMiniKQL::TInternName> {
+ size_t operator ()(const NKikimr::NMiniKQL::TInternName& val) const {
+ return val.Hash();
+ }
+};
+
+namespace NKikimr {
+namespace NMiniKQL {
+
class TTypeEnvironment : private TNonCopyable {
public:
explicit TTypeEnvironment(TScopedAlloc& alloc);
-
+
~TTypeEnvironment();
template <typename T>
@@ -367,18 +367,18 @@ public:
return Arena.Alloc(size);
}
- TInternName InternName(const TStringBuf& name) const {
+ TInternName InternName(const TStringBuf& name) const {
auto it = NamesPool.find(name);
if (it != NamesPool.end()) {
- return TInternName(*it);
+ return TInternName(*it);
}
- // Copy to arena and null-terminate
- char* data = (char*)AllocateBuffer(name.size()+1);
- memcpy(data, name.data(), name.size());
- data[name.size()] = 0;
+ // Copy to arena and null-terminate
+ char* data = (char*)AllocateBuffer(name.size()+1);
+ memcpy(data, name.data(), name.size());
+ data[name.size()] = 0;
- return TInternName(*NamesPool.insert(TStringBuf(data, name.size())).first);
+ return TInternName(*NamesPool.insert(TStringBuf(data, name.size())).first);
}
TTypeType* GetTypeOfType() const {
@@ -625,10 +625,10 @@ public:
TStringBuf GetMemberName(ui32 index) const {
Y_VERIFY_DEBUG(index < MembersCount);
- return Members[index].first.Str();
+ return Members[index].first.Str();
}
- TInternName GetMemberNameStr(ui32 index) const {
+ TInternName GetMemberNameStr(ui32 index) const {
Y_VERIFY_DEBUG(index < MembersCount);
return Members[index].first;
}
@@ -642,7 +642,7 @@ public:
TMaybe<ui32> FindMemberIndex(const TStringBuf& name) const;
private:
- TStructType(ui32 membersCount, std::pair<TInternName, TType*>* members, const TTypeEnvironment& env, bool validate = true);
+ TStructType(ui32 membersCount, std::pair<TInternName, TType*>* members, const TTypeEnvironment& env, bool validate = true);
void DoUpdateLinks(const THashMap<TNode*, TNode*>& links);
TNode* DoCloneOnCallableWrite(const TTypeEnvironment& env) const;
@@ -650,7 +650,7 @@ private:
private:
ui32 MembersCount;
- std::pair<TInternName, TType*>* Members;
+ std::pair<TInternName, TType*>* Members;
};
class TStructLiteral : public TNode {
@@ -939,10 +939,10 @@ public:
bool IsConvertableTo(const TCallableType& typeToCompare, bool ignoreTagged = false) const;
TStringBuf GetName() const {
- return Name.Str();
+ return Name.Str();
}
- TInternName GetNameStr() const {
+ TInternName GetNameStr() const {
return Name;
}
@@ -972,7 +972,7 @@ public:
}
private:
- TCallableType(const TInternName& name, TType* returnType, ui32 argumentsCount, TType** arguments,
+ TCallableType(const TInternName& name, TType* returnType, ui32 argumentsCount, TType** arguments,
TNode* payload, const TTypeEnvironment& env);
void DoUpdateLinks(const THashMap<TNode*, TNode*>& links);
@@ -982,7 +982,7 @@ private:
private:
bool IsMergeDisabled0;
ui32 ArgumentsCount;
- TInternName Name;
+ TInternName Name;
TType* ReturnType;
TType** Arguments;
TNode* Payload;
@@ -1016,7 +1016,7 @@ public:
return Result;
}
- void SetResult(TRuntimeNode result, const TTypeEnvironment& env);
+ void SetResult(TRuntimeNode result, const TTypeEnvironment& env);
ui32 GetUniqueId() const {
return UniqueId;
}
@@ -1189,17 +1189,17 @@ public:
bool IsConvertableTo(const TResourceType& typeToCompare, bool ignoreTagged = false) const;
TStringBuf GetTag() const {
- return Tag.Str();
+ return Tag.Str();
}
- TInternName GetTagStr() const {
+ TInternName GetTagStr() const {
return Tag;
}
static TResourceType* Create(const TStringBuf& tag, const TTypeEnvironment& env);
private:
- TResourceType(TTypeType* type, TInternName tag)
+ TResourceType(TTypeType* type, TInternName tag)
: TType(EKind::Resource, type)
, Tag(tag)
{}
@@ -1209,7 +1209,7 @@ private:
void DoFreeze(const TTypeEnvironment& env);
private:
- TInternName const Tag;
+ TInternName const Tag;
};
class TTaggedType : public TType {
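The TInternName block restored above relies on string interning: every distinct name is stored once in the environment's pool, so equality of interned names degenerates to a pointer comparison and hashing to the pointer value. A compact sketch of the same idea using std::unordered_set instead of the arena-backed NamesPool (so storage stays valid because entries are never erased) is:

    // Sketch: intern names once, then compare them by pointer identity.
    #include <string>
    #include <string_view>
    #include <unordered_set>

    namespace sketch {

    class NamePool {
    public:
        // Returns a view into pool-owned storage; equal inputs yield views
        // over the same bytes.
        std::string_view Intern(std::string_view name) {
            auto [it, inserted] = Pool_.insert(std::string(name));
            (void)inserted;
            return std::string_view(*it);
        }

    private:
        std::unordered_set<std::string> Pool_;
    };

    // Interned names compare by pointer identity, as in TInternName::operator==.
    inline bool SameName(std::string_view a, std::string_view b) {
        return a.data() == b.data();
    }

    } // namespace sketch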
diff --git a/ydb/library/yql/minikql/mkql_node_builder.cpp b/ydb/library/yql/minikql/mkql_node_builder.cpp
index 1325c6de316..34466b89446 100644
--- a/ydb/library/yql/minikql/mkql_node_builder.cpp
+++ b/ydb/library/yql/minikql/mkql_node_builder.cpp
@@ -104,7 +104,7 @@ void TStructTypeBuilder::Reserve(ui32 size) {
}
TStructTypeBuilder& TStructTypeBuilder::Add(const TStringBuf& name, TType* type, ui32* index) {
- Members.push_back(TStructMember(Env->InternName(name).Str(), type, index));
+ Members.push_back(TStructMember(Env->InternName(name).Str(), type, index));
return *this;
}
@@ -141,7 +141,7 @@ void TStructLiteralBuilder::Reserve(ui32 size) {
TStructLiteralBuilder& TStructLiteralBuilder::Add(const TStringBuf& name, TRuntimeNode value) {
TType* valueType = value.GetStaticType();
- Members.push_back(TStructMember(Env->InternName(name).Str(), valueType));
+ Members.push_back(TStructMember(Env->InternName(name).Str(), valueType));
Values.push_back(value);
return *this;
}
@@ -223,7 +223,7 @@ void TDictLiteralBuilder::Clear() {
TCallableTypeBuilder::TCallableTypeBuilder(const TTypeEnvironment& env, const TStringBuf& name, TType* returnType)
: Env(&env)
- , Name(Env->InternName(name))
+ , Name(Env->InternName(name))
, ReturnType(returnType)
, OptionalArgsCount(0)
, HasPayload(false)
@@ -273,7 +273,7 @@ TCallableType* TCallableTypeBuilder::Build() {
payload = BuildCallableTypePayload(ArgNames, ArgFlags, FuncPayload, *Env);
}
- auto ret = TCallableType::Create(ReturnType, Name.Str(), Arguments.size(), Arguments.data(), payload, *Env);
+ auto ret = TCallableType::Create(ReturnType, Name.Str(), Arguments.size(), Arguments.data(), payload, *Env);
ret->SetOptionalArgumentsCount(OptionalArgsCount);
return ret;
}
@@ -289,7 +289,7 @@ void TCallableTypeBuilder::Clear() {
TCallableBuilder::TCallableBuilder(const TTypeEnvironment& env, const TStringBuf& name, TType* returnType, bool disableMerge)
: Env(&env)
- , Name(Env->InternName(name))
+ , Name(Env->InternName(name))
, ReturnType(returnType)
, DisableMerge(disableMerge)
, OptionalArgsCount(0)
@@ -343,7 +343,7 @@ TCallable* TCallableBuilder::Build() {
payload = BuildCallableTypePayload(ArgNames, ArgFlags, FuncPayload, *Env);
}
- auto type = TCallableType::Create(ReturnType, Name.Str(), Arguments.size(), Arguments.data(), payload, *Env);
+ auto type = TCallableType::Create(ReturnType, Name.Str(), Arguments.size(), Arguments.data(), payload, *Env);
type->SetOptionalArgumentsCount(OptionalArgsCount);
if (DisableMerge)
type->DisableMerge();
diff --git a/ydb/library/yql/minikql/mkql_node_builder.h b/ydb/library/yql/minikql/mkql_node_builder.h
index 6eef2f0f1f3..46a43dc3529 100644
--- a/ydb/library/yql/minikql/mkql_node_builder.h
+++ b/ydb/library/yql/minikql/mkql_node_builder.h
@@ -103,7 +103,7 @@ public:
private:
const TTypeEnvironment* Env;
- TInternName Name;
+ TInternName Name;
TType* ReturnType;
TVector<TType*> Arguments;
ui32 OptionalArgsCount;
@@ -130,7 +130,7 @@ public:
private:
const TTypeEnvironment* Env;
- TInternName Name;
+ TInternName Name;
TType* ReturnType;
bool DisableMerge;
TVector<TType*> Arguments;
diff --git a/ydb/library/yql/minikql/mkql_node_cast_ut.cpp b/ydb/library/yql/minikql/mkql_node_cast_ut.cpp
index 674f582a360..0ae19fa5e45 100644
--- a/ydb/library/yql/minikql/mkql_node_cast_ut.cpp
+++ b/ydb/library/yql/minikql/mkql_node_cast_ut.cpp
@@ -52,10 +52,10 @@ class TMiniKQLNodeCast: public TTestBase
Y_UNUSED(list);
}
- TMiniKQLNodeCast()
+ TMiniKQLNodeCast()
: Env(Alloc)
- {}
-
+ {}
+
private:
TRuntimeNode Uint32AsNode(ui32 value) {
return TRuntimeNode(BuildDataLiteral(NUdf::TUnboxedValuePod(value), NUdf::EDataSlot::Uint32, Env), true);
diff --git a/ydb/library/yql/minikql/mkql_node_serialization.cpp b/ydb/library/yql/minikql/mkql_node_serialization.cpp
index 77021725102..e314f38cd61 100644
--- a/ydb/library/yql/minikql/mkql_node_serialization.cpp
+++ b/ydb/library/yql/minikql/mkql_node_serialization.cpp
@@ -81,7 +81,7 @@ namespace {
private:
- void AddName(const TInternName& name) {
+ void AddName(const TInternName& name) {
auto iter = Names.emplace(name, 0);
if (iter.second) {
NameOrder.emplace_back(name);
@@ -928,7 +928,7 @@ namespace {
WriteVar32(node.GetCookie() - 1);
}
- void WriteName(TInternName name) {
+ void WriteName(TInternName name) {
auto it = Names.find(name);
if (it == Names.end()) {
WriteVar32(name.Str().size() << 1);
diff --git a/ydb/library/yql/minikql/mkql_node_ut.cpp b/ydb/library/yql/minikql/mkql_node_ut.cpp
index 2b9c768a5e7..16126272462 100644
--- a/ydb/library/yql/minikql/mkql_node_ut.cpp
+++ b/ydb/library/yql/minikql/mkql_node_ut.cpp
@@ -357,7 +357,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLNodeTest) {
TCallable* c1 = TCallable::Create(0, nullptr, ctype1, env);
UNIT_ASSERT_EQUAL(c1->GetInputsCount(), 0);
UNIT_ASSERT(!c1->HasResult());
- c1->SetResult(TRuntimeNode(env.GetVoid(), true), env);
+ c1->SetResult(TRuntimeNode(env.GetVoid(), true), env);
UNIT_ASSERT(c1->HasResult());
UNIT_ASSERT_EQUAL(c1->GetResult().GetStaticType()->GetKind(), TType::EKind::Void);
@@ -384,7 +384,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLNodeTest) {
UNIT_ASSERT_EQUAL(c2->GetInput(1).IsImmediate(), false);
UNIT_ASSERT_EQUAL(c2->GetInput(1).GetNode()->GetType()->GetKind(), TType::EKind::Callable);
UNIT_ASSERT(!c2->HasResult());
- c2->SetResult(TRuntimeNode(env.GetVoid(), true), env);
+ c2->SetResult(TRuntimeNode(env.GetVoid(), true), env);
UNIT_ASSERT(c2->HasResult());
UNIT_ASSERT_EQUAL(c2->GetResult().GetStaticType()->GetKind(), TType::EKind::Void);
}
diff --git a/ydb/library/yql/minikql/mkql_node_visitor.cpp b/ydb/library/yql/minikql/mkql_node_visitor.cpp
index 0a57fa1e65e..5a4ac8e4798 100644
--- a/ydb/library/yql/minikql/mkql_node_visitor.cpp
+++ b/ydb/library/yql/minikql/mkql_node_visitor.cpp
@@ -581,7 +581,7 @@ TRuntimeNode SinglePassVisitCallablesImpl(TRuntimeNode root, TExploringNodeVisit
result.Freeze();
if (result.GetNode() != node) {
if (InPlace) {
- callable.SetResult(result, env);
+ callable.SetResult(result, env);
wereChanges = true;
} else {
TNode* wrappedResult = TCallable::Create(result, callable.GetType(), env);
diff --git a/ydb/library/yql/minikql/mkql_node_visitor.h b/ydb/library/yql/minikql/mkql_node_visitor.h
index 220d14d69ac..f5bf76d5203 100644
--- a/ydb/library/yql/minikql/mkql_node_visitor.h
+++ b/ydb/library/yql/minikql/mkql_node_visitor.h
@@ -178,7 +178,7 @@ private:
class TTypeEnvironment;
typedef std::function<TRuntimeNode (TCallable& callable, const TTypeEnvironment& env)> TCallableVisitFunc;
-typedef std::function<TCallableVisitFunc (const TInternName& name)> TCallableVisitFuncProvider;
+typedef std::function<TCallableVisitFunc (const TInternName& name)> TCallableVisitFuncProvider;
TRuntimeNode SinglePassVisitCallables(TRuntimeNode root, TExploringNodeVisitor& explorer,
const TCallableVisitFuncProvider& funcProvider, const TTypeEnvironment& env, bool inPlace, bool& wereChanges);
diff --git a/ydb/library/yql/minikql/mkql_opt_literal.cpp b/ydb/library/yql/minikql/mkql_opt_literal.cpp
index 1988b5af38f..832a87ea80c 100644
--- a/ydb/library/yql/minikql/mkql_opt_literal.cpp
+++ b/ydb/library/yql/minikql/mkql_opt_literal.cpp
@@ -330,8 +330,8 @@ struct TOptimizationFuncMapFiller {
Map["Nth"] = &OptimizeNth;
Map["Extend"] = &OptimizeExtend;
- Provider = [&](TInternName name) {
- auto it = Map.find(name.Str());
+ Provider = [&](TInternName name) {
+ auto it = Map.find(name.Str());
if (it != Map.end())
return it->second;
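
The hunk above touches the literal-optimization provider: a lambda that resolves an interned callable name to an optimization function through a hash map. A minimal standalone sketch of that lookup pattern, with std::string and a stub node type standing in for TInternName and the MiniKQL types (both stand-ins are assumptions, not the real API):

    #include <functional>
    #include <optional>
    #include <string>
    #include <unordered_map>

    // Stub for the value an optimizer rewrites; the real code works on TCallable/TRuntimeNode.
    struct TNodeStub {};
    using TOptimizeFunc = std::function<TNodeStub(TNodeStub&)>;

    class TOptimizationFuncMapFillerSketch {
    public:
        TOptimizationFuncMapFillerSketch() {
            Map["Nth"] = [](TNodeStub& n) { return n; };     // placeholder optimizers
            Map["Extend"] = [](TNodeStub& n) { return n; };
            // The provider resolves a callable name to its optimizer, if one is registered.
            Provider = [this](const std::string& name) -> std::optional<TOptimizeFunc> {
                auto it = Map.find(name);
                if (it != Map.end())
                    return it->second;
                return std::nullopt;
            };
        }

        std::function<std::optional<TOptimizeFunc>(const std::string&)> Provider;

    private:
        std::unordered_map<std::string, TOptimizeFunc> Map;
    };
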
diff --git a/ydb/library/yql/minikql/mkql_type_builder.cpp b/ydb/library/yql/minikql/mkql_type_builder.cpp
index b9fb57c099e..4aecb564bf7 100644
--- a/ydb/library/yql/minikql/mkql_type_builder.cpp
+++ b/ydb/library/yql/minikql/mkql_type_builder.cpp
@@ -584,7 +584,7 @@ public:
}
NUdf::IFunctionArgTypesBuilder& Name(const NUdf::TStringRef& name) override {
- Args_.back().Name_ = Env_.InternName(name);
+ Args_.back().Name_ = Env_.InternName(name);
return *this;
}
@@ -1408,7 +1408,7 @@ void TFunctionTypeInfoBuilder::Build(TFunctionTypeInfo* funcInfo)
for (const auto& arg : Args_) {
builder.Add(arg.Type_);
if (!arg.Name_.Str().empty()) {
- builder.SetArgumentName(arg.Name_.Str());
+ builder.SetArgumentName(arg.Name_.Str());
}
if (arg.Flags_ != 0) {
diff --git a/ydb/library/yql/minikql/mkql_type_builder.h b/ydb/library/yql/minikql/mkql_type_builder.h
index 00433398bbb..a93fb41cee3 100644
--- a/ydb/library/yql/minikql/mkql_type_builder.h
+++ b/ydb/library/yql/minikql/mkql_type_builder.h
@@ -28,7 +28,7 @@ struct TFunctionTypeInfo
//////////////////////////////////////////////////////////////////////////////
struct TArgInfo {
NMiniKQL::TType* Type_ = nullptr;
- TInternName Name_;
+ TInternName Name_;
ui64 Flags_;
};
diff --git a/ydb/library/yql/providers/common/gateway/yql_provider_gateway.cpp b/ydb/library/yql/providers/common/gateway/yql_provider_gateway.cpp
index 8f12b33723a..3c92c846921 100644
--- a/ydb/library/yql/providers/common/gateway/yql_provider_gateway.cpp
+++ b/ydb/library/yql/providers/common/gateway/yql_provider_gateway.cpp
@@ -9,7 +9,7 @@ namespace NCommon {
void TOperationResult::AddIssue(const TIssue& issue) {
WalkThroughIssues(issue, false, [&](const TIssue& err, ui16 level) {
Y_UNUSED(level);
- YQL_CLOG(NOTICE, ProviderCommon) << err;
+ YQL_CLOG(NOTICE, ProviderCommon) << err;
});
Issues_.AddIssue(issue);
}
@@ -18,7 +18,7 @@ void TOperationResult::AddIssues(const TIssues& issues) {
for (auto& topIssue: issues) {
WalkThroughIssues(topIssue, false, [&](const TIssue& err, ui16 level) {
Y_UNUSED(level);
- YQL_CLOG(NOTICE, ProviderCommon) << err;
+ YQL_CLOG(NOTICE, ProviderCommon) << err;
});
}
Issues_.AddIssues(issues);
diff --git a/ydb/library/yql/providers/common/mkql/yql_provider_mkql.cpp b/ydb/library/yql/providers/common/mkql/yql_provider_mkql.cpp
index 86c1da6d7e1..dd1f162d784 100644
--- a/ydb/library/yql/providers/common/mkql/yql_provider_mkql.cpp
+++ b/ydb/library/yql/providers/common/mkql/yql_provider_mkql.cpp
@@ -1149,8 +1149,8 @@ TMkqlCommonCallableCompiler::TShared::TShared() {
const auto arg = MkqlBuildExpr(node.Head(), ctx);
const auto schemeType = ParseDataType(node, node.Tail().Content());
return ctx.ProgramBuilder.FromBytes(arg, schemeType);
- });
-
+ });
+
AddCallable("Convert", [](const TExprNode& node, TMkqlBuildContext& ctx) {
const auto arg = MkqlBuildExpr(node.Head(), ctx);
const auto type = BuildType(node.Head(), *node.GetTypeAnn(), ctx.ProgramBuilder);
diff --git a/ydb/library/yql/providers/common/mkql/yql_type_mkql.cpp b/ydb/library/yql/providers/common/mkql/yql_type_mkql.cpp
index 19f7bb52dcc..4ba0e6d7402 100644
--- a/ydb/library/yql/providers/common/mkql/yql_type_mkql.cpp
+++ b/ydb/library/yql/providers/common/mkql/yql_type_mkql.cpp
@@ -327,7 +327,7 @@ const TTypeAnnotationNode* ConvertMiniKQLType(TPosition position, NKikimr::NMini
case TType::EKind::Resource:
{
auto resType = static_cast<TResourceType*>(type);
- return ctx.MakeType<TResourceExprType>(resType->GetTag());
+ return ctx.MakeType<TResourceExprType>(resType->GetTag());
}
case TType::EKind::Stream:
diff --git a/ydb/library/yql/providers/common/schema/mkql/yql_mkql_schema.cpp b/ydb/library/yql/providers/common/schema/mkql/yql_mkql_schema.cpp
index 214e1e05d33..0a341057146 100644
--- a/ydb/library/yql/providers/common/schema/mkql/yql_mkql_schema.cpp
+++ b/ydb/library/yql/providers/common/schema/mkql/yql_mkql_schema.cpp
@@ -126,7 +126,7 @@ public:
TBase::SaveTupleType(*static_cast<const TTupleType*>(type));
break;
case TType::EKind::Resource:
- TBase::SaveResourceType(static_cast<const TResourceType*>(type)->GetTag());
+ TBase::SaveResourceType(static_cast<const TResourceType*>(type)->GetTag());
break;
case TType::EKind::Variant:
TBase::SaveVariantType(*static_cast<const TVariantType*>(type));
diff --git a/ydb/library/yql/udfs/common/digest/digest_udf.cpp b/ydb/library/yql/udfs/common/digest/digest_udf.cpp
index 8ed5a1bf7d1..7487a078dde 100644
--- a/ydb/library/yql/udfs/common/digest/digest_udf.cpp
+++ b/ydb/library/yql/udfs/common/digest/digest_udf.cpp
@@ -66,14 +66,14 @@ namespace {
ui32 hash = MurmurHash<ui32>(inputRef.Data(), inputRef.Size());
return TUnboxedValuePod(hash);
}
-
+
SIMPLE_UDF(TCityHash, ui64(TAutoMap<char*>)) {
Y_UNUSED(valueBuilder);
const auto& inputRef = args[0].AsStringRef();
ui64 hash = CityHash64(inputRef.Data(), inputRef.Size());
return TUnboxedValuePod(hash);
- }
-
+ }
+
using TUi64Pair = NUdf::TTuple<ui64, ui64>;
class TCityHash128: public TBoxedValue {
@@ -84,9 +84,9 @@ namespace {
}
static bool DeclareSignature(
- const TStringRef& name,
+ const TStringRef& name,
TType* userType,
- IFunctionTypeInfoBuilder& builder,
+ IFunctionTypeInfoBuilder& builder,
bool typesOnly) {
Y_UNUSED(userType);
if (Name() == name) {
@@ -99,8 +99,8 @@ namespace {
return true;
} else {
return false;
- }
- }
+ }
+ }
private:
TUnboxedValue Run(
@@ -121,15 +121,15 @@ namespace {
ui64 input = args[0].Get<ui64>();
ui64 hash = (ui64)NumericHash(input);
return TUnboxedValuePod(hash);
- }
+ }
SIMPLE_UDF(TMd5Hex, char*(TAutoMap<char*>)) {
- const auto& inputRef = args[0].AsStringRef();
+ const auto& inputRef = args[0].AsStringRef();
MD5 md5;
const TString& hash = md5.Calc(inputRef);
return valueBuilder->NewString(hash);
- }
-
+ }
+
SIMPLE_UDF(TMd5Raw, char*(TAutoMap<char*>)) {
const auto& inputRef = args[0].AsStringRef();
MD5 md5;
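
The digest_udf.cpp hunks only move whitespace around the hash UDF bodies; the helpers they call (MurmurHash, CityHash64, MD5) are ordinary util/library routines. A small sketch of the same calls outside the SIMPLE_UDF macros, with header paths assumed from the usual Arcadia layout:

    #include <library/cpp/digest/md5/md5.h>   // MD5::Calc (path assumed)
    #include <util/digest/city.h>             // CityHash64 (path assumed)
    #include <util/digest/murmur.h>           // MurmurHash<T> (path assumed)
    #include <util/generic/string.h>

    void HashExamples(const TString& input) {
        const ui32 murmur = MurmurHash<ui32>(input.data(), input.size());  // as in the Murmur-based UDF body above
        const ui64 city = CityHash64(input.data(), input.size());          // as in TCityHash
        MD5 md5;
        const TString md5Hex = md5.Calc(input);                            // hex digest, as in TMd5Hex
        (void)murmur;
        (void)city;
        (void)md5Hex;
    }
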
diff --git a/ydb/library/yql/utils/log/log_level.h b/ydb/library/yql/utils/log/log_level.h
index fb985a38b83..ccb12e4690b 100644
--- a/ydb/library/yql/utils/log/log_level.h
+++ b/ydb/library/yql/utils/log/log_level.h
@@ -13,7 +13,7 @@ enum class ELevel {
FATAL = TLOG_EMERG,
ERROR = TLOG_ERR,
WARN = TLOG_WARNING,
- NOTICE = TLOG_NOTICE,
+ NOTICE = TLOG_NOTICE,
INFO = TLOG_INFO,
DEBUG = TLOG_DEBUG,
TRACE = TLOG_RESOURCES,
diff --git a/ydb/public/api/grpc/draft/ya.make b/ydb/public/api/grpc/draft/ya.make
index cc18dbf9063..f63be96521c 100644
--- a/ydb/public/api/grpc/draft/ya.make
+++ b/ydb/public/api/grpc/draft/ya.make
@@ -13,13 +13,13 @@ OWNER(
SRCS(
dummy.proto
- ydb_clickhouse_internal_v1.proto
+ ydb_clickhouse_internal_v1.proto
ydb_persqueue_v1.proto
ydb_datastreams_v1.proto
- ydb_experimental_v1.proto
- ydb_s3_internal_v1.proto
+ ydb_experimental_v1.proto
+ ydb_s3_internal_v1.proto
ydb_long_tx_v1.proto
- ydb_logstore_v1.proto
+ ydb_logstore_v1.proto
yql_db_v1.proto
)
diff --git a/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.proto b/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.proto
index f6fdc9cc654..01fe731c7a9 100644
--- a/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.proto
+++ b/ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.proto
@@ -1,14 +1,14 @@
-syntax = "proto3";
-
-package Ydb.ClickhouseInternal.V1;
-option java_package = "com.yandex.ydb.clickhouse.v1";
-
+syntax = "proto3";
+
+package Ydb.ClickhouseInternal.V1;
+option java_package = "com.yandex.ydb.clickhouse.v1";
+
import "ydb/public/api/protos/ydb_clickhouse_internal.proto";
-
-service ClickhouseInternalService {
- rpc Scan(ClickhouseInternal.ScanRequest) returns (ClickhouseInternal.ScanResponse);
- rpc GetShardLocations(ClickhouseInternal.GetShardLocationsRequest) returns (ClickhouseInternal.GetShardLocationsResponse);
- rpc DescribeTable(ClickhouseInternal.DescribeTableRequest) returns (ClickhouseInternal.DescribeTableResponse);
+
+service ClickhouseInternalService {
+ rpc Scan(ClickhouseInternal.ScanRequest) returns (ClickhouseInternal.ScanResponse);
+ rpc GetShardLocations(ClickhouseInternal.GetShardLocationsRequest) returns (ClickhouseInternal.GetShardLocationsResponse);
+ rpc DescribeTable(ClickhouseInternal.DescribeTableRequest) returns (ClickhouseInternal.DescribeTableResponse);
/**
* CreateSnapshot creates a temporary consistent snapshot of one or more
@@ -29,5 +29,5 @@ service ClickhouseInternalService {
* so resources may be freed earlier than its expiration time.
*/
rpc DiscardSnapshot(ClickhouseInternal.DiscardSnapshotRequest) returns (ClickhouseInternal.DiscardSnapshotResponse);
-}
-
+}
+
diff --git a/ydb/public/api/grpc/draft/ydb_experimental_v1.proto b/ydb/public/api/grpc/draft/ydb_experimental_v1.proto
index b164aac446c..003ab6d671a 100644
--- a/ydb/public/api/grpc/draft/ydb_experimental_v1.proto
+++ b/ydb/public/api/grpc/draft/ydb_experimental_v1.proto
@@ -1,11 +1,11 @@
-syntax = "proto3";
-
-package Ydb.Experimental.V1;
+syntax = "proto3";
+
+package Ydb.Experimental.V1;
option java_package = "com.yandex.ydb.experimental.v1";
-
+
import "ydb/public/api/protos/ydb_experimental.proto";
-
-service ExperimentalService {
+
+service ExperimentalService {
rpc ExecuteStreamQuery(Experimental.ExecuteStreamQueryRequest) returns (stream Experimental.ExecuteStreamQueryResponse);
-}
-
+}
+
diff --git a/ydb/public/api/grpc/draft/ydb_logstore_v1.proto b/ydb/public/api/grpc/draft/ydb_logstore_v1.proto
index 40b12b0f473..a5775df71f3 100644
--- a/ydb/public/api/grpc/draft/ydb_logstore_v1.proto
+++ b/ydb/public/api/grpc/draft/ydb_logstore_v1.proto
@@ -1,17 +1,17 @@
-syntax = "proto3";
-
-package Ydb.LogStore.V1;
-option java_package = "com.yandex.ydb.logstore.v1";
-
+syntax = "proto3";
+
+package Ydb.LogStore.V1;
+option java_package = "com.yandex.ydb.logstore.v1";
+
import "ydb/public/api/protos/draft/ydb_logstore.proto";
-
-service LogStoreService {
- rpc CreateLogStore(CreateLogStoreRequest) returns (CreateLogStoreResponse);
- rpc DescribeLogStore(DescribeLogStoreRequest) returns (DescribeLogStoreResponse);
- rpc DropLogStore(DropLogStoreRequest) returns (DropLogStoreResponse);
-
- rpc CreateLogTable(CreateLogTableRequest) returns (CreateLogTableResponse);
- rpc DescribeLogTable(DescribeLogTableRequest) returns (DescribeLogTableResponse);
- rpc DropLogTable(DropLogTableRequest) returns (DropLogTableResponse);
- rpc AlterLogTable(AlterLogTableRequest) returns (AlterLogTableResponse);
-}
+
+service LogStoreService {
+ rpc CreateLogStore(CreateLogStoreRequest) returns (CreateLogStoreResponse);
+ rpc DescribeLogStore(DescribeLogStoreRequest) returns (DescribeLogStoreResponse);
+ rpc DropLogStore(DropLogStoreRequest) returns (DropLogStoreResponse);
+
+ rpc CreateLogTable(CreateLogTableRequest) returns (CreateLogTableResponse);
+ rpc DescribeLogTable(DescribeLogTableRequest) returns (DescribeLogTableResponse);
+ rpc DropLogTable(DropLogTableRequest) returns (DropLogTableResponse);
+ rpc AlterLogTable(AlterLogTableRequest) returns (AlterLogTableResponse);
+}
diff --git a/ydb/public/api/grpc/draft/ydb_s3_internal_v1.proto b/ydb/public/api/grpc/draft/ydb_s3_internal_v1.proto
index d0ef1178eb1..2553c91d37f 100644
--- a/ydb/public/api/grpc/draft/ydb_s3_internal_v1.proto
+++ b/ydb/public/api/grpc/draft/ydb_s3_internal_v1.proto
@@ -1,11 +1,11 @@
-syntax = "proto3";
-
-package Ydb.S3Internal.V1;
+syntax = "proto3";
+
+package Ydb.S3Internal.V1;
option java_package = "com.yandex.ydb.s3_internal.v1";
-
+
import "ydb/public/api/protos/ydb_s3_internal.proto";
-
-service S3InternalService {
- rpc S3Listing(S3Internal.S3ListingRequest) returns (S3Internal.S3ListingResponse);
-}
-
+
+service S3InternalService {
+ rpc S3Listing(S3Internal.S3ListingRequest) returns (S3Internal.S3ListingResponse);
+}
+
diff --git a/ydb/public/api/grpc/ydb_table_v1.proto b/ydb/public/api/grpc/ydb_table_v1.proto
index 6fe75b10f85..dbbf62c5c8e 100644
--- a/ydb/public/api/grpc/ydb_table_v1.proto
+++ b/ydb/public/api/grpc/ydb_table_v1.proto
@@ -73,11 +73,11 @@ service TableService {
// Streaming read table
rpc StreamReadTable(Table.ReadTableRequest) returns (stream Table.ReadTableResponse);
-
- // Upserts a batch of rows non-transactionally.
- // Returns success only when all rows were successfully upserted. In case of an error some rows might
- // be upserted and some might not.
- rpc BulkUpsert(Table.BulkUpsertRequest) returns (Table.BulkUpsertResponse);
+
+ // Upserts a batch of rows non-transactionally.
+ // Returns success only when all rows were successfully upserted. In case of an error some rows might
+ // be upserted and some might not.
+ rpc BulkUpsert(Table.BulkUpsertRequest) returns (Table.BulkUpsertResponse);
// Executes scan query with streaming result.
rpc StreamExecuteScanQuery(Table.ExecuteScanQueryRequest) returns (stream Table.ExecuteScanQueryPartialResponse);
diff --git a/ydb/public/api/protos/draft/ydb_logstore.proto b/ydb/public/api/protos/draft/ydb_logstore.proto
index 2cbe7974d44..dbe1e3aa213 100644
--- a/ydb/public/api/protos/draft/ydb_logstore.proto
+++ b/ydb/public/api/protos/draft/ydb_logstore.proto
@@ -1,148 +1,148 @@
-syntax = "proto3";
-option cc_enable_arenas = true;
-
-package Ydb.LogStore;
-option java_package = "com.yandex.ydb.logstore";
-option java_outer_classname = "LogStoreProtos";
-
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package Ydb.LogStore;
+option java_package = "com.yandex.ydb.logstore";
+option java_outer_classname = "LogStoreProtos";
+
import "ydb/public/api/protos/ydb_operation.proto";
import "ydb/public/api/protos/ydb_value.proto";
import "ydb/public/api/protos/ydb_scheme.proto";
import "ydb/public/api/protos/ydb_table.proto";
-import "google/protobuf/empty.proto";
-
-message ColumnMeta {
- string name = 1;
- Type type = 2;
-}
-
-message Schema {
- repeated ColumnMeta columns = 1; // Columns (name, type)
- repeated string primary_key = 2; // List of columns used as primary key
-}
-
-message SchemaPreset {
- string name = 1;
- Schema schema = 2;
-}
-
-message TtlSettingsPreset {
- string name = 1;
- Ydb.Table.TtlSettings ttl_settings = 2;
-}
-
-message CreateLogStoreRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
- uint32 column_shard_count = 3;
- repeated SchemaPreset schema_presets = 4;
- // repeated TtlSettingsPreset ttl_settings_presets = 5; // Unimplemented
-}
-
-message CreateLogStoreResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message DescribeLogStoreRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
-}
-
-message DescribeLogStoreResult {
- Ydb.Scheme.Entry self = 1; // Description of scheme object
-
- uint32 column_shard_count = 2;
- repeated SchemaPreset schema_presets = 3;
- // repeated TtlSettingsPreset ttl_settings_presets = 4; // Unimplemented
-}
-
-message DescribeLogStoreResponse {
- // Holds DescribeLogStoreResult in case of successful call
- Ydb.Operations.Operation operation = 1;
-}
-
-message DropLogStoreRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
-}
-
-message DropLogStoreResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message CreateLogTableRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
- oneof schema_specification {
- string schema_preset_name = 3; // From LogStore
- Schema schema = 4;
- };
- oneof ttl_specification {
- Ydb.Table.TtlSettings ttl_settings = 5;
- // string ttl_settings_preset_name = 6; // Unimplemented for now
- };
-
- // Specifies the desired number of ColumnShards for this table
- uint32 column_shard_count = 7;
-
- repeated string sharding_columns = 8;
-}
-
-message CreateLogTableResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message DescribeLogTableRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
-}
-
-message DescribeLogTableResult {
- Ydb.Scheme.Entry self = 1; // Description of scheme object
-
- string schema_preset_name = 2;
- Schema schema = 3;
- string ttl_settings_preset_name = 4;
- Ydb.Table.TtlSettings ttl_settings = 5;
-
- // Specifies the desired number of ColumnShards for this table
- uint32 column_shard_count = 6;
-
- repeated string sharding_columns = 7;
-}
-
-message DescribeLogTableResponse {
- // Holds DescribeLogTableResult in case of successful call
- Ydb.Operations.Operation operation = 1;
-}
-
-message DropLogTableRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
-}
-
-message DropLogTableResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message AlterLogTableRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2; // Full path
-
- oneof ttl_action {
- google.protobuf.Empty drop_ttl_settings = 3;
- Ydb.Table.TtlSettings set_ttl_settings = 4;
- // string set_ttl_preset_name = 5; // Unimplemented for now

- }
-}
-
-message AlterLogTableResponse {
- Ydb.Operations.Operation operation = 1;
-}
+import "google/protobuf/empty.proto";
+
+message ColumnMeta {
+ string name = 1;
+ Type type = 2;
+}
+
+message Schema {
+ repeated ColumnMeta columns = 1; // Columns (name, type)
+ repeated string primary_key = 2; // List of columns used as primary key
+}
+
+message SchemaPreset {
+ string name = 1;
+ Schema schema = 2;
+}
+
+message TtlSettingsPreset {
+ string name = 1;
+ Ydb.Table.TtlSettings ttl_settings = 2;
+}
+
+message CreateLogStoreRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+ uint32 column_shard_count = 3;
+ repeated SchemaPreset schema_presets = 4;
+ // repeated TtlSettingsPreset ttl_settings_presets = 5; // Unimplemented
+}
+
+message CreateLogStoreResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message DescribeLogStoreRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+}
+
+message DescribeLogStoreResult {
+ Ydb.Scheme.Entry self = 1; // Description of scheme object
+
+ uint32 column_shard_count = 2;
+ repeated SchemaPreset schema_presets = 3;
+ // repeated TtlSettingsPreset ttl_settings_presets = 4; // Unimplemented
+}
+
+message DescribeLogStoreResponse {
+ // Holds DescribeLogStoreResult in case of successful call
+ Ydb.Operations.Operation operation = 1;
+}
+
+message DropLogStoreRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+}
+
+message DropLogStoreResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message CreateLogTableRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+ oneof schema_specification {
+ string schema_preset_name = 3; // From LogStore
+ Schema schema = 4;
+ };
+ oneof ttl_specification {
+ Ydb.Table.TtlSettings ttl_settings = 5;
+ // string ttl_settings_preset_name = 6; // Unimplemented for now
+ };
+
+ // Specifies the desired number of ColumnShards for this table
+ uint32 column_shard_count = 7;
+
+ repeated string sharding_columns = 8;
+}
+
+message CreateLogTableResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message DescribeLogTableRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+}
+
+message DescribeLogTableResult {
+ Ydb.Scheme.Entry self = 1; // Description of scheme object
+
+ string schema_preset_name = 2;
+ Schema schema = 3;
+ string ttl_settings_preset_name = 4;
+ Ydb.Table.TtlSettings ttl_settings = 5;
+
+ // Specifies the desired number of ColumnShards for this table
+ uint32 column_shard_count = 6;
+
+ repeated string sharding_columns = 7;
+}
+
+message DescribeLogTableResponse {
+ // Holds DescribeLogTableResult in case of successful call
+ Ydb.Operations.Operation operation = 1;
+}
+
+message DropLogTableRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+}
+
+message DropLogTableResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message AlterLogTableRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2; // Full path
+
+ oneof ttl_action {
+ google.protobuf.Empty drop_ttl_settings = 3;
+ Ydb.Table.TtlSettings set_ttl_settings = 4;
+ // string set_ttl_preset_name = 5; // Unimplemented for now
+ }
+}
+
+message AlterLogTableResponse {
+ Ydb.Operations.Operation operation = 1;
+}
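
The LogStore draft API above pairs store-level schema presets with per-table creation. A hedged sketch of building a CreateLogTableRequest through the standard protobuf-generated C++ setters (the generated header path and the concrete values are assumptions):

    #include <ydb/public/api/protos/draft/ydb_logstore.pb.h>  // assumed generated header path

    Ydb::LogStore::CreateLogTableRequest MakeCreateLogTableRequest() {
        Ydb::LogStore::CreateLogTableRequest req;
        req.set_path("/Root/LogStore/access_log");   // hypothetical full path
        // schema_specification oneof: reference a preset defined in the LogStore,
        // or fill mutable_schema() inline instead (the two are mutually exclusive).
        req.set_schema_preset_name("default");
        // ttl_specification oneof (Ydb.Table.TtlSettings) is left unset here.
        req.set_column_shard_count(4);               // desired number of ColumnShards
        req.add_sharding_columns("uid");
        return req;
    }
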
diff --git a/ydb/public/api/protos/ya.make b/ydb/public/api/protos/ya.make
index d70760f7296..8a156403a3d 100644
--- a/ydb/public/api/protos/ya.make
+++ b/ydb/public/api/protos/ya.make
@@ -18,32 +18,32 @@ SRCS(
draft/persqueue_common.proto
draft/persqueue_error_codes.proto
draft/ydb_long_tx.proto
- draft/ydb_logstore.proto
+ draft/ydb_logstore.proto
draft/yq_private.proto
persqueue_error_codes_v1.proto
ydb_auth.proto
ydb_persqueue_v1.proto
ydb_persqueue_cluster_discovery.proto
- ydb_clickhouse_internal.proto
+ ydb_clickhouse_internal.proto
ydb_cms.proto
ydb_common.proto
ydb_coordination.proto
ydb_discovery.proto
- ydb_experimental.proto
+ ydb_experimental.proto
ydb_export.proto
ydb_formats.proto
ydb_import.proto
ydb_issue_message.proto
ydb_monitoring.proto
ydb_operation.proto
- ydb_query_stats.proto
+ ydb_query_stats.proto
ydb_rate_limiter.proto
ydb_scheme.proto
ydb_scripting.proto
ydb_status_codes.proto
ydb_table.proto
ydb_value.proto
- ydb_s3_internal.proto
+ ydb_s3_internal.proto
yq.proto
)
diff --git a/ydb/public/api/protos/ydb_clickhouse_internal.proto b/ydb/public/api/protos/ydb_clickhouse_internal.proto
index cc3faa6e366..0f9e685ed35 100644
--- a/ydb/public/api/protos/ydb_clickhouse_internal.proto
+++ b/ydb/public/api/protos/ydb_clickhouse_internal.proto
@@ -1,86 +1,86 @@
-syntax = "proto3";
-option cc_enable_arenas = true;
-
-package Ydb.ClickhouseInternal;
-option java_package = "com.yandex.ydb.clickhouse";
-option java_outer_classname = "ClickhouseInternalProtos";
-
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package Ydb.ClickhouseInternal;
+option java_package = "com.yandex.ydb.clickhouse";
+option java_outer_classname = "ClickhouseInternalProtos";
+
import "ydb/public/api/protos/ydb_operation.proto";
import "ydb/public/api/protos/ydb_table.proto";
-
-message ScanRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string table = 2;
- repeated string columns = 3;
-
- bytes from_key = 4;
- bool from_key_inclusive = 5;
- bytes to_key = 6;
- bool to_key_inclusive = 7;
-
- uint64 max_rows = 8;
- uint64 max_bytes = 9;
+
+message ScanRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string table = 2;
+ repeated string columns = 3;
+
+ bytes from_key = 4;
+ bool from_key_inclusive = 5;
+ bytes to_key = 6;
+ bool to_key_inclusive = 7;
+
+ uint64 max_rows = 8;
+ uint64 max_bytes = 9;
string snapshot_id = 10;
}
-
-message ScanResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message ScanResult {
- repeated bytes blocks = 1;
-
- bool eos = 2; // no more rows
- bytes last_key = 3;
- bool last_key_inclusive = 4;
-}
-
-
-message GetShardLocationsRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- repeated uint64 tablet_ids = 2;
-}
-
-message GetShardLocationsResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message TabletInfo {
- uint64 tablet_id = 1;
- string host = 2;
- uint32 port = 3;
-}
-
-message GetShardLocationsResult {
- repeated TabletInfo tablets = 1;
-}
-
-message DescribeTableRequest {
- Ydb.Operations.OperationParams operation_params = 1;
-
- string path = 2;
- bool include_partitions_info = 3;
-}
-
-message PartitionInfo {
- uint64 tablet_id = 1;
- bytes end_key = 2;
- bool end_key_inclusive = 3;
-}
-
-message DescribeTableResult {
- repeated Ydb.Table.ColumnMeta columns = 2;
- repeated string primary_key = 3;
- repeated PartitionInfo partitions = 4;
- bool is_virtual = 5;
-}
-
-message DescribeTableResponse {
- Ydb.Operations.Operation operation = 1;
-}
+
+message ScanResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message ScanResult {
+ repeated bytes blocks = 1;
+
+ bool eos = 2; // no more rows
+ bytes last_key = 3;
+ bool last_key_inclusive = 4;
+}
+
+
+message GetShardLocationsRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ repeated uint64 tablet_ids = 2;
+}
+
+message GetShardLocationsResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message TabletInfo {
+ uint64 tablet_id = 1;
+ string host = 2;
+ uint32 port = 3;
+}
+
+message GetShardLocationsResult {
+ repeated TabletInfo tablets = 1;
+}
+
+message DescribeTableRequest {
+ Ydb.Operations.OperationParams operation_params = 1;
+
+ string path = 2;
+ bool include_partitions_info = 3;
+}
+
+message PartitionInfo {
+ uint64 tablet_id = 1;
+ bytes end_key = 2;
+ bool end_key_inclusive = 3;
+}
+
+message DescribeTableResult {
+ repeated Ydb.Table.ColumnMeta columns = 2;
+ repeated string primary_key = 3;
+ repeated PartitionInfo partitions = 4;
+ bool is_virtual = 5;
+}
+
+message DescribeTableResponse {
+ Ydb.Operations.Operation operation = 1;
+}
message CreateSnapshotRequest {
Ydb.Operations.OperationParams operation_params = 1;
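
ScanRequest/ScanResult above describe a cursor-style range read: the client passes from_key/to_key and resumes from last_key until eos is set. A hedged sketch of filling the first request with the protobuf-generated C++ setters (header path and values are assumptions):

    #include <ydb/public/api/protos/ydb_clickhouse_internal.pb.h>  // assumed generated header path

    Ydb::ClickhouseInternal::ScanRequest MakeFirstScanRequest() {
        Ydb::ClickhouseInternal::ScanRequest req;
        req.set_table("/Root/clickhouse/events");  // hypothetical table path
        req.add_columns("key");
        req.add_columns("value");
        req.set_from_key("");                      // empty key: start from the beginning of the table
        req.set_from_key_inclusive(true);
        req.set_max_rows(1000);
        req.set_max_bytes(8 << 20);                // cap each batch at 8 MiB
        return req;
    }

Subsequent requests would copy ScanResult.last_key into from_key (exclusive) and loop until eos is true.
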
diff --git a/ydb/public/api/protos/ydb_experimental.proto b/ydb/public/api/protos/ydb_experimental.proto
index 7e8f46094ad..94eed9671b7 100644
--- a/ydb/public/api/protos/ydb_experimental.proto
+++ b/ydb/public/api/protos/ydb_experimental.proto
@@ -1,15 +1,15 @@
-syntax = "proto3";
-option cc_enable_arenas = true;
-
-package Ydb.Experimental;
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package Ydb.Experimental;
option java_package = "com.yandex.ydb.experimental";
-option java_outer_classname = "ExperimentalProtos";
-
+option java_outer_classname = "ExperimentalProtos";
+
import "ydb/public/api/protos/ydb_issue_message.proto";
import "ydb/public/api/protos/ydb_status_codes.proto";
import "ydb/public/api/protos/ydb_value.proto";
-
-
+
+
message ExecuteStreamQueryRequest {
string yql_text = 1;
map<string, TypedValue> parameters = 2;
diff --git a/ydb/public/api/protos/ydb_query_stats.proto b/ydb/public/api/protos/ydb_query_stats.proto
index 73dd28ea87f..300d5d9837c 100644
--- a/ydb/public/api/protos/ydb_query_stats.proto
+++ b/ydb/public/api/protos/ydb_query_stats.proto
@@ -1,46 +1,46 @@
-syntax = "proto3";
-option cc_enable_arenas = true;
-
-package Ydb.TableStats;
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package Ydb.TableStats;
option java_package = "com.yandex.ydb";
-
-// Describes select, update (insert, upsert, replace) and delete operations
-message OperationStats {
- uint64 rows = 1;
- uint64 bytes = 2;
-}
-
-// Describes all operations on a table
-message TableAccessStats {
- string name = 1;
- reserved 2; // table id
- OperationStats reads = 3;
- OperationStats updates = 4;
- OperationStats deletes = 5;
+
+// Describes select, update (insert, upsert, replace) and delete operations
+message OperationStats {
+ uint64 rows = 1;
+ uint64 bytes = 2;
+}
+
+// Describes all operations on a table
+message TableAccessStats {
+ string name = 1;
+ reserved 2; // table id
+ OperationStats reads = 3;
+ OperationStats updates = 4;
+ OperationStats deletes = 5;
uint64 partitions_count = 6;
-}
-
-message QueryPhaseStats {
- uint64 duration_us = 1;
- repeated TableAccessStats table_access = 2;
+}
+
+message QueryPhaseStats {
+ uint64 duration_us = 1;
+ repeated TableAccessStats table_access = 2;
uint64 cpu_time_us = 3;
uint64 affected_shards = 4;
bool literal_phase = 5;
-}
-
+}
+
message CompilationStats {
bool from_cache = 1;
uint64 duration_us = 2;
uint64 cpu_time_us = 3;
}
-message QueryStats {
- // A query might have one or more execution phases
- repeated QueryPhaseStats query_phases = 1;
+message QueryStats {
+ // A query might have one or more execution phases
+ repeated QueryPhaseStats query_phases = 1;
CompilationStats compilation = 2;
uint64 process_cpu_time_us = 3;
string query_plan = 4;
string query_ast = 5;
uint64 total_duration_us = 6;
uint64 total_cpu_time_us = 7;
-}
+}
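
QueryStats above nests per-phase and per-table counters; a typical consumer walks query_phases and table_access to aggregate them. A minimal sketch using the generated accessors (header path assumed):

    #include <ydb/public/api/protos/ydb_query_stats.pb.h>  // assumed generated header path

    #include <cstdint>

    // Total rows read across all phases and all tables of one query.
    uint64_t TotalRowsRead(const Ydb::TableStats::QueryStats& stats) {
        uint64_t rows = 0;
        for (const auto& phase : stats.query_phases()) {
            for (const auto& table : phase.table_access()) {
                rows += table.reads().rows();
            }
        }
        return rows;
    }
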
diff --git a/ydb/public/api/protos/ydb_s3_internal.proto b/ydb/public/api/protos/ydb_s3_internal.proto
index f8fe095d977..0f12fe63028 100644
--- a/ydb/public/api/protos/ydb_s3_internal.proto
+++ b/ydb/public/api/protos/ydb_s3_internal.proto
@@ -1,30 +1,30 @@
-syntax = "proto3";
-option cc_enable_arenas = true;
-
-package Ydb.S3Internal;
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package Ydb.S3Internal;
option java_package = "com.yandex.ydb.s3_internal";
-option java_outer_classname = "S3InternalProtos";
-
+option java_outer_classname = "S3InternalProtos";
+
import "ydb/public/api/protos/ydb_operation.proto";
import "ydb/public/api/protos/ydb_value.proto";
-
-message S3ListingRequest {
- string table_name = 1;
- TypedValue key_prefix = 2; // A tuple representing all key columns that precede path column
- string path_column_prefix = 3;
- string path_column_delimiter = 4;
- TypedValue start_after_key_suffix = 5; // A tuple representing key columns that succeed path column
- uint32 max_keys = 6;
- repeated string columns_to_return = 7;
-
- Ydb.Operations.OperationParams operation_params = 8;
+
+message S3ListingRequest {
+ string table_name = 1;
+ TypedValue key_prefix = 2; // A tuple representing all key columns that precede path column
+ string path_column_prefix = 3;
+ string path_column_delimiter = 4;
+ TypedValue start_after_key_suffix = 5; // A tuple representing key columns that succeed path column
+ uint32 max_keys = 6;
+ repeated string columns_to_return = 7;
+
+ Ydb.Operations.OperationParams operation_params = 8;
}
-
-message S3ListingResponse {
- Ydb.Operations.Operation operation = 1;
+
+message S3ListingResponse {
+ Ydb.Operations.Operation operation = 1;
}
-
-message S3ListingResult {
+
+message S3ListingResult {
Ydb.ResultSet common_prefixes = 1; // Every Contents row starts with key suffix with KeySuffixSize columns
Ydb.ResultSet contents = 2; // Every Contents row starts with key suffix with KeySuffixSize columns
uint32 key_suffix_size = 3; // Number of key columns starting from path and up to the end
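
S3ListingRequest above mirrors S3-style listing over a keyed table (prefix, delimiter, max keys, start-after). A hedged sketch of a request via the generated setters; the TypedValue tuple fields stay unset because their construction depends on ydb_value.proto, which is outside this hunk (header path and values are assumptions):

    #include <ydb/public/api/protos/ydb_s3_internal.pb.h>  // assumed generated header path

    Ydb::S3Internal::S3ListingRequest MakeS3ListingRequest() {
        Ydb::S3Internal::S3ListingRequest req;
        req.set_table_name("/Root/s3/objects");      // hypothetical table
        req.set_path_column_prefix("photos/2021/");  // list keys under this prefix
        req.set_path_column_delimiter("/");          // fold deeper keys into common_prefixes
        req.set_max_keys(1000);
        req.add_columns_to_return("path");
        req.add_columns_to_return("size");
        // key_prefix and start_after_key_suffix are TypedValue tuples, left unset here.
        return req;
    }
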
diff --git a/ydb/public/api/protos/ydb_table.proto b/ydb/public/api/protos/ydb_table.proto
index 12d9fcf1e5a..c8063d9475c 100644
--- a/ydb/public/api/protos/ydb_table.proto
+++ b/ydb/public/api/protos/ydb_table.proto
@@ -360,7 +360,7 @@ message PartitioningSettings {
// Preferred partition size for auto partitioning by size, Mb
uint64 partition_size_mb = 3;
// Enable auto partitioning based on load on each partition
- Ydb.FeatureFlag.Status partitioning_by_load = 4;
+ Ydb.FeatureFlag.Status partitioning_by_load = 4;
reserved 5; // partitioning_by_load settings
// Minimum partitions count auto merge would stop working at
uint64 min_partitions_count = 6;
@@ -707,8 +707,8 @@ message ExecuteQueryResult {
TransactionMeta tx_meta = 2;
// Query metadata
QueryMeta query_meta = 3;
- // Query execution statistics
- Ydb.TableStats.QueryStats query_stats = 4;
+ // Query execution statistics
+ Ydb.TableStats.QueryStats query_stats = 4;
}
// Explain data query
@@ -939,15 +939,15 @@ message ReadTableResult {
// Result set (same as result of sql request)
Ydb.ResultSet result_set = 1;
}
-
-message BulkUpsertRequest {
- string table = 1;
- // "rows" parameter must be a list of structs where each stuct represents one row.
- // It must contain all key columns but not necessarily all non-key columns.
- // Similar to UPSERT statement only values of specified columns will be updated.
- TypedValue rows = 2;
+
+message BulkUpsertRequest {
+ string table = 1;
+ // "rows" parameter must be a list of structs where each stuct represents one row.
+ // It must contain all key columns but not necessarily all non-key columns.
+ // Similar to UPSERT statement only values of specified columns will be updated.
+ TypedValue rows = 2;
Ydb.Operations.OperationParams operation_params = 3;
-
+
// You may set data_format + data instead of rows to insert data in serialized formats.
oneof data_format {
Ydb.Formats.ArrowBatchSettings arrow_batch_settings = 7;
@@ -956,15 +956,15 @@ message BulkUpsertRequest {
// It's last in the definition to help with sidecar patterns
bytes data = 1000;
-}
-
-message BulkUpsertResponse {
- Ydb.Operations.Operation operation = 1;
-}
-
-message BulkUpsertResult {
-}
-
+}
+
+message BulkUpsertResponse {
+ Ydb.Operations.Operation operation = 1;
+}
+
+message BulkUpsertResult {
+}
+
message ExecuteScanQueryRequest {
enum Mode {
MODE_UNSPECIFIED = 0;
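
The BulkUpsertRequest comment above covers the rows path; the data_format oneof plus the data field provide the alternative, serialized path. A hedged sketch of the Arrow variant via the generated setters (header path is an assumption, and the serialized batch is produced elsewhere):

    #include <ydb/public/api/protos/ydb_table.pb.h>  // assumed generated header path

    #include <cstddef>

    // Non-transactional bulk upsert of a pre-serialized Arrow record batch.
    Ydb::Table::BulkUpsertRequest MakeArrowBulkUpsert(const char* batch, size_t batchSize) {
        Ydb::Table::BulkUpsertRequest req;
        req.set_table("/Root/my_table");      // hypothetical table path
        req.mutable_arrow_batch_settings();   // selects the Arrow member of the data_format oneof
        req.set_data(batch, batchSize);       // serialized batch bytes (field 1000)
        return req;
    }
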
diff --git a/ydb/public/lib/base/defs.h b/ydb/public/lib/base/defs.h
index c4690cfda42..97e933c93ac 100644
--- a/ydb/public/lib/base/defs.h
+++ b/ydb/public/lib/base/defs.h
@@ -77,7 +77,7 @@ namespace NTxProxy {
\
XX(BackupTxIdNotExists, 80) \
XX(TxIdIsNotABackup, 81) \
-
+
struct TResultStatus {
enum EStatus {
TXUSERPROXY_RESULT_STATUS_MAP(ENUM_VALUE_GEN)
diff --git a/ydb/public/lib/base/msgbus.h b/ydb/public/lib/base/msgbus.h
index 23aba5ab7a5..f11bd9771f1 100644
--- a/ydb/public/lib/base/msgbus.h
+++ b/ydb/public/lib/base/msgbus.h
@@ -65,7 +65,7 @@ enum {
MTYPE_CLIENT_GET_REQUEST = 10454,
MTYPE_CLIENT_GET_RESPONSE = 10455,
MTYPE_CLIENT_DB_QUERY = 10456,
- MTYPE_CLIENT_TABLET_COUNTERS_REQUEST = 10457,
+ MTYPE_CLIENT_TABLET_COUNTERS_REQUEST = 10457,
MTYPE_CLIENT_CANCEL_BACKUP = 10458,
MTYPE_CLIENT_BLOB_STORAGE_CONFIG_REQUEST = 10459,
MTYPE_CLIENT_DRAIN_NODE = 10460,
@@ -81,8 +81,8 @@ enum {
MTYPE_CLIENT_SQS_RESPONSE = 10470,
MTYPE_CLIENT_WHOAMI = 10471,
MTYPE_CLIENT_STREAM_REQUEST = 10472,
- MTYPE_CLIENT_S3_LISTING_REQUEST = 10474,
- MTYPE_CLIENT_S3_LISTING_RESPONSE = 10475,
+ MTYPE_CLIENT_S3_LISTING_REQUEST = 10474,
+ MTYPE_CLIENT_S3_LISTING_RESPONSE = 10475,
MTYPE_CLIENT_INTERCONNECT_DEBUG = 10476,
MTYPE_CLIENT_CONSOLE_REQUEST = 10477,
MTYPE_CLIENT_CONSOLE_RESPONSE = 10478,
@@ -116,7 +116,7 @@ struct TBusMessageBusTraceRequest : TBusMessage<TBusMessageBusTraceRequest, NKik
struct TBusMessageBusTraceStatus : TBusMessage<TBusMessageBusTraceStatus, NKikimrClient::TMessageBusTraceStatus, MTYPE_CLIENT_MESSAGE_BUS_TRACE_STATUS> {};
struct TBusTabletKillRequest : TBusMessage<TBusTabletKillRequest, NKikimrClient::TTabletKillRequest, MTYPE_CLIENT_TABLET_KILL_REQUEST> {};
struct TBusTabletStateRequest : TBusMessage<TBusTabletStateRequest, NKikimrClient::TTabletStateRequest, MTYPE_CLIENT_TABLET_STATE_REQUEST> {};
-struct TBusTabletCountersRequest : TBusMessage<TBusTabletCountersRequest, NKikimrClient::TTabletCountersRequest, MTYPE_CLIENT_TABLET_COUNTERS_REQUEST> {};
+struct TBusTabletCountersRequest : TBusMessage<TBusTabletCountersRequest, NKikimrClient::TTabletCountersRequest, MTYPE_CLIENT_TABLET_COUNTERS_REQUEST> {};
struct TBusTabletLocalMKQL : TBusMessage<TBusTabletLocalMKQL, NKikimrClient::TLocalMKQL, MTYPE_CLIENT_LOCAL_MINIKQL> {};
struct TBusTabletLocalSchemeTx : TBusMessage<TBusTabletLocalSchemeTx, NKikimrClient::TLocalSchemeTx, MTYPE_CLIENT_LOCAL_SCHEME_TX> {};
struct TBusSchemeOperation : TBusMessage<TBusSchemeOperation, NKikimrClient::TSchemeOperation, MTYPE_CLIENT_FLAT_TX_REQUEST> {};
@@ -145,8 +145,8 @@ struct TBusSqsRequest : TBusMessage<TBusSqsRequest, NKikimrClient::TSqsRequest,
struct TBusSqsResponse : TBusMessage<TBusSqsResponse, NKikimrClient::TSqsResponse, MTYPE_CLIENT_SQS_RESPONSE> {};
struct TBusWhoAmI : TBusMessage<TBusWhoAmI, NKikimrClient::TWhoAmI, MTYPE_CLIENT_WHOAMI> {};
struct TBusStreamRequest : TBusMessage<TBusStreamRequest, NKikimrClient::TRequest, MTYPE_CLIENT_STREAM_REQUEST> {};
-struct TBusS3ListingRequest : TBusMessage<TBusS3ListingRequest, NKikimrClient::TS3ListingRequest, MTYPE_CLIENT_S3_LISTING_REQUEST> {};
-struct TBusS3ListingResponse : TBusMessage<TBusS3ListingResponse, NKikimrClient::TS3ListingResponse, MTYPE_CLIENT_S3_LISTING_RESPONSE> {};
+struct TBusS3ListingRequest : TBusMessage<TBusS3ListingRequest, NKikimrClient::TS3ListingRequest, MTYPE_CLIENT_S3_LISTING_REQUEST> {};
+struct TBusS3ListingResponse : TBusMessage<TBusS3ListingResponse, NKikimrClient::TS3ListingResponse, MTYPE_CLIENT_S3_LISTING_RESPONSE> {};
struct TBusInterconnectDebug : TBusMessage<TBusInterconnectDebug, NKikimrClient::TInterconnectDebug, MTYPE_CLIENT_INTERCONNECT_DEBUG> {};
struct TBusConsoleRequest : TBusMessage<TBusConsoleRequest, NKikimrClient::TConsoleRequest, MTYPE_CLIENT_CONSOLE_REQUEST> {};
struct TBusConsoleResponse : TBusMessage<TBusConsoleResponse, NKikimrClient::TConsoleResponse, MTYPE_CLIENT_CONSOLE_RESPONSE> {};
@@ -213,9 +213,9 @@ public:
RegisterType(new TBusPersQueue);
RegisterType(new TBusTabletKillRequest);
RegisterType(new TBusTabletStateRequest);
- RegisterType(new TBusTabletCountersRequest);
+ RegisterType(new TBusTabletCountersRequest);
RegisterType(new TBusTabletLocalMKQL);
- RegisterType(new TBusTabletLocalSchemeTx);
+ RegisterType(new TBusTabletLocalSchemeTx);
RegisterType(new TBusSchemeOperation);
RegisterType(new TBusSchemeOperationStatus);
RegisterType(new TBusSchemeDescribe);
@@ -240,8 +240,8 @@ public:
RegisterType(new TBusChooseProxy);
RegisterType(new TBusWhoAmI);
RegisterType(new TBusStreamRequest);
- RegisterType(new TBusS3ListingRequest);
- RegisterType(new TBusS3ListingResponse);
+ RegisterType(new TBusS3ListingRequest);
+ RegisterType(new TBusS3ListingResponse);
RegisterType(new TBusInterconnectDebug);
RegisterType(new TBusConsoleRequest);
RegisterType(new TBusConsoleResponse);
diff --git a/ydb/public/lib/deprecated/client/grpc_client.cpp b/ydb/public/lib/deprecated/client/grpc_client.cpp
index f6f213b98e7..dbf808ef752 100644
--- a/ydb/public/lib/deprecated/client/grpc_client.cpp
+++ b/ydb/public/lib/deprecated/client/grpc_client.cpp
@@ -331,7 +331,7 @@ namespace NKikimr {
IMPL_REQUEST(RegisterNode, TNodeRegistrationRequest, TNodeRegistrationResponse)
IMPL_REQUEST(CmsRequest, TCmsRequest, TCmsResponse)
IMPL_REQUEST(SqsRequest, TSqsRequest, TSqsResponse)
- IMPL_REQUEST(S3Listing, TS3ListingRequest, TS3ListingResponse)
+ IMPL_REQUEST(S3Listing, TS3ListingRequest, TS3ListingResponse)
IMPL_REQUEST(LocalMKQL, TLocalMKQL, TResponse)
IMPL_REQUEST(LocalSchemeTx, TLocalSchemeTx, TResponse)
IMPL_REQUEST(TabletKillRequest, TTabletKillRequest, TResponse)
diff --git a/ydb/public/lib/deprecated/client/grpc_client.h b/ydb/public/lib/deprecated/client/grpc_client.h
index b014cdddb15..b0483a2646f 100644
--- a/ydb/public/lib/deprecated/client/grpc_client.h
+++ b/ydb/public/lib/deprecated/client/grpc_client.h
@@ -23,7 +23,7 @@ namespace NKikimr {
using TNodeRegistrationResponseCallback = TCallback<NKikimrClient::TNodeRegistrationResponse>;
using TCmsResponseCallback = TCallback<NKikimrClient::TCmsResponse>;
using TSqsResponseCallback = TCallback<NKikimrClient::TSqsResponse>;
- using TS3ListingResponseCallback = TCallback<NKikimrClient::TS3ListingResponse>;
+ using TS3ListingResponseCallback = TCallback<NKikimrClient::TS3ListingResponse>;
using TConsoleResponseCallback = TCallback<NKikimrClient::TConsoleResponse>;
using TFinishCallback = std::function<void (const TGrpcError*)>;
@@ -91,11 +91,11 @@ namespace NKikimr {
void SqsRequest(const NKikimrClient::TSqsRequest& request, TSqsResponseCallback callback);
/////////////////////////////////////////////////////////////////////////////////////////////////
- // S3 LISTING INTERFACE
- /////////////////////////////////////////////////////////////////////////////////////////////////
- void S3Listing(const NKikimrClient::TS3ListingRequest& request, TS3ListingResponseCallback callback);
-
- /////////////////////////////////////////////////////////////////////////////////////////////////
+ // S3 LISTING INTERFACE
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+ void S3Listing(const NKikimrClient::TS3ListingRequest& request, TS3ListingResponseCallback callback);
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
// CONSOLE INTERFACE
/////////////////////////////////////////////////////////////////////////////////////////////////
void ConsoleRequest(const NKikimrClient::TConsoleRequest& request, TConsoleResponseCallback callback);
diff --git a/ydb/public/lib/deprecated/client/msgbus_client.cpp b/ydb/public/lib/deprecated/client/msgbus_client.cpp
index 1dcdb32b3b2..4d5f2427ebd 100644
--- a/ydb/public/lib/deprecated/client/msgbus_client.cpp
+++ b/ydb/public/lib/deprecated/client/msgbus_client.cpp
@@ -44,10 +44,10 @@ struct TMessageCookie
struct TSyncMessageCookie : public TMessageCookie {
TAutoPtr<NBus::TBusMessage> Reply;
NBus::EMessageStatus ErrorStatus = NBus::MESSAGE_UNKNOWN;
- TManualEvent Ev;
+ TManualEvent Ev;
TSyncMessageCookie()
- : Ev()
+ : Ev()
{}
void Wait() {
diff --git a/ydb/public/lib/deprecated/kicli/cpp_ut.cpp b/ydb/public/lib/deprecated/kicli/cpp_ut.cpp
index f0cfde7da51..cb7dc3ed6d9 100644
--- a/ydb/public/lib/deprecated/kicli/cpp_ut.cpp
+++ b/ydb/public/lib/deprecated/kicli/cpp_ut.cpp
@@ -758,7 +758,7 @@ Y_UNIT_TEST_SUITE(ClientLib) {
auto error = result.GetError();
UNIT_ASSERT(error.Permanent());
UNIT_ASSERT_VALUES_EQUAL(error.GetCode(), "MP-0128");
- UNIT_ASSERT_STRING_CONTAINS(error.GetMessage(), "Mismatch of column type expectedType = 3 actual type = 4608");
+ UNIT_ASSERT_STRING_CONTAINS(error.GetMessage(), "Mismatch of column type expectedType = 3 actual type = 4608");
}
Y_UNIT_TEST(Test14) {
@@ -1397,226 +1397,226 @@ Y_UNIT_TEST_SUITE(ClientLib) {
}
Y_UNIT_TEST(SameTableDifferentColumns) {
- using namespace NClient;
- NMsgBusProxy::TMsgBusClientConfig clientConfig;
- Tests::TServer server = StartupKikimr(clientConfig);
- NClient::TKikimr kikimr(clientConfig);
-
- auto dc = kikimr.GetSchemaRoot("dc-1");
- auto example = dc.MakeDirectory("deduper");
- example.CreateTable("Groups", {
- TKeyColumn("ShardId", NClient::TType::Uint32),
- TKeyColumn("HostTitleHash", NClient::TType::Uint64),
- TKeyColumn("GroupSimHash", NClient::TType::Uint64),
- TKeyColumn("Rank", NClient::TType::Uint32),
- TKeyColumn("UrlHash", NClient::TType::Uint32)
- });
-
- auto updateQuery = kikimr.Query(R"___(
- (
- (let shardId (Parameter 'SHARDID (DataType 'Uint32)))
- (let hostTitleHash (Parameter 'HOSTTITLEHASH (DataType 'Uint64)))
- (let groupSimHash (Parameter 'GROUPSIMHASH (DataType 'Uint64)))
- (let rank (Parameter 'RANK (DataType 'Uint32)))
- (let urlHash (Parameter 'URLHASH (DataType 'Uint32)))
- (let key '(
- '('ShardId shardId)
- '('HostTitleHash hostTitleHash)
- '('GroupSimHash groupSimHash)
- '('Rank rank)
- '('UrlHash urlHash)
- ))
- (let value '())
- (let pgmReturn (AsList
- (UpdateRow '/dc-1/deduper/Groups key value)
- ))
- (return pgmReturn)
- )
- )___").SyncPrepare().GetQuery();
-
- {
- auto result = updateQuery.SyncExecute(
- TParameter("SHARDID", (ui32)0),
- TParameter("HOSTTITLEHASH", (ui64)1111),
- TParameter("GROUPSIMHASH", (ui64)2222),
- TParameter("RANK", (ui32)1),
- TParameter("URLHASH", (ui32)424242)
- );
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
- }
-
- {
- auto result = updateQuery.SyncExecute(
- TParameter("SHARDID", (ui32)0),
- TParameter("HOSTTITLEHASH", (ui64)1111),
- TParameter("GROUPSIMHASH", (ui64)2223),
- TParameter("RANK", (ui32)0),
- TParameter("URLHASH", (ui32)333333)
- );
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
- }
-
- {
- // The query does SelectRow and SelectRange from the same table but with different sets
- // of key columns and different selected columns
+ using namespace NClient;
+ NMsgBusProxy::TMsgBusClientConfig clientConfig;
+ Tests::TServer server = StartupKikimr(clientConfig);
+ NClient::TKikimr kikimr(clientConfig);
+
+ auto dc = kikimr.GetSchemaRoot("dc-1");
+ auto example = dc.MakeDirectory("deduper");
+ example.CreateTable("Groups", {
+ TKeyColumn("ShardId", NClient::TType::Uint32),
+ TKeyColumn("HostTitleHash", NClient::TType::Uint64),
+ TKeyColumn("GroupSimHash", NClient::TType::Uint64),
+ TKeyColumn("Rank", NClient::TType::Uint32),
+ TKeyColumn("UrlHash", NClient::TType::Uint32)
+ });
+
+ auto updateQuery = kikimr.Query(R"___(
+ (
+ (let shardId (Parameter 'SHARDID (DataType 'Uint32)))
+ (let hostTitleHash (Parameter 'HOSTTITLEHASH (DataType 'Uint64)))
+ (let groupSimHash (Parameter 'GROUPSIMHASH (DataType 'Uint64)))
+ (let rank (Parameter 'RANK (DataType 'Uint32)))
+ (let urlHash (Parameter 'URLHASH (DataType 'Uint32)))
+ (let key '(
+ '('ShardId shardId)
+ '('HostTitleHash hostTitleHash)
+ '('GroupSimHash groupSimHash)
+ '('Rank rank)
+ '('UrlHash urlHash)
+ ))
+ (let value '())
+ (let pgmReturn (AsList
+ (UpdateRow '/dc-1/deduper/Groups key value)
+ ))
+ (return pgmReturn)
+ )
+ )___").SyncPrepare().GetQuery();
+
+ {
+ auto result = updateQuery.SyncExecute(
+ TParameter("SHARDID", (ui32)0),
+ TParameter("HOSTTITLEHASH", (ui64)1111),
+ TParameter("GROUPSIMHASH", (ui64)2222),
+ TParameter("RANK", (ui32)1),
+ TParameter("URLHASH", (ui32)424242)
+ );
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
+ }
+
+ {
+ auto result = updateQuery.SyncExecute(
+ TParameter("SHARDID", (ui32)0),
+ TParameter("HOSTTITLEHASH", (ui64)1111),
+ TParameter("GROUPSIMHASH", (ui64)2223),
+ TParameter("RANK", (ui32)0),
+ TParameter("URLHASH", (ui32)333333)
+ );
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
+ }
+
+ {
+ // The query does SelectRow and SelectRange from the same table but with different sets
+ // of key columns and different selected columns
auto rawq = kikimr.Query(R"___(
- (
- (let shardId (Parameter 'SHARDID (DataType 'Uint32)))
- (let hostTitleHash (Parameter 'HOSTTITLEHASH (DataType 'Uint64)))
- (let groupSimHash (Parameter 'GROUPSIMHASH (DataType 'Uint64)))
- (let rank (Parameter 'RANK (DataType 'Uint32)))
- (let urlHash (Parameter 'URLHASH (DataType 'Uint32)))
- (let key1 '('('ShardId shardId)
- '('HostTitleHash hostTitleHash)
- '('GroupSimHash groupSimHash)
- '('Rank rank)
- '('UrlHash urlHash)))
- (let column1 '('UrlHash))
- (let range '('IncFrom '('ShardId shardId (Void))
- '('HostTitleHash hostTitleHash (Void))
- '('GroupSimHash groupSimHash (Void))
- ))
- (let column2 '('UrlHash))
- (let options '('('ItemsLimit (Uint64 '1))))
- # SelectRow by the full key (5 columns) with 1 key column in the result
- (let res1 (SelectRow 'dc-1/deduper/Groups key1 column1))
- # SelectRange by 3 key columns and 1 key column in the result
- (let res2 (SelectRange 'dc-1/deduper/Groups range column2 options))
- (let result (SetResult 'result (AsStruct '('rowResult res1) '('rangeResult res2))))
- (let pgmReturn (AsList result))
- (return pgmReturn)
- )
+ (
+ (let shardId (Parameter 'SHARDID (DataType 'Uint32)))
+ (let hostTitleHash (Parameter 'HOSTTITLEHASH (DataType 'Uint64)))
+ (let groupSimHash (Parameter 'GROUPSIMHASH (DataType 'Uint64)))
+ (let rank (Parameter 'RANK (DataType 'Uint32)))
+ (let urlHash (Parameter 'URLHASH (DataType 'Uint32)))
+ (let key1 '('('ShardId shardId)
+ '('HostTitleHash hostTitleHash)
+ '('GroupSimHash groupSimHash)
+ '('Rank rank)
+ '('UrlHash urlHash)))
+ (let column1 '('UrlHash))
+ (let range '('IncFrom '('ShardId shardId (Void))
+ '('HostTitleHash hostTitleHash (Void))
+ '('GroupSimHash groupSimHash (Void))
+ ))
+ (let column2 '('UrlHash))
+ (let options '('('ItemsLimit (Uint64 '1))))
+ # SelectRow by the full key (5 columns) with 1 key column in the result
+ (let res1 (SelectRow 'dc-1/deduper/Groups key1 column1))
+ # SelectRange by 3 key columns and 1 key column in the result
+ (let res2 (SelectRange 'dc-1/deduper/Groups range column2 options))
+ (let result (SetResult 'result (AsStruct '('rowResult res1) '('rangeResult res2))))
+ (let pgmReturn (AsList result))
+ (return pgmReturn)
+ )
)___");
auto q = rawq.SyncPrepare();
- UNIT_ASSERT_VALUES_EQUAL_C(q.GetStatus(), NMsgBusProxy::MSTATUS_OK, q.GetError().GetMessage().c_str());
-
- auto query = q.GetQuery();
- auto result = query.SyncExecute(
- TParameter("SHARDID", (ui32)0),
- TParameter("HOSTTITLEHASH", (ui64)1111),
- TParameter("GROUPSIMHASH", (ui64)2222),
- TParameter("RANK", (ui32)1),
- TParameter("URLHASH", (ui32)424242)
- );
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
- // result.GetValue().DumpValue();
- }
- }
-
- void CheckSelectRange(NClient::TKikimr& kikimr,
- ui32 key1Start, ui32 key1End,
- bool includeFrom, bool includeTo,
+ UNIT_ASSERT_VALUES_EQUAL_C(q.GetStatus(), NMsgBusProxy::MSTATUS_OK, q.GetError().GetMessage().c_str());
+
+ auto query = q.GetQuery();
+ auto result = query.SyncExecute(
+ TParameter("SHARDID", (ui32)0),
+ TParameter("HOSTTITLEHASH", (ui64)1111),
+ TParameter("GROUPSIMHASH", (ui64)2222),
+ TParameter("RANK", (ui32)1),
+ TParameter("URLHASH", (ui32)424242)
+ );
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
+ // result.GetValue().DumpValue();
+ }
+ }
+
+ void CheckSelectRange(NClient::TKikimr& kikimr,
+ ui32 key1Start, ui32 key1End,
+ bool includeFrom, bool includeTo,
const TVector<std::pair<ui32, ui32>>& expectedKeys)
- {
- using namespace NClient;
-
+ {
+ using namespace NClient;
+
auto rawq = kikimr.Query(
- Sprintf(R"___(
- (
- (let key1Start (Parameter 'KEY1START (DataType 'Uint32)))
- (let key1End (Parameter 'KEY1END (DataType 'Uint32)))
- (let range '('%s '%s '('Key1 key1Start key1End)))
- (let column '('Key1 'Key2))
- (let options '())
- (let readRes (SelectRange 'dc-1/test/Table range column options))
- (let result (SetResult 'readRes readRes))
- (let pgmReturn (AsList result))
- (return pgmReturn)
- )
- )___",
- includeFrom ? "IncFrom" : "ExcFrom",
- includeTo ? "IncTo" : "ExcTo"
+ Sprintf(R"___(
+ (
+ (let key1Start (Parameter 'KEY1START (DataType 'Uint32)))
+ (let key1End (Parameter 'KEY1END (DataType 'Uint32)))
+ (let range '('%s '%s '('Key1 key1Start key1End)))
+ (let column '('Key1 'Key2))
+ (let options '())
+ (let readRes (SelectRange 'dc-1/test/Table range column options))
+ (let result (SetResult 'readRes readRes))
+ (let pgmReturn (AsList result))
+ (return pgmReturn)
+ )
+ )___",
+ includeFrom ? "IncFrom" : "ExcFrom",
+ includeTo ? "IncTo" : "ExcTo"
));
auto q = rawq.SyncPrepare();
- UNIT_ASSERT_VALUES_EQUAL_C(q.GetStatus(), NMsgBusProxy::MSTATUS_OK, q.GetError().GetMessage().c_str());
-
- auto query = q.GetQuery();
- auto result = query.SyncExecute(
- TParameter("KEY1START", key1Start),
- TParameter("KEY1END", key1End)
- );
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
- // result.GetValue().DumpValue();
- auto valueResult = result.GetValue();
- ui32 sz = valueResult["readRes"]["List"].Size();
- UNIT_ASSERT_VALUES_EQUAL(sz, expectedKeys.size());
- for (ui32 i = 0; i < sz ; ++i) {
- ui32 actualKey1 = (ui32)valueResult["readRes"]["List"][i]["Key1"];
- ui64 actualKey2 = (ui64)valueResult["readRes"]["List"][i]["Key2"];
-
- UNIT_ASSERT_VALUES_EQUAL(actualKey1, expectedKeys[i].first);
- UNIT_ASSERT_VALUES_EQUAL(actualKey2, expectedKeys[i].second);
- }
- }
-
+ UNIT_ASSERT_VALUES_EQUAL_C(q.GetStatus(), NMsgBusProxy::MSTATUS_OK, q.GetError().GetMessage().c_str());
+
+ auto query = q.GetQuery();
+ auto result = query.SyncExecute(
+ TParameter("KEY1START", key1Start),
+ TParameter("KEY1END", key1End)
+ );
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
+ // result.GetValue().DumpValue();
+ auto valueResult = result.GetValue();
+ ui32 sz = valueResult["readRes"]["List"].Size();
+ UNIT_ASSERT_VALUES_EQUAL(sz, expectedKeys.size());
+ for (ui32 i = 0; i < sz ; ++i) {
+ ui32 actualKey1 = (ui32)valueResult["readRes"]["List"][i]["Key1"];
+ ui64 actualKey2 = (ui64)valueResult["readRes"]["List"][i]["Key2"];
+
+ UNIT_ASSERT_VALUES_EQUAL(actualKey1, expectedKeys[i].first);
+ UNIT_ASSERT_VALUES_EQUAL(actualKey2, expectedKeys[i].second);
+ }
+ }
+
Y_UNIT_TEST(SelectRangeWithInf) {
- using namespace NClient;
- NMsgBusProxy::TMsgBusClientConfig clientConfig;
- Tests::TServer server = StartupKikimr(clientConfig);
- NClient::TKikimr kikimr(clientConfig);
-
- auto dc = kikimr.GetSchemaRoot("dc-1");
- auto example = dc.MakeDirectory("test");
- example.CreateTable("Table", {
- TKeyColumn("Key1", NClient::TType::Uint32),
- TKeyColumn("Key2", NClient::TType::Uint64),
+ using namespace NClient;
+ NMsgBusProxy::TMsgBusClientConfig clientConfig;
+ Tests::TServer server = StartupKikimr(clientConfig);
+ NClient::TKikimr kikimr(clientConfig);
+
+ auto dc = kikimr.GetSchemaRoot("dc-1");
+ auto example = dc.MakeDirectory("test");
+ example.CreateTable("Table", {
+ TKeyColumn("Key1", NClient::TType::Uint32),
+ TKeyColumn("Key2", NClient::TType::Uint64),
TColumn("Value", NClient::TType::Utf8)
- });
-
+ });
+
auto rawQuery = kikimr.Query(R"___(
- (
- (let key1 (Parameter 'KEY1 (DataType 'Uint32)))
- (let key2 (Parameter 'KEY2 (DataType 'Uint64)))
+ (
+ (let key1 (Parameter 'KEY1 (DataType 'Uint32)))
+ (let key2 (Parameter 'KEY2 (DataType 'Uint64)))
(let val (Parameter 'VALUE (DataType 'Utf8)))
- (let key '(
- '('Key1 key1)
- '('Key2 key2)
- ))
- (let value '('('Value val)))
- (let pgmReturn (AsList
- (UpdateRow '/dc-1/test/Table key value)
- ))
- (return pgmReturn)
- )
+ (let key '(
+ '('Key1 key1)
+ '('Key2 key2)
+ ))
+ (let value '('('Value val)))
+ (let pgmReturn (AsList
+ (UpdateRow '/dc-1/test/Table key value)
+ ))
+ (return pgmReturn)
+ )
)___");
auto updateQuery = rawQuery.SyncPrepare();
- UNIT_ASSERT_VALUES_EQUAL_C(updateQuery.GetStatus(), NMsgBusProxy::MSTATUS_OK, updateQuery.GetError().GetMessage().c_str());
-
- // Write many rows in order to trigger compaction and create a flat part
- for (ui32 k1 = 0; k1 < 300; ++k1) {
- for (ui64 k2 = 0; k2 < 3; ++k2) {
- auto result = updateQuery.GetQuery().SyncExecute(
- TParameter("KEY1", k1),
- TParameter("KEY2", k2),
+ UNIT_ASSERT_VALUES_EQUAL_C(updateQuery.GetStatus(), NMsgBusProxy::MSTATUS_OK, updateQuery.GetError().GetMessage().c_str());
+
+ // Write many rows in order to trigger compaction and create a flat part
+ for (ui32 k1 = 0; k1 < 300; ++k1) {
+ for (ui64 k2 = 0; k2 < 3; ++k2) {
+ auto result = updateQuery.GetQuery().SyncExecute(
+ TParameter("KEY1", k1),
+ TParameter("KEY2", k2),
TParameter("VALUE", TString(2048, 'A'))
- );
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
- }
- }
-
- for (ui32 k1 = 0; k1 < 299; ++k1) {
- // from (k1, inf) to (k1+1, inf)
+ );
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NMsgBusProxy::MSTATUS_OK, result.GetError().GetMessage().c_str());
+ }
+ }
+
+ for (ui32 k1 = 0; k1 < 299; ++k1) {
+ // from (k1, inf) to (k1+1, inf)
TVector<std::pair<ui32, ui32>> expected = {{k1+1,0},{k1+1,1},{k1+1,2}};
- CheckSelectRange(kikimr, k1, k1+1, false, false, expected);
- CheckSelectRange(kikimr, k1, k1+1, false, true, expected);
- CheckSelectRange(kikimr, k1, k1+1, true, false, expected);
- CheckSelectRange(kikimr, k1, k1+1, true, true, expected);
-
- // from (k1, inf) to (k1, inf)
- CheckSelectRange(kikimr, k1, k1, false, false, {});
- CheckSelectRange(kikimr, k1, k1, false, true, {});
- CheckSelectRange(kikimr, k1, k1, true, false, {});
- CheckSelectRange(kikimr, k1, k1, true, true, {});
-
- // from (k1+1, inf) to (k1, inf)
- CheckSelectRange(kikimr, k1+1, k1, false, false, {});
- CheckSelectRange(kikimr, k1+1, k1, false, true, {});
- CheckSelectRange(kikimr, k1+1, k1, true, false, {});
- CheckSelectRange(kikimr, k1+1, k1, true, true, {});
- }
- }
-
-
+ CheckSelectRange(kikimr, k1, k1+1, false, false, expected);
+ CheckSelectRange(kikimr, k1, k1+1, false, true, expected);
+ CheckSelectRange(kikimr, k1, k1+1, true, false, expected);
+ CheckSelectRange(kikimr, k1, k1+1, true, true, expected);
+
+ // from (k1, inf) to (k1, inf)
+ CheckSelectRange(kikimr, k1, k1, false, false, {});
+ CheckSelectRange(kikimr, k1, k1, false, true, {});
+ CheckSelectRange(kikimr, k1, k1, true, false, {});
+ CheckSelectRange(kikimr, k1, k1, true, true, {});
+
+ // from (k1+1, inf) to (k1, inf)
+ CheckSelectRange(kikimr, k1+1, k1, false, false, {});
+ CheckSelectRange(kikimr, k1+1, k1, false, true, {});
+ CheckSelectRange(kikimr, k1+1, k1, true, false, {});
+ CheckSelectRange(kikimr, k1+1, k1, true, true, {});
+ }
+ }
+
+
Y_UNIT_TEST(TypicalCase1) {
using namespace NClient;
NMsgBusProxy::TMsgBusClientConfig clientConfig;
@@ -2146,12 +2146,12 @@ NKikimrTxUserProxy::TKeyRange MakeRange(const TVector<TString> from, const TVect
auto &tuple = *range.MutableFrom()->MutableType()->MutableTuple();
for (auto &s : from) {
if (s)
- range.MutableFrom()->MutableValue()->AddTuple()->MutableOptional()->SetText(s);
+ range.MutableFrom()->MutableValue()->AddTuple()->MutableOptional()->SetText(s);
auto &elem = *tuple.AddElement();
- elem.SetKind(NKikimrMiniKQL::Optional);
- auto &item = *elem.MutableOptional()->MutableItem();
- item.SetKind(NKikimrMiniKQL::Data);
- item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
+ elem.SetKind(NKikimrMiniKQL::Optional);
+ auto &item = *elem.MutableOptional()->MutableItem();
+ item.SetKind(NKikimrMiniKQL::Data);
+ item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
}
}
if (!to.empty()) {
@@ -2159,12 +2159,12 @@ NKikimrTxUserProxy::TKeyRange MakeRange(const TVector<TString> from, const TVect
auto &tuple = *range.MutableTo()->MutableType()->MutableTuple();
for (auto &s : to) {
if (s)
- range.MutableTo()->MutableValue()->AddTuple()->MutableOptional()->SetText(s);
+ range.MutableTo()->MutableValue()->AddTuple()->MutableOptional()->SetText(s);
auto &elem = *tuple.AddElement();
- elem.SetKind(NKikimrMiniKQL::Optional);
- auto &item = *elem.MutableOptional()->MutableItem();
- item.SetKind(NKikimrMiniKQL::Data);
- item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
+ elem.SetKind(NKikimrMiniKQL::Optional);
+ auto &item = *elem.MutableOptional()->MutableItem();
+ item.SetKind(NKikimrMiniKQL::Data);
+ item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
}
}
range.SetFromInclusive(fromInclusive);
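Note on the MakeRange hunk above: each key-range boundary is a tuple of Optional<Utf8> values, with the type tuple always extended and the value omitted for empty components. A minimal sketch of one boundary follows, reusing only the proto calls visible in the hunk; the helper name BuildFromBoundary and its standalone form are illustrative, not part of this change.

    // Sketch: build the "From" boundary of a key range the way MakeRange does above.
    // Assumes the NKikimrTxUserProxy and NKikimrMiniKQL protos are on the include path.
    NKikimrTxUserProxy::TKeyRange BuildFromBoundary(const TVector<TString>& from, bool fromInclusive) {
        NKikimrTxUserProxy::TKeyRange range;
        auto& tuple = *range.MutableFrom()->MutableType()->MutableTuple();
        for (const auto& s : from) {
            if (s) // value omitted for an empty component, mirroring the hunk above
                range.MutableFrom()->MutableValue()->AddTuple()->MutableOptional()->SetText(s);
            auto& elem = *tuple.AddElement();
            elem.SetKind(NKikimrMiniKQL::Optional);
            auto& item = *elem.MutableOptional()->MutableItem();
            item.SetKind(NKikimrMiniKQL::Data);
            item.MutableData()->SetScheme(NUdf::TDataType<NUdf::TUtf8>::Id);
        }
        range.SetFromInclusive(fromInclusive);
        return range;
    }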
diff --git a/ydb/public/lib/deprecated/kicli/error.cpp b/ydb/public/lib/deprecated/kicli/error.cpp
index a82dfbfc36f..473a981f607 100644
--- a/ydb/public/lib/deprecated/kicli/error.cpp
+++ b/ydb/public/lib/deprecated/kicli/error.cpp
@@ -86,7 +86,7 @@ bool TError::Permanent() const {
case NTxProxy::TResultStatus::EStatus::ExecInProgress:
case NTxProxy::TResultStatus::EStatus::ProxyShardTryLater:
case NTxProxy::TResultStatus::EStatus::ProxyShardOverloaded:
- case NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable:
+ case NTxProxy::TResultStatus::EStatus::ProxyShardNotAvailable:
return false;
default:
break;
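Note on the TError::Permanent() hunk above: ExecInProgress, ProxyShardTryLater, ProxyShardOverloaded and the re-annotated ProxyShardNotAvailable are all reported as transient, so a caller may retry them. A hedged sketch of such a retry loop follows; RunOnce() is a placeholder for whatever produces an NClient::TResult, and GetError() is assumed to return the TError shown above.

    // Sketch only: retry a kicli call while the error is reported as transient.
    // GetStatus()/GetError()/Throw() are the accessors used elsewhere in this diff;
    // the retry budget and backoff are illustrative.
    NClient::TResult result = RunOnce();
    for (int attempt = 0; attempt < 5; ++attempt) {
        if (result.GetStatus() == NMsgBusProxy::MSTATUS_OK || result.GetError().Permanent())
            break;
        Sleep(TDuration::MilliSeconds(100 * (attempt + 1))); // simple linear backoff
        result = RunOnce();
    }
    result.GetError().Throw(); // same "throw on error" pattern as DoCreateTable in this diff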
diff --git a/ydb/public/lib/deprecated/kicli/kicli.h b/ydb/public/lib/deprecated/kicli/kicli.h
index 0d95bd089aa..9dc472d2314 100644
--- a/ydb/public/lib/deprecated/kicli/kicli.h
+++ b/ydb/public/lib/deprecated/kicli/kicli.h
@@ -49,7 +49,7 @@ class TKikimr;
using TTablePartitionConfig = NKikimrSchemeOp::TPartitionConfig;
using TModifyScheme = NKikimrSchemeOp::TModifyScheme;
-
+
class TType {
public:
static const TType Int64;
@@ -65,8 +65,8 @@ public:
static const TType String2m;
static const TType Yson;
static const TType Json;
- static const TType JsonDocument;
- static const TType Timestamp;
+ static const TType JsonDocument;
+ static const TType Timestamp;
const TString& GetName() const;
ui16 GetId() const;
@@ -82,10 +82,10 @@ protected:
class TColumn {
friend class TSchemaObject;
public:
- TString Name;
- TType Type;
- bool Key;
- ui32 Partitions;
+ TString Name;
+ TType Type;
+ bool Key;
+ ui32 Partitions;
// generic column of a table, used in schema operations
TColumn(const TString& name, const TType& type);
@@ -575,7 +575,7 @@ public:
Kesus,
SolomonVolume,
FileStore,
- OlapStore,
+ OlapStore,
OlapTable,
Sequence,
Replication,
@@ -589,7 +589,7 @@ public:
TSchemaObject MakeDirectory(const TString& name);
TSchemaObject CreateTable(const TString& name, const TVector<TColumn>& columns);
TSchemaObject CreateTable(const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig& partitionConfig);
+ const TTablePartitionConfig& partitionConfig);
TSchemaObject GetChild(const TString& name) const;
TString GetName() const;
TString GetPath() const;
@@ -607,8 +607,8 @@ protected:
EPathType pathType = EPathType::Unknown);
TSchemaObject DoCreateTable(const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig* partitionConfig);
-
+ const TTablePartitionConfig* partitionConfig);
+
TKikimr& Kikimr;
TString Path;
TString Name;
@@ -788,7 +788,7 @@ protected:
NThreading::TFuture<TResult> ModifySchema(const TModifyScheme& schema);
NThreading::TFuture<TResult> MakeDirectory(const TSchemaObject& object, const TString& name);
NThreading::TFuture<TResult> CreateTable(TSchemaObject& object, const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig* partitionConfig);
+ const TTablePartitionConfig* partitionConfig);
NBus::EMessageStatus ExecuteRequestInternal(NThreading::TPromise<TResult> promise, TAutoPtr<NBus::TBusMessage> request);
NThreading::TFuture<TResult> RegisterNode(const TString& domainPath, const TString& host, ui16 port,
const TString& address, const TString& resolveHost,
@@ -856,18 +856,18 @@ protected:
}
}
- void PrepareRequest(NKikimrClient::TLocalMKQL& request) const {
- if (!SecurityToken.empty()) {
- request.SetSecurityToken(SecurityToken);
- }
- }
-
- void PrepareRequest(NKikimrClient::TLocalSchemeTx& request) const {
- if (!SecurityToken.empty()) {
- request.SetSecurityToken(SecurityToken);
- }
- }
-
+ void PrepareRequest(NKikimrClient::TLocalMKQL& request) const {
+ if (!SecurityToken.empty()) {
+ request.SetSecurityToken(SecurityToken);
+ }
+ }
+
+ void PrepareRequest(NKikimrClient::TLocalSchemeTx& request) const {
+ if (!SecurityToken.empty()) {
+ request.SetSecurityToken(SecurityToken);
+ }
+ }
+
TString SecurityToken;
THolder<TImpl> Impl;
};
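Note on the kicli.h hunks above: TColumn carries Name/Type/Key/Partitions, the TType constants include JsonDocument and Timestamp, and CreateTable has an overload taking a TTablePartitionConfig. A hedged usage sketch follows; obtaining the parent TSchemaObject (here `dir`) and the concrete table and column names are assumptions.

    // Sketch: create a table through the deprecated kicli API declared above.
    using namespace NClient;

    TVector<TColumn> columns = {
        TColumn("Key",       TType::Int64),
        TColumn("CreatedAt", TType::Timestamp),    // constant touched in the hunk above
        TColumn("Payload",   TType::JsonDocument), // constant touched in the hunk above
    };

    TTablePartitionConfig partitionConfig; // NKikimrSchemeOp::TPartitionConfig; fields left default here
    TSchemaObject table = dir.CreateTable("Events", columns, partitionConfig);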
diff --git a/ydb/public/lib/deprecated/kicli/kikimr.cpp b/ydb/public/lib/deprecated/kicli/kikimr.cpp
index 6874e4b0759..4156d72d4b3 100644
--- a/ydb/public/lib/deprecated/kicli/kikimr.cpp
+++ b/ydb/public/lib/deprecated/kicli/kikimr.cpp
@@ -277,8 +277,8 @@ public:
return ExecuteGRpcRequest<NMsgBusProxy::TBusChooseProxy>(&NGRpcProxy::TGRpcClient::ChooseProxy, promise, request);
case NMsgBusProxy::MTYPE_CLIENT_SQS_REQUEST:
return ExecuteGRpcRequest<NMsgBusProxy::TBusSqsRequest, NMsgBusProxy::TBusSqsResponse>(&NGRpcProxy::TGRpcClient::SqsRequest, promise, request);
- case NMsgBusProxy::MTYPE_CLIENT_S3_LISTING_REQUEST:
- return ExecuteGRpcRequest<NMsgBusProxy::TBusS3ListingRequest, NMsgBusProxy::TBusS3ListingResponse>(&NGRpcProxy::TGRpcClient::S3Listing, promise, request);
+ case NMsgBusProxy::MTYPE_CLIENT_S3_LISTING_REQUEST:
+ return ExecuteGRpcRequest<NMsgBusProxy::TBusS3ListingRequest, NMsgBusProxy::TBusS3ListingResponse>(&NGRpcProxy::TGRpcClient::S3Listing, promise, request);
case NMsgBusProxy::MTYPE_CLIENT_INTERCONNECT_DEBUG:
return ExecuteGRpcRequest<NMsgBusProxy::TBusInterconnectDebug>(&NGRpcProxy::TGRpcClient::InterconnectDebug, promise, request);
case NMsgBusProxy::MTYPE_CLIENT_CONSOLE_REQUEST:
@@ -524,8 +524,8 @@ NThreading::TFuture<TResult> TKikimr::MakeDirectory(const TSchemaObject& object,
}
NThreading::TFuture<TResult> TKikimr::CreateTable(TSchemaObject& object, const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig* partitionConfig)
-{
+ const TTablePartitionConfig* partitionConfig)
+{
TAutoPtr<NMsgBusProxy::TBusSchemeOperation> request(new NMsgBusProxy::TBusSchemeOperation());
request->Record.MutablePollOptions()->SetTimeout(POLLING_TIMEOUT);
auto* modifyScheme = request->Record.MutableTransaction()->MutableModifyScheme();
@@ -545,9 +545,9 @@ NThreading::TFuture<TResult> TKikimr::CreateTable(TSchemaObject& object, const T
}
}
}
- if (partitionConfig) {
- createTable->MutablePartitionConfig()->CopyFrom(*partitionConfig);
- }
+ if (partitionConfig) {
+ createTable->MutablePartitionConfig()->CopyFrom(*partitionConfig);
+ }
return ExecuteRequest(request.Release());
}
diff --git a/ydb/public/lib/deprecated/kicli/schema.cpp b/ydb/public/lib/deprecated/kicli/schema.cpp
index 1694c6a3de4..d1ee0384f8b 100644
--- a/ydb/public/lib/deprecated/kicli/schema.cpp
+++ b/ydb/public/lib/deprecated/kicli/schema.cpp
@@ -53,8 +53,8 @@ const TType TType::String4k(NScheme::NTypeIds::String4k);
const TType TType::String2m(NScheme::NTypeIds::String2m);
const TType TType::Yson(NScheme::NTypeIds::Yson);
const TType TType::Json(NScheme::NTypeIds::Json);
-const TType TType::JsonDocument(NScheme::NTypeIds::JsonDocument);
-const TType TType::Timestamp(NScheme::NTypeIds::Timestamp);
+const TType TType::JsonDocument(NScheme::NTypeIds::JsonDocument);
+const TType TType::Timestamp(NScheme::NTypeIds::Timestamp);
const TString& TType::GetName() const {
return TypeName;
@@ -107,12 +107,12 @@ void TSchemaObject::Drop() {
case EPathType::SolomonVolume:
drop.SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpDropSolomonVolume);
break;
- case EPathType::OlapStore:
+ case EPathType::OlapStore:
drop.SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpDropColumnStore);
- break;
- case EPathType::OlapTable:
+ break;
+ case EPathType::OlapTable:
drop.SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpDropColumnTable);
- break;
+ break;
case EPathType::Sequence:
drop.SetOperationType(NKikimrSchemeOp::EOperationType::ESchemeOpDropSequence);
break;
@@ -143,19 +143,19 @@ TSchemaObject TSchemaObject::MakeDirectory(const TString& name) {
}
TSchemaObject TSchemaObject::CreateTable(const TString& name, const TVector<TColumn>& columns) {
- return DoCreateTable(name, columns, nullptr);
-}
-
+ return DoCreateTable(name, columns, nullptr);
+}
+
TSchemaObject TSchemaObject::CreateTable(const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig& partitionConfig)
-{
- return DoCreateTable(name, columns, &partitionConfig);
-}
-
+ const TTablePartitionConfig& partitionConfig)
+{
+ return DoCreateTable(name, columns, &partitionConfig);
+}
+
TSchemaObject TSchemaObject::DoCreateTable(const TString& name, const TVector<TColumn>& columns,
- const TTablePartitionConfig* partitionConfig)
-{
- NThreading::TFuture<TResult> future = Kikimr.CreateTable(*this, name, columns, partitionConfig);
+ const TTablePartitionConfig* partitionConfig)
+{
+ NThreading::TFuture<TResult> future = Kikimr.CreateTable(*this, name, columns, partitionConfig);
TResult result = future.GetValue(TDuration::Max());
result.GetError().Throw();
@@ -195,9 +195,9 @@ static TSchemaObject::EPathType GetType(const NKikimrSchemeOp::TDirEntry& entry)
case NKikimrSchemeOp::EPathTypeSolomonVolume:
return TSchemaObject::EPathType::SolomonVolume;
case NKikimrSchemeOp::EPathTypeColumnStore:
- return TSchemaObject::EPathType::OlapStore;
+ return TSchemaObject::EPathType::OlapStore;
case NKikimrSchemeOp::EPathTypeColumnTable:
- return TSchemaObject::EPathType::OlapTable;
+ return TSchemaObject::EPathType::OlapTable;
case NKikimrSchemeOp::EPathTypeSequence:
return TSchemaObject::EPathType::Sequence;
case NKikimrSchemeOp::EPathTypeReplication:
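Note on the schema.cpp hunks above: the OlapStore and OlapTable path types map onto ESchemeOpDropColumnStore and ESchemeOpDropColumnTable, so column-store objects go through the same TSchemaObject::Drop() path as row tables. A two-line sketch follows; `root` and the child name are illustrative.

    // Sketch: dropping an OLAP (column) table via the kicli schema API.
    // Drop() selects ESchemeOpDropColumnTable based on the path type resolved by GetType() above.
    NClient::TSchemaObject table = root.GetChild("LogTable");
    table.Drop();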
diff --git a/ydb/public/lib/experimental/ya.make b/ydb/public/lib/experimental/ya.make
index 089515ecb8c..397454a978d 100644
--- a/ydb/public/lib/experimental/ya.make
+++ b/ydb/public/lib/experimental/ya.make
@@ -1,19 +1,19 @@
-LIBRARY()
-
-OWNER(g:kikimr)
-
-SRCS(
- ydb_clickhouse_internal.cpp
- ydb_experimental.cpp
- ydb_logstore.cpp
- ydb_s3_internal.cpp
-)
-
-PEERDIR(
+LIBRARY()
+
+OWNER(g:kikimr)
+
+SRCS(
+ ydb_clickhouse_internal.cpp
+ ydb_experimental.cpp
+ ydb_logstore.cpp
+ ydb_s3_internal.cpp
+)
+
+PEERDIR(
ydb/core/scheme
ydb/public/api/grpc/draft
ydb/public/sdk/cpp/client/ydb_proto
ydb/public/sdk/cpp/client/ydb_table
-)
-
-END()
+)
+
+END()
diff --git a/ydb/public/lib/experimental/ydb_clickhouse_internal.cpp b/ydb/public/lib/experimental/ydb_clickhouse_internal.cpp
index c169ded9681..2984f680b7b 100644
--- a/ydb/public/lib/experimental/ydb_clickhouse_internal.cpp
+++ b/ydb/public/lib/experimental/ydb_clickhouse_internal.cpp
@@ -1,5 +1,5 @@
-#include "ydb_clickhouse_internal.h"
-
+#include "ydb_clickhouse_internal.h"
+
#define INCLUDE_YDB_INTERNAL_H
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h>
#undef INCLUDE_YDB_INTERNAL_H
@@ -7,146 +7,146 @@
#include <ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.grpc.pb.h>
#include <library/cpp/grpc/client/grpc_client_low.h>
#include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h>
-
-// TODO: Bad dependency???
+
+// TODO: Bad dependency???
#include <ydb/core/scheme/scheme_tablecell.h>
-
-namespace NYdb {
-namespace NClickhouseInternal {
-
+
+namespace NYdb {
+namespace NClickhouseInternal {
+
using namespace NThreading;
-///////////////////////////////////////////////////////////////////////////
-class TScanResult::TResultImpl {
-public:
- TResultImpl(Ydb::ClickhouseInternal::ScanResult&& result)
- : Result(std::move(result))
- {}
-
- const Ydb::ClickhouseInternal::ScanResult Result;
-};
-
-TScanResult::TScanResult(TResultImpl* impl, TStatus&& status)
- : TStatus(std::move(status))
- , ResultImpl(impl)
-{}
-
-TScanResult::TScanResult(TScanResult&& other)
- : TStatus(std::move(other))
- , ResultImpl(std::move(other.ResultImpl))
-{}
-
-TScanResult& TScanResult::operator = (TScanResult&& other) {
- (TStatus&)*this = std::move(other);
- ResultImpl = std::move(other.ResultImpl);
- return *this;
-}
-
-TScanResult::~TScanResult() {
-}
-
-size_t TScanResult::GetBuffersCount() const {
- return ResultImpl->Result.blocks_size();
-}
-
-TString TScanResult::GetBuffer(size_t idx) const {
- return ResultImpl->Result.blocks(idx);
-}
-
-bool TScanResult::IsEos() const {
- return ResultImpl->Result.eos();
-}
-
-std::pair<TString, bool> TScanResult::GetLastKey() const {
- return {ResultImpl->Result.last_key(), ResultImpl->Result.last_key_inclusive()};
-}
-
-class TScanClient::TImpl : public TClientImplCommon<TScanClient::TImpl> {
-public:
- TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
+///////////////////////////////////////////////////////////////////////////
+class TScanResult::TResultImpl {
+public:
+ TResultImpl(Ydb::ClickhouseInternal::ScanResult&& result)
+ : Result(std::move(result))
+ {}
+
+ const Ydb::ClickhouseInternal::ScanResult Result;
+};
+
+TScanResult::TScanResult(TResultImpl* impl, TStatus&& status)
+ : TStatus(std::move(status))
+ , ResultImpl(impl)
+{}
+
+TScanResult::TScanResult(TScanResult&& other)
+ : TStatus(std::move(other))
+ , ResultImpl(std::move(other.ResultImpl))
+{}
+
+TScanResult& TScanResult::operator = (TScanResult&& other) {
+ (TStatus&)*this = std::move(other);
+ ResultImpl = std::move(other.ResultImpl);
+ return *this;
+}
+
+TScanResult::~TScanResult() {
+}
+
+size_t TScanResult::GetBuffersCount() const {
+ return ResultImpl->Result.blocks_size();
+}
+
+TString TScanResult::GetBuffer(size_t idx) const {
+ return ResultImpl->Result.blocks(idx);
+}
+
+bool TScanResult::IsEos() const {
+ return ResultImpl->Result.eos();
+}
+
+std::pair<TString, bool> TScanResult::GetLastKey() const {
+ return {ResultImpl->Result.last_key(), ResultImpl->Result.last_key_inclusive()};
+}
+
+class TScanClient::TImpl : public TClientImplCommon<TScanClient::TImpl> {
+public:
+ TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
: TClientImplCommon(std::move(connections), settings) {}
-
- TAsyncScanResult Scan(
- const TString& table, const TVector<TString>& columns,
- ui64 maxRows, ui64 maxBytes,
- const TString& fromKey, bool fromKeyInclusive,
- const TScanSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::ClickhouseInternal::ScanRequest>(settings);
- request.set_table(table);
- for (TString col : columns) {
- request.add_columns(col);
- }
- request.set_from_key(fromKey);
- request.set_from_key_inclusive(fromKeyInclusive);
- request.set_max_rows(maxRows);
- request.set_max_bytes(maxBytes);
+
+ TAsyncScanResult Scan(
+ const TString& table, const TVector<TString>& columns,
+ ui64 maxRows, ui64 maxBytes,
+ const TString& fromKey, bool fromKeyInclusive,
+ const TScanSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::ClickhouseInternal::ScanRequest>(settings);
+ request.set_table(table);
+ for (TString col : columns) {
+ request.add_columns(col);
+ }
+ request.set_from_key(fromKey);
+ request.set_from_key_inclusive(fromKeyInclusive);
+ request.set_max_rows(maxRows);
+ request.set_max_bytes(maxBytes);
if (settings.SnapshotId_) {
request.set_snapshot_id(settings.SnapshotId_);
}
-// Cerr << request << Endl;
-
- auto promise = NThreading::NewPromise<TScanResult>();
-
+// Cerr << request << Endl;
+
+ auto promise = NThreading::NewPromise<TScanResult>();
+
auto extractor = [promise]
(google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::ClickhouseInternal::ScanResult result;
- if (any) {
- any->UnpackTo(&result);
- }
-// Cerr << result << Endl;
-
- TScanResult val(new TScanResult::TResultImpl(std::move(result)),
+ Ydb::ClickhouseInternal::ScanResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
+// Cerr << result << Endl;
+
+ TScanResult val(new TScanResult::TResultImpl(std::move(result)),
TStatus(std::move(status)));
-
- promise.SetValue(std::move(val));
- };
-
- Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::ScanRequest, Ydb::ClickhouseInternal::ScanResponse>(
- std::move(request),
- extractor,
- &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncScan,
- DbDriverState_,
+
+ promise.SetValue(std::move(val));
+ };
+
+ Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::ScanRequest, Ydb::ClickhouseInternal::ScanResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncScan,
+ DbDriverState_,
INITIAL_DEFERRED_CALL_DELAY,
TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_,
- settings.Endpoint_);
-
- return promise.GetFuture();
- }
-};
-
-
-TScanClient::TScanClient(const TDriver& driver, const TCommonClientSettings& settings)
- : Impl_(new TImpl(CreateInternalInterface(driver), settings))
-{}
-
-TAsyncScanResult TScanClient::Scan(const TString& path, const TVector<TString>& columns,
- ui64 maxRows, ui64 maxBytes, const TString& fromKey, bool fromKeyInclusive, const TScanSettings& settings) {
- return Impl_->Scan(path, columns, maxRows, maxBytes, fromKey, fromKeyInclusive, settings);
-}
-
-bool RangeFinished(const TString& lastReadKey, const TString& endKey, const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes) {
- if (lastReadKey.empty()) // +inf
- return true;
-
- if (endKey.empty())
- return false;
-
- NKikimr::TSerializedCellVec last(lastReadKey);
- Y_VERIFY(last.GetCells().size() <= keyColumnTypes.size());
-
- NKikimr::TSerializedCellVec end(endKey);
- Y_VERIFY(end.GetCells().size() <= keyColumnTypes.size());
-
- int cmp = NKikimr::CompareTypedCellVectors(
- last.GetCells().data(), end.GetCells().data(),
- keyColumnTypes.data(),
- last.GetCells().size(), end.GetCells().size());
-
- return cmp >= 0;
-}
-
+ settings.ClientTimeout_,
+ settings.Endpoint_);
+
+ return promise.GetFuture();
+ }
+};
+
+
+TScanClient::TScanClient(const TDriver& driver, const TCommonClientSettings& settings)
+ : Impl_(new TImpl(CreateInternalInterface(driver), settings))
+{}
+
+TAsyncScanResult TScanClient::Scan(const TString& path, const TVector<TString>& columns,
+ ui64 maxRows, ui64 maxBytes, const TString& fromKey, bool fromKeyInclusive, const TScanSettings& settings) {
+ return Impl_->Scan(path, columns, maxRows, maxBytes, fromKey, fromKeyInclusive, settings);
+}
+
+bool RangeFinished(const TString& lastReadKey, const TString& endKey, const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes) {
+ if (lastReadKey.empty()) // +inf
+ return true;
+
+ if (endKey.empty())
+ return false;
+
+ NKikimr::TSerializedCellVec last(lastReadKey);
+ Y_VERIFY(last.GetCells().size() <= keyColumnTypes.size());
+
+ NKikimr::TSerializedCellVec end(endKey);
+ Y_VERIFY(end.GetCells().size() <= keyColumnTypes.size());
+
+ int cmp = NKikimr::CompareTypedCellVectors(
+ last.GetCells().data(), end.GetCells().data(),
+ keyColumnTypes.data(),
+ last.GetCells().size(), end.GetCells().size());
+
+ return cmp >= 0;
+}
+
TScanIterator::TScanIterator(const TDriver& driver, const TString &database, const TString &endpoint, const TString &token, bool ssl, const TString& path,
const TVector<TString>& columns,
const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
@@ -169,253 +169,253 @@ TScanIterator::TScanIterator(const TDriver& driver, const TString &database, con
{
MakeRequest();
}
-
-TScanIterator::TScanIterator(const TDriver& driver, const TString &database, const TString &token, const TString& path,
- const TVector<TString>& columns,
- const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
- ui64 maxRowsInRequest, ui64 maxBytesInRequest,
+
+TScanIterator::TScanIterator(const TDriver& driver, const TString &database, const TString &token, const TString& path,
+ const TVector<TString>& columns,
+ const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
+ ui64 maxRowsInRequest, ui64 maxBytesInRequest,
const TString& keyFrom, const TString& keyTo,
const TScanSettings& settings)
- : Path(path)
- , Columns(columns)
- , KeyColumnTypes(keyColumnTypes)
- , MaxRows(maxRowsInRequest)
- , MaxBytes(maxBytesInRequest)
+ : Path(path)
+ , Columns(columns)
+ , KeyColumnTypes(keyColumnTypes)
+ , MaxRows(maxRowsInRequest)
+ , MaxBytes(maxBytesInRequest)
, Settings(settings)
- , Connection(driver, NYdb::TCommonClientSettings().Database(database).AuthToken(token))
+ , Connection(driver, NYdb::TCommonClientSettings().Database(database).AuthToken(token))
, LastReadKey(keyFrom.empty() ? NKikimr::TSerializedCellVec::Serialize(TVector<NKikimr::TCell>(KeyColumnTypes.size())) : keyFrom)
- , LastReadKeyInclusive(false)
- , EndKey(keyTo)
+ , LastReadKeyInclusive(false)
+ , EndKey(keyTo)
, RequestsDone(!EndKey.empty() && RangeFinished(LastReadKey, EndKey, KeyColumnTypes))
- , MaxRetries(20)
- , Retried(0)
-{
- MakeRequest();
-}
-
-TString TScanIterator::GetBlocks() {
- while (Blocks.empty()) {
- if (RequestsDone)
- return TString();
-
- WaitResult();
- MakeRequest();
- }
-
- TString block = Blocks.front();
- Blocks.pop_front();
- return block;
-}
-
-void TScanIterator::MakeRequest() {
- if (RequestsDone)
- return;
-
- // TODO: check that previous request is finished
-
+ , MaxRetries(20)
+ , Retried(0)
+{
+ MakeRequest();
+}
+
+TString TScanIterator::GetBlocks() {
+ while (Blocks.empty()) {
+ if (RequestsDone)
+ return TString();
+
+ WaitResult();
+ MakeRequest();
+ }
+
+ TString block = Blocks.front();
+ Blocks.pop_front();
+ return block;
+}
+
+void TScanIterator::MakeRequest() {
+ if (RequestsDone)
+ return;
+
+ // TODO: check that previous request is finished
+
NextResult = Connection.Scan(Path, Columns, MaxRows, MaxBytes, LastReadKey, !LastReadKeyInclusive, Settings);
-}
-
-bool IsRetriable(EStatus status) {
- switch (status) {
- case EStatus::BAD_REQUEST:
- case EStatus::SCHEME_ERROR:
- case EStatus::UNAUTHORIZED:
- case EStatus::NOT_FOUND:
- return false;
- default:
- return true;
- }
-}
-
-void TScanIterator::WaitResult() {
- while (true) {
- TScanResult res = NextResult.ExtractValueSync();
- if (res.GetStatus() == EStatus::SUCCESS) {
- size_t bc = res.GetBuffersCount();
-
- for (size_t i = 0; i < bc; ++i) {
- TString block = res.GetBuffer(i);
- if (block.size() > 0)
- Blocks.push_back(block);
- }
-
- RequestsDone = res.IsEos();
- std::tie(LastReadKey, LastReadKeyInclusive) = res.GetLastKey();
-
- if (!EndKey.empty()) {
- RequestsDone = RangeFinished(LastReadKey, EndKey, KeyColumnTypes);
- }
-
- // Reset backoffs after a successful attempt
- Retried = 0;
-
- return;
- }
-
- if (!IsRetriable(res.GetStatus()) || Retried > MaxRetries) {
- ythrow yexception() << res.GetStatus() << ": " << res.GetIssues().ToString();
- }
-
- TDuration delay = TDuration::MilliSeconds(50 * (1 << Retried));
- Sleep(Min(TDuration::Seconds(3), delay));
- ++Retried;
-
- MakeRequest();
- }
-}
-
-
-///////////////////////////////////////////////////////////////////////////
-class TGetShardLocationsResult::TResultImpl {
-public:
- TResultImpl(Ydb::ClickhouseInternal::GetShardLocationsResult&& result) {
- for (const auto& ti : result.tablets()) {
- ShardLocations[ti.tablet_id()] = { ti.host(), ti.port() };
- }
- }
-
- THashMap<ui64, std::pair<TString, ui16>> ShardLocations;
-};
-
-TGetShardLocationsResult::TGetShardLocationsResult(TResultImpl* impl, TStatus&& status)
- : TStatus(std::move(status))
- , ResultImpl(impl)
-{}
-
-TGetShardLocationsResult::TGetShardLocationsResult(TGetShardLocationsResult&& other)
- : TStatus(std::move(other))
- , ResultImpl(std::move(other.ResultImpl))
-{}
-
-TGetShardLocationsResult& TGetShardLocationsResult::operator = (TGetShardLocationsResult&& other) {
- (TStatus&)*this = std::move(other);
- ResultImpl = std::move(other.ResultImpl);
- return *this;
-}
-
-TGetShardLocationsResult::~TGetShardLocationsResult() {
-}
-
-
-std::pair<TString, ui16> TGetShardLocationsResult::GetLocation(ui64 tabletId) const {
- const auto* info = ResultImpl->ShardLocations.FindPtr(tabletId);
- return info ? *info : std::pair<TString, ui16>();
-}
-
-class TDescribeTableResult::TResultImpl {
-public:
- TResultImpl(Ydb::ClickhouseInternal::DescribeTableResult&& result)
- : Result(std::move(result))
- {}
-
- Ydb::ClickhouseInternal::DescribeTableResult Result;
-};
-
-
-TDescribeTableResult::TDescribeTableResult(TResultImpl* impl, TStatus&& status)
- : TStatus(std::move(status))
- , ResultImpl(impl)
-{}
-
-TDescribeTableResult::TDescribeTableResult(TDescribeTableResult&& other)
- : TStatus(std::move(other))
- , ResultImpl(std::move(other.ResultImpl))
-{}
-
-TDescribeTableResult& TDescribeTableResult::operator = (TDescribeTableResult&& other) {
- (TStatus&)*this = std::move(other);
- ResultImpl = std::move(other.ResultImpl);
- return *this;
-}
-
-TDescribeTableResult::~TDescribeTableResult() {
-}
-
-const Ydb::ClickhouseInternal::DescribeTableResult& TDescribeTableResult::GetDescription() const {
- return ResultImpl->Result;
-}
-
+}
+
+bool IsRetriable(EStatus status) {
+ switch (status) {
+ case EStatus::BAD_REQUEST:
+ case EStatus::SCHEME_ERROR:
+ case EStatus::UNAUTHORIZED:
+ case EStatus::NOT_FOUND:
+ return false;
+ default:
+ return true;
+ }
+}
+
+void TScanIterator::WaitResult() {
+ while (true) {
+ TScanResult res = NextResult.ExtractValueSync();
+ if (res.GetStatus() == EStatus::SUCCESS) {
+ size_t bc = res.GetBuffersCount();
+
+ for (size_t i = 0; i < bc; ++i) {
+ TString block = res.GetBuffer(i);
+ if (block.size() > 0)
+ Blocks.push_back(block);
+ }
+
+ RequestsDone = res.IsEos();
+ std::tie(LastReadKey, LastReadKeyInclusive) = res.GetLastKey();
+
+ if (!EndKey.empty()) {
+ RequestsDone = RangeFinished(LastReadKey, EndKey, KeyColumnTypes);
+ }
+
+ // Reset backoffs after a successful attempt
+ Retried = 0;
+
+ return;
+ }
+
+ if (!IsRetriable(res.GetStatus()) || Retried > MaxRetries) {
+ ythrow yexception() << res.GetStatus() << ": " << res.GetIssues().ToString();
+ }
+
+ TDuration delay = TDuration::MilliSeconds(50 * (1 << Retried));
+ Sleep(Min(TDuration::Seconds(3), delay));
+ ++Retried;
+
+ MakeRequest();
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////
+class TGetShardLocationsResult::TResultImpl {
+public:
+ TResultImpl(Ydb::ClickhouseInternal::GetShardLocationsResult&& result) {
+ for (const auto& ti : result.tablets()) {
+ ShardLocations[ti.tablet_id()] = { ti.host(), ti.port() };
+ }
+ }
+
+ THashMap<ui64, std::pair<TString, ui16>> ShardLocations;
+};
+
+TGetShardLocationsResult::TGetShardLocationsResult(TResultImpl* impl, TStatus&& status)
+ : TStatus(std::move(status))
+ , ResultImpl(impl)
+{}
+
+TGetShardLocationsResult::TGetShardLocationsResult(TGetShardLocationsResult&& other)
+ : TStatus(std::move(other))
+ , ResultImpl(std::move(other.ResultImpl))
+{}
+
+TGetShardLocationsResult& TGetShardLocationsResult::operator = (TGetShardLocationsResult&& other) {
+ (TStatus&)*this = std::move(other);
+ ResultImpl = std::move(other.ResultImpl);
+ return *this;
+}
+
+TGetShardLocationsResult::~TGetShardLocationsResult() {
+}
+
+
+std::pair<TString, ui16> TGetShardLocationsResult::GetLocation(ui64 tabletId) const {
+ const auto* info = ResultImpl->ShardLocations.FindPtr(tabletId);
+ return info ? *info : std::pair<TString, ui16>();
+}
+
+class TDescribeTableResult::TResultImpl {
+public:
+ TResultImpl(Ydb::ClickhouseInternal::DescribeTableResult&& result)
+ : Result(std::move(result))
+ {}
+
+ Ydb::ClickhouseInternal::DescribeTableResult Result;
+};
+
+
+TDescribeTableResult::TDescribeTableResult(TResultImpl* impl, TStatus&& status)
+ : TStatus(std::move(status))
+ , ResultImpl(impl)
+{}
+
+TDescribeTableResult::TDescribeTableResult(TDescribeTableResult&& other)
+ : TStatus(std::move(other))
+ , ResultImpl(std::move(other.ResultImpl))
+{}
+
+TDescribeTableResult& TDescribeTableResult::operator = (TDescribeTableResult&& other) {
+ (TStatus&)*this = std::move(other);
+ ResultImpl = std::move(other.ResultImpl);
+ return *this;
+}
+
+TDescribeTableResult::~TDescribeTableResult() {
+}
+
+const Ydb::ClickhouseInternal::DescribeTableResult& TDescribeTableResult::GetDescription() const {
+ return ResultImpl->Result;
+}
+
////////////////////////////////////////////////////////////////////////////////
-class TMetaClient::TImpl : public TClientImplCommon<TMetaClient::TImpl> {
+class TMetaClient::TImpl : public TClientImplCommon<TMetaClient::TImpl> {
friend class TSnapshotHandleLifecycle;
-public:
- TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
+public:
+ TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
: TClientImplCommon(std::move(connections), settings) {}
-
- TAsyncGetShardLocationsResult GetShardLocations(
- const TVector<ui64>& tabletIds,
- const TGetShardLocationsSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::ClickhouseInternal::GetShardLocationsRequest>(settings);
- for (ui64 id : tabletIds) {
- request.add_tablet_ids(id);
- }
-
- auto promise = NThreading::NewPromise<TGetShardLocationsResult>();
-
+
+ TAsyncGetShardLocationsResult GetShardLocations(
+ const TVector<ui64>& tabletIds,
+ const TGetShardLocationsSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::ClickhouseInternal::GetShardLocationsRequest>(settings);
+ for (ui64 id : tabletIds) {
+ request.add_tablet_ids(id);
+ }
+
+ auto promise = NThreading::NewPromise<TGetShardLocationsResult>();
+
auto extractor = [promise]
(google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::ClickhouseInternal::GetShardLocationsResult result;
- if (any) {
- any->UnpackTo(&result);
- }
-
- TGetShardLocationsResult val(new TGetShardLocationsResult::TResultImpl(std::move(result)),
+ Ydb::ClickhouseInternal::GetShardLocationsResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
+
+ TGetShardLocationsResult val(new TGetShardLocationsResult::TResultImpl(std::move(result)),
TStatus(std::move(status)));
-
- promise.SetValue(std::move(val));
- };
-
- Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::GetShardLocationsRequest, Ydb::ClickhouseInternal::GetShardLocationsResponse>(
- std::move(request),
- extractor,
- &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncGetShardLocations,
- DbDriverState_,
+
+ promise.SetValue(std::move(val));
+ };
+
+ Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::GetShardLocationsRequest, Ydb::ClickhouseInternal::GetShardLocationsResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncGetShardLocations,
+ DbDriverState_,
INITIAL_DEFERRED_CALL_DELAY,
TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
-
- TAsyncDescribeTableResult GetTableDescription(
- const TString& path,
- bool includePartitionsInfo,
- const TGetShardLocationsSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::ClickhouseInternal::DescribeTableRequest>(settings);
- request.set_path(path);
- request.set_include_partitions_info(includePartitionsInfo);
-
- auto promise = NThreading::NewPromise<TDescribeTableResult>();
-
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
+
+ TAsyncDescribeTableResult GetTableDescription(
+ const TString& path,
+ bool includePartitionsInfo,
+ const TGetShardLocationsSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::ClickhouseInternal::DescribeTableRequest>(settings);
+ request.set_path(path);
+ request.set_include_partitions_info(includePartitionsInfo);
+
+ auto promise = NThreading::NewPromise<TDescribeTableResult>();
+
auto extractor = [promise]
- (google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::ClickhouseInternal::DescribeTableResult result;
- if (any) {
- any->UnpackTo(&result);
- }
-
- TDescribeTableResult val(new TDescribeTableResult::TResultImpl(std::move(result)),
+ (google::protobuf::Any* any, TPlainStatus status) mutable {
+ Ydb::ClickhouseInternal::DescribeTableResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
+
+ TDescribeTableResult val(new TDescribeTableResult::TResultImpl(std::move(result)),
TStatus(std::move(status)));
-
- promise.SetValue(std::move(val));
- };
-
- Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::DescribeTableRequest, Ydb::ClickhouseInternal::DescribeTableResponse>(
- std::move(request),
- extractor,
- &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncDescribeTable,
- DbDriverState_,
- INITIAL_DEFERRED_CALL_DELAY,
+
+ promise.SetValue(std::move(val));
+ };
+
+ Connections_->RunDeferred<Ydb::ClickhouseInternal::V1::ClickhouseInternalService, Ydb::ClickhouseInternal::DescribeTableRequest, Ydb::ClickhouseInternal::DescribeTableResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::Stub::AsyncDescribeTable,
+ DbDriverState_,
+ INITIAL_DEFERRED_CALL_DELAY,
TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
template<class TProtoResult, class TResultWrapper>
auto MakeResultExtractor(NThreading::TPromise<TResultWrapper> promise) {
@@ -544,10 +544,10 @@ private:
TGRpcConnectionsImpl* Connections() const {
return Connections_.get();
}
-};
-
+};
+
////////////////////////////////////////////////////////////////////////////////
-
+
class TSnapshotHandleLifecycle : public TThrRefBase {
friend class TSnapshotHandle::TImpl;
@@ -860,25 +860,25 @@ bool TSnapshotHandle::IsAlive() const {
////////////////////////////////////////////////////////////////////////////////
-TMetaClient::TMetaClient(const TDriver& driver, const TCommonClientSettings& settings)
- : Impl_(new TImpl(CreateInternalInterface(driver), settings))
-{}
-
-TAsyncGetShardLocationsResult TMetaClient::GetShardLocations(
- const TVector<ui64> tabletIds,
- const TGetShardLocationsSettings& settings)
-{
- return Impl_->GetShardLocations(tabletIds, settings);
-}
-
-TAsyncDescribeTableResult TMetaClient::GetTableDescription(
- const TString& path,
- bool includePartitionsInfo,
- const TGetShardLocationsSettings& settings)
-{
- return Impl_->GetTableDescription(path, includePartitionsInfo, settings);
-}
-
+TMetaClient::TMetaClient(const TDriver& driver, const TCommonClientSettings& settings)
+ : Impl_(new TImpl(CreateInternalInterface(driver), settings))
+{}
+
+TAsyncGetShardLocationsResult TMetaClient::GetShardLocations(
+ const TVector<ui64> tabletIds,
+ const TGetShardLocationsSettings& settings)
+{
+ return Impl_->GetShardLocations(tabletIds, settings);
+}
+
+TAsyncDescribeTableResult TMetaClient::GetTableDescription(
+ const TString& path,
+ bool includePartitionsInfo,
+ const TGetShardLocationsSettings& settings)
+{
+ return Impl_->GetTableDescription(path, includePartitionsInfo, settings);
+}
+
TAsyncCreateSnapshotResult TMetaClient::CreateSnapshot(
const TVector<TString>& tables,
const TSnapshotSettings& settings)
@@ -909,5 +909,5 @@ TAsyncCreateSnapshotHandleResult TMetaClient::CreateSnapshotHandle(
return MakeIntrusive<TSnapshotHandleLifecycle>(Impl_, tables, settings)->Start();
}
-} // namespace NClickhouseInternal
+} // namespace NClickhouseInternal
} // namespace NYdb
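Note on TScanIterator::WaitResult() above: statuses other than BAD_REQUEST, SCHEME_ERROR, UNAUTHORIZED and NOT_FOUND are retried with an exponential backoff of 50 ms doubled per attempt and capped at 3 s, for up to MaxRetries = 20 attempts. A standalone sketch of just that delay schedule follows; the free-function form is illustrative.

    #include <util/datetime/base.h>
    #include <util/generic/utility.h>

    // Backoff schedule used by WaitResult() above: 50 ms * 2^retried, capped at 3 s.
    // retried = 0,1,2,... yields 50, 100, 200, 400, 800, 1600 ms, then 3 s from the seventh attempt on.
    TDuration ScanRetryDelay(int retried) {
        const TDuration delay = TDuration::MilliSeconds(50 * (1 << retried));
        return Min(TDuration::Seconds(3), delay);
    }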
diff --git a/ydb/public/lib/experimental/ydb_clickhouse_internal.h b/ydb/public/lib/experimental/ydb_clickhouse_internal.h
index 35bee941f2d..d2a846ca8f8 100644
--- a/ydb/public/lib/experimental/ydb_clickhouse_internal.h
+++ b/ydb/public/lib/experimental/ydb_clickhouse_internal.h
@@ -1,143 +1,143 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.pb.h>
-
-// TODO: Bad dependency???
+
+// TODO: Bad dependency???
#include <ydb/core/scheme/scheme_type_id.h>
-
-#include <util/generic/deque.h>
-
-namespace NYdb {
-namespace NClickhouseInternal {
-
-class TScanResult : public TStatus {
- friend class TScanClient;
- class TResultImpl;
-
-private:
- TScanResult(TResultImpl* impl, TStatus&& status);
-
-public:
- ~TScanResult();
- TScanResult(TScanResult&& other);
- TScanResult& operator = (TScanResult&& other);
-
- size_t GetBuffersCount() const;
- TString GetBuffer(size_t idx) const;
- bool IsEos() const;
- std::pair<TString, bool>GetLastKey() const;
-
-private:
- std::unique_ptr<TResultImpl> ResultImpl;
-};
-
-using TAsyncScanResult = NThreading::TFuture<TScanResult>;
-
-
+
+#include <util/generic/deque.h>
+
+namespace NYdb {
+namespace NClickhouseInternal {
+
+class TScanResult : public TStatus {
+ friend class TScanClient;
+ class TResultImpl;
+
+private:
+ TScanResult(TResultImpl* impl, TStatus&& status);
+
+public:
+ ~TScanResult();
+ TScanResult(TScanResult&& other);
+ TScanResult& operator = (TScanResult&& other);
+
+ size_t GetBuffersCount() const;
+ TString GetBuffer(size_t idx) const;
+ bool IsEos() const;
+ std::pair<TString, bool>GetLastKey() const;
+
+private:
+ std::unique_ptr<TResultImpl> ResultImpl;
+};
+
+using TAsyncScanResult = NThreading::TFuture<TScanResult>;
+
+
struct TScanSettings : public TOperationRequestSettings<TScanSettings> {
FLUENT_SETTING(TString, SnapshotId);
- FLUENT_SETTING(TString, Endpoint);
+ FLUENT_SETTING(TString, Endpoint);
+};
+
+class TScanClient {
+ class TImpl;
+
+public:
+ TScanClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
+
+ TAsyncScanResult Scan(
+ const TString& path, const TVector<TString>& columns,
+ ui64 maxRows, ui64 maxBytes,
+ const TString& fromKey, bool fromKeyInclusive,
+ const TScanSettings& settings = TScanSettings());
+
+private:
+ std::shared_ptr<TImpl> Impl_;
};
-
-class TScanClient {
- class TImpl;
-
-public:
- TScanClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
-
- TAsyncScanResult Scan(
- const TString& path, const TVector<TString>& columns,
- ui64 maxRows, ui64 maxBytes,
- const TString& fromKey, bool fromKeyInclusive,
- const TScanSettings& settings = TScanSettings());
-
-private:
- std::shared_ptr<TImpl> Impl_;
-};
-
-// Makes table range scan by doing Scan requests one by one and keeping track of
-// the last returned key
-class TScanIterator {
-public:
+
+// Makes table range scan by doing Scan requests one by one and keeping track of
+// the last returned key
+class TScanIterator {
+public:
TScanIterator(const TDriver& driver, const TString &database, const TString &endpoint, const TString& token, bool ssl, const TString& path, const TVector<TString>& columns,
const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
ui64 maxRowsInRequest, ui64 maxBytesInRequest,
const TString& keyFrom = TString(), const TString& keyTo = TString(),
const TScanSettings& settings = TScanSettings());
- TScanIterator(const TDriver& driver, const TString &database, const TString& token, const TString& path, const TVector<TString>& columns,
- const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
- ui64 maxRowsInRequest, ui64 maxBytesInRequest,
+ TScanIterator(const TDriver& driver, const TString &database, const TString& token, const TString& path, const TVector<TString>& columns,
+ const TVector<NKikimr::NScheme::TTypeId>& keyColumnTypes,
+ ui64 maxRowsInRequest, ui64 maxBytesInRequest,
const TString& keyFrom = TString(), const TString& keyTo = TString(),
const TScanSettings& settings = TScanSettings());
- TString GetBlocks();
-
-private:
- void MakeRequest();
- void WaitResult();
-
-private:
- const TString Path;
- const TVector<TString> Columns;
- const TVector<NKikimr::NScheme::TTypeId> KeyColumnTypes;
- const ui64 MaxRows;
- const ui64 MaxBytes;
+ TString GetBlocks();
+
+private:
+ void MakeRequest();
+ void WaitResult();
+
+private:
+ const TString Path;
+ const TVector<TString> Columns;
+ const TVector<NKikimr::NScheme::TTypeId> KeyColumnTypes;
+ const ui64 MaxRows;
+ const ui64 MaxBytes;
const TScanSettings Settings;
- TScanClient Connection;
-
- TDeque<TString> Blocks;
- TAsyncScanResult NextResult;
- TString LastReadKey;
- bool LastReadKeyInclusive;
- TString EndKey;
- bool RequestsDone;
- int MaxRetries;
- int Retried;
-};
-
-
-////////////////////////////////////////////////////////////////////////////////
-
-class TGetShardLocationsResult : public TStatus {
- friend class TMetaClient;
- class TResultImpl;
-
-private:
- TGetShardLocationsResult(TResultImpl* impl, TStatus&& status);
-
-public:
- ~TGetShardLocationsResult();
- TGetShardLocationsResult(TGetShardLocationsResult&& other);
- TGetShardLocationsResult& operator = (TGetShardLocationsResult&& other);
-
- std::pair<TString, ui16> GetLocation(ui64 tabletId) const;
-
-private:
- std::unique_ptr<TResultImpl> ResultImpl;
-};
-
-class TDescribeTableResult : public TStatus {
- friend class TMetaClient;
- class TResultImpl;
-
-private:
- TDescribeTableResult(TResultImpl* impl, TStatus&& status);
-
-public:
- ~TDescribeTableResult();
- TDescribeTableResult(TDescribeTableResult&& other);
- TDescribeTableResult& operator = (TDescribeTableResult&& other);
-
- const Ydb::ClickhouseInternal::DescribeTableResult& GetDescription() const;
-
-private:
- std::unique_ptr<TResultImpl> ResultImpl;
-};
-
+ TScanClient Connection;
+
+ TDeque<TString> Blocks;
+ TAsyncScanResult NextResult;
+ TString LastReadKey;
+ bool LastReadKeyInclusive;
+ TString EndKey;
+ bool RequestsDone;
+ int MaxRetries;
+ int Retried;
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+class TGetShardLocationsResult : public TStatus {
+ friend class TMetaClient;
+ class TResultImpl;
+
+private:
+ TGetShardLocationsResult(TResultImpl* impl, TStatus&& status);
+
+public:
+ ~TGetShardLocationsResult();
+ TGetShardLocationsResult(TGetShardLocationsResult&& other);
+ TGetShardLocationsResult& operator = (TGetShardLocationsResult&& other);
+
+ std::pair<TString, ui16> GetLocation(ui64 tabletId) const;
+
+private:
+ std::unique_ptr<TResultImpl> ResultImpl;
+};
+
+class TDescribeTableResult : public TStatus {
+ friend class TMetaClient;
+ class TResultImpl;
+
+private:
+ TDescribeTableResult(TResultImpl* impl, TStatus&& status);
+
+public:
+ ~TDescribeTableResult();
+ TDescribeTableResult(TDescribeTableResult&& other);
+ TDescribeTableResult& operator = (TDescribeTableResult&& other);
+
+ const Ydb::ClickhouseInternal::DescribeTableResult& GetDescription() const;
+
+private:
+ std::unique_ptr<TResultImpl> ResultImpl;
+};
+
template<class TProtoResult>
class TProtoResultWrapper : public TStatus {
friend class TMetaClient;
-
+
private:
TProtoResultWrapper(
TStatus&& status,
@@ -218,37 +218,37 @@ private:
TSnapshotHandle Handle;
};
-using TAsyncGetShardLocationsResult = NThreading::TFuture<TGetShardLocationsResult>;
-using TAsyncDescribeTableResult = NThreading::TFuture<TDescribeTableResult>;
+using TAsyncGetShardLocationsResult = NThreading::TFuture<TGetShardLocationsResult>;
+using TAsyncDescribeTableResult = NThreading::TFuture<TDescribeTableResult>;
using TAsyncCreateSnapshotResult = NThreading::TFuture<TCreateSnapshotResult>;
using TAsyncRefreshSnapshotResult = NThreading::TFuture<TRefreshSnapshotResult>;
using TAsyncDiscardSnapshotResult = NThreading::TFuture<TDiscardSnapshotResult>;
using TAsyncCreateSnapshotHandleResult = NThreading::TFuture<TCreateSnapshotHandleResult>;
-
-
-struct TGetShardLocationsSettings : public TOperationRequestSettings<TGetShardLocationsSettings> {};
-
+
+
+struct TGetShardLocationsSettings : public TOperationRequestSettings<TGetShardLocationsSettings> {};
+
struct TSnapshotSettings : public TOperationRequestSettings<TSnapshotSettings> {
FLUENT_SETTING_FLAG(IgnoreSystemViews);
};
-class TMetaClient {
+class TMetaClient {
friend class TSnapshotHandleLifecycle;
- class TImpl;
-
-public:
- TMetaClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
-
- TAsyncGetShardLocationsResult GetShardLocations(
- const TVector<ui64> tabletIds,
- const TGetShardLocationsSettings& settings = TGetShardLocationsSettings());
-
- TAsyncDescribeTableResult GetTableDescription(
- const TString& path,
- bool includePartitionsInfo,
- const TGetShardLocationsSettings& settings = TGetShardLocationsSettings());
-
+ class TImpl;
+
+public:
+ TMetaClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
+
+ TAsyncGetShardLocationsResult GetShardLocations(
+ const TVector<ui64> tabletIds,
+ const TGetShardLocationsSettings& settings = TGetShardLocationsSettings());
+
+ TAsyncDescribeTableResult GetTableDescription(
+ const TString& path,
+ bool includePartitionsInfo,
+ const TGetShardLocationsSettings& settings = TGetShardLocationsSettings());
+
TAsyncCreateSnapshotResult CreateSnapshot(
const TVector<TString>& tables,
const TSnapshotSettings& settings = TSnapshotSettings());
@@ -267,9 +267,9 @@ public:
const TVector<TString>& tables,
const TSnapshotSettings& settings = TSnapshotSettings());
-private:
- std::shared_ptr<TImpl> Impl_;
-};
-
-} // namespace NClickhouseInternal
+private:
+ std::shared_ptr<TImpl> Impl_;
+};
+
+} // namespace NClickhouseInternal
} // namespace NYdb
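Note on the header above: TScanIterator drives a whole-range scan by issuing Scan requests one at a time and tracking the last returned key; GetBlocks() hands out serialized blocks and returns an empty TString once the range is exhausted. A hedged read-loop sketch follows; the driver, token, path, columns and ProcessBlock() are placeholders.

    // Sketch: drain a table range with TScanIterator as declared above.
    using namespace NYdb::NClickhouseInternal;

    TVector<NKikimr::NScheme::TTypeId> keyTypes = { /* key column type ids for the table */ };
    TScanIterator it(driver, "/Root/db", token, "/Root/db/Table",
                     {"key", "value"}, keyTypes,
                     /* maxRowsInRequest */ 1000, /* maxBytesInRequest */ 1 << 20);

    for (TString block = it.GetBlocks(); !block.empty(); block = it.GetBlocks()) {
        ProcessBlock(block); // placeholder for consuming one serialized result block
    }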
diff --git a/ydb/public/lib/experimental/ydb_experimental.cpp b/ydb/public/lib/experimental/ydb_experimental.cpp
index bf48f8657fd..8765c0c750a 100644
--- a/ydb/public/lib/experimental/ydb_experimental.cpp
+++ b/ydb/public/lib/experimental/ydb_experimental.cpp
@@ -1,5 +1,5 @@
-#include "ydb_experimental.h"
-
+#include "ydb_experimental.h"
+
#define INCLUDE_YDB_INTERNAL_H
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h>
#undef INCLUDE_YDB_INTERNAL_H
@@ -8,13 +8,13 @@
#include <library/cpp/grpc/client/grpc_client_low.h>
#include <ydb/public/sdk/cpp/client/ydb_proto/accessor.h>
#include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h>
-
-namespace NYdb {
-namespace NExperimental {
-
+
+namespace NYdb {
+namespace NExperimental {
+
using namespace NThreading;
-
+
TStreamPartStatus::TStreamPartStatus(TStatus&& status)
: TStatus(std::move(status))
{}
@@ -42,7 +42,7 @@ public:
StreamProcessor_->Cancel();
}
- bool IsFinished() const {
+ bool IsFinished() const {
return Finished_;
}
diff --git a/ydb/public/lib/experimental/ydb_experimental.h b/ydb/public/lib/experimental/ydb_experimental.h
index 0b5d22c2d05..3cdd8f6dc8f 100644
--- a/ydb/public/lib/experimental/ydb_experimental.h
+++ b/ydb/public/lib/experimental/ydb_experimental.h
@@ -1,12 +1,12 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
-
-namespace NYdb {
-namespace NExperimental {
-
-////////////////////////////////////////////////////////////////////////////////
-
+
+namespace NYdb {
+namespace NExperimental {
+
+////////////////////////////////////////////////////////////////////////////////
+
class TStreamPartIterator;
using TAsyncStreamPartIterator = NThreading::TFuture<TStreamPartIterator>;
diff --git a/ydb/public/lib/experimental/ydb_logstore.cpp b/ydb/public/lib/experimental/ydb_logstore.cpp
index 60509b50aed..9bd2da5107a 100644
--- a/ydb/public/lib/experimental/ydb_logstore.cpp
+++ b/ydb/public/lib/experimental/ydb_logstore.cpp
@@ -1,348 +1,348 @@
-#include "ydb_logstore.h"
-
-#define INCLUDE_YDB_INTERNAL_H
+#include "ydb_logstore.h"
+
+#define INCLUDE_YDB_INTERNAL_H
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h>
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/table_helpers/helpers.h>
-#undef INCLUDE_YDB_INTERNAL_H
-
+#undef INCLUDE_YDB_INTERNAL_H
+
#include <ydb/public/api/grpc/draft/ydb_logstore_v1.grpc.pb.h>
#include <ydb/public/sdk/cpp/client/ydb_proto/accessor.h>
#include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
-namespace NYdb {
-namespace NLogStore {
-
-TMaybe<TTtlSettings> TtlSettingsFromProto(const Ydb::Table::TtlSettings& proto) {
- switch (proto.mode_case()) {
+
+namespace NYdb {
+namespace NLogStore {
+
+TMaybe<TTtlSettings> TtlSettingsFromProto(const Ydb::Table::TtlSettings& proto) {
+ switch (proto.mode_case()) {
case Ydb::Table::TtlSettings::kDateTypeColumn:
return TTtlSettings(
proto.date_type_column(),
proto.run_interval_seconds()
);
-
+
case Ydb::Table::TtlSettings::kValueSinceUnixEpoch:
return TTtlSettings(
proto.value_since_unix_epoch(),
proto.run_interval_seconds()
);
-
- default:
- break;
- }
- return {};
-}
-
-TType MakeColumnType(EPrimitiveType primitiveType) {
- return TTypeBuilder().BeginOptional().Primitive(primitiveType).EndOptional().Build();
-}
-
-TSchema::TSchema(const Ydb::LogStore::Schema& schema)
- : Columns()
- , PrimaryKeyColumns(schema.primary_key().begin(), schema.primary_key().end())
-{
- Columns.reserve(schema.columns().size());
- for (const auto& col : schema.columns()) {
- TColumn c(col.name(), TType(col.type()));
- Columns.emplace_back(std::move(c));
- }
-}
-
-void TSchema::SerializeTo(Ydb::LogStore::Schema& schema) const {
- for (const auto& c : Columns) {
- auto& col = *schema.add_columns();
- col.set_name(c.Name);
- col.mutable_type()->CopyFrom(TProtoAccessor::GetProto(c.Type));
- }
- for (const auto& pkc : PrimaryKeyColumns) {
- schema.add_primary_key(pkc);
- }
-}
-
-TLogStoreDescription::TLogStoreDescription(ui32 columnShardCount, const THashMap<TString, TSchema>& schemaPresets)
- : ColumnShardCount(columnShardCount)
- , SchemaPresets(schemaPresets)
-{}
-
-TLogStoreDescription::TLogStoreDescription(Ydb::LogStore::DescribeLogStoreResult&& desc, const TDescribeLogStoreSettings& describeSettings)
- : ColumnShardCount(desc.column_shard_count())
- , SchemaPresets()
- , Owner(desc.self().owner())
-{
- Y_UNUSED(describeSettings);
- for (const auto& sp : desc.schema_presets()) {
- SchemaPresets[sp.name()] = TSchema(sp.schema());
- }
- PermissionToSchemeEntry(desc.self().permissions(), &Permissions);
- PermissionToSchemeEntry(desc.self().effective_permissions(), &EffectivePermissions);
-}
-
-void TLogStoreDescription::SerializeTo(Ydb::LogStore::CreateLogStoreRequest& request) const {
- for (const auto& sp : SchemaPresets) {
- auto& pb = *request.add_schema_presets();
- pb.set_name(sp.first);
- sp.second.SerializeTo(*pb.mutable_schema());
- }
- request.set_column_shard_count(ColumnShardCount);
-}
-
-TDescribeLogStoreResult::TDescribeLogStoreResult(TStatus&& status, Ydb::LogStore::DescribeLogStoreResult&& desc,
- const TDescribeLogStoreSettings& describeSettings)
- : TStatus(std::move(status))
- , LogStoreDescription_(std::move(desc), describeSettings)
-{}
-
-
-TLogTableDescription::TLogTableDescription(const TString& schemaPresetName, const TVector<TString>& shardingColumns,
- ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings)
- : SchemaPresetName(schemaPresetName)
- , ShardingColumns(shardingColumns)
- , ColumnShardCount(columnShardCount)
- , TtlSettings(ttlSettings)
-{}
-
-TLogTableDescription::TLogTableDescription(const TSchema& schema, const TVector<TString>& shardingColumns,
- ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings)
- : Schema(schema)
- , ShardingColumns(shardingColumns)
- , ColumnShardCount(columnShardCount)
- , TtlSettings(ttlSettings)
-{}
-
-TLogTableDescription::TLogTableDescription(Ydb::LogStore::DescribeLogTableResult&& desc, const TDescribeLogTableSettings& describeSettings)
- : Schema(desc.schema())
- , ShardingColumns(desc.sharding_columns().begin(), desc.sharding_columns().end())
- , ColumnShardCount(desc.column_shard_count())
- , TtlSettings(TtlSettingsFromProto(desc.ttl_settings()))
- , Owner(desc.self().owner())
-{
- Y_UNUSED(describeSettings);
- PermissionToSchemeEntry(desc.self().permissions(), &Permissions);
- PermissionToSchemeEntry(desc.self().effective_permissions(), &EffectivePermissions);
-}
-
-void TLogTableDescription::SerializeTo(Ydb::LogStore::CreateLogTableRequest& request) const {
- if (!Schema.GetColumns().empty()) {
- Schema.SerializeTo(*request.mutable_schema());
- }
- request.set_schema_preset_name(SchemaPresetName);
- request.set_column_shard_count(ColumnShardCount);
- for (const auto& sc : ShardingColumns) {
- request.add_sharding_columns(sc);
- }
- if (TtlSettings) {
- TtlSettings->SerializeTo(*request.mutable_ttl_settings());
- }
-}
-
-TDescribeLogTableResult::TDescribeLogTableResult(TStatus&& status, Ydb::LogStore::DescribeLogTableResult&& desc,
- const TDescribeLogTableSettings& describeSettings)
- : TStatus(std::move(status))
- , LogTableDescription_(std::move(desc), describeSettings)
-{}
-
-class TLogStoreClient::TImpl: public TClientImplCommon<TLogStoreClient::TImpl> {
-public:
- TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
- : TClientImplCommon(std::move(connections), settings)
- {}
-
- TAsyncStatus CreateLogStore(const TString& path, TLogStoreDescription&& storeDesc,
- const TCreateLogStoreSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::LogStore::CreateLogStoreRequest>(settings);
- storeDesc.SerializeTo(request);
- request.set_path(path);
- return RunSimple<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::CreateLogStoreRequest,
- Ydb::LogStore::CreateLogStoreResponse>(
- std::move(request),
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncCreateLogStore,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
- }
-
- TAsyncDescribeLogStoreResult DescribeLogStore(const TString& path, const TDescribeLogStoreSettings& settings) {
- auto request = MakeOperationRequest<Ydb::LogStore::DescribeLogStoreRequest>(settings);
- request.set_path(path);
-
- auto promise = NThreading::NewPromise<TDescribeLogStoreResult>();
-
- auto extractor = [promise, settings]
- (google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::LogStore::DescribeLogStoreResult result;
- if (any) {
- any->UnpackTo(&result);
- }
- TDescribeLogStoreResult describeLogStoreResult(TStatus(std::move(status)),
- std::move(result), settings);
- promise.SetValue(std::move(describeLogStoreResult));
- };
-
- Connections_->RunDeferred<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::DescribeLogStoreRequest,
- Ydb::LogStore::DescribeLogStoreResponse>(
- std::move(request),
- extractor,
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDescribeLogStore,
- DbDriverState_,
- INITIAL_DEFERRED_CALL_DELAY,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
-
- TAsyncStatus DropLogStore(const TString& path, const TDropLogStoreSettings& settings) {
- auto request = MakeOperationRequest<Ydb::LogStore::DropLogStoreRequest>(settings);
- request.set_path(path);
- return RunSimple<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::DropLogStoreRequest,
- Ydb::LogStore::DropLogStoreResponse>(
- std::move(request),
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDropLogStore,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
- }
-
- TAsyncStatus CreateLogTable(const TString& path, TLogTableDescription&& tableDesc,
- const TCreateLogTableSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::LogStore::CreateLogTableRequest>(settings);
- tableDesc.SerializeTo(request);
- request.set_path(path);
- return RunSimple<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::CreateLogTableRequest,
- Ydb::LogStore::CreateLogTableResponse>(
- std::move(request),
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncCreateLogTable,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
- }
-
- TAsyncDescribeLogTableResult DescribeLogTable(const TString& path, const TDescribeLogTableSettings& settings) {
- auto request = MakeOperationRequest<Ydb::LogStore::DescribeLogTableRequest>(settings);
- request.set_path(path);
-
- auto promise = NThreading::NewPromise<TDescribeLogTableResult>();
-
- auto extractor = [promise, settings]
- (google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::LogStore::DescribeLogTableResult result;
- if (any) {
- any->UnpackTo(&result);
- }
- TDescribeLogTableResult describeLogTableResult(TStatus(std::move(status)),
- std::move(result), settings);
- promise.SetValue(std::move(describeLogTableResult));
- };
-
- Connections_->RunDeferred<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::DescribeLogTableRequest,
- Ydb::LogStore::DescribeLogTableResponse>(
- std::move(request),
- extractor,
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDescribeLogTable,
- DbDriverState_,
- INITIAL_DEFERRED_CALL_DELAY,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
-
- TAsyncStatus DropLogTable(const TString& path, const TDropLogTableSettings& settings) {
- auto request = MakeOperationRequest<Ydb::LogStore::DropLogTableRequest>(settings);
- request.set_path(path);
- return RunSimple<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::DropLogTableRequest,
- Ydb::LogStore::DropLogTableResponse>(
- std::move(request),
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDropLogTable,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
- }
-
- TAsyncStatus AlterLogTable(const TString& path, const TAlterLogTableSettings& settings) {
- auto request = MakeOperationRequest<Ydb::LogStore::AlterLogTableRequest>(settings);
- request.set_path(path);
- if (const auto& ttl = settings.GetAlterTtlSettings()) {
- switch (ttl->GetAction()) {
- case TAlterTtlSettings::EAction::Set:
- ttl->GetTtlSettings().SerializeTo(*request.mutable_set_ttl_settings());
- break;
- case TAlterTtlSettings::EAction::Drop:
- request.mutable_drop_ttl_settings();
- break;
- }
- }
- return RunSimple<
- Ydb::LogStore::V1::LogStoreService,
- Ydb::LogStore::AlterLogTableRequest,
- Ydb::LogStore::AlterLogTableResponse>(
- std::move(request),
- &Ydb::LogStore::V1::LogStoreService::Stub::AsyncAlterLogTable,
- TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
- }
-};
-
-TLogStoreClient::TLogStoreClient(const TDriver& driver, const TCommonClientSettings& settings)
- : Impl_(new TImpl(CreateInternalInterface(driver), settings))
-{}
-
-TAsyncStatus TLogStoreClient::CreateLogStore(const TString& path, TLogStoreDescription&& storeDesc,
- const TCreateLogStoreSettings& settings)
-{
- return Impl_->CreateLogStore(path, std::move(storeDesc), settings);
-}
-
-TAsyncDescribeLogStoreResult TLogStoreClient::DescribeLogStore(const TString& path, const TDescribeLogStoreSettings& settings)
-{
- return Impl_->DescribeLogStore(path, settings);
-}
-
-TAsyncStatus TLogStoreClient::DropLogStore(const TString& path, const TDropLogStoreSettings& settings)
-{
- return Impl_->DropLogStore(path, settings);
-}
-
-TAsyncStatus TLogStoreClient::CreateLogTable(const TString& path, TLogTableDescription&& storeDesc,
- const TCreateLogTableSettings& settings)
-{
- return Impl_->CreateLogTable(path, std::move(storeDesc), settings);
-}
-
-TAsyncDescribeLogTableResult TLogStoreClient::DescribeLogTable(const TString& path, const TDescribeLogTableSettings& settings)
-{
- return Impl_->DescribeLogTable(path, settings);
-}
-
-TAsyncStatus TLogStoreClient::DropLogTable(const TString& path, const TDropLogTableSettings& settings)
-{
- return Impl_->DropLogTable(path, settings);
-}
-
-TAsyncStatus TLogStoreClient::AlterLogTable(const TString& path, const TAlterLogTableSettings& settings)
-{
- return Impl_->AlterLogTable(path, settings);
-}
-
-TAlterLogTableSettings& TAlterLogTableSettings::AlterTtlSettings(const TMaybe<TAlterTtlSettings>& value) {
- AlterTtlSettings_ = value;
- return *this;
-}
-
-const TMaybe<TAlterTtlSettings>& TAlterLogTableSettings::GetAlterTtlSettings() const {
- return AlterTtlSettings_;
-}
-
-}}
+
+ default:
+ break;
+ }
+ return {};
+}
+
+TType MakeColumnType(EPrimitiveType primitiveType) {
+ return TTypeBuilder().BeginOptional().Primitive(primitiveType).EndOptional().Build();
+}
+
+TSchema::TSchema(const Ydb::LogStore::Schema& schema)
+ : Columns()
+ , PrimaryKeyColumns(schema.primary_key().begin(), schema.primary_key().end())
+{
+ Columns.reserve(schema.columns().size());
+ for (const auto& col : schema.columns()) {
+ TColumn c(col.name(), TType(col.type()));
+ Columns.emplace_back(std::move(c));
+ }
+}
+
+void TSchema::SerializeTo(Ydb::LogStore::Schema& schema) const {
+ for (const auto& c : Columns) {
+ auto& col = *schema.add_columns();
+ col.set_name(c.Name);
+ col.mutable_type()->CopyFrom(TProtoAccessor::GetProto(c.Type));
+ }
+ for (const auto& pkc : PrimaryKeyColumns) {
+ schema.add_primary_key(pkc);
+ }
+}
+
+TLogStoreDescription::TLogStoreDescription(ui32 columnShardCount, const THashMap<TString, TSchema>& schemaPresets)
+ : ColumnShardCount(columnShardCount)
+ , SchemaPresets(schemaPresets)
+{}
+
+TLogStoreDescription::TLogStoreDescription(Ydb::LogStore::DescribeLogStoreResult&& desc, const TDescribeLogStoreSettings& describeSettings)
+ : ColumnShardCount(desc.column_shard_count())
+ , SchemaPresets()
+ , Owner(desc.self().owner())
+{
+ Y_UNUSED(describeSettings);
+ for (const auto& sp : desc.schema_presets()) {
+ SchemaPresets[sp.name()] = TSchema(sp.schema());
+ }
+ PermissionToSchemeEntry(desc.self().permissions(), &Permissions);
+ PermissionToSchemeEntry(desc.self().effective_permissions(), &EffectivePermissions);
+}
+
+void TLogStoreDescription::SerializeTo(Ydb::LogStore::CreateLogStoreRequest& request) const {
+ for (const auto& sp : SchemaPresets) {
+ auto& pb = *request.add_schema_presets();
+ pb.set_name(sp.first);
+ sp.second.SerializeTo(*pb.mutable_schema());
+ }
+ request.set_column_shard_count(ColumnShardCount);
+}
+
+TDescribeLogStoreResult::TDescribeLogStoreResult(TStatus&& status, Ydb::LogStore::DescribeLogStoreResult&& desc,
+ const TDescribeLogStoreSettings& describeSettings)
+ : TStatus(std::move(status))
+ , LogStoreDescription_(std::move(desc), describeSettings)
+{}
+
+
+TLogTableDescription::TLogTableDescription(const TString& schemaPresetName, const TVector<TString>& shardingColumns,
+ ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings)
+ : SchemaPresetName(schemaPresetName)
+ , ShardingColumns(shardingColumns)
+ , ColumnShardCount(columnShardCount)
+ , TtlSettings(ttlSettings)
+{}
+
+TLogTableDescription::TLogTableDescription(const TSchema& schema, const TVector<TString>& shardingColumns,
+ ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings)
+ : Schema(schema)
+ , ShardingColumns(shardingColumns)
+ , ColumnShardCount(columnShardCount)
+ , TtlSettings(ttlSettings)
+{}
+
+TLogTableDescription::TLogTableDescription(Ydb::LogStore::DescribeLogTableResult&& desc, const TDescribeLogTableSettings& describeSettings)
+ : Schema(desc.schema())
+ , ShardingColumns(desc.sharding_columns().begin(), desc.sharding_columns().end())
+ , ColumnShardCount(desc.column_shard_count())
+ , TtlSettings(TtlSettingsFromProto(desc.ttl_settings()))
+ , Owner(desc.self().owner())
+{
+ Y_UNUSED(describeSettings);
+ PermissionToSchemeEntry(desc.self().permissions(), &Permissions);
+ PermissionToSchemeEntry(desc.self().effective_permissions(), &EffectivePermissions);
+}
+
+void TLogTableDescription::SerializeTo(Ydb::LogStore::CreateLogTableRequest& request) const {
+ if (!Schema.GetColumns().empty()) {
+ Schema.SerializeTo(*request.mutable_schema());
+ }
+ request.set_schema_preset_name(SchemaPresetName);
+ request.set_column_shard_count(ColumnShardCount);
+ for (const auto& sc : ShardingColumns) {
+ request.add_sharding_columns(sc);
+ }
+ if (TtlSettings) {
+ TtlSettings->SerializeTo(*request.mutable_ttl_settings());
+ }
+}
+
+TDescribeLogTableResult::TDescribeLogTableResult(TStatus&& status, Ydb::LogStore::DescribeLogTableResult&& desc,
+ const TDescribeLogTableSettings& describeSettings)
+ : TStatus(std::move(status))
+ , LogTableDescription_(std::move(desc), describeSettings)
+{}
+
+class TLogStoreClient::TImpl: public TClientImplCommon<TLogStoreClient::TImpl> {
+public:
+ TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
+ : TClientImplCommon(std::move(connections), settings)
+ {}
+
+ TAsyncStatus CreateLogStore(const TString& path, TLogStoreDescription&& storeDesc,
+ const TCreateLogStoreSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::LogStore::CreateLogStoreRequest>(settings);
+ storeDesc.SerializeTo(request);
+ request.set_path(path);
+ return RunSimple<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::CreateLogStoreRequest,
+ Ydb::LogStore::CreateLogStoreResponse>(
+ std::move(request),
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncCreateLogStore,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+ }
+
+ TAsyncDescribeLogStoreResult DescribeLogStore(const TString& path, const TDescribeLogStoreSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::LogStore::DescribeLogStoreRequest>(settings);
+ request.set_path(path);
+
+ auto promise = NThreading::NewPromise<TDescribeLogStoreResult>();
+
+ auto extractor = [promise, settings]
+ (google::protobuf::Any* any, TPlainStatus status) mutable {
+ Ydb::LogStore::DescribeLogStoreResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
+ TDescribeLogStoreResult describeLogStoreResult(TStatus(std::move(status)),
+ std::move(result), settings);
+ promise.SetValue(std::move(describeLogStoreResult));
+ };
+
+ Connections_->RunDeferred<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::DescribeLogStoreRequest,
+ Ydb::LogStore::DescribeLogStoreResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDescribeLogStore,
+ DbDriverState_,
+ INITIAL_DEFERRED_CALL_DELAY,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
+
+ TAsyncStatus DropLogStore(const TString& path, const TDropLogStoreSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::LogStore::DropLogStoreRequest>(settings);
+ request.set_path(path);
+ return RunSimple<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::DropLogStoreRequest,
+ Ydb::LogStore::DropLogStoreResponse>(
+ std::move(request),
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDropLogStore,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+ }
+
+ TAsyncStatus CreateLogTable(const TString& path, TLogTableDescription&& tableDesc,
+ const TCreateLogTableSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::LogStore::CreateLogTableRequest>(settings);
+ tableDesc.SerializeTo(request);
+ request.set_path(path);
+ return RunSimple<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::CreateLogTableRequest,
+ Ydb::LogStore::CreateLogTableResponse>(
+ std::move(request),
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncCreateLogTable,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+ }
+
+ TAsyncDescribeLogTableResult DescribeLogTable(const TString& path, const TDescribeLogTableSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::LogStore::DescribeLogTableRequest>(settings);
+ request.set_path(path);
+
+ auto promise = NThreading::NewPromise<TDescribeLogTableResult>();
+
+ auto extractor = [promise, settings]
+ (google::protobuf::Any* any, TPlainStatus status) mutable {
+ Ydb::LogStore::DescribeLogTableResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
+ TDescribeLogTableResult describeLogTableResult(TStatus(std::move(status)),
+ std::move(result), settings);
+ promise.SetValue(std::move(describeLogTableResult));
+ };
+
+ Connections_->RunDeferred<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::DescribeLogTableRequest,
+ Ydb::LogStore::DescribeLogTableResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDescribeLogTable,
+ DbDriverState_,
+ INITIAL_DEFERRED_CALL_DELAY,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
+
+ TAsyncStatus DropLogTable(const TString& path, const TDropLogTableSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::LogStore::DropLogTableRequest>(settings);
+ request.set_path(path);
+ return RunSimple<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::DropLogTableRequest,
+ Ydb::LogStore::DropLogTableResponse>(
+ std::move(request),
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncDropLogTable,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+ }
+
+ TAsyncStatus AlterLogTable(const TString& path, const TAlterLogTableSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::LogStore::AlterLogTableRequest>(settings);
+ request.set_path(path);
+ if (const auto& ttl = settings.GetAlterTtlSettings()) {
+ switch (ttl->GetAction()) {
+ case TAlterTtlSettings::EAction::Set:
+ ttl->GetTtlSettings().SerializeTo(*request.mutable_set_ttl_settings());
+ break;
+ case TAlterTtlSettings::EAction::Drop:
+ request.mutable_drop_ttl_settings();
+ break;
+ }
+ }
+ return RunSimple<
+ Ydb::LogStore::V1::LogStoreService,
+ Ydb::LogStore::AlterLogTableRequest,
+ Ydb::LogStore::AlterLogTableResponse>(
+ std::move(request),
+ &Ydb::LogStore::V1::LogStoreService::Stub::AsyncAlterLogTable,
+ TRpcRequestSettings::Make(settings),
+ settings.ClientTimeout_);
+ }
+};
+
+TLogStoreClient::TLogStoreClient(const TDriver& driver, const TCommonClientSettings& settings)
+ : Impl_(new TImpl(CreateInternalInterface(driver), settings))
+{}
+
+TAsyncStatus TLogStoreClient::CreateLogStore(const TString& path, TLogStoreDescription&& storeDesc,
+ const TCreateLogStoreSettings& settings)
+{
+ return Impl_->CreateLogStore(path, std::move(storeDesc), settings);
+}
+
+TAsyncDescribeLogStoreResult TLogStoreClient::DescribeLogStore(const TString& path, const TDescribeLogStoreSettings& settings)
+{
+ return Impl_->DescribeLogStore(path, settings);
+}
+
+TAsyncStatus TLogStoreClient::DropLogStore(const TString& path, const TDropLogStoreSettings& settings)
+{
+ return Impl_->DropLogStore(path, settings);
+}
+
+TAsyncStatus TLogStoreClient::CreateLogTable(const TString& path, TLogTableDescription&& storeDesc,
+ const TCreateLogTableSettings& settings)
+{
+ return Impl_->CreateLogTable(path, std::move(storeDesc), settings);
+}
+
+TAsyncDescribeLogTableResult TLogStoreClient::DescribeLogTable(const TString& path, const TDescribeLogTableSettings& settings)
+{
+ return Impl_->DescribeLogTable(path, settings);
+}
+
+TAsyncStatus TLogStoreClient::DropLogTable(const TString& path, const TDropLogTableSettings& settings)
+{
+ return Impl_->DropLogTable(path, settings);
+}
+
+TAsyncStatus TLogStoreClient::AlterLogTable(const TString& path, const TAlterLogTableSettings& settings)
+{
+ return Impl_->AlterLogTable(path, settings);
+}
+
+TAlterLogTableSettings& TAlterLogTableSettings::AlterTtlSettings(const TMaybe<TAlterTtlSettings>& value) {
+ AlterTtlSettings_ = value;
+ return *this;
+}
+
+const TMaybe<TAlterTtlSettings>& TAlterLogTableSettings::GetAlterTtlSettings() const {
+ return AlterTtlSettings_;
+}
+
+}}
diff --git a/ydb/public/lib/experimental/ydb_logstore.h b/ydb/public/lib/experimental/ydb_logstore.h
index 669e3d069d2..44533b0a2b4 100644
--- a/ydb/public/lib/experimental/ydb_logstore.h
+++ b/ydb/public/lib/experimental/ydb_logstore.h
@@ -1,212 +1,212 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
-
-namespace Ydb {
-namespace LogStore {
-
-class Schema;
-class CreateLogStoreRequest;
-class CreateLogTableRequest;
-class DescribeLogStoreResult;
-class DescribeLogTableResult;
-
-}}
-
-namespace NYdb {
-namespace NLogStore {
-
-struct TCreateLogStoreSettings : public TOperationRequestSettings<TCreateLogStoreSettings> {
- using TSelf = TCreateLogStoreSettings;
-};
-
-struct TDropLogStoreSettings : public TOperationRequestSettings<TDropLogStoreSettings> {
- using TSelf = TDropLogStoreSettings;
-};
-
-struct TDescribeLogStoreSettings : public TOperationRequestSettings<TDescribeLogStoreSettings> {
- using TSelf = TDescribeLogStoreSettings;
-};
-
-struct TCreateLogTableSettings : public TOperationRequestSettings<TCreateLogTableSettings> {
- using TSelf = TCreateLogTableSettings;
-};
-
-struct TDropLogTableSettings : public TOperationRequestSettings<TDropLogTableSettings> {
- using TSelf = TDropLogTableSettings;
-};
-
-struct TDescribeLogTableSettings : public TOperationRequestSettings<TDescribeLogTableSettings> {
- using TSelf = TDescribeLogTableSettings;
-};
-
-using NTable::TTtlSettings;
-using NTable::TAlterTtlSettings;
-
-struct TAlterLogTableSettings : public TOperationRequestSettings<TAlterLogTableSettings> {
- using TSelf = TAlterLogTableSettings;
-
- TSelf& AlterTtlSettings(const TMaybe<TAlterTtlSettings>& value);
- const TMaybe<TAlterTtlSettings>& GetAlterTtlSettings() const;
-private:
- TMaybe<TAlterTtlSettings> AlterTtlSettings_;
-};
-
-TType MakeColumnType(EPrimitiveType primitiveType);
-
-class TSchema {
-public:
- TSchema(const TVector<TColumn>& columns = {}, const TVector<TString> primaryKeyColumns = {})
- : Columns(columns)
- , PrimaryKeyColumns(primaryKeyColumns)
- {}
-
- explicit TSchema(const Ydb::LogStore::Schema& schema);
-
- void SerializeTo(Ydb::LogStore::Schema& schema) const;
-
- TVector<TColumn> GetColumns() const {
- return Columns;
- }
- const TVector<TString>& GetPrimaryKeyColumns() const {
- return PrimaryKeyColumns;
- }
-private:
- TVector<TColumn> Columns;
- TVector<TString> PrimaryKeyColumns;
-};
-
-class TLogStoreDescription {
-public:
- TLogStoreDescription(ui32 columnShardCount, const THashMap<TString, TSchema>& schemaPresets);
- TLogStoreDescription(Ydb::LogStore::DescribeLogStoreResult&& desc, const TDescribeLogStoreSettings& describeSettings);
- void SerializeTo(Ydb::LogStore::CreateLogStoreRequest& request) const;
- const THashMap<TString, TSchema>& GetSchemaPresets() const {
- return SchemaPresets;
- }
- ui32 GetColumnShardCount() const {
- return ColumnShardCount;
- }
-
- const TString& GetOwner() const {
- return Owner;
- }
- const TVector<NScheme::TPermissions>& GetPermissions() const {
- return Permissions;
- }
- const TVector<NScheme::TPermissions>& GetEffectivePermissions() const {
- return EffectivePermissions;
- }
-
-private:
- ui32 ColumnShardCount;
- THashMap<TString, TSchema> SchemaPresets;
- TString Owner;
- TVector<NScheme::TPermissions> Permissions;
- TVector<NScheme::TPermissions> EffectivePermissions;
-};
-
-class TLogTableDescription {
-public:
- TLogTableDescription(const TString& schemaPresetName, const TVector<TString>& shardingColumns,
- ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings = {});
- TLogTableDescription(const TSchema& schema, const TVector<TString>& shardingColumns,
- ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings = {});
- TLogTableDescription(Ydb::LogStore::DescribeLogTableResult&& desc, const TDescribeLogTableSettings& describeSettings);
- void SerializeTo(Ydb::LogStore::CreateLogTableRequest& request) const;
- const TSchema& GetSchema() const {
- return Schema;
- }
- const TVector<TString>& GetShardingColumns() const {
- return ShardingColumns;
- }
- ui32 GetColumnShardCount() const {
- return ColumnShardCount;
- }
- const TMaybe<TTtlSettings>& GetTtlSettings() const {
- return TtlSettings;
- }
-
- const TString& GetOwner() const {
- return Owner;
- }
- const TVector<NScheme::TPermissions>& GetPermissions() const {
- return Permissions;
- }
- const TVector<NScheme::TPermissions>& GetEffectivePermissions() const {
- return EffectivePermissions;
- }
-
-private:
- const TString SchemaPresetName;
- const TSchema Schema;
- const TVector<TString> ShardingColumns;
- const ui32 ColumnShardCount;
- const TMaybe<TTtlSettings> TtlSettings;
- TString Owner;
- TVector<NScheme::TPermissions> Permissions;
- TVector<NScheme::TPermissions> EffectivePermissions;
-};
-
-//! Represents result of DescribeLogStore call
-class TDescribeLogStoreResult : public TStatus {
-public:
- TDescribeLogStoreResult(TStatus&& status, Ydb::LogStore::DescribeLogStoreResult&& desc,
- const TDescribeLogStoreSettings& describeSettings);
-
- const TLogStoreDescription& GetDescription() const {
- return LogStoreDescription_;
- }
-
-private:
- TLogStoreDescription LogStoreDescription_;
-};
-
-//! Represents result of DescribeLogTable call
-class TDescribeLogTableResult : public TStatus {
-public:
- TDescribeLogTableResult(TStatus&& status, Ydb::LogStore::DescribeLogTableResult&& desc,
- const TDescribeLogTableSettings& describeSettings);
-
- TLogTableDescription GetDescription() const {
- return LogTableDescription_;
- }
-
-private:
- TLogTableDescription LogTableDescription_;
-};
-
-using TAsyncDescribeLogStoreResult = NThreading::TFuture<TDescribeLogStoreResult>;
-using TAsyncDescribeLogTableResult = NThreading::TFuture<TDescribeLogTableResult>;
-
-class TLogStoreClient {
- class TImpl;
-
-public:
- TLogStoreClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
-
- TAsyncStatus CreateLogStore(const TString& path, TLogStoreDescription&& tableDesc,
- const TCreateLogStoreSettings& settings = TCreateLogStoreSettings());
-
- TAsyncDescribeLogStoreResult DescribeLogStore(const TString& path,
- const TDescribeLogStoreSettings& settings = TDescribeLogStoreSettings());
-
- TAsyncStatus DropLogStore(const TString& path, const TDropLogStoreSettings& settings = TDropLogStoreSettings());
-
- TAsyncStatus CreateLogTable(const TString& path, TLogTableDescription&& tableDesc,
- const TCreateLogTableSettings& settings = TCreateLogTableSettings());
-
- TAsyncDescribeLogTableResult DescribeLogTable(const TString& path,
- const TDescribeLogTableSettings& settings = TDescribeLogTableSettings());
-
- TAsyncStatus DropLogTable(const TString& path, const TDropLogTableSettings& settings = TDropLogTableSettings());
-
- TAsyncStatus AlterLogTable(const TString& path, const TAlterLogTableSettings& settings = TAlterLogTableSettings());
-
-private:
- std::shared_ptr<TImpl> Impl_;
-};
-
-}}
+
+namespace Ydb {
+namespace LogStore {
+
+class Schema;
+class CreateLogStoreRequest;
+class CreateLogTableRequest;
+class DescribeLogStoreResult;
+class DescribeLogTableResult;
+
+}}
+
+namespace NYdb {
+namespace NLogStore {
+
+struct TCreateLogStoreSettings : public TOperationRequestSettings<TCreateLogStoreSettings> {
+ using TSelf = TCreateLogStoreSettings;
+};
+
+struct TDropLogStoreSettings : public TOperationRequestSettings<TDropLogStoreSettings> {
+ using TSelf = TDropLogStoreSettings;
+};
+
+struct TDescribeLogStoreSettings : public TOperationRequestSettings<TDescribeLogStoreSettings> {
+ using TSelf = TDescribeLogStoreSettings;
+};
+
+struct TCreateLogTableSettings : public TOperationRequestSettings<TCreateLogTableSettings> {
+ using TSelf = TCreateLogTableSettings;
+};
+
+struct TDropLogTableSettings : public TOperationRequestSettings<TDropLogTableSettings> {
+ using TSelf = TDropLogTableSettings;
+};
+
+struct TDescribeLogTableSettings : public TOperationRequestSettings<TDescribeLogTableSettings> {
+ using TSelf = TDescribeLogTableSettings;
+};
+
+using NTable::TTtlSettings;
+using NTable::TAlterTtlSettings;
+
+struct TAlterLogTableSettings : public TOperationRequestSettings<TAlterLogTableSettings> {
+ using TSelf = TAlterLogTableSettings;
+
+ TSelf& AlterTtlSettings(const TMaybe<TAlterTtlSettings>& value);
+ const TMaybe<TAlterTtlSettings>& GetAlterTtlSettings() const;
+private:
+ TMaybe<TAlterTtlSettings> AlterTtlSettings_;
+};
+
+TType MakeColumnType(EPrimitiveType primitiveType);
+
+class TSchema {
+public:
+ TSchema(const TVector<TColumn>& columns = {}, const TVector<TString> primaryKeyColumns = {})
+ : Columns(columns)
+ , PrimaryKeyColumns(primaryKeyColumns)
+ {}
+
+ explicit TSchema(const Ydb::LogStore::Schema& schema);
+
+ void SerializeTo(Ydb::LogStore::Schema& schema) const;
+
+ TVector<TColumn> GetColumns() const {
+ return Columns;
+ }
+ const TVector<TString>& GetPrimaryKeyColumns() const {
+ return PrimaryKeyColumns;
+ }
+private:
+ TVector<TColumn> Columns;
+ TVector<TString> PrimaryKeyColumns;
+};
+
+class TLogStoreDescription {
+public:
+ TLogStoreDescription(ui32 columnShardCount, const THashMap<TString, TSchema>& schemaPresets);
+ TLogStoreDescription(Ydb::LogStore::DescribeLogStoreResult&& desc, const TDescribeLogStoreSettings& describeSettings);
+ void SerializeTo(Ydb::LogStore::CreateLogStoreRequest& request) const;
+ const THashMap<TString, TSchema>& GetSchemaPresets() const {
+ return SchemaPresets;
+ }
+ ui32 GetColumnShardCount() const {
+ return ColumnShardCount;
+ }
+
+ const TString& GetOwner() const {
+ return Owner;
+ }
+ const TVector<NScheme::TPermissions>& GetPermissions() const {
+ return Permissions;
+ }
+ const TVector<NScheme::TPermissions>& GetEffectivePermissions() const {
+ return EffectivePermissions;
+ }
+
+private:
+ ui32 ColumnShardCount;
+ THashMap<TString, TSchema> SchemaPresets;
+ TString Owner;
+ TVector<NScheme::TPermissions> Permissions;
+ TVector<NScheme::TPermissions> EffectivePermissions;
+};
+
+class TLogTableDescription {
+public:
+ TLogTableDescription(const TString& schemaPresetName, const TVector<TString>& shardingColumns,
+ ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings = {});
+ TLogTableDescription(const TSchema& schema, const TVector<TString>& shardingColumns,
+ ui32 columnShardCount, const TMaybe<TTtlSettings>& ttlSettings = {});
+ TLogTableDescription(Ydb::LogStore::DescribeLogTableResult&& desc, const TDescribeLogTableSettings& describeSettings);
+ void SerializeTo(Ydb::LogStore::CreateLogTableRequest& request) const;
+ const TSchema& GetSchema() const {
+ return Schema;
+ }
+ const TVector<TString>& GetShardingColumns() const {
+ return ShardingColumns;
+ }
+ ui32 GetColumnShardCount() const {
+ return ColumnShardCount;
+ }
+ const TMaybe<TTtlSettings>& GetTtlSettings() const {
+ return TtlSettings;
+ }
+
+ const TString& GetOwner() const {
+ return Owner;
+ }
+ const TVector<NScheme::TPermissions>& GetPermissions() const {
+ return Permissions;
+ }
+ const TVector<NScheme::TPermissions>& GetEffectivePermissions() const {
+ return EffectivePermissions;
+ }
+
+private:
+ const TString SchemaPresetName;
+ const TSchema Schema;
+ const TVector<TString> ShardingColumns;
+ const ui32 ColumnShardCount;
+ const TMaybe<TTtlSettings> TtlSettings;
+ TString Owner;
+ TVector<NScheme::TPermissions> Permissions;
+ TVector<NScheme::TPermissions> EffectivePermissions;
+};
+
+//! Represents result of DescribeLogStore call
+class TDescribeLogStoreResult : public TStatus {
+public:
+ TDescribeLogStoreResult(TStatus&& status, Ydb::LogStore::DescribeLogStoreResult&& desc,
+ const TDescribeLogStoreSettings& describeSettings);
+
+ const TLogStoreDescription& GetDescription() const {
+ return LogStoreDescription_;
+ }
+
+private:
+ TLogStoreDescription LogStoreDescription_;
+};
+
+//! Represents result of DescribeLogTable call
+class TDescribeLogTableResult : public TStatus {
+public:
+ TDescribeLogTableResult(TStatus&& status, Ydb::LogStore::DescribeLogTableResult&& desc,
+ const TDescribeLogTableSettings& describeSettings);
+
+ TLogTableDescription GetDescription() const {
+ return LogTableDescription_;
+ }
+
+private:
+ TLogTableDescription LogTableDescription_;
+};
+
+using TAsyncDescribeLogStoreResult = NThreading::TFuture<TDescribeLogStoreResult>;
+using TAsyncDescribeLogTableResult = NThreading::TFuture<TDescribeLogTableResult>;
+
+class TLogStoreClient {
+ class TImpl;
+
+public:
+ TLogStoreClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
+
+ TAsyncStatus CreateLogStore(const TString& path, TLogStoreDescription&& tableDesc,
+ const TCreateLogStoreSettings& settings = TCreateLogStoreSettings());
+
+ TAsyncDescribeLogStoreResult DescribeLogStore(const TString& path,
+ const TDescribeLogStoreSettings& settings = TDescribeLogStoreSettings());
+
+ TAsyncStatus DropLogStore(const TString& path, const TDropLogStoreSettings& settings = TDropLogStoreSettings());
+
+ TAsyncStatus CreateLogTable(const TString& path, TLogTableDescription&& tableDesc,
+ const TCreateLogTableSettings& settings = TCreateLogTableSettings());
+
+ TAsyncDescribeLogTableResult DescribeLogTable(const TString& path,
+ const TDescribeLogTableSettings& settings = TDescribeLogTableSettings());
+
+ TAsyncStatus DropLogTable(const TString& path, const TDropLogTableSettings& settings = TDropLogTableSettings());
+
+ TAsyncStatus AlterLogTable(const TString& path, const TAlterLogTableSettings& settings = TAlterLogTableSettings());
+
+private:
+ std::shared_ptr<TImpl> Impl_;
+};
+
+}}
diff --git a/ydb/public/lib/experimental/ydb_s3_internal.cpp b/ydb/public/lib/experimental/ydb_s3_internal.cpp
index 415073e8ada..ac1c5025edb 100644
--- a/ydb/public/lib/experimental/ydb_s3_internal.cpp
+++ b/ydb/public/lib/experimental/ydb_s3_internal.cpp
@@ -1,122 +1,122 @@
-#include "ydb_s3_internal.h"
-
+#include "ydb_s3_internal.h"
+
#define INCLUDE_YDB_INTERNAL_H
#include <ydb/public/sdk/cpp/client/impl/ydb_internal/make_request/make.h>
#undef INCLUDE_YDB_INTERNAL_H
#include <ydb/public/api/grpc/draft/ydb_s3_internal_v1.grpc.pb.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
+
#include <ydb/public/sdk/cpp/client/ydb_proto/accessor.h>
#include <ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h>
-
-namespace NYdb {
-namespace NS3Internal {
-
+
+namespace NYdb {
+namespace NS3Internal {
+
TS3ListingResult::TS3ListingResult(TResultSet&& commonPrefixes, TResultSet&& contents, ui32 keySuffixSize, TStatus&& status)
- : TStatus(std::move(status))
- , CommonPrefixes(std::move(commonPrefixes))
- , Contents(std::move(contents))
- , KeySuffixSize(keySuffixSize)
-{}
-
+ : TStatus(std::move(status))
+ , CommonPrefixes(std::move(commonPrefixes))
+ , Contents(std::move(contents))
+ , KeySuffixSize(keySuffixSize)
+{}
+
const TResultSet& TS3ListingResult::GetCommonPrefixes() const {
- return CommonPrefixes;
-}
-
+ return CommonPrefixes;
+}
+
const TResultSet& TS3ListingResult::GetContents() const {
- return Contents;
-}
-
-ui32 TS3ListingResult::GetKeySuffixSize() const {
- return KeySuffixSize;
-}
-
-void SetProtoValue(Ydb::TypedValue& out, TValue&& in) {
- *out.mutable_type() = TProtoAccessor::GetProto(in.GetType());
- *out.mutable_value() = TProtoAccessor::GetProto(in);
-}
-
-
-class TS3InternalClient::TImpl : public TClientImplCommon<TS3InternalClient::TImpl> {
-public:
- TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
+ return Contents;
+}
+
+ui32 TS3ListingResult::GetKeySuffixSize() const {
+ return KeySuffixSize;
+}
+
+void SetProtoValue(Ydb::TypedValue& out, TValue&& in) {
+ *out.mutable_type() = TProtoAccessor::GetProto(in.GetType());
+ *out.mutable_value() = TProtoAccessor::GetProto(in);
+}
+
+
+class TS3InternalClient::TImpl : public TClientImplCommon<TS3InternalClient::TImpl> {
+public:
+ TImpl(std::shared_ptr<TGRpcConnectionsImpl>&& connections, const TCommonClientSettings& settings)
: TClientImplCommon(std::move(connections), settings) {}
-
- TAsyncS3ListingResult S3Listing(const TString& tableName,
- TValue&& keyPrefix,
- const TString& pathColumnPrefix,
- const TString& pathColumnDelimiter,
- TValue&& startAfterKeySuffix,
- ui32 maxKeys,
- const TVector<TString> columnsToReturn,
- const TS3ListingSettings& settings)
- {
- auto request = MakeOperationRequest<Ydb::S3Internal::S3ListingRequest>(settings);
- request.set_table_name(tableName);
- SetProtoValue(*request.mutable_key_prefix(), std::move(keyPrefix));
- request.set_path_column_prefix(pathColumnPrefix);
- request.set_path_column_delimiter(pathColumnDelimiter);
- SetProtoValue(*request.mutable_start_after_key_suffix(), std::move(startAfterKeySuffix));
- request.set_max_keys(maxKeys);
- for (auto& c : columnsToReturn) {
- request.add_columns_to_return(c);
- }
-
- auto promise = NThreading::NewPromise<TS3ListingResult>();
-
+
+ TAsyncS3ListingResult S3Listing(const TString& tableName,
+ TValue&& keyPrefix,
+ const TString& pathColumnPrefix,
+ const TString& pathColumnDelimiter,
+ TValue&& startAfterKeySuffix,
+ ui32 maxKeys,
+ const TVector<TString> columnsToReturn,
+ const TS3ListingSettings& settings)
+ {
+ auto request = MakeOperationRequest<Ydb::S3Internal::S3ListingRequest>(settings);
+ request.set_table_name(tableName);
+ SetProtoValue(*request.mutable_key_prefix(), std::move(keyPrefix));
+ request.set_path_column_prefix(pathColumnPrefix);
+ request.set_path_column_delimiter(pathColumnDelimiter);
+ SetProtoValue(*request.mutable_start_after_key_suffix(), std::move(startAfterKeySuffix));
+ request.set_max_keys(maxKeys);
+ for (auto& c : columnsToReturn) {
+ request.add_columns_to_return(c);
+ }
+
+ auto promise = NThreading::NewPromise<TS3ListingResult>();
+
auto extractor = [promise]
(google::protobuf::Any* any, TPlainStatus status) mutable {
- Ydb::S3Internal::S3ListingResult result;
- if (any) {
- any->UnpackTo(&result);
- }
+ Ydb::S3Internal::S3ListingResult result;
+ if (any) {
+ any->UnpackTo(&result);
+ }
TResultSet commonPrefixes(result.Getcommon_prefixes());
TResultSet contents(result.Getcontents());
-
- TS3ListingResult val(std::move(commonPrefixes), std::move(contents), result.Getkey_suffix_size(),
+
+ TS3ListingResult val(std::move(commonPrefixes), std::move(contents), result.Getkey_suffix_size(),
TStatus(std::move(status)));
- promise.SetValue(std::move(val));
- };
-
- Connections_->RunDeferred<Ydb::S3Internal::V1::S3InternalService, Ydb::S3Internal::S3ListingRequest, Ydb::S3Internal::S3ListingResponse>(
- std::move(request),
- extractor,
- &Ydb::S3Internal::V1::S3InternalService::Stub::AsyncS3Listing,
- DbDriverState_,
+ promise.SetValue(std::move(val));
+ };
+
+ Connections_->RunDeferred<Ydb::S3Internal::V1::S3InternalService, Ydb::S3Internal::S3ListingRequest, Ydb::S3Internal::S3ListingResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::S3Internal::V1::S3InternalService::Stub::AsyncS3Listing,
+ DbDriverState_,
INITIAL_DEFERRED_CALL_DELAY,
TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
-};
-
-
-TS3InternalClient::TS3InternalClient(const TDriver& driver, const TCommonClientSettings& settings)
- : Impl_(new TImpl(CreateInternalInterface(driver), settings))
-{}
-
-TAsyncS3ListingResult TS3InternalClient::S3Listing(const TString& tableName,
- TValue&& keyPrefix,
- const TString& pathColumnPrefix,
- const TString& pathColumnDelimiter,
- TValue&& startAfterKeySuffix,
- ui32 maxKeys,
- const TVector<TString>& columnsToReturn,
- const TS3ListingSettings& settings)
-{
- return Impl_->S3Listing(tableName,
- std::move(keyPrefix),
- pathColumnPrefix,
- pathColumnDelimiter,
- std::move(startAfterKeySuffix),
- maxKeys,
- columnsToReturn,
- settings);
-}
-
-}}
-
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
+};
+
+
+TS3InternalClient::TS3InternalClient(const TDriver& driver, const TCommonClientSettings& settings)
+ : Impl_(new TImpl(CreateInternalInterface(driver), settings))
+{}
+
+TAsyncS3ListingResult TS3InternalClient::S3Listing(const TString& tableName,
+ TValue&& keyPrefix,
+ const TString& pathColumnPrefix,
+ const TString& pathColumnDelimiter,
+ TValue&& startAfterKeySuffix,
+ ui32 maxKeys,
+ const TVector<TString>& columnsToReturn,
+ const TS3ListingSettings& settings)
+{
+ return Impl_->S3Listing(tableName,
+ std::move(keyPrefix),
+ pathColumnPrefix,
+ pathColumnDelimiter,
+ std::move(startAfterKeySuffix),
+ maxKeys,
+ columnsToReturn,
+ settings);
+}
+
+}}
+
diff --git a/ydb/public/lib/experimental/ydb_s3_internal.h b/ydb/public/lib/experimental/ydb_s3_internal.h
index 24ef9a98ebd..ee70f0dcddb 100644
--- a/ydb/public/lib/experimental/ydb_s3_internal.h
+++ b/ydb/public/lib/experimental/ydb_s3_internal.h
@@ -1,50 +1,50 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
-
-namespace NYdb {
-namespace NS3Internal {
-
-struct TS3ListingSettings : public TOperationRequestSettings<TS3ListingSettings> {};
-
-
-class TS3ListingResult : public TStatus {
- friend class TS3InternalClient;
-private:
+
+namespace NYdb {
+namespace NS3Internal {
+
+struct TS3ListingSettings : public TOperationRequestSettings<TS3ListingSettings> {};
+
+
+class TS3ListingResult : public TStatus {
+ friend class TS3InternalClient;
+private:
TS3ListingResult(TResultSet&& commonPrefixes, TResultSet&& contents, ui32 keySuffixSize, TStatus&& status);
-
-public:
+
+public:
const TResultSet& GetCommonPrefixes() const;
const TResultSet& GetContents() const;
- ui32 GetKeySuffixSize() const;
-
-private:
+ ui32 GetKeySuffixSize() const;
+
+private:
TResultSet CommonPrefixes;
TResultSet Contents;
- const ui32 KeySuffixSize;
-};
-
-using TAsyncS3ListingResult = NThreading::TFuture<TS3ListingResult>;
-
-
-class TS3InternalClient {
- class TImpl;
-
-public:
- TS3InternalClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
-
- TAsyncS3ListingResult S3Listing(const TString& tableName,
- TValue&& keyPrefix,
- const TString& pathColumnPrefix,
- const TString& pathColumnDelimiter,
- TValue&& startAfterKeySuffix,
- ui32 maxKeys,
- const TVector<TString> &columnsToReturn,
- const TS3ListingSettings& settings = TS3ListingSettings());
-
-private:
- std::shared_ptr<TImpl> Impl_;
-};
-
-}}
+ const ui32 KeySuffixSize;
+};
+
+using TAsyncS3ListingResult = NThreading::TFuture<TS3ListingResult>;
+
+
+class TS3InternalClient {
+ class TImpl;
+
+public:
+ TS3InternalClient(const TDriver& driver, const TCommonClientSettings& settings = TCommonClientSettings());
+
+ TAsyncS3ListingResult S3Listing(const TString& tableName,
+ TValue&& keyPrefix,
+ const TString& pathColumnPrefix,
+ const TString& pathColumnDelimiter,
+ TValue&& startAfterKeySuffix,
+ ui32 maxKeys,
+ const TVector<TString> &columnsToReturn,
+ const TS3ListingSettings& settings = TS3ListingSettings());
+
+private:
+ std::shared_ptr<TImpl> Impl_;
+};
+
+}}
diff --git a/ydb/public/lib/idx_test/idx_test_checker.cpp b/ydb/public/lib/idx_test/idx_test_checker.cpp
index 55b3d8ee9aa..f87f5801515 100644
--- a/ydb/public/lib/idx_test/idx_test_checker.cpp
+++ b/ydb/public/lib/idx_test/idx_test_checker.cpp
@@ -42,8 +42,8 @@ private:
TVector<size_t> checkColumnsMap;
checkColumnsMap.resize(indexDesc.GetIndexColumns().size() + indexDesc.GetDataColumns().size());
- THashMap<TString, size_t> indexColumns;
-
+ THashMap<TString, size_t> indexColumns;
+
{
size_t j = 0;
for (size_t i = 0; i < indexDesc.GetIndexColumns().size(); i++, j++) {
@@ -66,17 +66,17 @@ private:
TVector<size_t> pkColumnIdx;
- size_t includedColumns = 0; // count of PK columns included in the index (before i-th PK column)
+ size_t includedColumns = 0; // count of PK columns included in the index (before i-th PK column)
for (size_t i = 0; i < TableDescription_.GetRef().GetPrimaryKeyColumns().size(); i++) {
const auto& col = TableDescription_.GetRef().GetPrimaryKeyColumns()[i];
- auto it = indexColumns.find(col);
- if (it != indexColumns.end()) {
- // PK column is included in the secondary index
+ auto it = indexColumns.find(col);
+ if (it != indexColumns.end()) {
+ // PK column is included in the secondary index
pkColumnIdx.push_back(it->second);
- ++includedColumns;
+ ++includedColumns;
} else {
settings.AppendColumns(col);
- pkColumnIdx.push_back(checkColumnsMap.size() + i - includedColumns);
+ pkColumnIdx.push_back(checkColumnsMap.size() + i - includedColumns);
}
}
diff --git a/ydb/public/lib/value/value.cpp b/ydb/public/lib/value/value.cpp
index f154a715584..0ed8b826895 100644
--- a/ydb/public/lib/value/value.cpp
+++ b/ydb/public/lib/value/value.cpp
@@ -363,8 +363,8 @@ size_t TValue::Size() const {
return Value.ListSize();
if (Type.HasTuple())
return Value.TupleSize();
- if (Type.HasStruct())
- return Value.StructSize();
+ if (Type.HasStruct())
+ return Value.StructSize();
return 0;
}
@@ -416,9 +416,9 @@ TString TValue::GetDataText() const {
case NScheme::NTypeIds::Datetime:
return ToString(Value.GetUint32());
case NScheme::NTypeIds::Timestamp:
- return ToString(Value.GetUint64());
+ return ToString(Value.GetUint64());
case NScheme::NTypeIds::Interval:
- return ToString(Value.GetInt64());
+ return ToString(Value.GetInt64());
case NScheme::NTypeIds::JsonDocument:
return "\"<JsonDocument>\"";
}
@@ -595,8 +595,8 @@ template <> TString TValue::GetValueText<TFormatJSON>(const TFormatJSON& format)
}
case NScheme::NTypeIds::Uint64:
case NScheme::NTypeIds::Int64:
- case NScheme::NTypeIds::Timestamp:
- case NScheme::NTypeIds::Interval:
+ case NScheme::NTypeIds::Timestamp:
+ case NScheme::NTypeIds::Interval:
return format.UI64AsString ? ("\"" + GetDataText() + "\"") : GetDataText();
default:
return GetDataText();
diff --git a/ydb/public/lib/ya.make b/ydb/public/lib/ya.make
index 2640101e0d7..a4f529b44bc 100644
--- a/ydb/public/lib/ya.make
+++ b/ydb/public/lib/ya.make
@@ -3,7 +3,7 @@ OWNER(g:kikimr)
RECURSE(
base
deprecated
- experimental
+ experimental
idx_test
json_value
jwt
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp b/ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp
index f9bea4acb10..b28e8814288 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_import.cpp
@@ -166,10 +166,10 @@ void TCommandImportFromCsv::Config(TConfig& config) {
config.Opts->AddLongOption("batch-bytes",
"Use portions of this size in bytes to parse and upload file data")
.DefaultValue(HumanReadableSize(defaults.BytesPerRequest_, SF_BYTES)).StoreResult(&BytesPerRequest);
-
- config.Opts->AddLongOption("max-in-flight",
- "Maximum number of in-flight requests; increase to load big files faster (more memory needed)")
- .DefaultValue(defaults.MaxInFlightRequests_).StoreResult(&MaxInFlightRequests);
+
+ config.Opts->AddLongOption("max-in-flight",
+ "Maximum number of in-flight requests; increase to load big files faster (more memory needed)")
+ .DefaultValue(defaults.MaxInFlightRequests_).StoreResult(&MaxInFlightRequests);
}
void TCommandImportFromCsv::Parse(TConfig& config) {
@@ -193,11 +193,11 @@ int TCommandImportFromCsv::Run(TConfig& config) {
settings.BytesPerRequest(bytesPerRequest);
}
- if (MaxInFlightRequests == 0) {
- MaxInFlightRequests = 1;
- }
- settings.MaxInFlightRequests(MaxInFlightRequests);
-
+ if (MaxInFlightRequests == 0) {
+ MaxInFlightRequests = 1;
+ }
+ settings.MaxInFlightRequests(MaxInFlightRequests);
+
if (Delimiter.size() != 1) {
throw TMissUseException()
<< "--delimiter should be a one symbol string. Got: '" << Delimiter << "'";
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_import.h b/ydb/public/lib/ydb_cli/commands/ydb_service_import.h
index 94423e8df66..04dc1010476 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_import.h
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_import.h
@@ -62,7 +62,7 @@ protected:
ui32 SkipRows = 0;
bool Header = false;
TString BytesPerRequest;
- ui64 MaxInFlightRequests = 1;
+ ui64 MaxInFlightRequests = 1;
};
class TCommandImportFromTsv : public TCommandImportFromCsv {
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp b/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
index d9fc3f0784e..a6f81fe432a 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_service_scheme.cpp
@@ -482,8 +482,8 @@ namespace {
void PrintPartitioningSettings(const NTable::TTableDescription& tableDescription) {
const auto& settings = tableDescription.GetPartitioningSettings();
const auto partBySize = settings.GetPartitioningBySize();
- const auto partByLoad = settings.GetPartitioningByLoad();
- if (!partBySize.Defined() && !partByLoad.Defined()) {
+ const auto partByLoad = settings.GetPartitioningByLoad();
+ if (!partBySize.Defined() && !partByLoad.Defined()) {
return;
}
const auto partitionSizeMb = settings.GetPartitionSizeMb();
@@ -491,8 +491,8 @@ namespace {
const auto maxPartitions = settings.GetMaxPartitionsCount();
Cout << Endl << "Auto partitioning settings: " << Endl;
Cout << "Partitioning by size: " << (partBySize.GetRef() ? "true" : "false") << Endl;
- Cout << "Partitioning by load: " << (partByLoad.GetRef() ? "true" : "false") << Endl;
- if (partBySize.Defined() && partitionSizeMb) {
+ Cout << "Partitioning by load: " << (partByLoad.GetRef() ? "true" : "false") << Endl;
+ if (partBySize.Defined() && partitionSizeMb) {
Cout << "Preferred partition size (Mb): " << partitionSizeMb << Endl;
}
if (minPartitions) {
diff --git a/ydb/public/lib/ydb_cli/import/import.cpp b/ydb/public/lib/ydb_cli/import/import.cpp
index e91d230639e..911aaf27191 100644
--- a/ydb/public/lib/ydb_cli/import/import.cpp
+++ b/ydb/public/lib/ydb_cli/import/import.cpp
@@ -16,8 +16,8 @@
#include <util/string/builder.h>
#include <util/folder/path.h>
-#include <deque>
-
+#include <deque>
+
namespace NYdb {
namespace NConsoleClient {
@@ -69,22 +69,22 @@ TStatus TImportFileClient::Import(const TString& filePath, const TString& dbPath
return UpsertCsv(dataFile, dbPath, settings);
}
-namespace {
-
-TStatus WaitForQueue(std::deque<TAsyncStatus>& inFlightRequests, size_t maxQueueSize) {
- while (!inFlightRequests.empty() && inFlightRequests.size() > maxQueueSize) {
- auto status = inFlightRequests.front().ExtractValueSync();
- inFlightRequests.pop_front();
- if (!status.IsSuccess()) {
- return status;
- }
- }
-
- return MakeStatus();
-}
-
-}
-
+namespace {
+
+TStatus WaitForQueue(std::deque<TAsyncStatus>& inFlightRequests, size_t maxQueueSize) {
+ while (!inFlightRequests.empty() && inFlightRequests.size() > maxQueueSize) {
+ auto status = inFlightRequests.front().ExtractValueSync();
+ inFlightRequests.pop_front();
+ if (!status.IsSuccess()) {
+ return status;
+ }
+ }
+
+ return MakeStatus();
+}
+
+}
+
TStatus TImportFileClient::UpsertCsv(const TString& dataFile, const TString& dbPath,
const TImportFileSettings& settings) {
TFileInput input(dataFile, settings.FileBufferSize_);
@@ -130,8 +130,8 @@ TStatus TImportFileClient::UpsertCsv(const TString& dataFile, const TString& dbP
upsertSettings.FormatSettings(formatSettings);
}
- std::deque<TAsyncStatus> inFlightRequests;
-
+ std::deque<TAsyncStatus> inFlightRequests;
+
// TODO: better read
    // * read several lines a time
// * support endlines inside quotes
@@ -141,36 +141,36 @@ TStatus TImportFileClient::UpsertCsv(const TString& dataFile, const TString& dbP
buffer += '\n'; // TODO: keep original endline?
if (buffer.Size() >= settings.BytesPerRequest_) {
- auto status = WaitForQueue(inFlightRequests, settings.MaxInFlightRequests_);
+ auto status = WaitForQueue(inFlightRequests, settings.MaxInFlightRequests_);
if (!status.IsSuccess()) {
return status;
}
- inFlightRequests.push_back(UpsertCsvBuffer(dbPath, buffer, {}, upsertSettings, retrySettings));
-
+ inFlightRequests.push_back(UpsertCsvBuffer(dbPath, buffer, {}, upsertSettings, retrySettings));
+
buffer = headerRow;
}
}
if (!buffer.Empty()) {
- inFlightRequests.push_back(UpsertCsvBuffer(dbPath, buffer, {}, upsertSettings, retrySettings));
+ inFlightRequests.push_back(UpsertCsvBuffer(dbPath, buffer, {}, upsertSettings, retrySettings));
}
- return WaitForQueue(inFlightRequests, 0);
+ return WaitForQueue(inFlightRequests, 0);
}
-TAsyncStatus TImportFileClient::UpsertCsvBuffer(const TString& dbPath, const TString& csv, const TString& header,
+TAsyncStatus TImportFileClient::UpsertCsvBuffer(const TString& dbPath, const TString& csv, const TString& header,
const NTable::TBulkUpsertSettings& upsertSettings,
const NTable::TRetryOperationSettings& retrySettings) {
- auto upsert = [dbPath, csv, header, upsertSettings](NYdb::NTable::TTableClient& tableClient) -> TAsyncStatus {
- return tableClient.BulkUpsert(dbPath, NTable::EDataFormat::CSV, csv, header, upsertSettings)
- .Apply([](const NYdb::NTable::TAsyncBulkUpsertResult& bulkUpsertResult) {
- NYdb::TStatus status = bulkUpsertResult.GetValueSync();
- return NThreading::MakeFuture(status);
- });
+ auto upsert = [dbPath, csv, header, upsertSettings](NYdb::NTable::TTableClient& tableClient) -> TAsyncStatus {
+ return tableClient.BulkUpsert(dbPath, NTable::EDataFormat::CSV, csv, header, upsertSettings)
+ .Apply([](const NYdb::NTable::TAsyncBulkUpsertResult& bulkUpsertResult) {
+ NYdb::TStatus status = bulkUpsertResult.GetValueSync();
+ return NThreading::MakeFuture(status);
+ });
};
- return TableClient->RetryOperation(upsert, retrySettings);
+ return TableClient->RetryOperation(upsert, retrySettings);
}
}
diff --git a/ydb/public/lib/ydb_cli/import/import.h b/ydb/public/lib/ydb_cli/import/import.h
index b99cdf4b09b..66269e8e6c1 100644
--- a/ydb/public/lib/ydb_cli/import/import.h
+++ b/ydb/public/lib/ydb_cli/import/import.h
@@ -34,7 +34,7 @@ struct TImportFileSettings : public TOperationRequestSettings<TImportFileSetting
FLUENT_SETTING_DEFAULT(NTable::EDataFormat, Format, NTable::EDataFormat::CSV);
FLUENT_SETTING_DEFAULT(ui64, BytesPerRequest, 1_MB);
FLUENT_SETTING_DEFAULT(ui64, FileBufferSize, 2_MB);
- FLUENT_SETTING_DEFAULT(ui64, MaxInFlightRequests, 100);
+ FLUENT_SETTING_DEFAULT(ui64, MaxInFlightRequests, 100);
FLUENT_SETTING_DEFAULT(ui32, SkipRows, 0);
FLUENT_SETTING_DEFAULT(bool, Header, false);
FLUENT_SETTING_DEFAULT(TString, Delimiter, DefaultDelimiter);
@@ -53,7 +53,7 @@ private:
std::shared_ptr<NTable::TTableClient> TableClient;
TStatus UpsertCsv(const TString& dataFile, const TString& dbPath, const TImportFileSettings& settings);
- TAsyncStatus UpsertCsvBuffer(const TString& dbPath, const TString& csv, const TString& header,
+ TAsyncStatus UpsertCsvBuffer(const TString& dbPath, const TString& csv, const TString& header,
const NTable::TBulkUpsertSettings& upsertSettings,
const NTable::TRetryOperationSettings& retrySettings);
};
diff --git a/ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h b/ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h
index 43d2fe6fcdc..c43e9aab694 100644
--- a/ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h
+++ b/ydb/public/sdk/cpp/client/ydb_common_client/impl/client.h
@@ -52,7 +52,7 @@ protected:
template<typename TService, typename TRequest, typename TResponse>
NThreading::TFuture<TStatus> RunSimple(
- TRequest&& request,
+ TRequest&& request,
TAsyncRequest<TService, TRequest, TResponse> rpc,
const TRpcRequestSettings& requestSettings = {},
TDuration timeout = TDuration::Zero(),
@@ -67,7 +67,7 @@ protected:
};
Connections_->RunDeferred<TService, TRequest, TResponse>(
- std::move(request),
+ std::move(request),
extractor,
rpc,
DbDriverState_,
diff --git a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp
index 743d8cf295f..7a99c95214b 100644
--- a/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_coordination/coordination.cpp
@@ -1811,46 +1811,46 @@ public:
}
TAsyncStatus CreateNode(
- Ydb::Coordination::CreateNodeRequest&& request,
+ Ydb::Coordination::CreateNodeRequest&& request,
const TCreateNodeSettings& settings)
{
return RunSimple<Ydb::Coordination::V1::CoordinationService,
Ydb::Coordination::CreateNodeRequest,
Ydb::Coordination::CreateNodeResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Coordination::V1::CoordinationService::Stub::AsyncCreateNode,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
}
TAsyncStatus AlterNode(
- Ydb::Coordination::AlterNodeRequest&& request,
+ Ydb::Coordination::AlterNodeRequest&& request,
const TAlterNodeSettings& settings)
{
return RunSimple<Ydb::Coordination::V1::CoordinationService,
Ydb::Coordination::AlterNodeRequest,
Ydb::Coordination::AlterNodeResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Coordination::V1::CoordinationService::Stub::AsyncAlterNode,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
}
TAsyncStatus DropNode(
- Ydb::Coordination::DropNodeRequest&& request,
+ Ydb::Coordination::DropNodeRequest&& request,
const TDropNodeSettings& settings)
{
return RunSimple<Ydb::Coordination::V1::CoordinationService,
Ydb::Coordination::DropNodeRequest,
Ydb::Coordination::DropNodeResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Coordination::V1::CoordinationService::Stub::AsyncDropNode,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
}
TAsyncDescribeNodeResult DescribeNode(
- Ydb::Coordination::DescribeNodeRequest&& request,
+ Ydb::Coordination::DescribeNodeRequest&& request,
const TDescribeNodeSettings& settings)
{
auto promise = NewPromise<TDescribeNodeResult>();
@@ -1870,7 +1870,7 @@ public:
Connections_->RunDeferred<Ydb::Coordination::V1::CoordinationService,
Ydb::Coordination::DescribeNodeRequest,
Ydb::Coordination::DescribeNodeResponse>(
- std::move(request),
+ std::move(request),
std::move(extractor),
&Ydb::Coordination::V1::CoordinationService::Stub::AsyncDescribeNode,
DbDriverState_,
diff --git a/ydb/public/sdk/cpp/client/ydb_proto/accessor.h b/ydb/public/sdk/cpp/client/ydb_proto/accessor.h
index 49c476c236d..be0165d74bb 100644
--- a/ydb/public/sdk/cpp/client/ydb_proto/accessor.h
+++ b/ydb/public/sdk/cpp/client/ydb_proto/accessor.h
@@ -32,7 +32,7 @@ public:
static const Ydb::Type& GetProto(const TType& type);
static const Ydb::Value& GetProto(const TValue& value);
static const Ydb::ResultSet& GetProto(const TResultSet& resultSet);
- static const Ydb::TableStats::QueryStats& GetProto(const NTable::TQueryStats& queryStats);
+ static const Ydb::TableStats::QueryStats& GetProto(const NTable::TQueryStats& queryStats);
static const Ydb::Table::DescribeTableResult& GetProto(const NTable::TTableDescription& tableDescription);
static const Ydb::PersQueue::V1::DescribeTopicResult& GetProto(const NYdb::NPersQueue::TDescribeTopicResult& topicDescription);
diff --git a/ydb/public/sdk/cpp/client/ydb_scheme/scheme.cpp b/ydb/public/sdk/cpp/client/ydb_scheme/scheme.cpp
index a7ba6b50836..31f5535b968 100644
--- a/ydb/public/sdk/cpp/client/ydb_scheme/scheme.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_scheme/scheme.cpp
@@ -50,7 +50,7 @@ public:
request.set_path(path);
return RunSimple<Ydb::Scheme::V1::SchemeService, MakeDirectoryRequest, MakeDirectoryResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Scheme::V1::SchemeService::Stub::AsyncMakeDirectory,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -61,7 +61,7 @@ public:
request.set_path(path);
return RunSimple<Ydb::Scheme::V1::SchemeService, RemoveDirectoryRequest, RemoveDirectoryResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Scheme::V1::SchemeService::Stub::AsyncRemoveDirectory,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -93,7 +93,7 @@ public:
};
Connections_->RunDeferred<Ydb::Scheme::V1::SchemeService, DescribePathRequest, DescribePathResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Scheme::V1::SchemeService::Stub::AsyncDescribePath,
DbDriverState_,
@@ -136,7 +136,7 @@ public:
};
Connections_->RunDeferred<Ydb::Scheme::V1::SchemeService, ListDirectoryRequest, ListDirectoryResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Scheme::V1::SchemeService::Stub::AsyncListDirectory,
DbDriverState_,
@@ -185,7 +185,7 @@ public:
}
return RunSimple<Ydb::Scheme::V1::SchemeService, ModifyPermissionsRequest, ModifyPermissionsResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Scheme::V1::SchemeService::Stub::AsyncModifyPermissions,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
diff --git a/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.cpp b/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.cpp
index 9866470d35d..106b848c83e 100644
--- a/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.cpp
@@ -1,24 +1,24 @@
#include "stats.h"
-
+
#include <ydb/public/api/protos/ydb_table.pb.h>
-
+
#include <util/datetime/base.h>
#include <google/protobuf/text_format.h>
-namespace NYdb {
-namespace NTable {
-
-class TQueryStats::TImpl {
-public:
- Ydb::TableStats::QueryStats Proto;
-};
-
-TQueryStats::TQueryStats(const Ydb::TableStats::QueryStats& proto) {
- Impl_ = std::make_shared<TImpl>();
- Impl_->Proto = proto;
-}
-
+namespace NYdb {
+namespace NTable {
+
+class TQueryStats::TImpl {
+public:
+ Ydb::TableStats::QueryStats Proto;
+};
+
+TQueryStats::TQueryStats(const Ydb::TableStats::QueryStats& proto) {
+ Impl_ = std::make_shared<TImpl>();
+ Impl_->Proto = proto;
+}
+
TString TQueryStats::ToString(bool withPlan) const {
auto proto = Impl_->Proto;
@@ -27,11 +27,11 @@ TString TQueryStats::ToString(bool withPlan) const {
proto.clear_query_ast();
}
- TString res;
+ TString res;
::google::protobuf::TextFormat::PrintToString(proto, &res);
- return res;
-}
-
+ return res;
+}
+
TMaybe<TString> TQueryStats::GetPlan() const {
auto proto = Impl_->Proto;
@@ -50,9 +50,9 @@ TDuration TQueryStats::GetTotalCpuTime() const {
return TDuration::MicroSeconds(Impl_->Proto.total_cpu_time_us());
}
-const Ydb::TableStats::QueryStats& TQueryStats::GetProto() const {
- return Impl_->Proto;
-}
-
-} // namespace NTable
-} // namespace NYdb
+const Ydb::TableStats::QueryStats& TQueryStats::GetProto() const {
+ return Impl_->Proto;
+}
+
+} // namespace NTable
+} // namespace NYdb
diff --git a/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.h b/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.h
index 2737161ab82..355c271dc24 100644
--- a/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.h
+++ b/ydb/public/sdk/cpp/client/ydb_table/query_stats/stats.h
@@ -1,26 +1,26 @@
-#pragma once
-
+#pragma once
+
#include <util/generic/maybe.h>
#include <util/generic/string.h>
-
+
#include <memory>
class TDuration;
-namespace Ydb {
+namespace Ydb {
namespace TableStats {
class QueryStats;
}
-
+
namespace Table {
class QueryStatsCollection;
}
-}
-
-namespace NYdb {
-
-class TProtoAccessor;
-
+}
+
+namespace NYdb {
+
+class TProtoAccessor;
+
namespace NScripting {
class TScriptingClient;
@@ -28,35 +28,35 @@ class TYqlResultPartIterator;
} // namespace NScripting
-namespace NTable {
-
+namespace NTable {
+
enum class ECollectQueryStatsMode {
None = 0, // Stats collection is disabled
Basic = 1, // Aggregated stats of reads, updates and deletes per table
Full = 2 // Add per-stage execution profile and query plan on top of Basic mode
};
-class TQueryStats {
- friend class TTableClient;
- friend class NYdb::TProtoAccessor;
+class TQueryStats {
+ friend class TTableClient;
+ friend class NYdb::TProtoAccessor;
friend class NYdb::NScripting::TScriptingClient;
friend class NYdb::NScripting::TYqlResultPartIterator;
friend class TScanQueryPartIterator;
-
-public:
+
+public:
TString ToString(bool withPlan = false) const;
TMaybe<TString> GetPlan() const;
TDuration GetTotalDuration() const;
TDuration GetTotalCpuTime() const;
-
-private:
- explicit TQueryStats(const Ydb::TableStats::QueryStats& proto);
- const Ydb::TableStats::QueryStats& GetProto() const;
-
-private:
- class TImpl;
- std::shared_ptr<TImpl> Impl_;
-};
-
-} // namespace NTable
-} // namespace NYdb
+
+private:
+ explicit TQueryStats(const Ydb::TableStats::QueryStats& proto);
+ const Ydb::TableStats::QueryStats& GetProto() const;
+
+private:
+ class TImpl;
+ std::shared_ptr<TImpl> Impl_;
+};
+
+} // namespace NTable
+} // namespace NYdb
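For reference, the TQueryStats / ECollectQueryStatsMode API restored above is driven from client code roughly as follows. This is a minimal sketch, not part of the commit: the `session` variable and the ExecuteDataQuery overload taking TExecDataQuerySettings are assumed here; the CollectQueryStats setter is implied by the FLUENT_SETTING_OPTIONAL declaration further down in table.h.

    // Minimal sketch (see assumptions above): request Full stats and print them.
    using namespace NYdb::NTable;

    auto settings = TExecDataQuerySettings()
        .CollectQueryStats(ECollectQueryStatsMode::Full);   // None / Basic / Full

    auto result = session.ExecuteDataQuery(
        "SELECT 1;",
        TTxControl::BeginTx().CommitTx(),
        settings).ExtractValueSync();

    if (result.IsSuccess() && result.GetStats().Defined()) {
        const TQueryStats& stats = *result.GetStats();       // TMaybe<TQueryStats>
        Cerr << stats.ToString(/* withPlan = */ false) << Endl;
        Cerr << "cpu: " << stats.GetTotalCpuTime() << " total: " << stats.GetTotalDuration() << Endl;
    }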
diff --git a/ydb/public/sdk/cpp/client/ydb_table/table.cpp b/ydb/public/sdk/cpp/client/ydb_table/table.cpp
index ed9abeff86d..610fec38847 100644
--- a/ydb/public/sdk/cpp/client/ydb_table/table.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_table/table.cpp
@@ -208,17 +208,17 @@ TMaybe<bool> TPartitioningSettings::GetPartitioningBySize() const {
}
}
-TMaybe<bool> TPartitioningSettings::GetPartitioningByLoad() const {
- switch (GetProto().partitioning_by_load()) {
- case Ydb::FeatureFlag::ENABLED:
- return true;
- case Ydb::FeatureFlag::DISABLED:
- return false;
- default:
- return { };
- }
-}
-
+TMaybe<bool> TPartitioningSettings::GetPartitioningByLoad() const {
+ switch (GetProto().partitioning_by_load()) {
+ case Ydb::FeatureFlag::ENABLED:
+ return true;
+ case Ydb::FeatureFlag::DISABLED:
+ return false;
+ default:
+ return { };
+ }
+}
+
ui64 TPartitioningSettings::GetPartitionSizeMb() const {
return GetProto().partition_size_mb();
}
@@ -931,12 +931,12 @@ TPartitioningSettingsBuilder& TPartitioningSettingsBuilder::SetPartitioningBySiz
return *this;
}
-TPartitioningSettingsBuilder& TPartitioningSettingsBuilder::SetPartitioningByLoad(bool enabled) {
- Impl_->Proto.set_partitioning_by_load(
- enabled ? Ydb::FeatureFlag::ENABLED : Ydb::FeatureFlag::DISABLED);
- return *this;
-}
-
+TPartitioningSettingsBuilder& TPartitioningSettingsBuilder::SetPartitioningByLoad(bool enabled) {
+ Impl_->Proto.set_partitioning_by_load(
+ enabled ? Ydb::FeatureFlag::ENABLED : Ydb::FeatureFlag::DISABLED);
+ return *this;
+}
+
TPartitioningSettingsBuilder& TPartitioningSettingsBuilder::SetPartitionSizeMb(ui64 sizeMb) {
Impl_->Proto.set_partition_size_mb(sizeMb);
return *this;
@@ -1967,7 +1967,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::CreateSessionRequest, Ydb::Table::CreateSessionResponse>(
- std::move(request),
+ std::move(request),
createSessionExtractor,
&Ydb::Table::V1::TableService::Stub::AsyncCreateSession,
DbDriverState_,
@@ -2011,7 +2011,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::KeepAliveRequest, Ydb::Table::KeepAliveResponse>(
- std::move(request),
+ std::move(request),
keepAliveExtractor,
&Ydb::Table::V1::TableService::Stub::AsyncKeepAlive,
DbDriverState_,
@@ -2026,7 +2026,7 @@ public:
TFuture<TStatus> CreateTable(Ydb::Table::CreateTableRequest&& request, const TCreateTableSettings& settings)
{
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::CreateTableRequest,Ydb::Table::CreateTableResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncCreateTable,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -2035,7 +2035,7 @@ public:
TFuture<TStatus> AlterTable(Ydb::Table::AlterTableRequest&& request, const TAlterTableSettings& settings)
{
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::AlterTableRequest, Ydb::Table::AlterTableResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncAlterTable,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -2062,7 +2062,7 @@ public:
request.set_destination_path(dst);
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::CopyTableRequest, Ydb::Table::CopyTableResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncCopyTable,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -2092,7 +2092,7 @@ public:
request.set_path(path);
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::DropTableRequest, Ydb::Table::DropTableResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncDropTable,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -2128,7 +2128,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::DescribeTableRequest, Ydb::Table::DescribeTableResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncDescribeTable,
DbDriverState_,
@@ -2210,7 +2210,7 @@ public:
CollectQuerySize(query, QuerySizeHistogram);
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::PrepareDataQueryRequest, Ydb::Table::PrepareDataQueryResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncPrepareDataQuery,
DbDriverState_,
@@ -2230,7 +2230,7 @@ public:
request.set_yql_text(query);
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::ExecuteSchemeQueryRequest, Ydb::Table::ExecuteSchemeQueryResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncExecuteSchemeQuery,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_);
@@ -2260,7 +2260,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::BeginTransactionRequest, Ydb::Table::BeginTransactionResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncBeginTransaction,
DbDriverState_,
@@ -2299,7 +2299,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::CommitTransactionRequest, Ydb::Table::CommitTransactionResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncCommitTransaction,
DbDriverState_,
@@ -2319,7 +2319,7 @@ public:
request.set_tx_id(tx.GetId());
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::RollbackTransactionRequest, Ydb::Table::RollbackTransactionResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncRollbackTransaction,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_,
@@ -2351,7 +2351,7 @@ public:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::ExplainDataQueryRequest, Ydb::Table::ExplainDataQueryResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncExplainDataQuery,
DbDriverState_,
@@ -2411,7 +2411,7 @@ public:
auto promise = NewPromise<std::pair<TPlainStatus, TReadTableStreamProcessorPtr>>();
Connections_->StartReadStream<Ydb::Table::V1::TableService, Ydb::Table::ReadTableRequest, Ydb::Table::ReadTableResponse>(
- std::move(request),
+ std::move(request),
[promise] (TPlainStatus status, TReadTableStreamProcessorPtr processor) mutable {
promise.SetValue(std::make_pair(status, processor));
},
@@ -2427,7 +2427,7 @@ public:
auto request = MakeOperationRequest<Ydb::Table::DeleteSessionRequest>(settings);
request.set_session_id(sessionImpl->GetId());
return RunSimple<Ydb::Table::V1::TableService, Ydb::Table::DeleteSessionRequest, Ydb::Table::DeleteSessionResponse>(
- std::move(request),
+ std::move(request),
&Ydb::Table::V1::TableService::Stub::AsyncDeleteSession,
TRpcRequestSettings::Make(settings),
settings.ClientTimeout_,
@@ -2522,35 +2522,35 @@ public:
RequestMigrated.Set(collector.RequestMigrated);
}
- TAsyncBulkUpsertResult BulkUpsert(const TString& table, TValue&& rows, const TBulkUpsertSettings& settings) {
- auto request = MakeOperationRequest<Ydb::Table::BulkUpsertRequest>(settings);
- request.set_table(table);
- *request.mutable_rows()->mutable_type() = TProtoAccessor::GetProto(rows.GetType());
-
- // TODO: move protobuf instead of copying it!!!!111
- *request.mutable_rows()->mutable_value() = TProtoAccessor::GetProto(rows);
-
- auto promise = NewPromise<TBulkUpsertResult>();
-
+ TAsyncBulkUpsertResult BulkUpsert(const TString& table, TValue&& rows, const TBulkUpsertSettings& settings) {
+ auto request = MakeOperationRequest<Ydb::Table::BulkUpsertRequest>(settings);
+ request.set_table(table);
+ *request.mutable_rows()->mutable_type() = TProtoAccessor::GetProto(rows.GetType());
+
+ // TODO: move protobuf instead of copying it!!!!111
+ *request.mutable_rows()->mutable_value() = TProtoAccessor::GetProto(rows);
+
+ auto promise = NewPromise<TBulkUpsertResult>();
+
auto extractor = [promise]
(google::protobuf::Any* any, TPlainStatus status) mutable {
- Y_UNUSED(any);
+ Y_UNUSED(any);
TBulkUpsertResult val(TStatus(std::move(status)));
- promise.SetValue(std::move(val));
- };
-
- Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::BulkUpsertRequest, Ydb::Table::BulkUpsertResponse>(
- std::move(request),
- extractor,
- &Ydb::Table::V1::TableService::Stub::AsyncBulkUpsert,
- DbDriverState_,
+ promise.SetValue(std::move(val));
+ };
+
+ Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::BulkUpsertRequest, Ydb::Table::BulkUpsertResponse>(
+ std::move(request),
+ extractor,
+ &Ydb::Table::V1::TableService::Stub::AsyncBulkUpsert,
+ DbDriverState_,
INITIAL_DEFERRED_CALL_DELAY,
TRpcRequestSettings::Make(settings),
- settings.ClientTimeout_);
-
- return promise.GetFuture();
- }
-
+ settings.ClientTimeout_);
+
+ return promise.GetFuture();
+ }
+
TAsyncBulkUpsertResult BulkUpsert(const TString& table, EDataFormat format,
const TString& data, const TString& schema, const TBulkUpsertSettings& settings)
{
@@ -2727,7 +2727,7 @@ private:
}
request.set_collect_stats(GetStatsCollectionMode(settings.CollectQueryStats_));
-
+
SetQuery(query, request.mutable_query());
CollectQuerySize(query, QuerySizeHistogram);
@@ -2753,7 +2753,7 @@ private:
TVector<TResultSet> res;
TMaybe<TTransaction> tx;
TMaybe<TDataQuery> dataQuery;
- TMaybe<TQueryStats> queryStats;
+ TMaybe<TQueryStats> queryStats;
auto queryText = GetQueryText(query);
if (any) {
@@ -2774,10 +2774,10 @@ private:
dataQuery = TDataQuery(*sessionPtr, *queryText, query_meta.id(), query_meta.parameters_types());
}
}
-
- if (result.has_query_stats()) {
- queryStats = TQueryStats(result.query_stats());
- }
+
+ if (result.has_query_stats()) {
+ queryStats = TQueryStats(result.query_stats());
+ }
}
if (keepInCache && dataQuery && queryText) {
@@ -2785,7 +2785,7 @@ private:
}
TDataQueryResult dataQueryResult(TStatus(std::move(status)),
- std::move(res), tx, dataQuery, fromCache, queryStats);
+ std::move(res), tx, dataQuery, fromCache, queryStats);
delete sessionPtr;
tx.Clear();
@@ -2794,7 +2794,7 @@ private:
};
Connections_->RunDeferred<Ydb::Table::V1::TableService, Ydb::Table::ExecuteDataQueryRequest, Ydb::Table::ExecuteDataQueryResponse>(
- std::move(request),
+ std::move(request),
extractor,
&Ydb::Table::V1::TableService::Stub::AsyncExecuteDataQuery,
DbDriverState_,
@@ -3240,124 +3240,124 @@ static void Backoff(const TBackoffSettings& settings, ui32 retryNumber) {
Sleep(TDuration::MilliSeconds(durationMs));
}
-class TRetryOperationContext : public TThrRefBase, TNonCopyable {
-public:
- using TRetryContextPtr = TIntrusivePtr<TRetryOperationContext>;
-
-protected:
- TRetryOperationSettings Settings;
- TTableClient TableClient;
- NThreading::TPromise<TStatus> Promise;
- ui32 RetryNumber;
-
-public:
- virtual void Execute() = 0;
-
- TAsyncStatus GetFuture() {
- return Promise.GetFuture();
- }
-
-protected:
- TRetryOperationContext(const TRetryOperationSettings& settings,
- const TTableClient& tableClient)
- : Settings(settings)
- , TableClient(tableClient)
- , Promise(NThreading::NewPromise<TStatus>())
- , RetryNumber(0)
- {}
-
- static void RunOp(TRetryContextPtr self) {
- self->Execute();
- }
-
- virtual void Reset() {}
-
- static void DoRetry(TRetryContextPtr self, bool fast) {
- self->TableClient.Impl_->AsyncBackoff(
- fast ? self->Settings.FastBackoffSettings_ : self->Settings.SlowBackoffSettings_,
- self->RetryNumber,
- [self]() {
- RunOp(self);
- }
- );
- }
-
- static void HandleStatus(TRetryContextPtr self, const TStatus& status) {
- if (status.IsSuccess()) {
- return self->Promise.SetValue(status);
- }
-
- if (self->RetryNumber >= self->Settings.MaxRetries_) {
- return self->Promise.SetValue(status);
- }
-
- self->RetryNumber++;
- self->TableClient.Impl_->RetryOperationStatCollector.IncAsyncRetryOperation(status.GetStatus());
-
- switch (status.GetStatus()) {
- case EStatus::ABORTED:
- return RunOp(self);
-
- case EStatus::OVERLOADED:
- case EStatus::CLIENT_RESOURCE_EXHAUSTED:
- return DoRetry(self, false);
-
- case EStatus::UNAVAILABLE:
- return DoRetry(self, true);
-
- case EStatus::BAD_SESSION:
- case EStatus::SESSION_BUSY:
- self->Reset();
- return RunOp(self);
-
- case EStatus::NOT_FOUND:
- return self->Settings.RetryNotFound_
- ? RunOp(self)
- : self->Promise.SetValue(status);
-
- case EStatus::UNDETERMINED:
- return self->Settings.Idempotent_
- ? DoRetry(self, true)
- : self->Promise.SetValue(status);
-
- case EStatus::TRANSPORT_UNAVAILABLE:
- if (self->Settings.Idempotent_) {
- self->Reset();
- return DoRetry(self, true);
- } else {
- return self->Promise.SetValue(status);
- }
-
- default:
- return self->Promise.SetValue(status);
+class TRetryOperationContext : public TThrRefBase, TNonCopyable {
+public:
+ using TRetryContextPtr = TIntrusivePtr<TRetryOperationContext>;
+
+protected:
+ TRetryOperationSettings Settings;
+ TTableClient TableClient;
+ NThreading::TPromise<TStatus> Promise;
+ ui32 RetryNumber;
+
+public:
+ virtual void Execute() = 0;
+
+ TAsyncStatus GetFuture() {
+ return Promise.GetFuture();
+ }
+
+protected:
+ TRetryOperationContext(const TRetryOperationSettings& settings,
+ const TTableClient& tableClient)
+ : Settings(settings)
+ , TableClient(tableClient)
+ , Promise(NThreading::NewPromise<TStatus>())
+ , RetryNumber(0)
+ {}
+
+ static void RunOp(TRetryContextPtr self) {
+ self->Execute();
+ }
+
+ virtual void Reset() {}
+
+ static void DoRetry(TRetryContextPtr self, bool fast) {
+ self->TableClient.Impl_->AsyncBackoff(
+ fast ? self->Settings.FastBackoffSettings_ : self->Settings.SlowBackoffSettings_,
+ self->RetryNumber,
+ [self]() {
+ RunOp(self);
+ }
+ );
+ }
+
+ static void HandleStatus(TRetryContextPtr self, const TStatus& status) {
+ if (status.IsSuccess()) {
+ return self->Promise.SetValue(status);
+ }
+
+ if (self->RetryNumber >= self->Settings.MaxRetries_) {
+ return self->Promise.SetValue(status);
}
- }
+
+ self->RetryNumber++;
+ self->TableClient.Impl_->RetryOperationStatCollector.IncAsyncRetryOperation(status.GetStatus());
+
+ switch (status.GetStatus()) {
+ case EStatus::ABORTED:
+ return RunOp(self);
+
+ case EStatus::OVERLOADED:
+ case EStatus::CLIENT_RESOURCE_EXHAUSTED:
+ return DoRetry(self, false);
+
+ case EStatus::UNAVAILABLE:
+ return DoRetry(self, true);
+
+ case EStatus::BAD_SESSION:
+ case EStatus::SESSION_BUSY:
+ self->Reset();
+ return RunOp(self);
+
+ case EStatus::NOT_FOUND:
+ return self->Settings.RetryNotFound_
+ ? RunOp(self)
+ : self->Promise.SetValue(status);
+
+ case EStatus::UNDETERMINED:
+ return self->Settings.Idempotent_
+ ? DoRetry(self, true)
+ : self->Promise.SetValue(status);
+
+ case EStatus::TRANSPORT_UNAVAILABLE:
+ if (self->Settings.Idempotent_) {
+ self->Reset();
+ return DoRetry(self, true);
+ } else {
+ return self->Promise.SetValue(status);
+ }
+
+ default:
+ return self->Promise.SetValue(status);
+ }
+ }
static void HandleException(TRetryContextPtr self, std::exception_ptr e) {
self->Promise.SetException(e);
}
-};
-
-class TRetryOperationWithSession : public TRetryOperationContext {
- using TFunc = TTableClient::TOperationFunc;
-
- TFunc Operation;
- TMaybe<TSession> Session;
-
-public:
- explicit TRetryOperationWithSession(TFunc&& operation,
- const TRetryOperationSettings& settings,
- const TTableClient& tableClient)
- : TRetryOperationContext(settings, tableClient)
- , Operation(operation)
- {}
-
- void Execute() override {
- TRetryContextPtr self(this);
- if (!Session) {
- TableClient.GetSession(
- TCreateSessionSettings().ClientTimeout(Settings.GetSessionClientTimeout_)).Subscribe(
- [self](const TAsyncCreateSessionResult& resultFuture) {
+};
+
+class TRetryOperationWithSession : public TRetryOperationContext {
+ using TFunc = TTableClient::TOperationFunc;
+
+ TFunc Operation;
+ TMaybe<TSession> Session;
+
+public:
+ explicit TRetryOperationWithSession(TFunc&& operation,
+ const TRetryOperationSettings& settings,
+ const TTableClient& tableClient)
+ : TRetryOperationContext(settings, tableClient)
+ , Operation(operation)
+ {}
+
+ void Execute() override {
+ TRetryContextPtr self(this);
+ if (!Session) {
+ TableClient.GetSession(
+ TCreateSessionSettings().ClientTimeout(Settings.GetSessionClientTimeout_)).Subscribe(
+ [self](const TAsyncCreateSessionResult& resultFuture) {
try {
auto& result = resultFuture.GetValue();
if (!result.IsSuccess()) {
@@ -3369,75 +3369,75 @@ public:
myself->DoRunOp(self);
} catch (...) {
return HandleException(self, std::current_exception());
- }
- });
- } else {
- DoRunOp(self);
- }
- }
-
-private:
- void Reset() override {
- Session.Clear();
- }
-
- void DoRunOp(TRetryContextPtr self) {
- Operation(Session.GetRef()).Subscribe([self](const TAsyncStatus& result) {
+ }
+ });
+ } else {
+ DoRunOp(self);
+ }
+ }
+
+private:
+ void Reset() override {
+ Session.Clear();
+ }
+
+ void DoRunOp(TRetryContextPtr self) {
+ Operation(Session.GetRef()).Subscribe([self](const TAsyncStatus& result) {
try {
return HandleStatus(self, result.GetValue());
} catch (...) {
return HandleException(self, std::current_exception());
}
- });
- }
-};
-
-TAsyncStatus TTableClient::RetryOperation(TOperationFunc&& operation, const TRetryOperationSettings& settings) {
- TRetryOperationContext::TRetryContextPtr ctx(new TRetryOperationWithSession(std::move(operation), settings, *this));
- ctx->Execute();
- return ctx->GetFuture();
-}
-
-class TRetryOperationWithoutSession : public TRetryOperationContext {
- using TFunc = TTableClient::TOperationWithoutSessionFunc;
-
- TFunc Operation;
-
-public:
- explicit TRetryOperationWithoutSession(TFunc&& operation,
- const TRetryOperationSettings& settings,
- const TTableClient& tableClient)
- : TRetryOperationContext(settings, tableClient)
- , Operation(operation)
- {}
-
- void Execute() override {
- TRetryContextPtr self(this);
- Operation(TableClient).Subscribe([self](const TAsyncStatus& result) {
+ });
+ }
+};
+
+TAsyncStatus TTableClient::RetryOperation(TOperationFunc&& operation, const TRetryOperationSettings& settings) {
+ TRetryOperationContext::TRetryContextPtr ctx(new TRetryOperationWithSession(std::move(operation), settings, *this));
+ ctx->Execute();
+ return ctx->GetFuture();
+}
+
+class TRetryOperationWithoutSession : public TRetryOperationContext {
+ using TFunc = TTableClient::TOperationWithoutSessionFunc;
+
+ TFunc Operation;
+
+public:
+ explicit TRetryOperationWithoutSession(TFunc&& operation,
+ const TRetryOperationSettings& settings,
+ const TTableClient& tableClient)
+ : TRetryOperationContext(settings, tableClient)
+ , Operation(operation)
+ {}
+
+ void Execute() override {
+ TRetryContextPtr self(this);
+ Operation(TableClient).Subscribe([self](const TAsyncStatus& result) {
try {
return HandleStatus(self, result.GetValue());
} catch (...) {
return HandleException(self, std::current_exception());
}
- });
- }
-};
-
-TAsyncStatus TTableClient::RetryOperation(TOperationWithoutSessionFunc&& operation, const TRetryOperationSettings& settings) {
- TRetryOperationContext::TRetryContextPtr ctx(new TRetryOperationWithoutSession(std::move(operation), settings, *this));
- ctx->Execute();
- return ctx->GetFuture();
-}
-
-TStatus TTableClient::RetryOperationSyncHelper(const TOperationWrapperSyncFunc& operationWrapper, const TRetryOperationSettings& settings) {
+ });
+ }
+};
+
+TAsyncStatus TTableClient::RetryOperation(TOperationWithoutSessionFunc&& operation, const TRetryOperationSettings& settings) {
+ TRetryOperationContext::TRetryContextPtr ctx(new TRetryOperationWithoutSession(std::move(operation), settings, *this));
+ ctx->Execute();
+ return ctx->GetFuture();
+}
+
+TStatus TTableClient::RetryOperationSyncHelper(const TOperationWrapperSyncFunc& operationWrapper, const TRetryOperationSettings& settings) {
TRetryState retryState;
TMaybe<NYdb::TStatus> status;
for (ui32 retryNumber = 0; retryNumber <= settings.MaxRetries_; ++retryNumber) {
- status = operationWrapper(retryState);
+ status = operationWrapper(retryState);
- if (status->IsSuccess()) {
- return *status;
+ if (status->IsSuccess()) {
+ return *status;
}
if (retryNumber == settings.MaxRetries_) {
@@ -3494,42 +3494,42 @@ TStatus TTableClient::RetryOperationSyncHelper(const TOperationWrapperSyncFunc&
return *status;
}
-TStatus TTableClient::RetryOperationSync(const TOperationWithoutSessionSyncFunc& operation, const TRetryOperationSettings& settings) {
- auto operationWrapper = [this, &operation] (TRetryState&) {
- return operation(*this);
- };
-
- return RetryOperationSyncHelper(operationWrapper, settings);
-}
-
-TStatus TTableClient::RetryOperationSync(const TOperationSyncFunc& operation, const TRetryOperationSettings& settings) {
- TRetryState retryState;
-
- auto operationWrapper = [this, &operation, &settings] (TRetryState& retryState) {
- TMaybe<NYdb::TStatus> status;
-
- if (!retryState.Session) {
- auto sessionResult = Impl_->GetSession(
- TCreateSessionSettings().ClientTimeout(settings.GetSessionClientTimeout_)).GetValueSync();
- if (sessionResult.IsSuccess()) {
- retryState.Session = sessionResult.GetSession();
- }
- status = sessionResult;
- }
-
- if (retryState.Session) {
- status = operation(retryState.Session.GetRef());
- if (status->IsSuccess()) {
- return *status;
- }
- }
-
- return *status;
- };
-
- return RetryOperationSyncHelper(operationWrapper, settings);
-}
-
+TStatus TTableClient::RetryOperationSync(const TOperationWithoutSessionSyncFunc& operation, const TRetryOperationSettings& settings) {
+ auto operationWrapper = [this, &operation] (TRetryState&) {
+ return operation(*this);
+ };
+
+ return RetryOperationSyncHelper(operationWrapper, settings);
+}
+
+TStatus TTableClient::RetryOperationSync(const TOperationSyncFunc& operation, const TRetryOperationSettings& settings) {
+ TRetryState retryState;
+
+ auto operationWrapper = [this, &operation, &settings] (TRetryState& retryState) {
+ TMaybe<NYdb::TStatus> status;
+
+ if (!retryState.Session) {
+ auto sessionResult = Impl_->GetSession(
+ TCreateSessionSettings().ClientTimeout(settings.GetSessionClientTimeout_)).GetValueSync();
+ if (sessionResult.IsSuccess()) {
+ retryState.Session = sessionResult.GetSession();
+ }
+ status = sessionResult;
+ }
+
+ if (retryState.Session) {
+ status = operation(retryState.Session.GetRef());
+ if (status->IsSuccess()) {
+ return *status;
+ }
+ }
+
+ return *status;
+ };
+
+ return RetryOperationSyncHelper(operationWrapper, settings);
+}
+
NThreading::TFuture<void> TTableClient::Stop() {
return Impl_->Stop();
}
@@ -3537,9 +3537,9 @@ NThreading::TFuture<void> TTableClient::Stop() {
TAsyncBulkUpsertResult TTableClient::BulkUpsert(const TString& table, TValue&& rows,
const TBulkUpsertSettings& settings)
{
- return Impl_->BulkUpsert(table, std::move(rows), settings);
-}
-
+ return Impl_->BulkUpsert(table, std::move(rows), settings);
+}
+
TAsyncBulkUpsertResult TTableClient::BulkUpsert(const TString& table, EDataFormat format,
const TString& data, const TString& schema, const TBulkUpsertSettings& settings)
{
@@ -4151,14 +4151,14 @@ TTableDescription TDescribeTableResult::GetTableDescription() const {
////////////////////////////////////////////////////////////////////////////////
TDataQueryResult::TDataQueryResult(TStatus&& status, TVector<TResultSet>&& resultSets,
- const TMaybe<TTransaction>& transaction, const TMaybe<TDataQuery>& dataQuery, bool fromCache, const TMaybe<TQueryStats> &queryStats)
+ const TMaybe<TTransaction>& transaction, const TMaybe<TDataQuery>& dataQuery, bool fromCache, const TMaybe<TQueryStats> &queryStats)
: TStatus(std::move(status))
, Transaction_(transaction)
, ResultSets_(std::move(resultSets))
, DataQuery_(dataQuery)
- , FromCache_(fromCache)
- , QueryStats_(queryStats)
-{}
+ , FromCache_(fromCache)
+ , QueryStats_(queryStats)
+{}
const TVector<TResultSet>& TDataQueryResult::GetResultSets() const {
return ResultSets_;
@@ -4188,10 +4188,10 @@ bool TDataQueryResult::IsQueryFromCache() const {
return FromCache_;
}
-const TMaybe<TQueryStats>& TDataQueryResult::GetStats() const {
- return QueryStats_;
-}
-
+const TMaybe<TQueryStats>& TDataQueryResult::GetStats() const {
+ return QueryStats_;
+}
+
const TString TDataQueryResult::GetQueryPlan() const {
if (QueryStats_.Defined()) {
return NYdb::TProtoAccessor::GetProto(*QueryStats_.Get()).query_plan();
@@ -4416,8 +4416,8 @@ bool operator!=(const TIndexDescription& lhs, const TIndexDescription& rhs) {
return !(lhs == rhs);
}
-////////////////////////////////////////////////////////////////////////////////
-
+////////////////////////////////////////////////////////////////////////////////
+
TDateTypeColumnModeSettings::TDateTypeColumnModeSettings(const TString& columnName, const TDuration& expireAfter)
: ColumnName_(columnName)
, ExpireAfter_(expireAfter)
@@ -4668,9 +4668,9 @@ ui64 TReadReplicasSettings::GetReadReplicasCount() const {
////////////////////////////////////////////////////////////////////////////////
-TBulkUpsertResult::TBulkUpsertResult(TStatus&& status)
- : TStatus(std::move(status))
-{}
-
+TBulkUpsertResult::TBulkUpsertResult(TStatus&& status)
+ : TStatus(std::move(status))
+{}
+
} // namespace NTable
} // namespace NYdb
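The retry context classes restored above back TTableClient::RetryOperation and RetryOperationSync. A typical caller looks roughly like the sketch below; the `client` variable and the fluent MaxRetries/Idempotent setters on TRetryOperationSettings are assumptions, the rest mirrors the signatures in this file.

    // Sketch: run a session-bound operation with automatic retries.
    using namespace NYdb;
    using namespace NYdb::NTable;

    TAsyncStatus asyncStatus = client.RetryOperation(
        [](TSession session) {
            // The returned status is routed through TRetryOperationContext::HandleStatus():
            // ABORTED reruns at once, OVERLOADED backs off slowly, UNAVAILABLE quickly,
            // BAD_SESSION / SESSION_BUSY drop the session and rerun, anything else is final.
            return session.ExecuteSchemeQuery(
                "CREATE TABLE [/Root/Example] (Key Uint64, Value Utf8, PRIMARY KEY (Key));");
        },
        TRetryOperationSettings().MaxRetries(5).Idempotent(true));   // setter names assumed

    TStatus status = asyncStatus.GetValueSync();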
diff --git a/ydb/public/sdk/cpp/client/ydb_table/table.h b/ydb/public/sdk/cpp/client/ydb_table/table.h
index 03387aa728d..42d21d2fec7 100644
--- a/ydb/public/sdk/cpp/client/ydb_table/table.h
+++ b/ydb/public/sdk/cpp/client/ydb_table/table.h
@@ -351,7 +351,7 @@ public:
const Ydb::Table::PartitioningSettings& GetProto() const;
TMaybe<bool> GetPartitioningBySize() const;
- TMaybe<bool> GetPartitioningByLoad() const;
+ TMaybe<bool> GetPartitioningByLoad() const;
ui64 GetPartitionSizeMb() const;
ui64 GetMinPartitionsCount() const;
ui64 GetMaxPartitionsCount() const;
@@ -510,7 +510,7 @@ public:
~TPartitioningSettingsBuilder();
TPartitioningSettingsBuilder& SetPartitioningBySize(bool enabled);
- TPartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled);
+ TPartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled);
TPartitioningSettingsBuilder& SetPartitionSizeMb(ui64 sizeMb);
TPartitioningSettingsBuilder& SetMinPartitionsCount(ui64 count);
TPartitioningSettingsBuilder& SetMaxPartitionsCount(ui64 count);
@@ -611,11 +611,11 @@ public:
return *this;
}
- TTablePartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled) {
- Builder_.SetPartitioningByLoad(enabled);
- return *this;
- }
-
+ TTablePartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled) {
+ Builder_.SetPartitioningByLoad(enabled);
+ return *this;
+ }
+
TTablePartitioningSettingsBuilder& SetPartitionSizeMb(ui64 sizeMb) {
Builder_.SetPartitionSizeMb(sizeMb);
return *this;
@@ -774,7 +774,7 @@ class TBeginTransactionResult;
class TCommitTransactionResult;
class TKeepAliveResult;
class TSessionPoolImpl;
-class TBulkUpsertResult;
+class TBulkUpsertResult;
class TScanQueryPartIterator;
using TAsyncCreateSessionResult = NThreading::TFuture<TCreateSessionResult>;
@@ -786,7 +786,7 @@ using TAsyncBeginTransactionResult = NThreading::TFuture<TBeginTransactionResult
using TAsyncCommitTransactionResult = NThreading::TFuture<TCommitTransactionResult>;
using TAsyncTablePartIterator = NThreading::TFuture<TTablePartIterator>;
using TAsyncKeepAliveResult = NThreading::TFuture<TKeepAliveResult>;
-using TAsyncBulkUpsertResult = NThreading::TFuture<TBulkUpsertResult>;
+using TAsyncBulkUpsertResult = NThreading::TFuture<TBulkUpsertResult>;
using TAsyncScanQueryPartIterator = NThreading::TFuture<TScanQueryPartIterator>;
////////////////////////////////////////////////////////////////////////////////
@@ -890,7 +890,7 @@ struct TBulkUpsertSettings : public TOperationRequestSettings<TBulkUpsertSetting
// I.e. it's Ydb.Table.CsvSettings for CSV.
FLUENT_SETTING_DEFAULT(TString, FormatSettings, "");
};
-
+
struct TStreamExecScanQuerySettings : public TRequestSettings<TStreamExecScanQuerySettings> {
// Return query plan without actual query execution
FLUENT_SETTING_DEFAULT(bool, Explain, false);
@@ -900,7 +900,7 @@ struct TStreamExecScanQuerySettings : public TRequestSettings<TStreamExecScanQue
};
class TSession;
-struct TRetryState;
+struct TRetryState;
enum class EDataFormat {
ApacheArrow = 1,
@@ -911,13 +911,13 @@ class TTableClient {
friend class TSession;
friend class TTransaction;
friend class TSessionPoolImpl;
- friend class TRetryOperationContext;
+ friend class TRetryOperationContext;
public:
using TOperationFunc = std::function<TAsyncStatus(TSession session)>;
using TOperationSyncFunc = std::function<TStatus(TSession session)>;
- using TOperationWithoutSessionFunc = std::function<TAsyncStatus(TTableClient& tableClient)>;
- using TOperationWithoutSessionSyncFunc = std::function<TStatus(TTableClient& tableClient)>;
+ using TOperationWithoutSessionFunc = std::function<TAsyncStatus(TTableClient& tableClient)>;
+ using TOperationWithoutSessionSyncFunc = std::function<TStatus(TTableClient& tableClient)>;
public:
TTableClient(const TDriver& driver, const TClientSettings& settings = TClientSettings());
@@ -945,49 +945,49 @@ public:
//! Returns new type builder
TTypeBuilder GetTypeBuilder();
- TAsyncStatus RetryOperation(TOperationFunc&& operation,
+ TAsyncStatus RetryOperation(TOperationFunc&& operation,
+ const TRetryOperationSettings& settings = TRetryOperationSettings());
+
+ template<typename TResult>
+ TAsyncStatus RetryOperation(std::function<NThreading::TFuture<TResult>(TSession session)>&& operation,
const TRetryOperationSettings& settings = TRetryOperationSettings());
template<typename TResult>
- TAsyncStatus RetryOperation(std::function<NThreading::TFuture<TResult>(TSession session)>&& operation,
- const TRetryOperationSettings& settings = TRetryOperationSettings());
-
- template<typename TResult>
TAsyncStatus RetryOperation(const std::function<NThreading::TFuture<TResult>(TSession session)>& operation,
const TRetryOperationSettings& settings = TRetryOperationSettings());
TStatus RetryOperationSync(const TOperationSyncFunc& operation,
const TRetryOperationSettings& settings = TRetryOperationSettings());
- TAsyncStatus RetryOperation(TOperationWithoutSessionFunc&& operation,
- const TRetryOperationSettings& settings = TRetryOperationSettings());
-
- template<typename TResult>
- TAsyncStatus RetryOperation(std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>&& operation,
- const TRetryOperationSettings& settings = TRetryOperationSettings());
-
- template<typename TResult>
- TAsyncStatus RetryOperation(const std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>& operation,
- const TRetryOperationSettings& settings = TRetryOperationSettings());
-
- TStatus RetryOperationSync(const TOperationWithoutSessionSyncFunc& operation,
- const TRetryOperationSettings& settings = TRetryOperationSettings());
-
+ TAsyncStatus RetryOperation(TOperationWithoutSessionFunc&& operation,
+ const TRetryOperationSettings& settings = TRetryOperationSettings());
+
+ template<typename TResult>
+ TAsyncStatus RetryOperation(std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>&& operation,
+ const TRetryOperationSettings& settings = TRetryOperationSettings());
+
+ template<typename TResult>
+ TAsyncStatus RetryOperation(const std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>& operation,
+ const TRetryOperationSettings& settings = TRetryOperationSettings());
+
+ TStatus RetryOperationSync(const TOperationWithoutSessionSyncFunc& operation,
+ const TRetryOperationSettings& settings = TRetryOperationSettings());
+
//! Stop all client internal routines, drain session pools
//! Sessions returned to the session pool after this call will be closed
//! Using the client after call this method causes UB
NThreading::TFuture<void> Stop();
- //! Non-transactional fast bulk write.
- //! Interanlly it uses an implicit session and thus doesn't need a session to be passed.
- //! "rows" parameter must be a list of structs where each stuct represents one row.
- //! It must contain all key columns but not necessarily all non-key columns.
- //! Similar to UPSERT statement only values of specified columns will be updated.
- TAsyncBulkUpsertResult BulkUpsert(const TString& table, TValue&& rows,
- const TBulkUpsertSettings& settings = TBulkUpsertSettings());
+ //! Non-transactional fast bulk write.
+ //! Internally it uses an implicit session and thus doesn't need a session to be passed.
+ //! "rows" parameter must be a list of structs where each struct represents one row.
+ //! It must contain all key columns but not necessarily all non-key columns.
+ //! Similar to UPSERT statement only values of specified columns will be updated.
+ TAsyncBulkUpsertResult BulkUpsert(const TString& table, TValue&& rows,
+ const TBulkUpsertSettings& settings = TBulkUpsertSettings());
TAsyncBulkUpsertResult BulkUpsert(const TString& table, EDataFormat format,
const TString& data, const TString& schema = {}, const TBulkUpsertSettings& settings = TBulkUpsertSettings());
-
+
TAsyncScanQueryPartIterator StreamExecuteScanQuery(const TString& query,
const TStreamExecScanQuerySettings& settings = TStreamExecScanQuerySettings());
@@ -995,10 +995,10 @@ public:
const TStreamExecScanQuerySettings& settings = TStreamExecScanQuerySettings());
private:
- using TOperationWrapperSyncFunc = std::function<TStatus(TRetryState& retryState)>;
- TStatus RetryOperationSyncHelper(const TOperationWrapperSyncFunc& operationWrapper, const TRetryOperationSettings& settings);
-
-private:
+ using TOperationWrapperSyncFunc = std::function<TStatus(TRetryState& retryState)>;
+ TStatus RetryOperationSyncHelper(const TOperationWrapperSyncFunc& operationWrapper, const TRetryOperationSettings& settings);
+
+private:
class TImpl;
std::shared_ptr<TImpl> Impl_;
};
@@ -1304,11 +1304,11 @@ public:
return *this;
}
- TAlterPartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled) {
- Builder_.SetPartitioningByLoad(enabled);
- return *this;
- }
-
+ TAlterPartitioningSettingsBuilder& SetPartitioningByLoad(bool enabled) {
+ Builder_.SetPartitioningByLoad(enabled);
+ return *this;
+ }
+
TAlterPartitioningSettingsBuilder& SetPartitionSizeMb(ui64 sizeMb) {
Builder_.SetPartitionSizeMb(sizeMb);
return *this;
@@ -1444,8 +1444,8 @@ struct TPrepareDataQuerySettings : public TOperationRequestSettings<TPrepareData
struct TExecDataQuerySettings : public TOperationRequestSettings<TExecDataQuerySettings> {
FLUENT_SETTING_OPTIONAL(bool, KeepInQueryCache);
-
- FLUENT_SETTING_OPTIONAL(ECollectQueryStatsMode, CollectQueryStats);
+
+ FLUENT_SETTING_OPTIONAL(ECollectQueryStatsMode, CollectQueryStats);
};
struct TExecSchemeQuerySettings : public TOperationRequestSettings<TExecSchemeQuerySettings> {};
@@ -1564,18 +1564,18 @@ private:
template<typename TResult>
TAsyncStatus TTableClient::RetryOperation(
- std::function<NThreading::TFuture<TResult>(TSession session)>&& operation,
- const TRetryOperationSettings& settings)
-{
- return RetryOperation([operation = std::move(operation)] (TSession session) {
- return operation(session).Apply([] (const NThreading::TFuture<TResult>& result) {
- return NThreading::MakeFuture<TStatus>(result.GetValue());
- });
- }, settings);
-}
-
-template<typename TResult>
-TAsyncStatus TTableClient::RetryOperation(
+ std::function<NThreading::TFuture<TResult>(TSession session)>&& operation,
+ const TRetryOperationSettings& settings)
+{
+ return RetryOperation([operation = std::move(operation)] (TSession session) {
+ return operation(session).Apply([] (const NThreading::TFuture<TResult>& result) {
+ return NThreading::MakeFuture<TStatus>(result.GetValue());
+ });
+ }, settings);
+}
+
+template<typename TResult>
+TAsyncStatus TTableClient::RetryOperation(
const std::function<NThreading::TFuture<TResult>(TSession session)>& operation,
const TRetryOperationSettings& settings)
{
@@ -1586,30 +1586,30 @@ TAsyncStatus TTableClient::RetryOperation(
}, settings);
}
-template<typename TResult>
-TAsyncStatus TTableClient::RetryOperation(
- std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>&& operation,
- const TRetryOperationSettings& settings)
-{
- return RetryOperation([operation = std::move(operation)] (TTableClient& tableClient) {
- return operation(tableClient).Apply([] (const NThreading::TFuture<TResult>& result) {
- return NThreading::MakeFuture<TStatus>(result.GetValue());
- });
- }, settings);
-}
-
-template<typename TResult>
-TAsyncStatus TTableClient::RetryOperation(
- const std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>& operation,
- const TRetryOperationSettings& settings)
-{
- return RetryOperation([operation] (TTableClient& tableClient) {
- return operation(tableClient).Apply([] (const NThreading::TFuture<TResult>& result) {
- return NThreading::MakeFuture<TStatus>(result.GetValue());
- });
- }, settings);
-}
-
+template<typename TResult>
+TAsyncStatus TTableClient::RetryOperation(
+ std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>&& operation,
+ const TRetryOperationSettings& settings)
+{
+ return RetryOperation([operation = std::move(operation)] (TTableClient& tableClient) {
+ return operation(tableClient).Apply([] (const NThreading::TFuture<TResult>& result) {
+ return NThreading::MakeFuture<TStatus>(result.GetValue());
+ });
+ }, settings);
+}
+
+template<typename TResult>
+TAsyncStatus TTableClient::RetryOperation(
+ const std::function<NThreading::TFuture<TResult>(TTableClient& tableClient)>& operation,
+ const TRetryOperationSettings& settings)
+{
+ return RetryOperation([operation] (TTableClient& tableClient) {
+ return operation(tableClient).Apply([] (const NThreading::TFuture<TResult>& result) {
+ return NThreading::MakeFuture<TStatus>(result.GetValue());
+ });
+ }, settings);
+}
+
////////////////////////////////////////////////////////////////////////////////
//! Represents data transaction
@@ -1714,7 +1714,7 @@ private:
class TDataQueryResult : public TStatus {
public:
TDataQueryResult(TStatus&& status, TVector<TResultSet>&& resultSets, const TMaybe<TTransaction>& transaction,
- const TMaybe<TDataQuery>& dataQuery, bool fromCache, const TMaybe<TQueryStats>& queryStats);
+ const TMaybe<TDataQuery>& dataQuery, bool fromCache, const TMaybe<TQueryStats>& queryStats);
const TVector<TResultSet>& GetResultSets() const;
TResultSet GetResultSet(size_t resultIndex) const;
@@ -1726,8 +1726,8 @@ public:
TMaybe<TDataQuery> GetQuery() const;
bool IsQueryFromCache() const;
- const TMaybe<TQueryStats>& GetStats() const;
-
+ const TMaybe<TQueryStats>& GetStats() const;
+
const TString GetQueryPlan() const;
private:
@@ -1735,7 +1735,7 @@ private:
TVector<TResultSet> ResultSets_;
TMaybe<TDataQuery> DataQuery_;
bool FromCache_;
- TMaybe<TQueryStats> QueryStats_;
+ TMaybe<TQueryStats> QueryStats_;
};
template<typename TPart>
@@ -1861,11 +1861,11 @@ private:
ESessionStatus SessionStatus;
};
-class TBulkUpsertResult : public TStatus {
-public:
- explicit TBulkUpsertResult(TStatus&& status);
-};
-
+class TBulkUpsertResult : public TStatus {
+public:
+ explicit TBulkUpsertResult(TStatus&& status);
+};
+
} // namespace NTable
} // namespace NYdb
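The templated RetryOperation overloads restored at the end of this header adapt a result-typed operation to TAsyncStatus via Apply(). With the same assumed `client` as in the earlier sketch:

    // Sketch: the operation yields TAsyncDataQueryResult; only its status drives the retries.
    auto status = client.RetryOperation<NYdb::NTable::TDataQueryResult>(
        [](NYdb::NTable::TSession session) {
            return session.ExecuteDataQuery(
                "SELECT 1;", NYdb::NTable::TTxControl::BeginTx().CommitTx());
        }).GetValueSync();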
diff --git a/ydb/public/sdk/cpp/client/ydb_value/value.cpp b/ydb/public/sdk/cpp/client/ydb_value/value.cpp
index 6fc0db4c20d..8c18d4d50f8 100644
--- a/ydb/public/sdk/cpp/client/ydb_value/value.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_value/value.cpp
@@ -2788,12 +2788,12 @@ TDerived& TValueBuilderBase<TDerived>::EmptyOptional(const TType& itemType) {
}
template<typename TDerived>
-TDerived& TValueBuilderBase<TDerived>::EmptyOptional(EPrimitiveType itemType) {
- Impl_->EmptyOptional(itemType);
- return static_cast<TDerived&>(*this);
-}
-
-template<typename TDerived>
+TDerived& TValueBuilderBase<TDerived>::EmptyOptional(EPrimitiveType itemType) {
+ Impl_->EmptyOptional(itemType);
+ return static_cast<TDerived&>(*this);
+}
+
+template<typename TDerived>
TDerived& TValueBuilderBase<TDerived>::EmptyOptional() {
Impl_->EmptyOptional();
return static_cast<TDerived&>(*this);
diff --git a/ydb/public/sdk/cpp/client/ydb_value/value.h b/ydb/public/sdk/cpp/client/ydb_value/value.h
index 1ccea4b239e..4a163b92206 100644
--- a/ydb/public/sdk/cpp/client/ydb_value/value.h
+++ b/ydb/public/sdk/cpp/client/ydb_value/value.h
@@ -392,7 +392,7 @@ public:
TDerived& BeginOptional();
TDerived& EndOptional();
TDerived& EmptyOptional(const TType& itemType);
- TDerived& EmptyOptional(EPrimitiveType itemType);
+ TDerived& EmptyOptional(EPrimitiveType itemType);
TDerived& EmptyOptional();
// List
diff --git a/ydb/public/sdk/cpp/client/ydb_value/value_ut.cpp b/ydb/public/sdk/cpp/client/ydb_value/value_ut.cpp
index 1a3d90e5a00..d7508a5f9b0 100644
--- a/ydb/public/sdk/cpp/client/ydb_value/value_ut.cpp
+++ b/ydb/public/sdk/cpp/client/ydb_value/value_ut.cpp
@@ -764,8 +764,8 @@ Y_UNIT_TEST_SUITE(YdbValue) {
.String("Anna")
.AddMember("Value")
.Int32(-100)
- .AddMember("Description")
- .EmptyOptional(EPrimitiveType::Utf8)
+ .AddMember("Description")
+ .EmptyOptional(EPrimitiveType::Utf8)
.EndStruct()
.AddListItem()
.BeginStruct()
@@ -774,14 +774,14 @@ Y_UNIT_TEST_SUITE(YdbValue) {
.AddMember("Value", TValueBuilder().Int32(-200).Build())
.AddMember("Id")
.Uint32(2)
- .AddMember("Description")
- .OptionalUtf8("Some details")
+ .AddMember("Description")
+ .OptionalUtf8("Some details")
.EndStruct()
.EndList()
.Build();
UNIT_ASSERT_NO_DIFF(FormatValueYson(value),
- R"([[1u;"Anna";-100;#];[2u;"Paul";-200;["Some details"]]])");
+ R"([[1u;"Anna";-100;#];[2u;"Paul";-200;["Some details"]]])");
UNIT_ASSERT_NO_DIFF(
FormatValueJson(value, EBinaryStringEncoding::Unicode),
R"([{"Id":1,"Name":"Anna","Value":-100,"Description":null},)"
diff --git a/ydb/services/cms/grpc_service.cpp b/ydb/services/cms/grpc_service.cpp
index c033d979338..33a7569982c 100644
--- a/ydb/services/cms/grpc_service.cpp
+++ b/ydb/services/cms/grpc_service.cpp
@@ -43,9 +43,9 @@ void TGRpcCmsService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
MakeIntrusive<TGRpcRequest<Ydb::Cms::IN, Ydb::Cms::OUT, TGRpcCmsService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::Cms::V1::CmsService::AsyncService::Request ## NAME, \
+ ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::Cms::V1::CmsService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("cms", #NAME))->Run();
ADD_REQUEST(CreateDatabase, CreateDatabaseRequest, CreateDatabaseResponse, {
diff --git a/ydb/services/discovery/grpc_service.cpp b/ydb/services/discovery/grpc_service.cpp
index 02d31b5bb51..010322b5d9b 100644
--- a/ydb/services/discovery/grpc_service.cpp
+++ b/ydb/services/discovery/grpc_service.cpp
@@ -51,8 +51,8 @@ TGRpcDiscoveryService::TGRpcDiscoveryService(NActors::TActorSystem *system,
MakeIntrusive<TGRpcRequest<Ydb::Discovery::IN, Ydb::Discovery::OUT, TGRpcDiscoveryService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *reqCtx) { \
NGRpcService::ReportGrpcReqToMon(*ActorSystem_, reqCtx->GetPeer(), GetSdkBuildInfo(reqCtx)); \
- ACTION; \
- }, &Ydb::Discovery::V1::DiscoveryService::AsyncService::Request ## NAME, \
+ ACTION; \
+ }, &Ydb::Discovery::V1::DiscoveryService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("discovery", #NAME))->Run();
ADD_REQUEST(ListEndpoints, ListEndpointsRequest, ListEndpointsResponse, {
diff --git a/ydb/services/kesus/grpc_service.cpp b/ydb/services/kesus/grpc_service.cpp
index fd157a0c633..c5826b66274 100644
--- a/ydb/services/kesus/grpc_service.cpp
+++ b/ydb/services/kesus/grpc_service.cpp
@@ -650,9 +650,9 @@ void TKesusGRpcService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
&Service_, \
CQ, \
[this](NGrpc::IRequestContextBase* reqCtx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem, reqCtx->GetPeer()); \
- ACTION; \
- }, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem, reqCtx->GetPeer()); \
+ ACTION; \
+ }, \
&Ydb::Coordination::V1::CoordinationService::AsyncService::Request ## NAME, \
"Coordination/" #NAME, \
logger, \
diff --git a/ydb/services/ydb/ut/json_udf.cpp b/ydb/services/ydb/ut/json_udf.cpp
index b3d29568170..704eb5c9e47 100644
--- a/ydb/services/ydb/ut/json_udf.cpp
+++ b/ydb/services/ydb/ut/json_udf.cpp
@@ -1,5 +1,5 @@
#include <ydb/library/yql/udfs/common/json2/json2_udf.cpp>
-
-NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateJson2Module() {
- return new NJson2Udf::TJson2Module();
-}
+
+NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateJson2Module() {
+ return new NJson2Udf::TJson2Module();
+}
diff --git a/ydb/services/ydb/ut/re2_udf.cpp b/ydb/services/ydb/ut/re2_udf.cpp
index 724e9a3d860..e7e4cecd3ad 100644
--- a/ydb/services/ydb/ut/re2_udf.cpp
+++ b/ydb/services/ydb/ut/re2_udf.cpp
@@ -1,7 +1,7 @@
-// HACK: the TRe2Module class is in an anonymous namespace
-// so including the source cpp is the only way to access it
+// HACK: the TRe2Module class is in an anonymous namespace
+// so including the source cpp is the only way to access it
#include <ydb/library/yql/udfs/common/re2/re2_udf.cpp>
-
-NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateRe2Module() {
- return new TRe2Module<true>();
-}
+
+NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateRe2Module() {
+ return new TRe2Module<true>();
+}
diff --git a/ydb/services/ydb/ut/udfs.h b/ydb/services/ydb/ut/udfs.h
index ef024e7208c..612ab37da10 100644
--- a/ydb/services/ydb/ut/udfs.h
+++ b/ydb/services/ydb/ut/udfs.h
@@ -1,5 +1,5 @@
-#pragma once
+#pragma once
#include <ydb/library/yql/public/udf/udf_registrator.h>
-
-NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateRe2Module();
-NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateJson2Module();
+
+NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateRe2Module();
+NYql::NUdf::TUniquePtr<NYql::NUdf::IUdfModule> CreateJson2Module();
diff --git a/ydb/services/ydb/ut/ya.make b/ydb/services/ydb/ut/ya.make
index e121193b884..b7209faa5f9 100644
--- a/ydb/services/ydb/ut/ya.make
+++ b/ydb/services/ydb/ut/ya.make
@@ -18,22 +18,22 @@ ELSE()
ENDIF()
SRCS(
- ydb_bulk_upsert_ut.cpp
+ ydb_bulk_upsert_ut.cpp
ydb_bulk_upsert_olap_ut.cpp
ydb_coordination_ut.cpp
- ydb_index_table_ut.cpp
+ ydb_index_table_ut.cpp
ydb_import_ut.cpp
ydb_ut.cpp
- ydb_s3_internal_ut.cpp
+ ydb_s3_internal_ut.cpp
ydb_scripting_ut.cpp
ydb_table_ut.cpp
- ydb_table_split_ut.cpp
+ ydb_table_split_ut.cpp
ydb_stats_ut.cpp
ydb_long_tx_ut.cpp
- ydb_logstore_ut.cpp
- ydb_olapstore_ut.cpp
- json_udf.cpp
- re2_udf.cpp
+ ydb_logstore_ut.cpp
+ ydb_olapstore_ut.cpp
+ json_udf.cpp
+ re2_udf.cpp
)
PEERDIR(
diff --git a/ydb/services/ydb/ya.make b/ydb/services/ydb/ya.make
index d8b6301441b..a2f41a3e75d 100644
--- a/ydb/services/ydb/ya.make
+++ b/ydb/services/ydb/ya.make
@@ -6,14 +6,14 @@ OWNER(
)
SRCS(
- ydb_clickhouse_internal.cpp
+ ydb_clickhouse_internal.cpp
ydb_dummy.cpp
- ydb_experimental.cpp
+ ydb_experimental.cpp
ydb_export.cpp
ydb_import.cpp
- ydb_logstore.cpp
+ ydb_logstore.cpp
ydb_operation.cpp
- ydb_s3_internal.cpp
+ ydb_s3_internal.cpp
ydb_scheme.cpp
ydb_scripting.cpp
ydb_table.cpp
diff --git a/ydb/services/ydb/ydb_bulk_upsert_ut.cpp b/ydb/services/ydb/ydb_bulk_upsert_ut.cpp
index d2897970010..0665008ee49 100644
--- a/ydb/services/ydb/ydb_bulk_upsert_ut.cpp
+++ b/ydb/services/ydb/ydb_bulk_upsert_ut.cpp
@@ -1,223 +1,223 @@
-#include "ydb_common_ut.h"
-
+#include "ydb_common_ut.h"
+
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <ydb/public/lib/yson_value/ydb_yson_value.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
-using namespace NYdb;
-
-Y_UNIT_TEST_SUITE(YdbTableBulkUpsert) {
-
- Y_UNIT_TEST(Simple) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Shard", EPrimitiveType::Uint64)
- .AddNullableColumn("App", EPrimitiveType::Utf8)
- .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
- .AddNullableColumn("HttpCode", EPrimitiveType::Uint32)
- .AddNullableColumn("Message", EPrimitiveType::Utf8)
- .AddNullableColumn("Ratio", EPrimitiveType::Double)
- .AddNullableColumn("Binary", EPrimitiveType::String)
- .AddNullableColumn("Empty", EPrimitiveType::Uint32);
- tableBuilder.SetPrimaryKeyColumns({"Shard", "App", "Timestamp"});
- NYdb::NTable::TCreateTableSettings tableSettings;
- tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
- auto result = session.CreateTable("/Root/Logs", tableBuilder.Build(), tableSettings).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- const size_t BATCH_COUNT = 1;//1000;
- const size_t BATCH_SIZE = 3;//100;
-
- TInstant start = TInstant::Now();
-
- for (ui64 b = 0; b < BATCH_COUNT; ++b) {
- TValueBuilder rows;
- rows.BeginList();
- for (ui64 i = 0; i < BATCH_SIZE; ++i) {
- ui64 shard = (i % 8) << 61;
- i64 ts = i % 23;
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(shard)
- .AddMember("App").Utf8("app_" + ToString(b))
- .AddMember("Timestamp").Int64(ts)
- .AddMember("HttpCode").Uint32(200)
- .AddMember("Message").Utf8("message")
- .AddMember("Ratio").OptionalDouble(0.33)
- .AddMember("Binary").OptionalString("\1\1\1\1")
- .AddMember("Empty").EmptyOptional(EPrimitiveType::Uint32)
- .EndStruct();
- }
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
- Cerr << BATCH_COUNT * BATCH_SIZE << " rows in " << TInstant::Now() - start << Endl;
-
- auto res = session.ExecuteDataQuery(
- "SELECT count(*) AS __count FROM [/Root/Logs];",
- NYdb::NTable::TTxControl::BeginTx().CommitTx()
- ).ExtractValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
-
- auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
- UNIT_ASSERT(rs.TryNextRow());
- ui64 count = rs.ColumnParser("__count").GetUint64();
- Cerr << "count returned " << count << " rows" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(count, BATCH_COUNT * BATCH_SIZE);
- }
-
- void TestNull(NYdb::TDriver& connection, EPrimitiveType valueType, bool inKey) {
- TString tableName = Sprintf("/Root/TestNulls_0x%04x", valueType);
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint64)
- .AddNullableColumn("Value", valueType);
- if (inKey) {
- tableBuilder.SetPrimaryKeyColumns({"Key", "Value"});
- } else {
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- }
- auto result = session.CreateTable(tableName, tableBuilder.Build()).ExtractValueSync();
-
- Cerr << result.GetIssues().ToString();
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint64(22)
- .AddMember("Value").EmptyOptional(valueType)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert(tableName, rows.Build()).GetValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- auto res = session.ExecuteDataQuery(
- "SELECT count(*) AS __count FROM [" + tableName + "] WHERE Value IS NOT NULL;",
- NYdb::NTable::TTxControl::BeginTx().CommitTx()
- ).ExtractValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
-
- auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
- UNIT_ASSERT(rs.TryNextRow());
- ui64 count = rs.ColumnParser("__count").GetUint64();
- Cerr << "count returned " << count << " rows" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(count, 0);
- }
-
- {
- auto res = session.ExecuteDataQuery(
- "SELECT count(*) AS __count FROM [" + tableName + "] WHERE Value IS NULL;",
- NYdb::NTable::TTxControl::BeginTx().CommitTx()
- ).ExtractValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
-
- auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
- UNIT_ASSERT(rs.TryNextRow());
- ui64 count = rs.ColumnParser("__count").GetUint64();
- Cerr << "count returned " << count << " rows" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(count, 1);
- }
-
- {
- auto result = session.DropTable(tableName).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
- }
-
- Y_UNIT_TEST(Nulls) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- EPrimitiveType ydbTypes[] = {
- EPrimitiveType::Bool,
- EPrimitiveType::Uint8,
- EPrimitiveType::Int32, EPrimitiveType::Uint32,
- EPrimitiveType::Int64, EPrimitiveType::Uint64,
- EPrimitiveType::Float,
- EPrimitiveType::Double,
- EPrimitiveType::Date,
- EPrimitiveType::Datetime,
- EPrimitiveType::Timestamp,
- EPrimitiveType::Interval,
- EPrimitiveType::String,
- EPrimitiveType::Utf8,
- EPrimitiveType::Yson,
- EPrimitiveType::Json,
- EPrimitiveType::JsonDocument,
- EPrimitiveType::DyNumber
- };
-
- for (EPrimitiveType t : ydbTypes) {
- TestNull(connection, t, false);
- }
-
- EPrimitiveType ydbKeyTypes[] = {
- EPrimitiveType::Bool,
- EPrimitiveType::Uint8,
- EPrimitiveType::Int32, EPrimitiveType::Uint32,
- EPrimitiveType::Int64, EPrimitiveType::Uint64,
- EPrimitiveType::Date,
- EPrimitiveType::Datetime,
- EPrimitiveType::Timestamp,
- EPrimitiveType::Interval,
- EPrimitiveType::String,
- EPrimitiveType::Utf8,
- EPrimitiveType::DyNumber
- };
-
- for (EPrimitiveType t : ydbKeyTypes) {
- TestNull(connection, t, true);
- }
- }
-
+
+using namespace NYdb;
+
+Y_UNIT_TEST_SUITE(YdbTableBulkUpsert) {
+
+ Y_UNIT_TEST(Simple) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Shard", EPrimitiveType::Uint64)
+ .AddNullableColumn("App", EPrimitiveType::Utf8)
+ .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
+ .AddNullableColumn("HttpCode", EPrimitiveType::Uint32)
+ .AddNullableColumn("Message", EPrimitiveType::Utf8)
+ .AddNullableColumn("Ratio", EPrimitiveType::Double)
+ .AddNullableColumn("Binary", EPrimitiveType::String)
+ .AddNullableColumn("Empty", EPrimitiveType::Uint32);
+ tableBuilder.SetPrimaryKeyColumns({"Shard", "App", "Timestamp"});
+ NYdb::NTable::TCreateTableSettings tableSettings;
+ tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
+ auto result = session.CreateTable("/Root/Logs", tableBuilder.Build(), tableSettings).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ const size_t BATCH_COUNT = 1;//1000;
+ const size_t BATCH_SIZE = 3;//100;
+
+ TInstant start = TInstant::Now();
+
+ for (ui64 b = 0; b < BATCH_COUNT; ++b) {
+ TValueBuilder rows;
+ rows.BeginList();
+ for (ui64 i = 0; i < BATCH_SIZE; ++i) {
+ ui64 shard = (i % 8) << 61;
+ i64 ts = i % 23;
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(shard)
+ .AddMember("App").Utf8("app_" + ToString(b))
+ .AddMember("Timestamp").Int64(ts)
+ .AddMember("HttpCode").Uint32(200)
+ .AddMember("Message").Utf8("message")
+ .AddMember("Ratio").OptionalDouble(0.33)
+ .AddMember("Binary").OptionalString("\1\1\1\1")
+ .AddMember("Empty").EmptyOptional(EPrimitiveType::Uint32)
+ .EndStruct();
+ }
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+ Cerr << BATCH_COUNT * BATCH_SIZE << " rows in " << TInstant::Now() - start << Endl;
+
+ auto res = session.ExecuteDataQuery(
+ "SELECT count(*) AS __count FROM [/Root/Logs];",
+ NYdb::NTable::TTxControl::BeginTx().CommitTx()
+ ).ExtractValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+
+ auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
+ UNIT_ASSERT(rs.TryNextRow());
+ ui64 count = rs.ColumnParser("__count").GetUint64();
+ Cerr << "count returned " << count << " rows" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(count, BATCH_COUNT * BATCH_SIZE);
+ }
+
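// Editorial sketch, not part of the patch: why Shard = (i % 8) << 61 spreads the
// batch across the table. Assuming UniformPartitions(32) splits the Uint64 range
// of the first key column into 32 equal ranges of 2^59 keys, the 8 shard values
// are spaced 2^61 apart (4 partition widths), so they land in 8 distinct partitions.
static void ShowShardToPartition() {
    const ui64 partitionWidth = ui64(1) << 59; // 2^64 / 32
    for (ui64 i = 0; i < 8; ++i) {
        ui64 shard = (i % 8) << 61;
        Cerr << "shard " << shard << " -> partition " << (shard / partitionWidth) << Endl; // 0, 4, 8, ..., 28
    }
}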
+ void TestNull(NYdb::TDriver& connection, EPrimitiveType valueType, bool inKey) {
+ TString tableName = Sprintf("/Root/TestNulls_0x%04x", valueType);
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint64)
+ .AddNullableColumn("Value", valueType);
+ if (inKey) {
+ tableBuilder.SetPrimaryKeyColumns({"Key", "Value"});
+ } else {
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ }
+ auto result = session.CreateTable(tableName, tableBuilder.Build()).ExtractValueSync();
+
+ Cerr << result.GetIssues().ToString();
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(22)
+ .AddMember("Value").EmptyOptional(valueType)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert(tableName, rows.Build()).GetValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ auto res = session.ExecuteDataQuery(
+ "SELECT count(*) AS __count FROM [" + tableName + "] WHERE Value IS NOT NULL;",
+ NYdb::NTable::TTxControl::BeginTx().CommitTx()
+ ).ExtractValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+
+ auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
+ UNIT_ASSERT(rs.TryNextRow());
+ ui64 count = rs.ColumnParser("__count").GetUint64();
+ Cerr << "count returned " << count << " rows" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(count, 0);
+ }
+
+ {
+ auto res = session.ExecuteDataQuery(
+ "SELECT count(*) AS __count FROM [" + tableName + "] WHERE Value IS NULL;",
+ NYdb::NTable::TTxControl::BeginTx().CommitTx()
+ ).ExtractValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+
+ auto rs = NYdb::TResultSetParser(res.GetResultSet(0));
+ UNIT_ASSERT(rs.TryNextRow());
+ ui64 count = rs.ColumnParser("__count").GetUint64();
+ Cerr << "count returned " << count << " rows" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(count, 1);
+ }
+
+ {
+ auto result = session.DropTable(tableName).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+ }
+
+ Y_UNIT_TEST(Nulls) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ EPrimitiveType ydbTypes[] = {
+ EPrimitiveType::Bool,
+ EPrimitiveType::Uint8,
+ EPrimitiveType::Int32, EPrimitiveType::Uint32,
+ EPrimitiveType::Int64, EPrimitiveType::Uint64,
+ EPrimitiveType::Float,
+ EPrimitiveType::Double,
+ EPrimitiveType::Date,
+ EPrimitiveType::Datetime,
+ EPrimitiveType::Timestamp,
+ EPrimitiveType::Interval,
+ EPrimitiveType::String,
+ EPrimitiveType::Utf8,
+ EPrimitiveType::Yson,
+ EPrimitiveType::Json,
+ EPrimitiveType::JsonDocument,
+ EPrimitiveType::DyNumber
+ };
+
+ for (EPrimitiveType t : ydbTypes) {
+ TestNull(connection, t, false);
+ }
+
+ EPrimitiveType ydbKeyTypes[] = {
+ EPrimitiveType::Bool,
+ EPrimitiveType::Uint8,
+ EPrimitiveType::Int32, EPrimitiveType::Uint32,
+ EPrimitiveType::Int64, EPrimitiveType::Uint64,
+ EPrimitiveType::Date,
+ EPrimitiveType::Datetime,
+ EPrimitiveType::Timestamp,
+ EPrimitiveType::Interval,
+ EPrimitiveType::String,
+ EPrimitiveType::Utf8,
+ EPrimitiveType::DyNumber
+ };
+
+ for (EPrimitiveType t : ydbKeyTypes) {
+ TestNull(connection, t, true);
+ }
+ }
+
Y_UNIT_TEST(NotNulls) {
TKikimrWithGrpcAndRootSchema server;
ui16 grpc = server.GetPort();
@@ -329,633 +329,633 @@ Y_UNIT_TEST_SUITE(YdbTableBulkUpsert) {
}
}
- Y_UNIT_TEST(Errors) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Shard", EPrimitiveType::Uint64)
- .AddNullableColumn("App", EPrimitiveType::Utf8)
- .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
- .AddNullableColumn("Message", EPrimitiveType::Utf8)
- .AddNullableColumn("Ratio", EPrimitiveType::Double);
- tableBuilder.SetPrimaryKeyColumns({"Shard", "App", "Timestamp"});
- NYdb::NTable::TCreateTableSettings tableSettings;
- tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
- auto result = session.CreateTable("/Root/Logs", tableBuilder.Build(), tableSettings).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- // Unknown table
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Utf8("app_")
- .AddMember("Message").OptionalUtf8("message")
- .AddMember("Ratio").Double(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Traces", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown table '/Root/Traces'");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
-
- // Missing key column
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Utf8("app_")
- .AddMember("Message").OptionalUtf8("message")
- .AddMember("Ratio").Double(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Missing key columns: Timestamp");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
-
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("App").Utf8("app_")
- .AddMember("Timestamp").Int64(-3)
- .AddMember("Message").Utf8("message")
- .AddMember("Ratio").Double(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Missing key columns: Shard");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
-
- // Invalid key column type
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Uint64(3)
- .AddMember("Timestamp").Int64(-3)
- .AddMember("Message").Utf8("message")
- .AddMember("Ratio").Double(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Type mismatch for column App: expected Utf8, got Uint64");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
-
- // Invalid value column type
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Utf8("app")
- .AddMember("Timestamp").Int64(-3)
- .AddMember("Message").Uint64(3)
- .AddMember("Ratio").OptionalDouble(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Type mismatch for column Message: expected Utf8, got Uint64");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
-
- // Missing value column - it's ok
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Utf8("app")
- .AddMember("Timestamp").Int64(-3)
- .AddMember("Ratio").OptionalDouble(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT(res.GetIssues().ToString().empty());
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
-
- // Unknown column
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Shard").Uint64(42)
- .AddMember("App").Utf8("app_")
- .AddMember("Timestamp").Int64(-3)
- .AddMember("HttpCode").Uint32(200)
- .AddMember("Message").Utf8("message")
- .AddMember("Ratio").Double(0.33)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown column: HttpCode");
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
- }
-
- Y_UNIT_TEST(Types) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint64)
-
- .AddNullableColumn("Column_Bool", EPrimitiveType::Bool)
- .AddNullableColumn("Column_Uint8", EPrimitiveType::Uint8)
- .AddNullableColumn("Column_Int32", EPrimitiveType::Int32)
- .AddNullableColumn("Column_Uint32", EPrimitiveType::Uint32)
- .AddNullableColumn("Column_Int64", EPrimitiveType::Int64)
- .AddNullableColumn("Column_Uint64", EPrimitiveType::Uint64)
- .AddNullableColumn("Column_Float", EPrimitiveType::Float)
- .AddNullableColumn("Column_Double", EPrimitiveType::Double)
- .AddNullableColumn("Column_Date", EPrimitiveType::Date)
- .AddNullableColumn("Column_Datetime", EPrimitiveType::Datetime)
- .AddNullableColumn("Column_Timestamp", EPrimitiveType::Timestamp)
- .AddNullableColumn("Column_Interval", EPrimitiveType::Interval)
- .AddNullableColumn("Column_String", EPrimitiveType::String)
- .AddNullableColumn("Column_Utf8", EPrimitiveType::Utf8)
- .AddNullableColumn("Column_Yson", EPrimitiveType::Yson)
- .AddNullableColumn("Column_Json", EPrimitiveType::Json)
- .AddNullableColumn("Column_JsonDocument", EPrimitiveType::JsonDocument)
- .AddNullableColumn("Column_DyNumber", EPrimitiveType::DyNumber)
- .AddNullableColumn("Column_Decimal", TDecimalType(22, 9))
-// These types are not currently supported for table columns
-// .AddNullableColumn("Column_Int8", EPrimitiveType::Int8)
-// .AddNullableColumn("Column_Int16", EPrimitiveType::Int16)
-// .AddNullableColumn("Column_Uint16", EPrimitiveType::Uint16)
-// .AddNullableColumn("Column_TzDate", EPrimitiveType::TzDate)
-// .AddNullableColumn("Column_TzDatetime", EPrimitiveType::TzDatetime)
-// .AddNullableColumn("Column_TzTimestamp", EPrimitiveType::TzTimestamp)
-// .AddNullableColumn("Column_Uuid", EPrimitiveType::Uuid)
- ;
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- auto result = session.CreateTable("/Root/Types", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint64(1)
-
- .AddMember("Column_Bool").Bool(0)
-// .AddMember("Column_Int8").Int8(0)
- .AddMember("Column_Uint8").Uint8(0)
-// .AddMember("Column_Int16").Int16(0)
-// .AddMember("Column_Uint16").Uint16(0)
- .AddMember("Column_Int32").Int32(0)
- .AddMember("Column_Uint32").Uint32(0)
- .AddMember("Column_Int64").Int64(0)
- .AddMember("Column_Uint64").Uint64(0)
- .AddMember("Column_Float").Float(0)
- .AddMember("Column_Double").Double(0)
- .AddMember("Column_Date").Date(TInstant())
- .AddMember("Column_Datetime").Datetime(TInstant::ParseRfc822("Fri, 29 Aug 1997 02:14:00 EST"))
- .AddMember("Column_Timestamp").Timestamp(TInstant())
- .AddMember("Column_Interval").Interval(0)
-// .AddMember("Column_TzDate").TzDate("")
-// .AddMember("Column_TzDatetime").TzDatetime("")
-// .AddMember("Column_TzTimestamp").TzTimestamp("")
- .AddMember("Column_String").String("")
- .AddMember("Column_Utf8").Utf8("")
- .AddMember("Column_Yson").Yson("{ \"a\" = [ { \"b\" = 1; } ]; }")
- .AddMember("Column_Json").Json("{}")
- .AddMember("Column_JsonDocument").JsonDocument("{}")
- .AddMember("Column_DyNumber").DyNumber("123")
-// .AddMember("Column_Uuid").Uuid("")
- .AddMember("Column_Decimal").Decimal(TDecimalValue("99.95"))
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Types", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
-
- // With Optionals
- {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").OptionalUint64(1)
-
- .AddMember("Column_Bool").OptionalBool(0)
-// .AddMember("Column_Int8").OptionalInt8(0)
- .AddMember("Column_Uint8").OptionalUint8(0)
-// .AddMember("Column_Int16").OptionalInt16(0)
-// .AddMember("Column_Uint16").OptionalUint16(0)
- .AddMember("Column_Int32").OptionalInt32(0)
- .AddMember("Column_Uint32").OptionalUint32(0)
- .AddMember("Column_Int64").OptionalInt64(0)
- .AddMember("Column_Uint64").OptionalUint64(0)
- .AddMember("Column_Float").OptionalFloat(0)
- .AddMember("Column_Double").OptionalDouble(0)
- .AddMember("Column_Date").OptionalDate(TInstant())
- .AddMember("Column_Datetime").OptionalDatetime(TInstant::ParseRfc822("Fri, 29 Aug 1997 02:14:00 EST"))
- .AddMember("Column_Timestamp").OptionalTimestamp(TInstant())
- .AddMember("Column_Interval").OptionalInterval(0)
-// .AddMember("Column_TzDate").OptionalTzDate("")
-// .AddMember("Column_TzDatetime").OptionalTzDatetime("")
-// .AddMember("Column_TzTimestamp").OptionalTzTimestamp("")
- .AddMember("Column_String").OptionalString("")
- .AddMember("Column_Utf8").OptionalUtf8("")
- .AddMember("Column_Yson").OptionalYson("{ \"aaa\" = 1; }")
- .AddMember("Column_Json").OptionalJson("{}")
- .AddMember("Column_JsonDocument").OptionalJsonDocument("{}")
- .AddMember("Column_DyNumber").OptionalDyNumber("42")
-// .AddMember("Column_Uuid").OptionalUuid("")
- .AddMember("Column_Decimal").Decimal(TDecimalValue("99.95"))
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Types", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
- }
-
- struct TTestRow {
- TString Key1;
- TString Key2;
- TString Key3;
- TString Value1;
- TString Value2;
- TString Value3;
- };
-
- NYdb::NTable::TBulkUpsertResult TestUpsertRow(NYdb::NTable::TTableClient& client, const TTestRow& row) {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key1").Utf8(row.Key1)
- .AddMember("Key2").String(row.Key2)
- .AddMember("Key3").Utf8(row.Key3)
- .AddMember("Value1").String(row.Value1)
- .AddMember("Value2").Utf8(row.Value2)
- .AddMember("Value3").String(row.Value3)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/Limits", rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- return res;
- }
-
- Y_UNIT_TEST(Limits) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key1", EPrimitiveType::Utf8)
- .AddNullableColumn("Key2", EPrimitiveType::String)
- .AddNullableColumn("Key3", EPrimitiveType::Utf8)
-
- .AddNullableColumn("Value1", EPrimitiveType::String)
- .AddNullableColumn("Value2", EPrimitiveType::Utf8)
- .AddNullableColumn("Value3", EPrimitiveType::String);
-
- tableBuilder.SetPrimaryKeyColumns({"Key1", "Key2", "Key3"});
- auto result = session.CreateTable("/Root/Limits", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- auto res = TestUpsertRow(client, {TString(1100000, 'a'), "bb", "", "val1", "val2", "val3"});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- auto res = TestUpsertRow(client, {"aa", TString(1100000, 'b'), "", "val1", "val2", "val3"});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- auto res = TestUpsertRow(client, {TString(600000, 'a'), TString(500000, 'b'), "", "val1", "val2", "val3"});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
+ Y_UNIT_TEST(Errors) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Shard", EPrimitiveType::Uint64)
+ .AddNullableColumn("App", EPrimitiveType::Utf8)
+ .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
+ .AddNullableColumn("Message", EPrimitiveType::Utf8)
+ .AddNullableColumn("Ratio", EPrimitiveType::Double);
+ tableBuilder.SetPrimaryKeyColumns({"Shard", "App", "Timestamp"});
+ NYdb::NTable::TCreateTableSettings tableSettings;
+ tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
+ auto result = session.CreateTable("/Root/Logs", tableBuilder.Build(), tableSettings).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ // Unknown table
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Utf8("app_")
+ .AddMember("Message").OptionalUtf8("message")
+ .AddMember("Ratio").Double(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Traces", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown table '/Root/Traces'");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+
+ // Missing key column
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Utf8("app_")
+ .AddMember("Message").OptionalUtf8("message")
+ .AddMember("Ratio").Double(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Missing key columns: Timestamp");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("App").Utf8("app_")
+ .AddMember("Timestamp").Int64(-3)
+ .AddMember("Message").Utf8("message")
+ .AddMember("Ratio").Double(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Missing key columns: Shard");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+
+ // Invalid key column type
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Uint64(3)
+ .AddMember("Timestamp").Int64(-3)
+ .AddMember("Message").Utf8("message")
+ .AddMember("Ratio").Double(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Type mismatch for column App: expected Utf8, got Uint64");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+
+ // Invalid value column type
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Utf8("app")
+ .AddMember("Timestamp").Int64(-3)
+ .AddMember("Message").Uint64(3)
+ .AddMember("Ratio").OptionalDouble(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Type mismatch for column Message: expected Utf8, got Uint64");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+
+ // Missing value column - it's ok
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Utf8("app")
+ .AddMember("Timestamp").Int64(-3)
+ .AddMember("Ratio").OptionalDouble(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT(res.GetIssues().ToString().empty());
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+
+ // Unknown column
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Shard").Uint64(42)
+ .AddMember("App").Utf8("app_")
+ .AddMember("Timestamp").Int64(-3)
+ .AddMember("HttpCode").Uint32(200)
+ .AddMember("Message").Utf8("message")
+ .AddMember("Ratio").Double(0.33)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Logs", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown column: HttpCode");
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+ }
+
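// Editorial sketch, not part of the patch: every negative case above repeats the
// same build-rows / BulkUpsert / check-issue pattern. A hypothetical helper like
// the one below (it does not exist in this file) would reduce each case to a
// single call; it uses only the SDK calls already exercised by the test.
static void AssertUpsertSchemeError(NYdb::NTable::TTableClient& client,
                                    const TString& table,
                                    NYdb::TValueBuilder& rows,
                                    const TString& expectedIssue) {
    auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
    Cerr << res.GetIssues().ToString() << Endl;
    UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), expectedIssue);
    UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
}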
+ Y_UNIT_TEST(Types) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint64)
+
+ .AddNullableColumn("Column_Bool", EPrimitiveType::Bool)
+ .AddNullableColumn("Column_Uint8", EPrimitiveType::Uint8)
+ .AddNullableColumn("Column_Int32", EPrimitiveType::Int32)
+ .AddNullableColumn("Column_Uint32", EPrimitiveType::Uint32)
+ .AddNullableColumn("Column_Int64", EPrimitiveType::Int64)
+ .AddNullableColumn("Column_Uint64", EPrimitiveType::Uint64)
+ .AddNullableColumn("Column_Float", EPrimitiveType::Float)
+ .AddNullableColumn("Column_Double", EPrimitiveType::Double)
+ .AddNullableColumn("Column_Date", EPrimitiveType::Date)
+ .AddNullableColumn("Column_Datetime", EPrimitiveType::Datetime)
+ .AddNullableColumn("Column_Timestamp", EPrimitiveType::Timestamp)
+ .AddNullableColumn("Column_Interval", EPrimitiveType::Interval)
+ .AddNullableColumn("Column_String", EPrimitiveType::String)
+ .AddNullableColumn("Column_Utf8", EPrimitiveType::Utf8)
+ .AddNullableColumn("Column_Yson", EPrimitiveType::Yson)
+ .AddNullableColumn("Column_Json", EPrimitiveType::Json)
+ .AddNullableColumn("Column_JsonDocument", EPrimitiveType::JsonDocument)
+ .AddNullableColumn("Column_DyNumber", EPrimitiveType::DyNumber)
+ .AddNullableColumn("Column_Decimal", TDecimalType(22, 9))
+// These types are not currently supported for table columns
+// .AddNullableColumn("Column_Int8", EPrimitiveType::Int8)
+// .AddNullableColumn("Column_Int16", EPrimitiveType::Int16)
+// .AddNullableColumn("Column_Uint16", EPrimitiveType::Uint16)
+// .AddNullableColumn("Column_TzDate", EPrimitiveType::TzDate)
+// .AddNullableColumn("Column_TzDatetime", EPrimitiveType::TzDatetime)
+// .AddNullableColumn("Column_TzTimestamp", EPrimitiveType::TzTimestamp)
+// .AddNullableColumn("Column_Uuid", EPrimitiveType::Uuid)
+ ;
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ auto result = session.CreateTable("/Root/Types", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(1)
+
+ .AddMember("Column_Bool").Bool(0)
+// .AddMember("Column_Int8").Int8(0)
+ .AddMember("Column_Uint8").Uint8(0)
+// .AddMember("Column_Int16").Int16(0)
+// .AddMember("Column_Uint16").Uint16(0)
+ .AddMember("Column_Int32").Int32(0)
+ .AddMember("Column_Uint32").Uint32(0)
+ .AddMember("Column_Int64").Int64(0)
+ .AddMember("Column_Uint64").Uint64(0)
+ .AddMember("Column_Float").Float(0)
+ .AddMember("Column_Double").Double(0)
+ .AddMember("Column_Date").Date(TInstant())
+ .AddMember("Column_Datetime").Datetime(TInstant::ParseRfc822("Fri, 29 Aug 1997 02:14:00 EST"))
+ .AddMember("Column_Timestamp").Timestamp(TInstant())
+ .AddMember("Column_Interval").Interval(0)
+// .AddMember("Column_TzDate").TzDate("")
+// .AddMember("Column_TzDatetime").TzDatetime("")
+// .AddMember("Column_TzTimestamp").TzTimestamp("")
+ .AddMember("Column_String").String("")
+ .AddMember("Column_Utf8").Utf8("")
+ .AddMember("Column_Yson").Yson("{ \"a\" = [ { \"b\" = 1; } ]; }")
+ .AddMember("Column_Json").Json("{}")
+ .AddMember("Column_JsonDocument").JsonDocument("{}")
+ .AddMember("Column_DyNumber").DyNumber("123")
+// .AddMember("Column_Uuid").Uuid("")
+ .AddMember("Column_Decimal").Decimal(TDecimalValue("99.95"))
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Types", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+
+ // With Optionals
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").OptionalUint64(1)
+
+ .AddMember("Column_Bool").OptionalBool(0)
+// .AddMember("Column_Int8").OptionalInt8(0)
+ .AddMember("Column_Uint8").OptionalUint8(0)
+// .AddMember("Column_Int16").OptionalInt16(0)
+// .AddMember("Column_Uint16").OptionalUint16(0)
+ .AddMember("Column_Int32").OptionalInt32(0)
+ .AddMember("Column_Uint32").OptionalUint32(0)
+ .AddMember("Column_Int64").OptionalInt64(0)
+ .AddMember("Column_Uint64").OptionalUint64(0)
+ .AddMember("Column_Float").OptionalFloat(0)
+ .AddMember("Column_Double").OptionalDouble(0)
+ .AddMember("Column_Date").OptionalDate(TInstant())
+ .AddMember("Column_Datetime").OptionalDatetime(TInstant::ParseRfc822("Fri, 29 Aug 1997 02:14:00 EST"))
+ .AddMember("Column_Timestamp").OptionalTimestamp(TInstant())
+ .AddMember("Column_Interval").OptionalInterval(0)
+// .AddMember("Column_TzDate").OptionalTzDate("")
+// .AddMember("Column_TzDatetime").OptionalTzDatetime("")
+// .AddMember("Column_TzTimestamp").OptionalTzTimestamp("")
+ .AddMember("Column_String").OptionalString("")
+ .AddMember("Column_Utf8").OptionalUtf8("")
+ .AddMember("Column_Yson").OptionalYson("{ \"aaa\" = 1; }")
+ .AddMember("Column_Json").OptionalJson("{}")
+ .AddMember("Column_JsonDocument").OptionalJsonDocument("{}")
+ .AddMember("Column_DyNumber").OptionalDyNumber("42")
+// .AddMember("Column_Uuid").OptionalUuid("")
+ .AddMember("Column_Decimal").Decimal(TDecimalValue("99.95"))
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Types", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+ }
+
+ struct TTestRow {
+ TString Key1;
+ TString Key2;
+ TString Key3;
+ TString Value1;
+ TString Value2;
+ TString Value3;
+ };
+
+ NYdb::NTable::TBulkUpsertResult TestUpsertRow(NYdb::NTable::TTableClient& client, const TTestRow& row) {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key1").Utf8(row.Key1)
+ .AddMember("Key2").String(row.Key2)
+ .AddMember("Key3").Utf8(row.Key3)
+ .AddMember("Value1").String(row.Value1)
+ .AddMember("Value2").Utf8(row.Value2)
+ .AddMember("Value3").String(row.Value3)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/Limits", rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ return res;
+ }
+
+ Y_UNIT_TEST(Limits) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key1", EPrimitiveType::Utf8)
+ .AddNullableColumn("Key2", EPrimitiveType::String)
+ .AddNullableColumn("Key3", EPrimitiveType::Utf8)
+
+ .AddNullableColumn("Value1", EPrimitiveType::String)
+ .AddNullableColumn("Value2", EPrimitiveType::Utf8)
+ .AddNullableColumn("Value3", EPrimitiveType::String);
+
+ tableBuilder.SetPrimaryKeyColumns({"Key1", "Key2", "Key3"});
+ auto result = session.CreateTable("/Root/Limits", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ auto res = TestUpsertRow(client, {TString(1100000, 'a'), "bb", "", "val1", "val2", "val3"});
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ auto res = TestUpsertRow(client, {"aa", TString(1100000, 'b'), "", "val1", "val2", "val3"});
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ auto res = TestUpsertRow(client, {TString(600000, 'a'), TString(500000, 'b'), "", "val1", "val2", "val3"});
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
auto res = TestUpsertRow(client, {TString(500000, 'a'), TString(500000, 'b'), "", TString(17*1000000, '1'), "val2", "val3"});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
auto res = TestUpsertRow(client, {TString(500000, 'a'), TString(500000, 'b'), "", TString(15.9*1000000, '1'), "val2", "val3"});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- auto res = TestUpsertRow(client, {TString(500000, 'a'), "", TString(500000, 'c'),
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ auto res = TestUpsertRow(client, {TString(500000, 'a'), "", TString(500000, 'c'),
TString(15.9*1000000, '1'), TString(15.9*1000000, '2'), TString(15.9*1000000, '3')});
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
- }
-
- NYdb::NTable::TBulkUpsertResult TestUpsertRow(NYdb::NTable::TTableClient& client, const TString& table, ui8 key, ui8 val) {
- TValueBuilder rows;
- rows.BeginList();
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint8(key)
- .AddMember("Value").Uint8(val)
- .EndStruct();
- rows.EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- return res;
- }
-
- Y_UNIT_TEST(DataValidation) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint32)
- .AddNullableColumn("Value_Decimal", TDecimalType(22, 9))
- .AddNullableColumn("Value_Date", EPrimitiveType::Date)
- .AddNullableColumn("Value_DateTime", EPrimitiveType::Datetime)
- .AddNullableColumn("Value_Timestamp", EPrimitiveType::Timestamp)
- .AddNullableColumn("Value_Interval", EPrimitiveType::Interval)
- .AddNullableColumn("Value_Utf8", EPrimitiveType::Utf8)
- .AddNullableColumn("Value_Yson", EPrimitiveType::Yson)
- .AddNullableColumn("Value_Json", EPrimitiveType::Json)
- .AddNullableColumn("Value_JsonDocument",EPrimitiveType::JsonDocument)
- .AddNullableColumn("Value_DyNumber", EPrimitiveType::DyNumber)
- ;
-
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- auto result = session.CreateTable("/Root/TestInvalidData", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TDecimalValue val("0");
- val.Low_ = 0;
- val.Hi_ = 11000000000000000000ULL;
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Decimal").Decimal(val).EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Date").Date(TInstant::Days(50000)).EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_DateTime").Datetime(TInstant::Seconds(Max<ui32>())).EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Timestamp").Timestamp(TInstant::Days(50000)).EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Interval").Interval(TDuration::Days(50000).MicroSeconds()).EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Utf8").Utf8("\xff").EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
-            // NOTE: in this case the protobuf serialization doesn't allow setting an invalid UTF-8 value in a 'string' field
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::CLIENT_INTERNAL_ERROR);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Yson").Yson("]][").EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Json").Json("]]]").EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_JsonDocument").JsonDocument("]]]").EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- TString table = "/Root/TestInvalidData";
- TValueBuilder rows;
- rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_DyNumber").DyNumber("[[[]]]").EndStruct().EndList();
-
- auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
- Cerr << res.GetStatus() << Endl;
- Cerr << res.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
- }
-
- Y_UNIT_TEST(Uint8) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint8)
- .AddNullableColumn("Value", EPrimitiveType::Uint8);
-
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- auto result = session.CreateTable("/Root/ui8", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- for (ui32 i = 0; i < 256; ++i) {
- {
- auto res = TestUpsertRow(client, "/Root/ui8", i, 42);
- if (i <= 127)
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- else
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
- }
-
- {
- auto res = TestUpsertRow(client, "/Root/ui8", 42, i);
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
- }
- }
-
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+ }
+
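// Editorial note, not part of the patch: the cases above bracket the apparent
// server-side limits -- roughly 1 MB for the combined primary key (1,100,000 bytes
// in one key column and 600,000 + 500,000 bytes across two columns both fail,
// while 500,000 + 500,000 passes) and roughly 16 MB per column value (a 17 MB
// value fails, 15.9 MB values pass, even three of them in one row). The exact
// thresholds are enforced by the server and are not spelled out in this test.
static_assert(600000 + 500000 > 1000000, "combined key above ~1 MB is rejected");
static_assert(500000 + 500000 <= 1000000, "combined key of ~1 MB is still accepted");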
+ NYdb::NTable::TBulkUpsertResult TestUpsertRow(NYdb::NTable::TTableClient& client, const TString& table, ui8 key, ui8 val) {
+ TValueBuilder rows;
+ rows.BeginList();
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint8(key)
+ .AddMember("Value").Uint8(val)
+ .EndStruct();
+ rows.EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ return res;
+ }
+
+ Y_UNIT_TEST(DataValidation) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint32)
+ .AddNullableColumn("Value_Decimal", TDecimalType(22, 9))
+ .AddNullableColumn("Value_Date", EPrimitiveType::Date)
+ .AddNullableColumn("Value_DateTime", EPrimitiveType::Datetime)
+ .AddNullableColumn("Value_Timestamp", EPrimitiveType::Timestamp)
+ .AddNullableColumn("Value_Interval", EPrimitiveType::Interval)
+ .AddNullableColumn("Value_Utf8", EPrimitiveType::Utf8)
+ .AddNullableColumn("Value_Yson", EPrimitiveType::Yson)
+ .AddNullableColumn("Value_Json", EPrimitiveType::Json)
+ .AddNullableColumn("Value_JsonDocument",EPrimitiveType::JsonDocument)
+ .AddNullableColumn("Value_DyNumber", EPrimitiveType::DyNumber)
+ ;
+
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ auto result = session.CreateTable("/Root/TestInvalidData", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TDecimalValue val("0");
+ val.Low_ = 0;
+ val.Hi_ = 11000000000000000000ULL;
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Decimal").Decimal(val).EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
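// Editorial sketch, not part of the patch: why the value above is rejected.
// Assuming Hi_/Low_ are the high/low 64-bit halves of the scaled 128-bit integer,
// Decimal(22, 9) can represent at most 22 significant digits (a scaled magnitude
// below 10^22), while a Hi_ on the order of 1.1e19 contributes about 1.1e19 * 2^64,
// far beyond that bound -- hence BAD_REQUEST from the server-side validation.
static void ShowDecimalOutOfRange() {
    const long double scaled = 11000000000000000000.0L * 18446744073709551616.0L; // Hi_ * 2^64
    const long double maxScaled = 1e22L; // bound for 22 significant decimal digits
    UNIT_ASSERT(scaled > maxScaled);
}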
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Date").Date(TInstant::Days(50000)).EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_DateTime").Datetime(TInstant::Seconds(Max<ui32>())).EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Timestamp").Timestamp(TInstant::Days(50000)).EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Interval").Interval(TDuration::Days(50000).MicroSeconds()).EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Utf8").Utf8("\xff").EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+            // NOTE: in this case the protobuf serialization doesn't allow setting an invalid UTF-8 value in a 'string' field
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::CLIENT_INTERNAL_ERROR);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Yson").Yson("]][").EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_Json").Json("]]]").EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_JsonDocument").JsonDocument("]]]").EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ TString table = "/Root/TestInvalidData";
+ TValueBuilder rows;
+ rows.BeginList().AddListItem().BeginStruct().AddMember("Key").Uint32(1).AddMember("Value_DyNumber").DyNumber("[[[]]]").EndStruct().EndList();
+
+ auto res = client.BulkUpsert(table, rows.Build()).GetValueSync();
+ Cerr << res.GetStatus() << Endl;
+ Cerr << res.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+ }
+
+ Y_UNIT_TEST(Uint8) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint8)
+ .AddNullableColumn("Value", EPrimitiveType::Uint8);
+
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ auto result = session.CreateTable("/Root/ui8", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ for (ui32 i = 0; i < 256; ++i) {
+ {
+ auto res = TestUpsertRow(client, "/Root/ui8", i, 42);
+ if (i <= 127)
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ else
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::BAD_REQUEST);
+ }
+
+ {
+ auto res = TestUpsertRow(client, "/Root/ui8", 42, i);
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+ }
+ }
+
void Index(NYdb::NTable::EIndexType indexType, bool enableBulkUpsertToAsyncIndexedTables = false) {
auto server = TKikimrWithGrpcAndRootSchema({}, {}, {}, false, nullptr, [=](auto& settings) {
settings.SetEnableBulkUpsertToAsyncIndexedTables(enableBulkUpsertToAsyncIndexedTables);
});
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint8)
- .AddNullableColumn("Value", EPrimitiveType::Uint8)
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint8)
+ .AddNullableColumn("Value", EPrimitiveType::Uint8)
.AddSecondaryIndex("Value_index", indexType, "Value");
-
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- auto result = session.CreateTable("/Root/ui8", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- {
- auto res = TestUpsertRow(client, "/Root/ui8", 1, 2);
+
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ auto result = session.CreateTable("/Root/ui8", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ {
+ auto res = TestUpsertRow(client, "/Root/ui8", 1, 2);
if (indexType == NYdb::NTable::EIndexType::GlobalAsync) {
UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), enableBulkUpsertToAsyncIndexedTables
@@ -974,14 +974,14 @@ Y_UNIT_TEST_SUITE(YdbTableBulkUpsert) {
} else {
UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
}
- }
-
- {
- auto res = TestUpsertRow(client, "/Root/ui8/Value_index/indexImplTable", 1, 2);
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- }
- }
-
+ }
+
+ {
+ auto res = TestUpsertRow(client, "/Root/ui8/Value_index/indexImplTable", 1, 2);
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ }
+ }
+
Y_UNIT_TEST(SyncIndexShouldSucceed) {
Index(NYdb::NTable::EIndexType::GlobalSync);
}
@@ -994,314 +994,314 @@ Y_UNIT_TEST_SUITE(YdbTableBulkUpsert) {
Index(NYdb::NTable::EIndexType::GlobalAsync, true);
}
- Y_UNIT_TEST(Timeout) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint32)
- .AddNullableColumn("Value", EPrimitiveType::Uint8);
-
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- NYdb::NTable::TCreateTableSettings tableSettings;
- tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
- auto result = session.CreateTable("/Root/ui32", tableBuilder.Build(), tableSettings).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- bool gotTimeout = false;
- bool gotSuccess = false;
- for (ui64 usec = 1; (!gotTimeout || !gotSuccess) && usec < 15*1000*1000; usec *= 2) {
- TValueBuilder rows;
- rows.BeginList();
- for (ui32 k = 0; k < 300; ++k) {
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint32(NumericHash(k))
- .AddMember("Value").Uint8(k)
- .EndStruct();
- }
- rows.EndList();
-
- Cerr << usec << " usec" << Endl;
- auto res = client.BulkUpsert("/Root/ui32", rows.Build(),
- NYdb::NTable::TBulkUpsertSettings().OperationTimeout(TDuration::MicroSeconds(usec))
- ).GetValueSync();
- Cerr << res.GetIssues().ToString() << Endl;
- if (res.GetStatus() == EStatus::TIMEOUT) {
- gotTimeout = true;
- } else if (res.GetStatus() == EStatus::SUCCESS) {
- gotSuccess = true;
- }
- }
- UNIT_ASSERT(gotTimeout);
- UNIT_ASSERT(gotSuccess);
- }
-
- Y_UNIT_TEST(Overload) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
-
- {
- TClient annoyingClient(*server.ServerSettings);
-
-            // Create a table with a crazy compaction policy that can easily get
-            // overloaded
- const char * tableDescr = R"___(
- Name: "kv"
- Columns { Name: "Key" Type: "Uint64"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["Key"]
- UniformPartitionsCount: 5
-
- PartitionConfig {
- CompactionPolicy {
- InMemSizeToSnapshot: 100
- InMemStepsToSnapshot: 1
- InMemForceStepsToSnapshot: 1
- InMemForceSizeToSnapshot: 200
- InMemCompactionBrokerQueue: 0
- MinDataPageSize: 7168
- Generation {
- GenerationId: 0
- SizeToCompact: 100
- CountToCompact: 1
- ForceCountToCompact: 1
- ForceSizeToCompact: 200
- CompactionBrokerQueue: 1
- KeepInCache: false
- }
- }
- }
- )___";
-
- NMsgBusProxy::EResponseStatus status = annoyingClient.CreateTable("/Root", tableDescr);
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
- }
-
- TString location = TStringBuilder() << "localhost:" << grpc;
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
- NYdb::NTable::TTableClient client(connection);
-
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
-
- bool gotOverload = false;
- bool gotSuccess = false;
- TString blob(100, 'a');
- for (ui64 count = 0; (!gotOverload || !gotSuccess) && count < 100000; count++) {
- TValueBuilder rows;
-
- rows.BeginList();
-
- // Add a row for shard 0
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint64(0)
- .AddMember("Value").Utf8("")
- .EndStruct();
-
- // Add some random rows for other shards
- for (int i = 0; i < 10; ++i) {
- ui64 key = NumericHash(count + i) | (1ull << 63);
- rows.AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint64(key)
- .AddMember("Value").Utf8(blob)
- .EndStruct();
- }
-
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/kv", rows.Build()).GetValueSync();
-
- if (res.GetStatus() == EStatus::SUCCESS) {
- gotSuccess = true;
- Cerr << ".";
- } else {
- Cerr << Endl << res.GetIssues().ToString() << Endl;
- if (res.GetStatus() == EStatus::OVERLOADED && gotSuccess) {
- gotOverload = true;
- }
- }
- }
- UNIT_ASSERT(gotOverload);
- UNIT_ASSERT(gotSuccess);
- }
-
- void CreateTestTable(NYdb::NTable::TTableClient& client) {
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint32)
- .AddNullableColumn("Value", EPrimitiveType::Int32);
-
- tableBuilder.SetPrimaryKeyColumns({"Key"});
- auto result = session.CreateTable("/Root/Test", tableBuilder.Build()).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- Cerr << result.GetIssues().ToString() << Endl;
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
-    // Returns the specified status for the first N times and then returns SUCCESS.
- class TFailStatusInjector {
- const NYdb::EStatus StatusCode;
- const ui32 FailCount;
- ui32 CallCount;
- public:
- TFailStatusInjector(NYdb::EStatus statusCode, ui32 failCount)
- : StatusCode(statusCode)
- , FailCount(failCount)
- , CallCount(0)
- {}
-
- NYdb::EStatus GetInjectedStatus() {
- ++CallCount;
- if (CallCount > FailCount) {
- return EStatus::SUCCESS;
- }
- return StatusCode;
- }
-
- ui32 GetCallCount() const {
- return CallCount;
- }
- };
-
- const EStatus InjectedFailStatuses[] = {
- EStatus::ABORTED,
- EStatus::OVERLOADED,
- EStatus::CLIENT_RESOURCE_EXHAUSTED,
- EStatus::UNAVAILABLE,
- EStatus::BAD_SESSION,
- EStatus::SESSION_BUSY,
- EStatus::NOT_FOUND,
- EStatus::UNDETERMINED,
- EStatus::TRANSPORT_UNAVAILABLE
- };
-
+ Y_UNIT_TEST(Timeout) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint32)
+ .AddNullableColumn("Value", EPrimitiveType::Uint8);
+
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ NYdb::NTable::TCreateTableSettings tableSettings;
+ tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
+ auto result = session.CreateTable("/Root/ui32", tableBuilder.Build(), tableSettings).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ bool gotTimeout = false;
+ bool gotSuccess = false;
+ for (ui64 usec = 1; (!gotTimeout || !gotSuccess) && usec < 15*1000*1000; usec *= 2) {
+ TValueBuilder rows;
+ rows.BeginList();
+ for (ui32 k = 0; k < 300; ++k) {
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint32(NumericHash(k))
+ .AddMember("Value").Uint8(k)
+ .EndStruct();
+ }
+ rows.EndList();
+
+ Cerr << usec << " usec" << Endl;
+ auto res = client.BulkUpsert("/Root/ui32", rows.Build(),
+ NYdb::NTable::TBulkUpsertSettings().OperationTimeout(TDuration::MicroSeconds(usec))
+ ).GetValueSync();
+ Cerr << res.GetIssues().ToString() << Endl;
+ if (res.GetStatus() == EStatus::TIMEOUT) {
+ gotTimeout = true;
+ } else if (res.GetStatus() == EStatus::SUCCESS) {
+ gotSuccess = true;
+ }
+ }
+ UNIT_ASSERT(gotTimeout);
+ UNIT_ASSERT(gotSuccess);
+ }
+
+ Y_UNIT_TEST(Overload) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+
+ {
+ TClient annoyingClient(*server.ServerSettings);
+
+ // Create a table with crazy compaction policy that can easily get
+ // overloaded
+ const char * tableDescr = R"___(
+ Name: "kv"
+ Columns { Name: "Key" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["Key"]
+ UniformPartitionsCount: 5
+
+ PartitionConfig {
+ CompactionPolicy {
+ InMemSizeToSnapshot: 100
+ InMemStepsToSnapshot: 1
+ InMemForceStepsToSnapshot: 1
+ InMemForceSizeToSnapshot: 200
+ InMemCompactionBrokerQueue: 0
+ MinDataPageSize: 7168
+ Generation {
+ GenerationId: 0
+ SizeToCompact: 100
+ CountToCompact: 1
+ ForceCountToCompact: 1
+ ForceSizeToCompact: 200
+ CompactionBrokerQueue: 1
+ KeepInCache: false
+ }
+ }
+ }
+ )___";
+
+ NMsgBusProxy::EResponseStatus status = annoyingClient.CreateTable("/Root", tableDescr);
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
+ }
+
+ TString location = TStringBuilder() << "localhost:" << grpc;
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+ NYdb::NTable::TTableClient client(connection);
+
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+
+ bool gotOverload = false;
+ bool gotSuccess = false;
+ TString blob(100, 'a');
+ for (ui64 count = 0; (!gotOverload || !gotSuccess) && count < 100000; count++) {
+ TValueBuilder rows;
+
+ rows.BeginList();
+
+ // Add a row for shard 0
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(0)
+ .AddMember("Value").Utf8("")
+ .EndStruct();
+
+ // Add some random rows for other shards
+ for (int i = 0; i < 10; ++i) {
+ ui64 key = NumericHash(count + i) | (1ull << 63);
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(key)
+ .AddMember("Value").Utf8(blob)
+ .EndStruct();
+ }
+
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/kv", rows.Build()).GetValueSync();
+
+ if (res.GetStatus() == EStatus::SUCCESS) {
+ gotSuccess = true;
+ Cerr << ".";
+ } else {
+ Cerr << Endl << res.GetIssues().ToString() << Endl;
+ if (res.GetStatus() == EStatus::OVERLOADED && gotSuccess) {
+ gotOverload = true;
+ }
+ }
+ }
+ UNIT_ASSERT(gotOverload);
+ UNIT_ASSERT(gotSuccess);
+ }
+
+ void CreateTestTable(NYdb::NTable::TTableClient& client) {
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint32)
+ .AddNullableColumn("Value", EPrimitiveType::Int32);
+
+ tableBuilder.SetPrimaryKeyColumns({"Key"});
+ auto result = session.CreateTable("/Root/Test", tableBuilder.Build()).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ Cerr << result.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+    // Returns the specified status for the first N calls, then returns SUCCESS.
+ class TFailStatusInjector {
+ const NYdb::EStatus StatusCode;
+ const ui32 FailCount;
+ ui32 CallCount;
+ public:
+ TFailStatusInjector(NYdb::EStatus statusCode, ui32 failCount)
+ : StatusCode(statusCode)
+ , FailCount(failCount)
+ , CallCount(0)
+ {}
+
+ NYdb::EStatus GetInjectedStatus() {
+ ++CallCount;
+ if (CallCount > FailCount) {
+ return EStatus::SUCCESS;
+ }
+ return StatusCode;
+ }
+
+ ui32 GetCallCount() const {
+ return CallCount;
+ }
+ };
+
+ const EStatus InjectedFailStatuses[] = {
+ EStatus::ABORTED,
+ EStatus::OVERLOADED,
+ EStatus::CLIENT_RESOURCE_EXHAUSTED,
+ EStatus::UNAVAILABLE,
+ EStatus::BAD_SESSION,
+ EStatus::SESSION_BUSY,
+ EStatus::NOT_FOUND,
+ EStatus::UNDETERMINED,
+ EStatus::TRANSPORT_UNAVAILABLE
+ };
+
NYdb::NTable::TRetryOperationSettings GetTestRetrySettings() {
NYdb::NTable::TRetryOperationSettings retrySettings;
- retrySettings
- .Idempotent(true)
+ retrySettings
+ .Idempotent(true)
.FastBackoffSettings(NYdb::NTable::TBackoffSettings().SlotDuration(TDuration::MilliSeconds(1)).Ceiling(3))
.SlowBackoffSettings(NYdb::NTable::TBackoffSettings().SlotDuration(TDuration::MilliSeconds(1)).Ceiling(3))
- .MaxRetries(5);
- return retrySettings;
- }
-
- Y_UNIT_TEST(RetryOperationSync) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient db(connection);
-
- CreateTestTable(db);
-
+ .MaxRetries(5);
+ return retrySettings;
+ }
+
+ Y_UNIT_TEST(RetryOperationSync) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient db(connection);
+
+ CreateTestTable(db);
+
NYdb::NTable::TRetryOperationSettings retrySettings = GetTestRetrySettings();
-
- for (EStatus injectedStatus : InjectedFailStatuses) {
- for (ui32 injectCount : {10, 6, 5, 3, 0}) {
- TFailStatusInjector failInjector(injectedStatus, injectCount);
-
- Cerr << "Injecting " << injectedStatus << " " << injectCount << " times" << Endl;
-
- auto status = db.RetryOperationSync([&failInjector](NYdb::NTable::TTableClient& db) {
- EStatus injected = failInjector.GetInjectedStatus();
- if (injected != EStatus::SUCCESS) {
+
+ for (EStatus injectedStatus : InjectedFailStatuses) {
+ for (ui32 injectCount : {10, 6, 5, 3, 0}) {
+ TFailStatusInjector failInjector(injectedStatus, injectCount);
+
+ Cerr << "Injecting " << injectedStatus << " " << injectCount << " times" << Endl;
+
+ auto status = db.RetryOperationSync([&failInjector](NYdb::NTable::TTableClient& db) {
+ EStatus injected = failInjector.GetInjectedStatus();
+ if (injected != EStatus::SUCCESS) {
return NYdb::NTable::TBulkUpsertResult(TStatus(injected, NYql::TIssues()));
- }
-
- NYdb::TValueBuilder rows;
- rows.BeginList()
- .AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint32(1)
- .AddMember("Value").Int32(42)
- .EndStruct()
- .EndList();
- auto status = db.BulkUpsert("Root/Test", rows.Build()).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
- return status;
- }, retrySettings);
-
- Cerr << "Result: " << status.GetStatus() << Endl;
-
- if (injectCount < retrySettings.MaxRetries_ + 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
- UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), injectCount + 1);
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), injectedStatus, status.GetIssues().ToString());
- UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), retrySettings.MaxRetries_ + 1);
- }
- }
- }
- }
-
- Y_UNIT_TEST(RetryOperation) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient db(connection);
-
- CreateTestTable(db);
-
+ }
+
+ NYdb::TValueBuilder rows;
+ rows.BeginList()
+ .AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint32(1)
+ .AddMember("Value").Int32(42)
+ .EndStruct()
+ .EndList();
+ auto status = db.BulkUpsert("Root/Test", rows.Build()).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
+ return status;
+ }, retrySettings);
+
+ Cerr << "Result: " << status.GetStatus() << Endl;
+
+ if (injectCount < retrySettings.MaxRetries_ + 1) {
+ UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
+ UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), injectCount + 1);
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), injectedStatus, status.GetIssues().ToString());
+ UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), retrySettings.MaxRetries_ + 1);
+ }
+ }
+ }
+ }
+
+ Y_UNIT_TEST(RetryOperation) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient db(connection);
+
+ CreateTestTable(db);
+
NYdb::NTable::TRetryOperationSettings retrySettings = GetTestRetrySettings();
-
- for (EStatus injectedStatus : InjectedFailStatuses) {
- for (ui32 injectCount : {10, 6, 5, 3, 0}) {
- TFailStatusInjector failInjector(injectedStatus, injectCount);
-
- Cerr << "Injecting " << injectedStatus << " " << injectCount << " times" << Endl;
-
- std::function<NYdb::NTable::TAsyncBulkUpsertResult(NYdb::NTable::TTableClient& tableClient)> bulkUpsertOp =
- [&failInjector](NYdb::NTable::TTableClient& db) {
- EStatus injected = failInjector.GetInjectedStatus();
- if (injected != EStatus::SUCCESS) {
+
+ for (EStatus injectedStatus : InjectedFailStatuses) {
+ for (ui32 injectCount : {10, 6, 5, 3, 0}) {
+ TFailStatusInjector failInjector(injectedStatus, injectCount);
+
+ Cerr << "Injecting " << injectedStatus << " " << injectCount << " times" << Endl;
+
+ std::function<NYdb::NTable::TAsyncBulkUpsertResult(NYdb::NTable::TTableClient& tableClient)> bulkUpsertOp =
+ [&failInjector](NYdb::NTable::TTableClient& db) {
+ EStatus injected = failInjector.GetInjectedStatus();
+ if (injected != EStatus::SUCCESS) {
return NThreading::MakeFuture<NYdb::NTable::TBulkUpsertResult>(NYdb::NTable::TBulkUpsertResult(TStatus(injected, NYql::TIssues())));
- }
-
- NYdb::TValueBuilder rows;
- rows.BeginList()
- .AddListItem()
- .BeginStruct()
- .AddMember("Key").Uint32(1)
- .AddMember("Value").Int32(42)
- .EndStruct()
- .EndList();
- return db.BulkUpsert("Root/Test", rows.Build());
- };
-
- auto status = db.RetryOperation(bulkUpsertOp, retrySettings).GetValueSync();
-
- Cerr << "Result: " << status.GetStatus() << Endl;
-
- if (injectCount < retrySettings.MaxRetries_ + 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
- UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), injectCount + 1);
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), injectedStatus, status.GetIssues().ToString());
- UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), retrySettings.MaxRetries_ + 1);
- }
- }
- }
- }
-}
+ }
+
+ NYdb::TValueBuilder rows;
+ rows.BeginList()
+ .AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint32(1)
+ .AddMember("Value").Int32(42)
+ .EndStruct()
+ .EndList();
+ return db.BulkUpsert("Root/Test", rows.Build());
+ };
+
+ auto status = db.RetryOperation(bulkUpsertOp, retrySettings).GetValueSync();
+
+ Cerr << "Result: " << status.GetStatus() << Endl;
+
+ if (injectCount < retrySettings.MaxRetries_ + 1) {
+ UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), EStatus::SUCCESS, status.GetIssues().ToString());
+ UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), injectCount + 1);
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL_C(status.GetStatus(), injectedStatus, status.GetIssues().ToString());
+ UNIT_ASSERT_VALUES_EQUAL(failInjector.GetCallCount(), retrySettings.MaxRetries_ + 1);
+ }
+ }
+ }
+ }
+}
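
The retry pattern exercised above is the same one ordinary client code would use: wrap an idempotent BulkUpsert in RetryOperationSync and let the SDK handle transient statuses. A minimal sketch, assuming the NYdb C++ SDK types and headers already used in these tests; the endpoint, table path and row values are placeholders:

    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    using namespace NYdb;
    using namespace NYdb::NTable;

    void UpsertWithRetries() {
        TDriver driver(TDriverConfig().SetEndpoint("localhost:2135"));
        TTableClient db(driver);

        // Mirror the settings built in GetTestRetrySettings(): the operation is
        // idempotent, so transient failures can simply be retried.
        TRetryOperationSettings retrySettings;
        retrySettings
            .Idempotent(true)
            .MaxRetries(5);

        auto result = db.RetryOperationSync([](TTableClient& client) {
            TValueBuilder rows;
            rows.BeginList()
                .AddListItem()
                    .BeginStruct()
                        .AddMember("Key").Uint32(1)
                        .AddMember("Value").Int32(42)
                    .EndStruct()
                .EndList();
            // Any retryable status returned here (OVERLOADED, UNAVAILABLE, ...)
            // is handled by RetryOperationSync according to retrySettings.
            return client.BulkUpsert("/Root/Test", rows.Build()).GetValueSync();
        }, retrySettings);

        Cerr << "BulkUpsert finished with status " << result.GetStatus() << Endl;
    }
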
diff --git a/ydb/services/ydb/ydb_clickhouse_internal.cpp b/ydb/services/ydb/ydb_clickhouse_internal.cpp
index b4820a2bacc..543bcd98ee1 100644
--- a/ydb/services/ydb/ydb_clickhouse_internal.cpp
+++ b/ydb/services/ydb/ydb_clickhouse_internal.cpp
@@ -1,63 +1,63 @@
-#include "ydb_clickhouse_internal.h"
-
+#include "ydb_clickhouse_internal.h"
+
#include <ydb/core/grpc_services/grpc_helper.h>
#include <ydb/core/grpc_services/grpc_request_proxy.h>
#include <ydb/core/grpc_services/rpc_calls.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-TGRpcYdbClickhouseInternalService::TGRpcYdbClickhouseInternalService(NActors::TActorSystem *system,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
- TIntrusivePtr<TInFlightLimiterRegistry> inFlightLimiterRegistry,
+
+namespace NKikimr {
+namespace NGRpcService {
+
+TGRpcYdbClickhouseInternalService::TGRpcYdbClickhouseInternalService(NActors::TActorSystem *system,
+ TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+ TIntrusivePtr<TInFlightLimiterRegistry> inFlightLimiterRegistry,
NActors::TActorId id)
- : ActorSystem_(system)
- , Counters_(counters)
- , LimiterRegistry_(inFlightLimiterRegistry)
- , GRpcRequestProxyId_(id) {}
-
+ : ActorSystem_(system)
+ , Counters_(counters)
+ , LimiterRegistry_(inFlightLimiterRegistry)
+ , GRpcRequestProxyId_(id) {}
+
void TGRpcYdbClickhouseInternalService::InitService(grpc::ServerCompletionQueue *cq, NGrpc::TLoggerPtr logger) {
- CQ_ = cq;
+ CQ_ = cq;
SetupIncomingRequests(std::move(logger));
-}
-
+}
+
void TGRpcYdbClickhouseInternalService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) {
- Limiter_ = limiter;
-}
-
-bool TGRpcYdbClickhouseInternalService::IncRequest() {
- return Limiter_->Inc();
-}
-
-void TGRpcYdbClickhouseInternalService::DecRequest() {
- Limiter_->Dec();
- Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
-}
-
+ Limiter_ = limiter;
+}
+
+bool TGRpcYdbClickhouseInternalService::IncRequest() {
+ return Limiter_->Inc();
+}
+
+void TGRpcYdbClickhouseInternalService::DecRequest() {
+ Limiter_->Dec();
+ Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
+}
+
void TGRpcYdbClickhouseInternalService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
- auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
- auto getLimiter = CreateLimiterCb(LimiterRegistry_);
-
-#ifdef ADD_REQUEST
-#error ADD_REQUEST macro already defined
-#endif
-#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
- MakeIntrusive<TGRpcRequest<Ydb::ClickhouseInternal::IN, Ydb::ClickhouseInternal::OUT, TGRpcYdbClickhouseInternalService>>(this, &Service_, CQ_, \
+ auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
+ auto getLimiter = CreateLimiterCb(LimiterRegistry_);
+
+#ifdef ADD_REQUEST
+#error ADD_REQUEST macro already defined
+#endif
+#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
+ MakeIntrusive<TGRpcRequest<Ydb::ClickhouseInternal::IN, Ydb::ClickhouseInternal::OUT, TGRpcYdbClickhouseInternalService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("clickhouse_internal", #NAME), getLimiter("ClickhouseInternal", #NAME, DEFAULT_MAX_IN_FLIGHT))->Run();
-
- ADD_REQUEST(Scan, ScanRequest, ScanResponse, {
- ActorSystem_->Send(GRpcRequestProxyId_, new TEvReadColumnsRequest(ctx));
- })
- ADD_REQUEST(GetShardLocations, GetShardLocationsRequest, GetShardLocationsResponse, {
- ActorSystem_->Send(GRpcRequestProxyId_, new TEvGetShardLocationsRequest(ctx));
- })
- ADD_REQUEST(DescribeTable, DescribeTableRequest, DescribeTableResponse, {
- ActorSystem_->Send(GRpcRequestProxyId_, new TEvKikhouseDescribeTableRequest(ctx));
- })
+
+ ADD_REQUEST(Scan, ScanRequest, ScanResponse, {
+ ActorSystem_->Send(GRpcRequestProxyId_, new TEvReadColumnsRequest(ctx));
+ })
+ ADD_REQUEST(GetShardLocations, GetShardLocationsRequest, GetShardLocationsResponse, {
+ ActorSystem_->Send(GRpcRequestProxyId_, new TEvGetShardLocationsRequest(ctx));
+ })
+ ADD_REQUEST(DescribeTable, DescribeTableRequest, DescribeTableResponse, {
+ ActorSystem_->Send(GRpcRequestProxyId_, new TEvKikhouseDescribeTableRequest(ctx));
+ })
ADD_REQUEST(CreateSnapshot, CreateSnapshotRequest, CreateSnapshotResponse, {
ActorSystem_->Send(GRpcRequestProxyId_, new TEvKikhouseCreateSnapshotRequest(ctx));
})
@@ -67,8 +67,8 @@ void TGRpcYdbClickhouseInternalService::SetupIncomingRequests(NGrpc::TLoggerPtr
ADD_REQUEST(DiscardSnapshot, DiscardSnapshotRequest, DiscardSnapshotResponse, {
ActorSystem_->Send(GRpcRequestProxyId_, new TEvKikhouseDiscardSnapshotRequest(ctx));
})
-#undef ADD_REQUEST
-}
-
-} // namespace NGRpcService
-} // namespace NKikimr
+#undef ADD_REQUEST
+}
+
+} // namespace NGRpcService
+} // namespace NKikimr
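
For readers tracing the ADD_REQUEST macro defined above: a single invocation such as ADD_REQUEST(Scan, ScanRequest, ScanResponse, { ... }) expands to roughly the following. This is a hand-expanded sketch of the macro body shown in this file, included only for illustration:

    MakeIntrusive<TGRpcRequest<Ydb::ClickhouseInternal::ScanRequest,
                               Ydb::ClickhouseInternal::ScanResponse,
                               TGRpcYdbClickhouseInternalService>>(this, &Service_, CQ_,
        [this](NGrpc::IRequestContextBase* ctx) {
            // Report the call to monitoring, then forward it to the gRPC
            // request proxy actor (this is the ACTION block of the macro).
            NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer());
            ActorSystem_->Send(GRpcRequestProxyId_, new TEvReadColumnsRequest(ctx));
        },
        &Ydb::ClickhouseInternal::V1::ClickhouseInternalService::AsyncService::RequestScan,
        "Scan", logger, getCounterBlock("clickhouse_internal", "Scan"),
        getLimiter("ClickhouseInternal", "Scan", DEFAULT_MAX_IN_FLIGHT))->Run();
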
diff --git a/ydb/services/ydb/ydb_clickhouse_internal.h b/ydb/services/ydb/ydb_clickhouse_internal.h
index 4fca32ce756..a0ff8863d10 100644
--- a/ydb/services/ydb/ydb_clickhouse_internal.h
+++ b/ydb/services/ydb/ydb_clickhouse_internal.h
@@ -1,41 +1,41 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/api/grpc/draft/ydb_clickhouse_internal_v1.grpc.pb.h>
-
+
#include <ydb/core/grpc_services/grpc_helper.h>
#include <library/cpp/grpc/server/grpc_server.h>
-
+
#include <library/cpp/actors/core/actorsystem.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-class TGRpcYdbClickhouseInternalService
+
+namespace NKikimr {
+namespace NGRpcService {
+
+class TGRpcYdbClickhouseInternalService
: public NGrpc::TGrpcServiceBase<Ydb::ClickhouseInternal::V1::ClickhouseInternalService>
-{
-private:
- constexpr static i64 DEFAULT_MAX_IN_FLIGHT = 200;
-
-public:
- TGRpcYdbClickhouseInternalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+{
+private:
+ constexpr static i64 DEFAULT_MAX_IN_FLIGHT = 200;
+
+public:
+ TGRpcYdbClickhouseInternalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> inFlightLimiterRegistry, NActors::TActorId id);
-
+
void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override;
void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override;
-
- bool IncRequest();
- void DecRequest();
-private:
+
+ bool IncRequest();
+ void DecRequest();
+private:
void SetupIncomingRequests(NGrpc::TLoggerPtr logger);
-
- NActors::TActorSystem* ActorSystem_;
+
+ NActors::TActorSystem* ActorSystem_;
grpc::ServerCompletionQueue* CQ_ = nullptr;
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
- TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> LimiterRegistry_;
+
+ TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
+ TIntrusivePtr<NGRpcService::TInFlightLimiterRegistry> LimiterRegistry_;
NActors::TActorId GRpcRequestProxyId_;
NGrpc::TGlobalLimiter* Limiter_ = nullptr;
-};
-
-} // namespace NGRpcService
-} // namespace NKikimr
+};
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_common_ut.h b/ydb/services/ydb/ydb_common_ut.h
index 714ff922634..c067928c785 100644
--- a/ydb/services/ydb/ydb_common_ut.h
+++ b/ydb/services/ydb/ydb_common_ut.h
@@ -78,19 +78,19 @@ public:
ServerSettings->SetEnableSystemViews(TestSettings::EnableSystemViews);
ServerSettings->SetEnableSchemeTransactionsAtSchemeShard(true);
ServerSettings->SetEnableYq(enableYq);
- ServerSettings->Formats = new TFormatFactory;
+ ServerSettings->Formats = new TFormatFactory;
ServerSettings->PQConfig = appConfig.GetPQConfig();
if (appConfig.HasMeteringConfig() && appConfig.GetMeteringConfig().HasMeteringFilePath()) {
ServerSettings->SetMeteringFilePath(appConfig.GetMeteringConfig().GetMeteringFilePath());
}
ServerSettings->RegisterGrpcService<NKikimr::NGRpcService::TGRpcYdbDummyService>("dummy");
- if (udfFrFactory) {
- ServerSettings->SetFrFactory(udfFrFactory);
- }
+ if (udfFrFactory) {
+ ServerSettings->SetFrFactory(udfFrFactory);
+ }
if (builder) {
builder(*ServerSettings);;
}
-
+
Server_.Reset(new TServer(*ServerSettings));
Tenants_.Reset(new Tests::TTenants(Server_));
@@ -130,20 +130,20 @@ public:
return GRpcPort_;
}
- TPortManager& GetPortManager() {
- return PortManager;
- }
-
- void ResetSchemeCache(TString path, ui32 nodeIndex = 0) {
- TTestActorRuntime* runtime = Server_->GetRuntime();
- TClient annoyingClient(*ServerSettings);
- annoyingClient.RefreshPathCache(runtime, path, nodeIndex);
- }
-
- TTestActorRuntime* GetRuntime() {
- return Server_->GetRuntime();
- }
-
+ TPortManager& GetPortManager() {
+ return PortManager;
+ }
+
+ void ResetSchemeCache(TString path, ui32 nodeIndex = 0) {
+ TTestActorRuntime* runtime = Server_->GetRuntime();
+ TClient annoyingClient(*ServerSettings);
+ annoyingClient.RefreshPathCache(runtime, path, nodeIndex);
+ }
+
+ TTestActorRuntime* GetRuntime() {
+ return Server_->GetRuntime();
+ }
+
Tests::TServer& GetServer() {
return *Server_;
}
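
The accessors added to the test harness above (GetPortManager, ResetSchemeCache, GetRuntime) are intended to be called from test bodies. A hypothetical usage sketch, reusing only names that already appear in this diff:

    TKikimrWithGrpcAndRootSchema server;

    // Refresh the scheme cache entry for a path on node 0; this wraps
    // TClient::RefreshPathCache as shown in the implementation above.
    server.ResetSchemeCache("/Root/kv");

    // Direct access to the test actor runtime, e.g. to raise log verbosity
    // for a component under investigation.
    server.GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_DEBUG);
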
diff --git a/ydb/services/ydb/ydb_dummy.cpp b/ydb/services/ydb/ydb_dummy.cpp
index 73cfdd99c7a..c628b0b3e4a 100644
--- a/ydb/services/ydb/ydb_dummy.cpp
+++ b/ydb/services/ydb/ydb_dummy.cpp
@@ -145,9 +145,9 @@ void TGRpcYdbDummyService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
MakeIntrusive<TGRpcRequest<Draft::Dummy::IN, Draft::Dummy::OUT, TGRpcYdbDummyService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Draft::Dummy::DummyService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Draft::Dummy::DummyService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("dummy", #NAME))->Run();
ADD_REQUEST(Ping, PingRequest, PingResponse, {
diff --git a/ydb/services/ydb/ydb_experimental.cpp b/ydb/services/ydb/ydb_experimental.cpp
index 9e063be7285..1ca0009770d 100644
--- a/ydb/services/ydb/ydb_experimental.cpp
+++ b/ydb/services/ydb/ydb_experimental.cpp
@@ -1,55 +1,55 @@
-#include "ydb_experimental.h"
-
+#include "ydb_experimental.h"
+
#include <ydb/core/grpc_services/grpc_helper.h>
#include <ydb/core/grpc_services/grpc_request_proxy.h>
#include <ydb/core/grpc_services/rpc_calls.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-TGRpcYdbExperimentalService::TGRpcYdbExperimentalService(NActors::TActorSystem *system,
+
+namespace NKikimr {
+namespace NGRpcService {
+
+TGRpcYdbExperimentalService::TGRpcYdbExperimentalService(NActors::TActorSystem *system,
TIntrusivePtr<NMonitoring::TDynamicCounters> counters, NActors::TActorId id)
- : ActorSystem_(system)
- , Counters_(counters)
- , GRpcRequestProxyId_(id) {}
-
+ : ActorSystem_(system)
+ , Counters_(counters)
+ , GRpcRequestProxyId_(id) {}
+
void TGRpcYdbExperimentalService::InitService(grpc::ServerCompletionQueue *cq, NGrpc::TLoggerPtr logger) {
- CQ_ = cq;
+ CQ_ = cq;
SetupIncomingRequests(std::move(logger));
-}
-
+}
+
void TGRpcYdbExperimentalService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) {
- Limiter_ = limiter;
-}
-
-bool TGRpcYdbExperimentalService::IncRequest() {
- return Limiter_->Inc();
-}
-
-void TGRpcYdbExperimentalService::DecRequest() {
- Limiter_->Dec();
- Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
-}
-
+ Limiter_ = limiter;
+}
+
+bool TGRpcYdbExperimentalService::IncRequest() {
+ return Limiter_->Inc();
+}
+
+void TGRpcYdbExperimentalService::DecRequest() {
+ Limiter_->Dec();
+ Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
+}
+
void TGRpcYdbExperimentalService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
- auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
-
-#ifdef ADD_REQUEST
-#error ADD_REQUEST macro already defined
-#endif
-#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
- MakeIntrusive<TGRpcRequest<Ydb::Experimental::IN, Ydb::Experimental::OUT, TGRpcYdbExperimentalService>>(this, &Service_, CQ_, \
+ auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
+
+#ifdef ADD_REQUEST
+#error ADD_REQUEST macro already defined
+#endif
+#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
+ MakeIntrusive<TGRpcRequest<Ydb::Experimental::IN, Ydb::Experimental::OUT, TGRpcYdbExperimentalService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::Experimental::V1::ExperimentalService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::Experimental::V1::ExperimentalService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("experimental", #NAME))->Run();
-
+
ADD_REQUEST(ExecuteStreamQuery, ExecuteStreamQueryRequest, ExecuteStreamQueryResponse, {
ActorSystem_->Send(GRpcRequestProxyId_, new TEvExperimentalStreamQueryRequest(ctx));
})
-#undef ADD_REQUEST
-}
-
-} // namespace NGRpcService
-} // namespace NKikimr
+#undef ADD_REQUEST
+}
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_experimental.h b/ydb/services/ydb/ydb_experimental.h
index 1e92fc38ad7..c312a18cacb 100644
--- a/ydb/services/ydb/ydb_experimental.h
+++ b/ydb/services/ydb/ydb_experimental.h
@@ -1,36 +1,36 @@
-#pragma once
-
+#pragma once
+
#include <library/cpp/actors/core/actorsystem.h>
-
+
#include <library/cpp/grpc/server/grpc_server.h>
-
+
#include <ydb/public/api/grpc/draft/ydb_experimental_v1.grpc.pb.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-class TGRpcYdbExperimentalService
+
+namespace NKikimr {
+namespace NGRpcService {
+
+class TGRpcYdbExperimentalService
: public NGrpc::TGrpcServiceBase<Ydb::Experimental::V1::ExperimentalService>
-{
-public:
- TGRpcYdbExperimentalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+{
+public:
+ TGRpcYdbExperimentalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
NActors::TActorId id);
-
+
void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override;
void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override;
-
- bool IncRequest();
- void DecRequest();
-private:
+
+ bool IncRequest();
+ void DecRequest();
+private:
void SetupIncomingRequests(NGrpc::TLoggerPtr logger);
-
- NActors::TActorSystem* ActorSystem_;
+
+ NActors::TActorSystem* ActorSystem_;
grpc::ServerCompletionQueue* CQ_ = nullptr;
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
+
+ TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
NActors::TActorId GRpcRequestProxyId_;
NGrpc::TGlobalLimiter* Limiter_ = nullptr;
-};
-
-} // namespace NGRpcService
-} // namespace NKikimr
+};
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_index_table_ut.cpp b/ydb/services/ydb/ydb_index_table_ut.cpp
index 05bea665f2f..3c31f43a5ab 100644
--- a/ydb/services/ydb/ydb_index_table_ut.cpp
+++ b/ydb/services/ydb/ydb_index_table_ut.cpp
@@ -4,7 +4,7 @@
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/client/flat_ut_client.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
#include "ydb_common_ut.h"
@@ -16,245 +16,245 @@
using namespace NYdb;
using namespace NYdb::NTable;
-void CreateTestTableWithIndex(NYdb::NTable::TTableClient& client) {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
+void CreateTestTableWithIndex(NYdb::NTable::TTableClient& client) {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
- const ui32 SHARD_COUNT = 4;
+ const ui32 SHARD_COUNT = 4;
- {
- auto tableBuilder = client.GetTableBuilder()
- .AddNullableColumn("NameHash", EPrimitiveType::Uint32)
- .AddNullableColumn("Name", EPrimitiveType::Utf8)
- .AddNullableColumn("Version", EPrimitiveType::Uint32)
- .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
- .AddNullableColumn("Data", EPrimitiveType::String)
- .SetPrimaryKeyColumns({"NameHash", "Name"})
- .AddSecondaryIndex("TimestampIndex",TVector<TString>({"Timestamp", "Name", "Version"}));
+ {
+ auto tableBuilder = client.GetTableBuilder()
+ .AddNullableColumn("NameHash", EPrimitiveType::Uint32)
+ .AddNullableColumn("Name", EPrimitiveType::Utf8)
+ .AddNullableColumn("Version", EPrimitiveType::Uint32)
+ .AddNullableColumn("Timestamp", EPrimitiveType::Int64)
+ .AddNullableColumn("Data", EPrimitiveType::String)
+ .SetPrimaryKeyColumns({"NameHash", "Name"})
+ .AddSecondaryIndex("TimestampIndex",TVector<TString>({"Timestamp", "Name", "Version"}));
- auto tableSettings = NYdb::NTable::TCreateTableSettings().PartitioningPolicy(
- NYdb::NTable::TPartitioningPolicy().UniformPartitions(SHARD_COUNT));
+ auto tableSettings = NYdb::NTable::TCreateTableSettings().PartitioningPolicy(
+ NYdb::NTable::TPartitioningPolicy().UniformPartitions(SHARD_COUNT));
- auto result = session.CreateTable("/Root/Foo", tableBuilder.Build(), tableSettings).ExtractValueSync();
+ auto result = session.CreateTable("/Root/Foo", tableBuilder.Build(), tableSettings).ExtractValueSync();
UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
}
-}
-
-Y_UNIT_TEST_SUITE(YdbIndexTable) {
- Y_UNIT_TEST(FastSplitIndex) {
- TKikimrWithGrpcAndRootSchema server;
-
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
- NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
-
- CreateTestTableWithIndex(client);
-
- size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
- Cerr << "Index table has " << shardsBefore << " shards" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
-
+}
+
+Y_UNIT_TEST_SUITE(YdbIndexTable) {
+ Y_UNIT_TEST(FastSplitIndex) {
+ TKikimrWithGrpcAndRootSchema server;
+
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+ NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
+
+ CreateTestTableWithIndex(client);
+
+ size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
+ Cerr << "Index table has " << shardsBefore << " shards" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
+
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_NOTICE);
-
- // Set low CPU usage threshold for robustness
- TAtomic unused;
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 1, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
-
- TString query =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
- " VALUES ($name_hash, $name, $version, $timestamp);";
-
- TAtomic enough = 0;
- auto threadFunc = [&client, &query, &enough](TString namePrefix) {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- for (int key = 0 ; key < 2000 && !AtomicGet(enough); ++key) {
- TString name = namePrefix + ToString(key);
-
- auto paramsBuilder = client.GetParamsBuilder();
- auto params = paramsBuilder
- .AddParam("$name_hash")
- .Uint32(MurmurHash<ui32>(name.data(), name.size()))
- .Build()
- .AddParam("$name")
- .Utf8(name)
- .Build()
- .AddParam("$version")
- .Uint32(key%5)
- .Build()
- .AddParam("$timestamp")
- .Int64(key%10)
- .Build()
- .Build();
-
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), std::move(params)).ExtractValueSync();
-
- if (!result.IsSuccess() && result.GetStatus() != NYdb::EStatus::OVERLOADED) {
- TString err = result.GetIssues().ToString();
- Cerr << result.GetStatus() << ": " << err << Endl;
- }
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- }
- };
-
- IThreadFactory* pool = SystemThreadFactory();
-
- TAtomic finished = 0;
- TVector<TAutoPtr<IThreadFactory::IThread>> threads;
- threads.resize(10);
- for (size_t i = 0; i < threads.size(); i++) {
- TString namePrefix;
- namePrefix.append(5000, 'a' + i);
- threads[i] = pool->Run([threadFunc, namePrefix, &finished]() {
- threadFunc(namePrefix);
- AtomicIncrement(finished);
- });
- }
-
- // Wait for split to happen
- while (AtomicGet(finished) < (i64)threads.size()) {
- size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
- if (shardsAfter > shardsBefore) {
- AtomicSet(enough, 1);
- break;
- }
- Sleep(TDuration::Seconds(5));
- }
-
- for (size_t i = 0; i < threads.size(); i++) {
- threads[i]->Join();
- }
-
- int retries = 5;
- size_t shardsAfter = 0;
- for (;retries > 0 && shardsAfter <= shardsBefore; --retries, Sleep(TDuration::Seconds(1))) {
- shardsAfter = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
- }
- Cerr << "Index table has " << shardsAfter << " shards" << Endl;
- UNIT_ASSERT_C(shardsAfter > shardsBefore, "Index table didn't split!!11 O_O");
- }
-
- Y_UNIT_TEST(AlterIndexImplBySuperUser) {
- TKikimrWithGrpcAndRootSchema server;
-
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
- NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
-
- CreateTestTableWithIndex(client);
-
- server.Server_->GetRuntime()->GetAppData().AdministrationAllowedSIDs.push_back("root@builtin");
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_NOTICE);
-
- {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- auto type = TTypeBuilder().BeginOptional().Primitive(EPrimitiveType::Uint64).EndOptional().Build();
- auto alter = TAlterTableSettings().AppendAddColumns(TColumn("FinishedTimestamp", type));
-
- auto alterResult = session.AlterTable("Root/Foo/TimestampIndex/indexImplTable", alter).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(alterResult.GetStatus(), EStatus::SCHEME_ERROR,
- "Alter of index impl table must fail");
- }
-
- {
- TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
- Name: "indexImplTable"
- PartitionConfig {
- PartitioningPolicy {
- FastSplitSettings {
- SizeThreshold: 10000
- RowCountThreshold: 10000
- CpuPercentageThreshold: 146
- }
- }
- }
- )", "user@builtin");
- UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "User must not be able to alter index impl table");
- UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Administrative access denied");
-
- auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
- // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
- UNIT_ASSERT(!description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
- }
-
- {
- TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
- Name: "indexImplTable"
- PartitionConfig {
- PartitioningPolicy {
- FastSplitSettings {
- SizeThreshold: 10000
- RowCountThreshold: 10000
- CpuPercentageThreshold: 146
- }
- }
- }
- )", "root@builtin");
- UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Super user must be able to alter partition config");
-
- auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
- // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
- UNIT_ASSERT(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
- auto& fastSplitSettings = description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().GetFastSplitSettings();
- UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetSizeThreshold(), 10000);
- UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetRowCountThreshold(), 10000);
- UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetCpuPercentageThreshold(), 146);
- }
-
- {
- TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
- Name: "indexImplTable"
- PartitionConfig {
- PartitioningPolicy {
- SizeToSplit: 13000001
- }
- }
- )", "root@builtin");
- UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Super user must be able to alter partition config");
-
- auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
- // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
- UNIT_ASSERT(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
- UNIT_ASSERT_VALUES_EQUAL(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().GetSizeToSplit(), 13000001);
- }
-
- {
- TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
- Name: "indexImplTable"
- Columns {
- Name: "NewColumn"
- Type: "Uint32"
- }
- )", "root@builtin");
-            UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "Super user must not be able to alter columns");
- UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Adding or dropping columns in index table is not supported");
- }
-
- {
- TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
- Name: "indexImplTable"
- DropColumns {
- Name: "Timestamp"
- }
- )", "root@builtin");
-            UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "Super user must not be able to alter columns");
- UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Adding or dropping columns in index table is not supported");
- }
- }
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_NOTICE);
+
+ // Set low CPU usage threshold for robustness
+ TAtomic unused;
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 1, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
+
+ TString query =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
+ " VALUES ($name_hash, $name, $version, $timestamp);";
+
+ TAtomic enough = 0;
+ auto threadFunc = [&client, &query, &enough](TString namePrefix) {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ for (int key = 0 ; key < 2000 && !AtomicGet(enough); ++key) {
+ TString name = namePrefix + ToString(key);
+
+ auto paramsBuilder = client.GetParamsBuilder();
+ auto params = paramsBuilder
+ .AddParam("$name_hash")
+ .Uint32(MurmurHash<ui32>(name.data(), name.size()))
+ .Build()
+ .AddParam("$name")
+ .Utf8(name)
+ .Build()
+ .AddParam("$version")
+ .Uint32(key%5)
+ .Build()
+ .AddParam("$timestamp")
+ .Int64(key%10)
+ .Build()
+ .Build();
+
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), std::move(params)).ExtractValueSync();
+
+ if (!result.IsSuccess() && result.GetStatus() != NYdb::EStatus::OVERLOADED) {
+ TString err = result.GetIssues().ToString();
+ Cerr << result.GetStatus() << ": " << err << Endl;
+ }
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ }
+ };
+
+ IThreadFactory* pool = SystemThreadFactory();
+
+ TAtomic finished = 0;
+ TVector<TAutoPtr<IThreadFactory::IThread>> threads;
+ threads.resize(10);
+ for (size_t i = 0; i < threads.size(); i++) {
+ TString namePrefix;
+ namePrefix.append(5000, 'a' + i);
+ threads[i] = pool->Run([threadFunc, namePrefix, &finished]() {
+ threadFunc(namePrefix);
+ AtomicIncrement(finished);
+ });
+ }
+
+ // Wait for split to happen
+ while (AtomicGet(finished) < (i64)threads.size()) {
+ size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
+ if (shardsAfter > shardsBefore) {
+ AtomicSet(enough, 1);
+ break;
+ }
+ Sleep(TDuration::Seconds(5));
+ }
+
+ for (size_t i = 0; i < threads.size(); i++) {
+ threads[i]->Join();
+ }
+
+ int retries = 5;
+ size_t shardsAfter = 0;
+ for (;retries > 0 && shardsAfter <= shardsBefore; --retries, Sleep(TDuration::Seconds(1))) {
+ shardsAfter = oldClient.GetTablePartitions("/Root/Foo/TimestampIndex/indexImplTable").size();
+ }
+ Cerr << "Index table has " << shardsAfter << " shards" << Endl;
+ UNIT_ASSERT_C(shardsAfter > shardsBefore, "Index table didn't split!!11 O_O");
+ }
+
+ Y_UNIT_TEST(AlterIndexImplBySuperUser) {
+ TKikimrWithGrpcAndRootSchema server;
+
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+ NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
+
+ CreateTestTableWithIndex(client);
+
+ server.Server_->GetRuntime()->GetAppData().AdministrationAllowedSIDs.push_back("root@builtin");
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_NOTICE);
+
+ {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ auto type = TTypeBuilder().BeginOptional().Primitive(EPrimitiveType::Uint64).EndOptional().Build();
+ auto alter = TAlterTableSettings().AppendAddColumns(TColumn("FinishedTimestamp", type));
+
+ auto alterResult = session.AlterTable("Root/Foo/TimestampIndex/indexImplTable", alter).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(alterResult.GetStatus(), EStatus::SCHEME_ERROR,
+ "Alter of index impl table must fail");
+ }
+
+ {
+ TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
+ Name: "indexImplTable"
+ PartitionConfig {
+ PartitioningPolicy {
+ FastSplitSettings {
+ SizeThreshold: 10000
+ RowCountThreshold: 10000
+ CpuPercentageThreshold: 146
+ }
+ }
+ }
+ )", "user@builtin");
+ UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "User must not be able to alter index impl table");
+ UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Administrative access denied");
+
+ auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
+ // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
+ UNIT_ASSERT(!description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
+ }
+
+ {
+ TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
+ Name: "indexImplTable"
+ PartitionConfig {
+ PartitioningPolicy {
+ FastSplitSettings {
+ SizeThreshold: 10000
+ RowCountThreshold: 10000
+ CpuPercentageThreshold: 146
+ }
+ }
+ }
+ )", "root@builtin");
+ UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Super user must be able to alter partition config");
+
+ auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
+ // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
+ UNIT_ASSERT(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
+ auto& fastSplitSettings = description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().GetFastSplitSettings();
+ UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetSizeThreshold(), 10000);
+ UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetRowCountThreshold(), 10000);
+ UNIT_ASSERT_VALUES_EQUAL(fastSplitSettings.GetCpuPercentageThreshold(), 146);
+ }
+
+ {
+ TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
+ Name: "indexImplTable"
+ PartitionConfig {
+ PartitioningPolicy {
+ SizeToSplit: 13000001
+ }
+ }
+ )", "root@builtin");
+ UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_OK, "Super user must be able to alter partition config");
+
+ auto description = oldClient.Ls("/Root/Foo/TimestampIndex/indexImplTable");
+ // Cerr << description->Record.GetPathDescription().GetTable().GetPartitionConfig() << Endl;
+ UNIT_ASSERT(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().HasFastSplitSettings());
+ UNIT_ASSERT_VALUES_EQUAL(description->Record.GetPathDescription().GetTable().GetPartitionConfig().GetPartitioningPolicy().GetSizeToSplit(), 13000001);
+ }
+
+ {
+ TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
+ Name: "indexImplTable"
+ Columns {
+ Name: "NewColumn"
+ Type: "Uint32"
+ }
+ )", "root@builtin");
+            UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "Super user must not be able to alter columns");
+ UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Adding or dropping columns in index table is not supported");
+ }
+
+ {
+ TAutoPtr<NMsgBusProxy::TBusResponse> result = oldClient.AlterTable("/Root/Foo/TimestampIndex", R"(
+ Name: "indexImplTable"
+ DropColumns {
+ Name: "Timestamp"
+ }
+ )", "root@builtin");
+            UNIT_ASSERT_VALUES_EQUAL_C(result->Record.GetStatus(), NMsgBusProxy::MSTATUS_ERROR, "Super user must not be able to alter columns");
+ UNIT_ASSERT_VALUES_EQUAL(result->Record.GetErrorReason(), "Adding or dropping columns in index table is not supported");
+ }
+ }
}
diff --git a/ydb/services/ydb/ydb_logstore.cpp b/ydb/services/ydb/ydb_logstore.cpp
index 556020dd3b8..607b4f25087 100644
--- a/ydb/services/ydb/ydb_logstore.cpp
+++ b/ydb/services/ydb/ydb_logstore.cpp
@@ -1,45 +1,45 @@
-#include "ydb_logstore.h"
-
+#include "ydb_logstore.h"
+
#include <ydb/core/grpc_services/service_logstore.h>
#include <ydb/core/grpc_services/base/base.h>
#include <ydb/core/grpc_services/grpc_helper.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-TGRpcYdbLogStoreService::TGRpcYdbLogStoreService(NActors::TActorSystem *system,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
- NActors::TActorId id)
- : ActorSystem_(system)
- , Counters_(counters)
- , GRpcRequestProxyId_(id) {}
-
-void TGRpcYdbLogStoreService::InitService(grpc::ServerCompletionQueue *cq, NGrpc::TLoggerPtr logger) {
- CQ_ = cq;
- SetupIncomingRequests(std::move(logger));
-}
-
-void TGRpcYdbLogStoreService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) {
- Limiter_ = limiter;
-}
-
-bool TGRpcYdbLogStoreService::IncRequest() {
- return Limiter_->Inc();
-}
-
-void TGRpcYdbLogStoreService::DecRequest() {
- Limiter_->Dec();
- Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
-}
-
-void TGRpcYdbLogStoreService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
+
+namespace NKikimr {
+namespace NGRpcService {
+
+TGRpcYdbLogStoreService::TGRpcYdbLogStoreService(NActors::TActorSystem *system,
+ TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+ NActors::TActorId id)
+ : ActorSystem_(system)
+ , Counters_(counters)
+ , GRpcRequestProxyId_(id) {}
+
+void TGRpcYdbLogStoreService::InitService(grpc::ServerCompletionQueue *cq, NGrpc::TLoggerPtr logger) {
+ CQ_ = cq;
+ SetupIncomingRequests(std::move(logger));
+}
+
+void TGRpcYdbLogStoreService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) {
+ Limiter_ = limiter;
+}
+
+bool TGRpcYdbLogStoreService::IncRequest() {
+ return Limiter_->Inc();
+}
+
+void TGRpcYdbLogStoreService::DecRequest() {
+ Limiter_->Dec();
+ Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
+}
+
+void TGRpcYdbLogStoreService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
using namespace Ydb;
- auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
-
-#ifdef ADD_REQUEST
-#error ADD_REQUEST macro already defined
-#endif
+ auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
+
+#ifdef ADD_REQUEST
+#error ADD_REQUEST macro already defined
+#endif
#define ADD_REQUEST(NAME, CB) \
MakeIntrusive<TGRpcRequest<LogStore::NAME##Request, LogStore::NAME##Response, TGRpcYdbLogStoreService>> \
(this, &Service_, CQ_, [this](NGrpc::IRequestContextBase *ctx) { \
@@ -48,19 +48,19 @@ void TGRpcYdbLogStoreService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
new TGrpcRequestOperationCall<LogStore::NAME##Request, LogStore::NAME##Response> \
(ctx, &CB)); \
}, &Ydb::LogStore::V1::LogStoreService::AsyncService::Request ## NAME, \
- #NAME, logger, getCounterBlock("logstore", #NAME))->Run();
-
+ #NAME, logger, getCounterBlock("logstore", #NAME))->Run();
+
ADD_REQUEST(CreateLogStore, DoCreateLogStoreRequest)
ADD_REQUEST(DescribeLogStore, DoDescribeLogStoreRequest)
ADD_REQUEST(DropLogStore, DoDropLogStoreRequest)
-
+
ADD_REQUEST(CreateLogTable, DoCreateLogTableRequest)
ADD_REQUEST(DescribeLogTable, DoDescribeLogTableRequest)
ADD_REQUEST(DropLogTable, DoDropLogTableRequest)
ADD_REQUEST(AlterLogTable, DoAlterLogTableRequest)
-
-#undef ADD_REQUEST
-}
-
-} // namespace NGRpcService
-} // namespace NKikimr
+
+#undef ADD_REQUEST
+}
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_logstore.h b/ydb/services/ydb/ydb_logstore.h
index 7a0498c8259..27a176eb0bb 100644
--- a/ydb/services/ydb/ydb_logstore.h
+++ b/ydb/services/ydb/ydb_logstore.h
@@ -1,35 +1,35 @@
-#pragma once
-
+#pragma once
+
#include <ydb/public/api/grpc/draft/ydb_logstore_v1.grpc.pb.h>
#include <ydb/core/grpc_services/grpc_helper.h>
-#include <library/cpp/grpc/server/grpc_server.h>
-#include <library/cpp/actors/core/actorsystem.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-class TGRpcYdbLogStoreService
- : public NGrpc::TGrpcServiceBase<Ydb::LogStore::V1::LogStoreService>
-{
-public:
- TGRpcYdbLogStoreService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
- NActors::TActorId id);
-
- void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override;
- void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override;
-
- bool IncRequest();
- void DecRequest();
-private:
- void SetupIncomingRequests(NGrpc::TLoggerPtr logger);
-
- NActors::TActorSystem* ActorSystem_;
+#include <library/cpp/grpc/server/grpc_server.h>
+#include <library/cpp/actors/core/actorsystem.h>
+
+namespace NKikimr {
+namespace NGRpcService {
+
+class TGRpcYdbLogStoreService
+ : public NGrpc::TGrpcServiceBase<Ydb::LogStore::V1::LogStoreService>
+{
+public:
+ TGRpcYdbLogStoreService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+ NActors::TActorId id);
+
+ void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override;
+ void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override;
+
+ bool IncRequest();
+ void DecRequest();
+private:
+ void SetupIncomingRequests(NGrpc::TLoggerPtr logger);
+
+ NActors::TActorSystem* ActorSystem_;
grpc::ServerCompletionQueue* CQ_ = nullptr;
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
- NActors::TActorId GRpcRequestProxyId_;
+
+ TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
+ NActors::TActorId GRpcRequestProxyId_;
NGrpc::TGlobalLimiter* Limiter_ = nullptr;
-};
-
-} // namespace NGRpcService
-} // namespace NKikimr
+};
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_logstore_ut.cpp b/ydb/services/ydb/ydb_logstore_ut.cpp
index 0ed21143e85..fa7679f1a2b 100644
--- a/ydb/services/ydb/ydb_logstore_ut.cpp
+++ b/ydb/services/ydb/ydb_logstore_ut.cpp
@@ -1,458 +1,458 @@
-#include "ydb_common_ut.h"
-
+#include "ydb_common_ut.h"
+
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <ydb/public/lib/experimental/ydb_logstore.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
-using namespace NYdb;
-
-Y_UNIT_TEST_SUITE(YdbLogStore) {
-
- void EnableDebugLogs(TKikimrWithGrpcAndRootSchema& server) {
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
- }
-
- NYdb::TDriver ConnectToServer(TKikimrWithGrpcAndRootSchema& server, const TString& token = {}) {
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetDatabase("/Root").SetAuthToken(token));
- return connection;
- }
-
- Y_UNIT_TEST(LogStore) {
- NKikimrConfig::TAppConfig appConfig;
- appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- NYdb::NLogStore::TSchema logSchema(
- {
- NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
- NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
- NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- },
- {"timestamp", "resource_type", "resource_id", "uid"}
- );
-
- auto connection = ConnectToServer(server);
- NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
- {
- THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
- schemaPresets["default"] = logSchema;
- NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
- auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DescribeLogStore("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
- UNIT_ASSERT_VALUES_EQUAL(descr.GetSchemaPresets().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(descr.GetSchemaPresets().count("default"), 1);
- const auto& schema = descr.GetSchemaPresets().begin()->second;
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
- TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
- UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
- }
-
- {
- auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- }
-
- Y_UNIT_TEST(Dirs) {
- NKikimrConfig::TAppConfig appConfig;
- appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- NYdb::NLogStore::TSchema logSchema(
- {
- NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
- NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
- NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- },
- {"timestamp", "resource_type", "resource_id", "uid"}
- );
-
- auto connection = ConnectToServer(server);
- NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
- {
- THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
- schemaPresets["default"] = logSchema;
- NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
- auto res = logStoreClient.CreateLogStore("/Root/home/folder/LogStore", std::move(storeDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DescribeLogStore("/Root/home/folder/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- NYdb::NScheme::TSchemeClient schemeClient(connection);
-
- // MkDir inside LogStore
- {
- auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- // Re-create the same dir
- {
- auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- // MkDir for existing LogStore path
- {
- auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- // Two levels of non-existing dirs
- {
- auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir2/Dir3").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- // Log table with intermediate dirs
- {
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
- auto res = logStoreClient.CreateLogTable("/Root/home/folder/LogStore/Dir1/Dir2/log1", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- }
-
- Y_UNIT_TEST(LogTable) {
- NKikimrConfig::TAppConfig appConfig;
- appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- NYdb::NLogStore::TSchema logSchema(
- {
- NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
- NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
- NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- },
- {"timestamp", "resource_type", "resource_id", "uid"}
- );
-
- auto connection = ConnectToServer(server);
- NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
- {
- THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
- schemaPresets["default"] = logSchema;
- NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
- auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log1", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
- const auto& schema = descr.GetSchema();
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
- TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
- UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
- }
-
- {
- NYdb::NLogStore::TLogTableDescription tableDescr(logSchema, {"timestamp", "uid"}, 4);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
- const auto& schema = descr.GetSchema();
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
- UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
- TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
- UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
- }
-
- {
- NYdb::NLogStore::TLogTableDescription tableDescr(logSchema, {"timestamp", "uid"}, 4);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- NYdb::NScheme::TSchemeClient schemaClient(connection);
- auto res = schemaClient.ListDirectory("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto children = res.GetChildren();
- UNIT_ASSERT_VALUES_EQUAL(children.size(), 3);
- UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "log1");
- UNIT_ASSERT_VALUES_EQUAL(children[1].Name, "log2");
- UNIT_ASSERT_VALUES_EQUAL(children[2].Name, ".sys");
- }
-
- {
- NYdb::NScheme::TSchemeClient schemaClient(connection);
- auto res = schemaClient.ListDirectory("/Root/LogStore/.sys").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto children = res.GetChildren();
- UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "store_primary_index_stats");
- }
-
- {
- NYdb::NScheme::TSchemeClient schemaClient(connection);
- auto res = schemaClient.ListDirectory("/Root/LogStore/log1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto children = res.GetChildren();
- UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(children[0].Name, ".sys");
- }
-
- {
- NYdb::NScheme::TSchemeClient schemaClient(connection);
- auto res = schemaClient.ListDirectory("/Root/LogStore/log1/.sys").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto children = res.GetChildren();
- UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "primary_index_stats");
- }
-
- {
- // Try to drop non-empty LogStore
- auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DropLogTable("/Root/LogStore/log1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- // Try to drop LogTable as LogStore
- auto res = logStoreClient.DropLogStore("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DropLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- // Try to drop LogStore as LogTable
- auto res = logStoreClient.DropLogTable("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
- }
-
- {
- auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- }
-
- Y_UNIT_TEST(AlterLogTable) {
- NKikimrConfig::TAppConfig appConfig;
- appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- NYdb::NLogStore::TSchema logSchema(
- {
- NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
- NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
- NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
- NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
- NYdb::TColumn("uint_timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Uint64)),
- },
- {"timestamp", "resource_type", "resource_id", "uid"}
- );
-
- auto connection = ConnectToServer(server);
- NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
- {
- THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
- schemaPresets["default"] = logSchema;
- NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
- auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- // Create table without TTL settings
- {
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log1", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- UNIT_ASSERT_C(!descr.GetTtlSettings(), "The table was created without TTL settings");
- }
-
- // Create table with TTL settings
- {
+
+using namespace NYdb;
+
+Y_UNIT_TEST_SUITE(YdbLogStore) {
+
+ void EnableDebugLogs(TKikimrWithGrpcAndRootSchema& server) {
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+ }
+
+ NYdb::TDriver ConnectToServer(TKikimrWithGrpcAndRootSchema& server, const TString& token = {}) {
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetDatabase("/Root").SetAuthToken(token));
+ return connection;
+ }
+
+ Y_UNIT_TEST(LogStore) {
+ NKikimrConfig::TAppConfig appConfig;
+ appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
+ TKikimrWithGrpcAndRootSchema server(appConfig);
+ EnableDebugLogs(server);
+
+ NYdb::NLogStore::TSchema logSchema(
+ {
+ NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
+ NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
+ NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ },
+ {"timestamp", "resource_type", "resource_id", "uid"}
+ );
+
+ auto connection = ConnectToServer(server);
+ NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
+ {
+ THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
+ schemaPresets["default"] = logSchema;
+ NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
+ auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DescribeLogStore("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetSchemaPresets().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetSchemaPresets().count("default"), 1);
+ const auto& schema = descr.GetSchemaPresets().begin()->second;
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
+ TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
+ }
+
+ {
+ auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ }
+
+ Y_UNIT_TEST(Dirs) {
+ NKikimrConfig::TAppConfig appConfig;
+ appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
+ TKikimrWithGrpcAndRootSchema server(appConfig);
+ EnableDebugLogs(server);
+
+ NYdb::NLogStore::TSchema logSchema(
+ {
+ NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
+ NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
+ NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ },
+ {"timestamp", "resource_type", "resource_id", "uid"}
+ );
+
+ auto connection = ConnectToServer(server);
+ NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
+ {
+ THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
+ schemaPresets["default"] = logSchema;
+ NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
+ auto res = logStoreClient.CreateLogStore("/Root/home/folder/LogStore", std::move(storeDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DescribeLogStore("/Root/home/folder/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ NYdb::NScheme::TSchemeClient schemeClient(connection);
+
+ // MkDir inside LogStore
+ {
+ auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ // Re-create the same dir
+ {
+ auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ // MkDir for existing LogStore path
+ {
+ auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ // Two levels of non-existing dirs
+ {
+ auto res = schemeClient.MakeDirectory("/Root/home/folder/LogStore/Dir2/Dir3").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ // Log table with intermediate dirs
+ {
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
+ auto res = logStoreClient.CreateLogTable("/Root/home/folder/LogStore/Dir1/Dir2/log1", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ }
+
+ Y_UNIT_TEST(LogTable) {
+ NKikimrConfig::TAppConfig appConfig;
+ appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
+ TKikimrWithGrpcAndRootSchema server(appConfig);
+ EnableDebugLogs(server);
+
+ NYdb::NLogStore::TSchema logSchema(
+ {
+ NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
+ NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
+ NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ },
+ {"timestamp", "resource_type", "resource_id", "uid"}
+ );
+
+ auto connection = ConnectToServer(server);
+ NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
+ {
+ THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
+ schemaPresets["default"] = logSchema;
+ NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
+ auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log1", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
+ const auto& schema = descr.GetSchema();
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
+ TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
+ }
+
+ {
+ NYdb::NLogStore::TLogTableDescription tableDescr(logSchema, {"timestamp", "uid"}, 4);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetColumnShardCount(), 4);
+ const auto& schema = descr.GetSchema();
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns().size(), 10);
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[0].ToString(), "{ name: \"timestamp\", type: Timestamp? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[1].ToString(), "{ name: \"resource_type\", type: Utf8? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetColumns()[4].ToString(), "{ name: \"level\", type: Int32? }");
+ UNIT_ASSERT_VALUES_EQUAL(schema.GetPrimaryKeyColumns(),
+ TVector<TString>({"timestamp", "resource_type", "resource_id", "uid"}));
+ UNIT_ASSERT_VALUES_EQUAL(descr.GetOwner(), "root@builtin");
+ }
+
+ {
+ NYdb::NLogStore::TLogTableDescription tableDescr(logSchema, {"timestamp", "uid"}, 4);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ NYdb::NScheme::TSchemeClient schemaClient(connection);
+ auto res = schemaClient.ListDirectory("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto children = res.GetChildren();
+ UNIT_ASSERT_VALUES_EQUAL(children.size(), 3);
+ UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "log1");
+ UNIT_ASSERT_VALUES_EQUAL(children[1].Name, "log2");
+ UNIT_ASSERT_VALUES_EQUAL(children[2].Name, ".sys");
+ }
+
+ {
+ NYdb::NScheme::TSchemeClient schemaClient(connection);
+ auto res = schemaClient.ListDirectory("/Root/LogStore/.sys").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto children = res.GetChildren();
+ UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "store_primary_index_stats");
+ }
+
+ {
+ NYdb::NScheme::TSchemeClient schemaClient(connection);
+ auto res = schemaClient.ListDirectory("/Root/LogStore/log1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto children = res.GetChildren();
+ UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(children[0].Name, ".sys");
+ }
+
+ {
+ NYdb::NScheme::TSchemeClient schemaClient(connection);
+ auto res = schemaClient.ListDirectory("/Root/LogStore/log1/.sys").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto children = res.GetChildren();
+ UNIT_ASSERT_VALUES_EQUAL(children.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(children[0].Name, "primary_index_stats");
+ }
+
+ {
+ // Try to drop non-empty LogStore
+ auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DropLogTable("/Root/LogStore/log1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ // Try to drop LogTable as LogStore
+ auto res = logStoreClient.DropLogStore("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DropLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ // Try to drop LogStore as LogTable
+ auto res = logStoreClient.DropLogTable("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SCHEME_ERROR, res.GetIssues().ToString());
+ }
+
+ {
+ auto res = logStoreClient.DropLogStore("/Root/LogStore").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ }
+
+ Y_UNIT_TEST(AlterLogTable) {
+ NKikimrConfig::TAppConfig appConfig;
+ appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
+ TKikimrWithGrpcAndRootSchema server(appConfig);
+ EnableDebugLogs(server);
+
+ NYdb::NLogStore::TSchema logSchema(
+ {
+ NYdb::TColumn("timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("resource_type", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("resource_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("uid", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("level", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Int32)),
+ NYdb::TColumn("message", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("json_payload", NYdb::NLogStore::MakeColumnType(EPrimitiveType::JsonDocument)),
+ NYdb::TColumn("request_id", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
+ NYdb::TColumn("ingested_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("saved_at", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
+ NYdb::TColumn("uint_timestamp", NYdb::NLogStore::MakeColumnType(EPrimitiveType::Uint64)),
+ },
+ {"timestamp", "resource_type", "resource_id", "uid"}
+ );
+
+ auto connection = ConnectToServer(server);
+ NYdb::NLogStore::TLogStoreClient logStoreClient(connection);
+ {
+ THashMap<TString, NYdb::NLogStore::TSchema> schemaPresets;
+ schemaPresets["default"] = logSchema;
+ NYdb::NLogStore::TLogStoreDescription storeDescr(4, schemaPresets);
+ auto res = logStoreClient.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ // Create table without TTL settings
+ {
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log1", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ UNIT_ASSERT_C(!descr.GetTtlSettings(), "The table was created without TTL settings");
+ }
+
+ // Create table with TTL settings
+ {
NYdb::NLogStore::TTtlSettings ttlSettings("saved_at", TDuration::Seconds(2000));
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- auto ttlSettings = descr.GetTtlSettings();
- UNIT_ASSERT_C(!ttlSettings.Empty(), "The table was created with TTL settings");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(2000));
- }
-
- // Add TTL to a table (currently not supported)
- {
- NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log2", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ auto ttlSettings = descr.GetTtlSettings();
+ UNIT_ASSERT_C(!ttlSettings.Empty(), "The table was created with TTL settings");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(2000));
+ }
+
+ // Add TTL to a table (currently not supported)
+ {
+ NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
alterLogTableSettings.AlterTtlSettings(NYdb::NTable::TAlterTtlSettings::Set("uint_timestamp", NYdb::NTable::TTtlSettings::EUnit::MilliSeconds, TDuration::Seconds(3600)));
- auto res = logStoreClient.AlterLogTable("/Root/LogStore/log1", std::move(alterLogTableSettings)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- auto ttlSettings = descr.GetTtlSettings();
- UNIT_ASSERT_C(ttlSettings.Empty(), "Table must not have TTL settings");
- }
-
- // Change TTL column (currently not supported)
- {
- NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
+ auto res = logStoreClient.AlterLogTable("/Root/LogStore/log1", std::move(alterLogTableSettings)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ auto ttlSettings = descr.GetTtlSettings();
+ UNIT_ASSERT_C(ttlSettings.Empty(), "Table must not have TTL settings");
+ }
+
+ // Change TTL column (currently not supported)
+ {
+ NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
alterLogTableSettings.AlterTtlSettings(NYdb::NTable::TAlterTtlSettings::Set("ingested_at", TDuration::Seconds(86400)));
- auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- auto ttlSettings = descr.GetTtlSettings();
- UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(2000));
- }
-
- // Change TTL expiration time
- {
- NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
+ auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ auto ttlSettings = descr.GetTtlSettings();
+ UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(2000));
+ }
+
+ // Change TTL expiration time
+ {
+ NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
alterLogTableSettings.AlterTtlSettings(NYdb::NTable::TAlterTtlSettings::Set("saved_at", TDuration::Seconds(86400)));
- auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- auto ttlSettings = descr.GetTtlSettings();
- UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(86400));
- }
-
- // Remove TTL (currently not supported)
- {
- NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
- alterLogTableSettings.AlterTtlSettings(NYdb::NTable::TAlterTtlSettings::Drop());
- auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
- {
- auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- auto descr = res.GetDescription();
- auto ttlSettings = descr.GetTtlSettings();
- UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
- UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(86400));
- }
-
- // Use invalid column for TTL
- {
+ auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ auto ttlSettings = descr.GetTtlSettings();
+ UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(86400));
+ }
+
+ // Remove TTL (currently not supported)
+ {
+ NYdb::NLogStore::TAlterLogTableSettings alterLogTableSettings;
+ alterLogTableSettings.AlterTtlSettings(NYdb::NTable::TAlterTtlSettings::Drop());
+ auto res = logStoreClient.AlterLogTable("/Root/LogStore/log2", std::move(alterLogTableSettings)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+ {
+ auto res = logStoreClient.DescribeLogTable("/Root/LogStore/log2").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ auto descr = res.GetDescription();
+ auto ttlSettings = descr.GetTtlSettings();
+ UNIT_ASSERT_C(!ttlSettings.Empty(), "Table must have TTL settings");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetColumnName(), "saved_at");
+ UNIT_ASSERT_VALUES_EQUAL(ttlSettings->GetDateTypeColumn().GetExpireAfter(), TDuration::Seconds(86400));
+ }
+
+ // Use invalid column for TTL
+ {
NYdb::NLogStore::TTtlSettings ttlSettings("nonexisting_column", TDuration::Seconds(2000));
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log3", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
-
- // Use column of invalid type for TTL
- {
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log3", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+
+ // Use column of invalid type for TTL
+ {
NYdb::NLogStore::TTtlSettings ttlSettings("message", NYdb::NTable::TTtlSettings::EUnit::MilliSeconds, TDuration::Seconds(3600));
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log4", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
-
- // Use non-Timestamp column for TTL
- {
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log4", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+
+ // Use non-Timestamp column for TTL
+ {
NYdb::NLogStore::TTtlSettings ttlSettings("uint_timestamp", NYdb::NTable::TTtlSettings::EUnit::MilliSeconds, TDuration::Seconds(3600));
- NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
- auto res = logStoreClient.CreateLogTable("/Root/LogStore/log5", std::move(tableDescr)).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
- }
- }
-}
+ NYdb::NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4, ttlSettings);
+ auto res = logStoreClient.CreateLogTable("/Root/LogStore/log5", std::move(tableDescr)).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::GENERIC_ERROR, res.GetIssues().ToString());
+ }
+ }
+}
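
Note on the flow exercised by ydb_logstore_ut.cpp above: a LogStore is created with one or more named schema presets, LogTables are created under it (optionally with TTL settings), and tables have to be dropped before the store itself can be dropped. A minimal standalone sketch of that happy path follows; the endpoint, database path, trimmed column list, and status handling are illustrative assumptions rather than values taken from the test fixture, and the include paths mirror the test file above.

    #include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
    #include <ydb/public/lib/experimental/ydb_logstore.h>

    using namespace NYdb;

    int main() {
        // Connect to a locally running YDB endpoint (assumed values).
        auto driver = TDriver(TDriverConfig()
            .SetEndpoint("localhost:2136")
            .SetDatabase("/Root"));
        NLogStore::TLogStoreClient client(driver);

        // Schema preset: the four primary-key columns plus one payload column.
        NLogStore::TSchema logSchema(
            {
                TColumn("timestamp", NLogStore::MakeColumnType(EPrimitiveType::Timestamp)),
                TColumn("resource_type", NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
                TColumn("resource_id", NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
                TColumn("uid", NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
                TColumn("message", NLogStore::MakeColumnType(EPrimitiveType::Utf8)),
            },
            {"timestamp", "resource_type", "resource_id", "uid"});

        THashMap<TString, NLogStore::TSchema> presets;
        presets["default"] = logSchema;

        // LogStore with 4 column shards and a single "default" schema preset.
        NLogStore::TLogStoreDescription storeDescr(4, presets);
        auto createStore = client.CreateLogStore("/Root/LogStore", std::move(storeDescr)).GetValueSync();
        if (createStore.GetStatus() != EStatus::SUCCESS)
            return 1; // inspect createStore.GetIssues() for details

        // LogTable using the "default" preset, with ("timestamp", "uid") as the
        // sharding columns and 4 shards, as in the tests above.
        NLogStore::TLogTableDescription tableDescr("default", {"timestamp", "uid"}, 4);
        auto createTable = client.CreateLogTable("/Root/LogStore/log1", std::move(tableDescr)).GetValueSync();
        if (createTable.GetStatus() != EStatus::SUCCESS)
            return 1;

        // Describe, then clean up: tables first, then the store.
        auto describe = client.DescribeLogTable("/Root/LogStore/log1").GetValueSync();
        auto dropTable = client.DropLogTable("/Root/LogStore/log1").GetValueSync();
        auto dropStore = client.DropLogStore("/Root/LogStore").GetValueSync();

        driver.Stop(true);
        return 0;
    }
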
diff --git a/ydb/services/ydb/ydb_long_tx_ut.cpp b/ydb/services/ydb/ydb_long_tx_ut.cpp
index b3f5e64a5bb..94518e8e582 100644
--- a/ydb/services/ydb/ydb_long_tx_ut.cpp
+++ b/ydb/services/ydb/ydb_long_tx_ut.cpp
@@ -135,17 +135,17 @@ Y_UNIT_TEST_SUITE(YdbLongTx) {
NYdb::NLongTx::TClient client(connection);
NLongTx::TLongTxBeginResult resBeginTx = client.BeginWriteTx().GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
auto txId = resBeginTx.GetResult().tx_id();
TString data = TestBlob();
NLongTx::TLongTxWriteResult resWrite =
client.Write(txId, TestTablePath, "0", data, Ydb::LongTx::Data::APACHE_ARROW).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resWrite.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resWrite.Status().GetStatus(), EStatus::SUCCESS);
NLongTx::TLongTxCommitResult resCommitTx = client.CommitTx(txId).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resCommitTx.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resCommitTx.Status().GetStatus(), EStatus::SUCCESS);
}
Y_UNIT_TEST(BeginWriteRollback) {
@@ -163,17 +163,17 @@ Y_UNIT_TEST_SUITE(YdbLongTx) {
NYdb::NLongTx::TClient client(connection);
NLongTx::TLongTxBeginResult resBeginTx = client.BeginWriteTx().GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
auto txId = resBeginTx.GetResult().tx_id();
TString data = TestBlob();
NLongTx::TLongTxWriteResult resWrite =
client.Write(txId, TestTablePath, "0", data, Ydb::LongTx::Data::APACHE_ARROW).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resWrite.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resWrite.Status().GetStatus(), EStatus::SUCCESS);
NLongTx::TLongTxRollbackResult resRollbackTx = client.RollbackTx(txId).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resRollbackTx.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resRollbackTx.Status().GetStatus(), EStatus::SUCCESS);
}
Y_UNIT_TEST(BeginRead) {
@@ -191,12 +191,12 @@ Y_UNIT_TEST_SUITE(YdbLongTx) {
NYdb::NLongTx::TClient client(connection);
NLongTx::TLongTxBeginResult resBeginTx = client.BeginReadTx().GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resBeginTx.Status().GetStatus(), EStatus::SUCCESS);
auto txId = resBeginTx.GetResult().tx_id();
NLongTx::TLongTxReadResult resRead = client.Read(txId, TestTablePath).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(resRead.Status().GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(resRead.Status().GetStatus(), EStatus::SUCCESS);
UNIT_ASSERT_VALUES_EQUAL(resRead.GetResult().data().data(), "");
}
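
The ydb_long_tx_ut.cpp hunks above all follow the same NYdb::NLongTx pattern: begin a write (or read) transaction, push an Apache Arrow payload (or read it back), then commit or roll back. A condensed sketch of the write path, assuming an already initialized driver and an existing target table; the header path, function name, and table path are placeholders, and the "0" write id follows the tests above.

    #include <ydb/public/lib/experimental/ydb_long_tx.h>  // assumed header path for the experimental NLongTx client

    void WriteArrowBatch(NYdb::TDriver& connection, const TString& tablePath, const TString& arrowData) {
        NYdb::NLongTx::TClient client(connection);

        // 1. Begin a write transaction and remember its id.
        auto begin = client.BeginWriteTx().GetValueSync();
        if (begin.Status().GetStatus() != NYdb::EStatus::SUCCESS)
            return;
        auto txId = begin.GetResult().tx_id();

        // 2. Write a serialized Apache Arrow batch; "0" is the per-write id used by the tests.
        auto write = client.Write(txId, tablePath, "0", arrowData, Ydb::LongTx::Data::APACHE_ARROW).GetValueSync();
        if (write.Status().GetStatus() != NYdb::EStatus::SUCCESS) {
            client.RollbackTx(txId).GetValueSync();  // discard the uncommitted data
            return;
        }

        // 3. Commit to make the data visible to readers.
        client.CommitTx(txId).GetValueSync();
    }
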
diff --git a/ydb/services/ydb/ydb_olapstore_ut.cpp b/ydb/services/ydb/ydb_olapstore_ut.cpp
index 6f0b7291f8f..6a61447fe9f 100644
--- a/ydb/services/ydb/ydb_olapstore_ut.cpp
+++ b/ydb/services/ydb/ydb_olapstore_ut.cpp
@@ -1,612 +1,612 @@
-#include "ydb_common_ut.h"
-
+#include "ydb_common_ut.h"
+
#include <ydb/services/ydb/ut/udfs.h>
#include <ydb/core/kqp/ut/common/kqp_ut_common.h>
-
+
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
-
+
#include <ydb/library/yql/minikql/invoke_builtins/mkql_builtins.h>
#include <ydb/library/yql/public/issue/yql_issue.h>
#include <ydb/library/yql/public/issue/yql_issue_message.h>
-
-using namespace NYdb;
-
-Y_UNIT_TEST_SUITE(YdbOlapStore) {
-
- NMiniKQL::IFunctionRegistry* UdfFrFactory(const NKikimr::NScheme::TTypeRegistry& typeRegistry) {
- Y_UNUSED(typeRegistry);
- auto funcRegistry = NMiniKQL::CreateFunctionRegistry(NMiniKQL::CreateBuiltinRegistry())->Clone();
- funcRegistry->AddModule("fake_re2_path", "Re2", CreateRe2Module());
- funcRegistry->AddModule("fake_json2_path", "Json2", CreateJson2Module());
- return funcRegistry.Release();
- }
-
- void EnableDebugLogs(TKikimrWithGrpcAndRootSchema& server) {
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD_SCAN, NActors::NLog::PRI_DEBUG);
- //server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::LONG_TX_SERVICE, NActors::NLog::PRI_DEBUG);
- }
-
- NYdb::TDriver ConnectToServer(TKikimrWithGrpcAndRootSchema& server, const TString& token = {}) {
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetDatabase("/Root").SetAuthToken(token));
- NKqp::WaitForKqpProxyInit(connection);
- return connection;
- }
-
- void CreateOlapTable(const TServerSettings& settings, const TString& tableName, ui32 shards = 2) {
- const char * tableDescr = R"(
- Name: "OlapStore"
- #MetaShardCount: 1
- ColumnShardCount: 4
- SchemaPresets {
- Name: "default"
- Schema {
- Columns { Name: "message" Type: "Utf8" }
- Columns { Name: "json_payload" Type: "JsonDocument" }
- Columns { Name: "resource_id" Type: "Utf8" }
- Columns { Name: "uid" Type: "Utf8" }
- Columns { Name: "timestamp" Type: "Timestamp" }
- Columns { Name: "resource_type" Type: "Utf8" }
- Columns { Name: "level" Type: "Int32" }
- Columns { Name: "ingested_at" Type: "Timestamp" }
- Columns { Name: "saved_at" Type: "Timestamp" }
- Columns { Name: "request_id" Type: "Utf8" }
- KeyColumnNames: ["timestamp", "resource_type", "resource_id", "uid"]
+
+using namespace NYdb;
+
+Y_UNIT_TEST_SUITE(YdbOlapStore) {
+
+ NMiniKQL::IFunctionRegistry* UdfFrFactory(const NKikimr::NScheme::TTypeRegistry& typeRegistry) {
+ Y_UNUSED(typeRegistry);
+ auto funcRegistry = NMiniKQL::CreateFunctionRegistry(NMiniKQL::CreateBuiltinRegistry())->Clone();
+ funcRegistry->AddModule("fake_re2_path", "Re2", CreateRe2Module());
+ funcRegistry->AddModule("fake_json2_path", "Json2", CreateJson2Module());
+ return funcRegistry.Release();
+ }
+
+ void EnableDebugLogs(TKikimrWithGrpcAndRootSchema& server) {
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_COLUMNSHARD_SCAN, NActors::NLog::PRI_DEBUG);
+ //server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::MSGBUS_REQUEST, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::BLOB_CACHE, NActors::NLog::PRI_DEBUG);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::LONG_TX_SERVICE, NActors::NLog::PRI_DEBUG);
+ }
+
+ NYdb::TDriver ConnectToServer(TKikimrWithGrpcAndRootSchema& server, const TString& token = {}) {
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetDatabase("/Root").SetAuthToken(token));
+ NKqp::WaitForKqpProxyInit(connection);
+ return connection;
+ }
+
+ void CreateOlapTable(const TServerSettings& settings, const TString& tableName, ui32 shards = 2) {
+ const char * tableDescr = R"(
+ Name: "OlapStore"
+ #MetaShardCount: 1
+ ColumnShardCount: 4
+ SchemaPresets {
+ Name: "default"
+ Schema {
+ Columns { Name: "message" Type: "Utf8" }
+ Columns { Name: "json_payload" Type: "JsonDocument" }
+ Columns { Name: "resource_id" Type: "Utf8" }
+ Columns { Name: "uid" Type: "Utf8" }
+ Columns { Name: "timestamp" Type: "Timestamp" }
+ Columns { Name: "resource_type" Type: "Utf8" }
+ Columns { Name: "level" Type: "Int32" }
+ Columns { Name: "ingested_at" Type: "Timestamp" }
+ Columns { Name: "saved_at" Type: "Timestamp" }
+ Columns { Name: "request_id" Type: "Utf8" }
+ KeyColumnNames: ["timestamp", "resource_type", "resource_id", "uid"]
Engine: COLUMN_ENGINE_REPLACING_TIMESERIES
- }
- }
- )";
-
- TClient annoyingClient(settings);
- NMsgBusProxy::EResponseStatus status = annoyingClient.CreateOlapStore("/Root", tableDescr);
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
- status = annoyingClient.CreateOlapTable("/Root/OlapStore", Sprintf(R"(
- Name: "%s"
- ColumnShardCount : %d
- Sharding {
- HashSharding {
- Function: HASH_FUNCTION_CLOUD_LOGS
- Columns: ["timestamp", "uid"]
- }
- }
- )", tableName.c_str(), shards));
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
- }
-
- void CreateTable(const TServerSettings& settings, const TString& tableName) {
- TString tableDescr = Sprintf(R"(
- Name: "%s"
- Columns { Name: "uid" Type: "Utf8" }
- Columns { Name: "message" Type: "Utf8" }
- Columns { Name: "json_payload" Type: "JsonDocument" }
- Columns { Name: "resource_id" Type: "Utf8" }
- Columns { Name: "ingested_at" Type: "Timestamp" }
- Columns { Name: "timestamp" Type: "Timestamp" }
- Columns { Name: "resource_type" Type: "Utf8" }
- Columns { Name: "level" Type: "Int32" }
- Columns { Name: "saved_at" Type: "Timestamp" }
- Columns { Name: "request_id" Type: "Utf8" }
- KeyColumnNames: ["timestamp", "resource_type", "resource_id", "uid"]
- )", tableName.c_str());
-
- TClient annoyingClient(settings);
- NMsgBusProxy::EResponseStatus status = annoyingClient.CreateTable("/Root", tableDescr);
- UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
- }
-
- NYdb::NTable::TAsyncBulkUpsertResult SendBatch(NYdb::NTable::TTableClient& client, const TString& tableName,
- const ui64 batchSize, const ui32 baseUserId, i64& ts)
- {
- TValueBuilder rows;
- rows.BeginList();
- for (ui64 i = 0; i < batchSize; ++i, ts += 1000) {
- const ui32 userId = baseUserId + (i % 100);
- rows.AddListItem()
- .BeginStruct()
- .AddMember("timestamp").Timestamp(TInstant::MicroSeconds(ts))
- .AddMember("resource_type").Utf8(i%2 ? "app" : "nginx")
- .AddMember("resource_id").Utf8("resource_" + ToString((i+13) % 7))
- .AddMember("uid").Utf8(ToString(i % 23))
- .AddMember("level").Int32(i % 10)
- .AddMember("message").Utf8("message")
- .AddMember("json_payload").JsonDocument(
- Sprintf(R"({
- "auth":{
- "user":{
- "ip":"257.257.257.257",
- "is_cloud":"false",
- "id":%)" PRIu32 R"(
- },
- "type":"token",
- "org_id":7704,
- "service":{
- "ip":"258.258.258.258",
- "internal":"false"
- }
- }
- })", userId))
- .AddMember("ingested_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(342))
- .AddMember("saved_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(600))
- .AddMember("request_id").Utf8(Sprintf("%x", (unsigned int)i))
- .EndStruct();
- }
- rows.EndList();
-
- return client.BulkUpsert(tableName, rows.Build());
- }
-
- size_t WriteRows(NYdb::TDriver& connection, const TString& tableName, i64 startTs, const size_t batchCount, const size_t batchSize, const TString& token = {}) {
- NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken(token));
-
- TInstant start = TInstant::Now();
- i64 ts = startTs;
-
- const ui32 baseUserId = 1000000;
- TVector<NYdb::NTable::TAsyncBulkUpsertResult> results;
- for (ui64 b = 0; b < batchCount; ++b) {
- auto res = SendBatch(client, tableName, batchSize, baseUserId, ts);
- results.emplace_back(std::move(res));
- }
-
- for (auto& asyncResult : results) {
- auto res = asyncResult.GetValueSync();
- Cerr << ".";
- if (res.GetStatus() != EStatus::SUCCESS) {
- Cerr << res.GetStatus();
- }
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
- Cerr << Endl << tableName << ": " << batchCount * batchSize << " rows upserted in " << TInstant::Now() - start << Endl;
-
- return batchCount * batchSize;
- }
-
- template <class TStatsProto>
- void PrintQueryStats(IOutputStream& out, const TStatsProto& stats) {
- out << "total CPU: " << stats.process_cpu_time_us() << "\n";
- for (const auto& qp : stats.query_phases()) {
- out << " duration: " << qp.duration_us() << " usec\n"
- << " cpu: " << qp.cpu_time_us() << " usec\n";
- for (const auto& ta : qp.table_access()) {
- out << " " << ta << "\n";
- }
- }
- }
-
- TString RunQuery(TDriver& connection, const TString& query) {
- auto client = NYdb::NTable::TTableClient(connection);
-
- NYdb::NTable::TStreamExecScanQuerySettings execSettings;
- execSettings.CollectQueryStats(NYdb::NTable::ECollectQueryStatsMode::Basic);
- auto it = client.StreamExecuteScanQuery(query, execSettings).GetValueSync();
-
- UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
- auto result = NKikimr::NKqp::CollectStreamResult(it);
- Cerr << "RESULT:\n" << result.ResultSetYson << "\n---------------------\nSTATS:\n";
+ }
+ }
+ )";
+
+ TClient annoyingClient(settings);
+ NMsgBusProxy::EResponseStatus status = annoyingClient.CreateOlapStore("/Root", tableDescr);
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
+ status = annoyingClient.CreateOlapTable("/Root/OlapStore", Sprintf(R"(
+ Name: "%s"
+ ColumnShardCount : %d
+ Sharding {
+ HashSharding {
+ Function: HASH_FUNCTION_CLOUD_LOGS
+ Columns: ["timestamp", "uid"]
+ }
+ }
+ )", tableName.c_str(), shards));
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
+ }
+
+ void CreateTable(const TServerSettings& settings, const TString& tableName) {
+ TString tableDescr = Sprintf(R"(
+ Name: "%s"
+ Columns { Name: "uid" Type: "Utf8" }
+ Columns { Name: "message" Type: "Utf8" }
+ Columns { Name: "json_payload" Type: "JsonDocument" }
+ Columns { Name: "resource_id" Type: "Utf8" }
+ Columns { Name: "ingested_at" Type: "Timestamp" }
+ Columns { Name: "timestamp" Type: "Timestamp" }
+ Columns { Name: "resource_type" Type: "Utf8" }
+ Columns { Name: "level" Type: "Int32" }
+ Columns { Name: "saved_at" Type: "Timestamp" }
+ Columns { Name: "request_id" Type: "Utf8" }
+ KeyColumnNames: ["timestamp", "resource_type", "resource_id", "uid"]
+ )", tableName.c_str());
+
+ TClient annoyingClient(settings);
+ NMsgBusProxy::EResponseStatus status = annoyingClient.CreateTable("/Root", tableDescr);
+ UNIT_ASSERT_VALUES_EQUAL(status, NMsgBusProxy::EResponseStatus::MSTATUS_OK);
+ }
+
+ NYdb::NTable::TAsyncBulkUpsertResult SendBatch(NYdb::NTable::TTableClient& client, const TString& tableName,
+ const ui64 batchSize, const ui32 baseUserId, i64& ts)
+ {
+ TValueBuilder rows;
+ rows.BeginList();
+ for (ui64 i = 0; i < batchSize; ++i, ts += 1000) {
+ const ui32 userId = baseUserId + (i % 100);
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("timestamp").Timestamp(TInstant::MicroSeconds(ts))
+ .AddMember("resource_type").Utf8(i%2 ? "app" : "nginx")
+ .AddMember("resource_id").Utf8("resource_" + ToString((i+13) % 7))
+ .AddMember("uid").Utf8(ToString(i % 23))
+ .AddMember("level").Int32(i % 10)
+ .AddMember("message").Utf8("message")
+ .AddMember("json_payload").JsonDocument(
+ Sprintf(R"({
+ "auth":{
+ "user":{
+ "ip":"257.257.257.257",
+ "is_cloud":"false",
+ "id":%)" PRIu32 R"(
+ },
+ "type":"token",
+ "org_id":7704,
+ "service":{
+ "ip":"258.258.258.258",
+ "internal":"false"
+ }
+ }
+ })", userId))
+ .AddMember("ingested_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(342))
+ .AddMember("saved_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(600))
+ .AddMember("request_id").Utf8(Sprintf("%x", (unsigned int)i))
+ .EndStruct();
+ }
+ rows.EndList();
+
+ return client.BulkUpsert(tableName, rows.Build());
+ }
+
+ size_t WriteRows(NYdb::TDriver& connection, const TString& tableName, i64 startTs, const size_t batchCount, const size_t batchSize, const TString& token = {}) {
+ NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken(token));
+
+ TInstant start = TInstant::Now();
+ i64 ts = startTs;
+
+ const ui32 baseUserId = 1000000;
+ TVector<NYdb::NTable::TAsyncBulkUpsertResult> results;
+ for (ui64 b = 0; b < batchCount; ++b) {
+ auto res = SendBatch(client, tableName, batchSize, baseUserId, ts);
+ results.emplace_back(std::move(res));
+ }
+
+ for (auto& asyncResult : results) {
+ auto res = asyncResult.GetValueSync();
+ Cerr << ".";
+ if (res.GetStatus() != EStatus::SUCCESS) {
+ Cerr << res.GetStatus();
+ }
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+ Cerr << Endl << tableName << ": " << batchCount * batchSize << " rows upserted in " << TInstant::Now() - start << Endl;
+
+ return batchCount * batchSize;
+ }
+
+ template <class TStatsProto>
+ void PrintQueryStats(IOutputStream& out, const TStatsProto& stats) {
+ out << "total CPU: " << stats.process_cpu_time_us() << "\n";
+ for (const auto& qp : stats.query_phases()) {
+ out << " duration: " << qp.duration_us() << " usec\n"
+ << " cpu: " << qp.cpu_time_us() << " usec\n";
+ for (const auto& ta : qp.table_access()) {
+ out << " " << ta << "\n";
+ }
+ }
+ }
+
+ TString RunQuery(TDriver& connection, const TString& query) {
+ auto client = NYdb::NTable::TTableClient(connection);
+
+ NYdb::NTable::TStreamExecScanQuerySettings execSettings;
+ execSettings.CollectQueryStats(NYdb::NTable::ECollectQueryStatsMode::Basic);
+ auto it = client.StreamExecuteScanQuery(query, execSettings).GetValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ auto result = NKikimr::NKqp::CollectStreamResult(it);
+ Cerr << "RESULT:\n" << result.ResultSetYson << "\n---------------------\nSTATS:\n";
UNIT_ASSERT(result.QueryStats);
PrintQueryStats(Cerr, *result.QueryStats);
- Cerr << "\n";
- return result.ResultSetYson;
- }
-
- // Create OLTP and OLAP tables with the same set of columns and same PK
- void CreateTestTables(const TServerSettings& settings, const TString& tableName) {
- CreateOlapTable(settings, tableName);
- CreateTable(settings, "oltp_" + tableName);
- }
-
-    // Write the same set of rows to both the OLTP and OLAP tables
- size_t WriteTestRows(NYdb::TDriver& connection, const TString& tableName, i64 startTs, const size_t batchCount, const size_t batchSize, const TString& token = {}) {
- size_t rowCount = WriteRows(connection, "/Root/OlapStore/" + tableName, startTs, batchCount, batchSize, token);
- size_t rowCount2 = WriteRows(connection, "/Root/oltp_" + tableName, startTs, batchCount, batchSize, token);
- UNIT_ASSERT_VALUES_EQUAL(rowCount, rowCount2);
- return rowCount;
- }
-
-    // Run the query against both the OLTP and OLAP tables and check that the results are equal
- TString CompareQueryResults(TDriver& connection, const TString& tableName, const TString& query) {
- Cerr << "QUERY:\n" << query << "\n\n";
-
- TString oltpQuery = query;
- SubstGlobal(oltpQuery, "<TABLE>", "`/Root/oltp_" + tableName + "`");
- TString expectedResult = RunQuery(connection, oltpQuery);
-
- TString olapQuery = query;
- SubstGlobal(olapQuery, "<TABLE>", "`/Root/OlapStore/" + tableName + "`");
- TString result = RunQuery(connection, olapQuery);
-
- UNIT_ASSERT_VALUES_EQUAL(result, expectedResult);
- return result;
- }
-
- Y_UNIT_TEST(BulkUpsert) {
+ Cerr << "\n";
+ return result.ResultSetYson;
+ }
+
+ // Create OLTP and OLAP tables with the same set of columns and same PK
+ void CreateTestTables(const TServerSettings& settings, const TString& tableName) {
+ CreateOlapTable(settings, tableName);
+ CreateTable(settings, "oltp_" + tableName);
+ }
+
+    // Write the same set of rows to both the OLTP and OLAP tables
+ size_t WriteTestRows(NYdb::TDriver& connection, const TString& tableName, i64 startTs, const size_t batchCount, const size_t batchSize, const TString& token = {}) {
+ size_t rowCount = WriteRows(connection, "/Root/OlapStore/" + tableName, startTs, batchCount, batchSize, token);
+ size_t rowCount2 = WriteRows(connection, "/Root/oltp_" + tableName, startTs, batchCount, batchSize, token);
+ UNIT_ASSERT_VALUES_EQUAL(rowCount, rowCount2);
+ return rowCount;
+ }
+
+    // Run the query against both the OLTP and OLAP tables and check that the results are equal
+ TString CompareQueryResults(TDriver& connection, const TString& tableName, const TString& query) {
+ Cerr << "QUERY:\n" << query << "\n\n";
+
+ TString oltpQuery = query;
+ SubstGlobal(oltpQuery, "<TABLE>", "`/Root/oltp_" + tableName + "`");
+ TString expectedResult = RunQuery(connection, oltpQuery);
+
+ TString olapQuery = query;
+ SubstGlobal(olapQuery, "<TABLE>", "`/Root/OlapStore/" + tableName + "`");
+ TString result = RunQuery(connection, olapQuery);
+
+ UNIT_ASSERT_VALUES_EQUAL(result, expectedResult);
+ return result;
+ }
+
+ Y_UNIT_TEST(BulkUpsert) {
NKikimrConfig::TAppConfig appConfig;
appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- auto connection = ConnectToServer(server);
-
- CreateOlapTable(*server.ServerSettings, "log1");
-
- TClient annoyingClient(*server.ServerSettings);
- annoyingClient.ModifyOwner("/Root/OlapStore", "log1", "alice@builtin");
-
- {
- NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("bob@builtin"));
- i64 ts = 1000;
- auto res = SendBatch(client, "/Root/OlapStore/log1", 100, 1, ts).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::UNAUTHORIZED);
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(),
- "Access denied for bob@builtin with access UpdateRow to table '/Root/OlapStore/log1'");
-
- TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
- UNIT_ASSERT_VALUES_EQUAL(result, "[[0u]]");
- }
-
- {
- NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("alice@builtin"));
- i64 ts = 1000;
- auto res = SendBatch(client, "log1", 100, 1, ts).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
- UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown database for table 'log1'");
-
- TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
- UNIT_ASSERT_VALUES_EQUAL(result, "[[0u]]");
- }
-
- {
- NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("alice@builtin"));
- i64 ts = 1000;
- auto res = SendBatch(client, "/Root/OlapStore/log1", 100, 1, ts).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
-
- TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
- UNIT_ASSERT_VALUES_EQUAL(result, "[[100u]]");
- }
- }
-
- Y_UNIT_TEST(ManyTables) {
+ EnableDebugLogs(server);
+
+ auto connection = ConnectToServer(server);
+
+ CreateOlapTable(*server.ServerSettings, "log1");
+
+ TClient annoyingClient(*server.ServerSettings);
+ annoyingClient.ModifyOwner("/Root/OlapStore", "log1", "alice@builtin");
+
+ {
+ NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("bob@builtin"));
+ i64 ts = 1000;
+ auto res = SendBatch(client, "/Root/OlapStore/log1", 100, 1, ts).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::UNAUTHORIZED);
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(),
+ "Access denied for bob@builtin with access UpdateRow to table '/Root/OlapStore/log1'");
+
+ TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
+ UNIT_ASSERT_VALUES_EQUAL(result, "[[0u]]");
+ }
+
+ {
+ NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("alice@builtin"));
+ i64 ts = 1000;
+ auto res = SendBatch(client, "log1", 100, 1, ts).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(res.GetStatus(), EStatus::SCHEME_ERROR);
+ UNIT_ASSERT_STRING_CONTAINS(res.GetIssues().ToString(), "Unknown database for table 'log1'");
+
+ TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
+ UNIT_ASSERT_VALUES_EQUAL(result, "[[0u]]");
+ }
+
+ {
+ NYdb::NTable::TTableClient client(connection, NYdb::NTable::TClientSettings().AuthToken("alice@builtin"));
+ i64 ts = 1000;
+ auto res = SendBatch(client, "/Root/OlapStore/log1", 100, 1, ts).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+
+ TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
+ UNIT_ASSERT_VALUES_EQUAL(result, "[[100u]]");
+ }
+ }
+
+ Y_UNIT_TEST(ManyTables) {
NKikimrConfig::TAppConfig appConfig;
appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- auto connection = ConnectToServer(server);
-
- CreateTestTables(*server.ServerSettings, "log1");
- CreateTestTables(*server.ServerSettings, "log2");
- CreateTestTables(*server.ServerSettings, "log3");
-
- size_t rowCount = WriteTestRows(connection, "log1", 0, 1, 50);
- UNIT_ASSERT_VALUES_EQUAL(rowCount, 50);
-
- CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
-
- WriteTestRows(connection, "log2", 0, 10, 15);
-
- CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
-
- WriteTestRows(connection, "log3", 0, 10, 100);
-
- CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
- CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
- }
-
- Y_UNIT_TEST(DuplicateRows) {
- NKikimrConfig::TAppConfig appConfig;
- appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig);
- EnableDebugLogs(server);
-
- auto connection = ConnectToServer(server);
- NYdb::NTable::TTableClient client(connection);
-
- CreateOlapTable(*server.ServerSettings, "log1");
-
- const ui64 batchCount = 100;
- const ui64 batchSize = 1000;
-
- for (ui64 batch = 0; batch < batchCount; ++batch) {
- TValueBuilder rows;
- rows.BeginList();
- i64 ts = 1000; // Same for all rows!
- for (ui64 i = 0; i < batchSize; ++i) {
- rows.AddListItem()
- .BeginStruct()
- .AddMember("timestamp").Timestamp(TInstant::MicroSeconds(ts))
- .AddMember("resource_type").Utf8(i%2 ? "app" : "nginx")
- .AddMember("resource_id").Utf8("resource_" + ToString((i+13) % 7))
- .AddMember("uid").Utf8(ToString(i % 23))
- .AddMember("level").Int32(i % 10)
- .AddMember("message").Utf8(TString(1000, 'a'))
- .AddMember("json_payload").JsonDocument("{}")
- .AddMember("ingested_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(342))
- .AddMember("saved_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(600))
- .AddMember("request_id").Utf8(Sprintf("%x", (unsigned int)i))
- .EndStruct();
- }
- rows.EndList();
-
- auto res = client.BulkUpsert("/Root/OlapStore/log1", rows.Build()).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
- }
-
- {
- TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
+ EnableDebugLogs(server);
+
+ auto connection = ConnectToServer(server);
+
+ CreateTestTables(*server.ServerSettings, "log1");
+ CreateTestTables(*server.ServerSettings, "log2");
+ CreateTestTables(*server.ServerSettings, "log3");
+
+ size_t rowCount = WriteTestRows(connection, "log1", 0, 1, 50);
+ UNIT_ASSERT_VALUES_EQUAL(rowCount, 50);
+
+ CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
+
+ WriteTestRows(connection, "log2", 0, 10, 15);
+
+ CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
+
+ WriteTestRows(connection, "log3", 0, 10, 100);
+
+ CompareQueryResults(connection, "log2", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log3", "SELECT count(*) FROM <TABLE>;");
+ CompareQueryResults(connection, "log1", "SELECT count(*) FROM <TABLE>;");
+ }
+
+ Y_UNIT_TEST(DuplicateRows) {
+ NKikimrConfig::TAppConfig appConfig;
+ appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
+ TKikimrWithGrpcAndRootSchema server(appConfig);
+ EnableDebugLogs(server);
+
+ auto connection = ConnectToServer(server);
+ NYdb::NTable::TTableClient client(connection);
+
+ CreateOlapTable(*server.ServerSettings, "log1");
+
+ const ui64 batchCount = 100;
+ const ui64 batchSize = 1000;
+
+ for (ui64 batch = 0; batch < batchCount; ++batch) {
+ TValueBuilder rows;
+ rows.BeginList();
+ i64 ts = 1000; // Same for all rows!
+ for (ui64 i = 0; i < batchSize; ++i) {
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("timestamp").Timestamp(TInstant::MicroSeconds(ts))
+ .AddMember("resource_type").Utf8(i%2 ? "app" : "nginx")
+ .AddMember("resource_id").Utf8("resource_" + ToString((i+13) % 7))
+ .AddMember("uid").Utf8(ToString(i % 23))
+ .AddMember("level").Int32(i % 10)
+ .AddMember("message").Utf8(TString(1000, 'a'))
+ .AddMember("json_payload").JsonDocument("{}")
+ .AddMember("ingested_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(342))
+ .AddMember("saved_at").Timestamp(TInstant::MicroSeconds(ts) + TDuration::MilliSeconds(600))
+ .AddMember("request_id").Utf8(Sprintf("%x", (unsigned int)i))
+ .EndStruct();
+ }
+ rows.EndList();
+
+ auto res = client.BulkUpsert("/Root/OlapStore/log1", rows.Build()).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(res.GetStatus(), EStatus::SUCCESS, res.GetIssues().ToString());
+ }
+
+ {
+ TString result = RunQuery(connection, "SELECT count(*) FROM `/Root/OlapStore/log1`;");
             UNIT_ASSERT_VALUES_EQUAL(result, "[[322u]]"); // 322 = 2 * 7 * 23 distinct (resource_type, resource_id, uid) combinations, all rows sharing the same timestamp
- }
- }
-
- void TestQuery(const TString& query) {
+ }
+ }
+
+ void TestQuery(const TString& query) {
NKikimrConfig::TAppConfig appConfig;
appConfig.MutableFeatureFlags()->SetEnableOlapSchemaOperations(true);
- TKikimrWithGrpcAndRootSchema server(appConfig, {}, {}, false, &UdfFrFactory);
-
- auto connection = ConnectToServer(server);
-
- CreateTestTables(*server.ServerSettings, "log1");
-
- // EnableDebugLogs(server);
-
- // Run with empty tables first
- CompareQueryResults(connection, "log1", query);
-
- size_t batchCount = 100;
-        size_t batchSize = 50; // 500;
- size_t rowCount = WriteTestRows(connection, "log1", 0, batchCount, batchSize);
- UNIT_ASSERT_VALUES_EQUAL(rowCount, batchCount * batchSize);
-
- // EnableDebugLogs(server);
-
- CompareQueryResults(connection, "log1", query);
- }
-
- Y_UNIT_TEST(LogLast50) {
- TestQuery(R"(
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogLast50ByResource) {
- TestQuery(R"(
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
- WHERE resource_type == 'app' AND resource_id == 'resource_1'
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogGrepNonExisting) {
- TestQuery(R"(
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
-            WHERE message LIKE '%non-existing string%'
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogGrepExisting) {
- TestQuery(R"(
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
- WHERE message LIKE '%message%'
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogNonExistingRequest) {
- TestQuery(R"(
- $request_id = '0xfaceb00c';
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `request_id`
- FROM <TABLE>
- WHERE request_id == $request_id
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogExistingRequest) {
- TestQuery(R"(
- $request_id = '1f';
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `request_id`
- FROM <TABLE>
- WHERE request_id == $request_id
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogNonExistingUserId) {
- TestQuery(R"(
- $user_id = '111';
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `json_payload`
- FROM <TABLE>
- WHERE JSON_VALUE(json_payload, '$.auth.user.id') == $user_id
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogExistingUserId) {
- TestQuery(R"(
- $user_id = '1000042';
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `json_payload`
- FROM <TABLE>
- WHERE JSON_VALUE(json_payload, '$.auth.user.id') == $user_id
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogPagingBefore) {
- TestQuery(R"(
- PRAGMA kikimr.OptEnablePredicateExtract = "true";
-
- $ts = CAST(3000000 AS Timestamp);
- $res_type = CAST('nginx' AS Utf8);
- $res_id = CAST('resource_)' AS Utf8);
- $uid = CAST('10' AS Utf8);
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
- WHERE resource_type == 'app' AND resource_id == 'resource_1'
- AND (`timestamp`, `resource_type`, `resource_id`, `uid`) < ($ts, $res_type, $res_id, $uid)
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogPagingBetween) {
- TestQuery(R"(
- PRAGMA kikimr.OptEnablePredicateExtract = "true";
-
- $ts1 = CAST(2500000 AS Timestamp);
- $res_type1 = CAST('nginx' AS Utf8);
- $res_id1 = CAST('resource_)' AS Utf8);
- $uid1 = CAST('10' AS Utf8);
-
- $ts2 = CAST(3500000 AS Timestamp);
- $res_type2 = CAST('nginx' AS Utf8);
- $res_id2 = CAST('resource_)' AS Utf8);
- $uid2 = CAST('10' AS Utf8);
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM <TABLE>
- WHERE
- (`timestamp`, `resource_type`, `resource_id`, `uid`) > ($ts1, $res_type1, $res_id1, $uid1)
- AND (`timestamp`, `resource_type`, `resource_id`, `uid`) < ($ts2, $res_type2, $res_id2, $uid2)
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogPagingAfter) {
- TestQuery(R"(
- PRAGMA kikimr.OptEnablePredicateExtract = "true";
-
- $ts = CAST(3000000 AS Timestamp);
- $res_type = CAST('nginx' AS Utf8);
- $res_id = CAST('resource_)' AS Utf8);
- $uid = CAST('10' AS Utf8);
-
- $next50 = (
- SELECT *
- FROM <TABLE>
- WHERE resource_type == 'app' AND resource_id == 'resource_1'
- AND (`timestamp`, `resource_type`, `resource_id`, `uid`) > ($ts, $res_type, $res_id, $uid)
- ORDER BY `timestamp`, `resource_type`, `resource_id`, `uid`
- LIMIT 50
- );
-
- SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
- FROM $next50
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC;
- )");
- }
-
- Y_UNIT_TEST(LogCountByResource) {
- TestQuery(R"(
- SELECT count(*)
- FROM <TABLE>
- WHERE resource_type == 'app' AND resource_id == 'resource_1'
- LIMIT 50
- )");
- }
-
- Y_UNIT_TEST(LogWithUnionAllAscending) {
- TestQuery(R"(
- PRAGMA AnsiInForEmptyOrNullableItemsCollections;
-
- $until = CAST(4100000 AS Timestamp);
- $uidUntil = CAST(3 AS Utf8);
- $resourceTypeUntil = CAST('app' AS Utf8);
- $resourceIDUntil = CAST('resource_5' AS Utf8);
- $since = CAST(4000000 AS Timestamp);
- $uidSince = CAST(1 AS Utf8);
- $resourceTypeSince = CAST('app' AS Utf8);
- $resourceIDSince = CAST('resource_2' AS Utf8);
- $level0 = CAST(1 AS Int64);
- $level1 = CAST(3 AS Int64);
- $limit = 50;
-
- $part0 = (SELECT * FROM <TABLE> WHERE `timestamp` < $until AND `timestamp` > $since AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part1 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` < $resourceTypeUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part2 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` < $resourceIDUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part3 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` = $resourceIDUntil AND `uid` < $uidUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part4 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` > $resourceTypeSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part5 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` > $resourceIDSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $part6 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` = $resourceIDSince AND `uid` > $uidSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
- $data = (SELECT * FROM $part0 UNION ALL SELECT * FROM $part1 UNION ALL SELECT * FROM $part2 UNION ALL SELECT * FROM $part3 UNION ALL SELECT * FROM $part4 UNION ALL SELECT * FROM $part5 UNION ALL SELECT * FROM $part6);
- SELECT * FROM $data ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit;
- )");
- }
-
- Y_UNIT_TEST(LogWithUnionAllDescending) {
- TestQuery(R"(
- PRAGMA AnsiInForEmptyOrNullableItemsCollections;
-
- $until = CAST(4093000 AS Timestamp);
- $uidUntil = CAST(3 AS Utf8);
- $resourceTypeUntil = CAST('app' AS Utf8);
- $resourceIDUntil = CAST('resource_5' AS Utf8);
- $since = CAST(4000000 AS Timestamp);
- $uidSince = CAST(1 AS Utf8);
- $resourceTypeSince = CAST('app' AS Utf8);
- $resourceIDSince = CAST('resource_2' AS Utf8);
- $level0 = CAST(1 AS Int64);
- $level1 = CAST(3 AS Int64);
- $limit = 50;
-
- $part0 = (SELECT * FROM <TABLE> WHERE `timestamp` < $until AND `timestamp` > $since AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part1 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` < $resourceTypeUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part2 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` < $resourceIDUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part3 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` = $resourceIDUntil AND `uid` < $uidUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part4 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` > $resourceTypeSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part5 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` > $resourceIDSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $part6 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` = $resourceIDSince AND `uid` > $uidSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
- $data = (SELECT * FROM $part0 UNION ALL SELECT * FROM $part1 UNION ALL SELECT * FROM $part2 UNION ALL SELECT * FROM $part3 UNION ALL SELECT * FROM $part4 UNION ALL SELECT * FROM $part5 UNION ALL SELECT * FROM $part6);
- SELECT * FROM $data ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit;
- )");
- }
-
- Y_UNIT_TEST(LogTsRangeDescending) {
- TestQuery(R"(
- --PRAGMA AnsiInForEmptyOrNullableItemsCollections;
-
- $until = CAST(4093000 AS Timestamp);
- $since = CAST(4000000 AS Timestamp);
-
- $limit = 50;
-
- SELECT *
- FROM <TABLE>
- WHERE
- `timestamp` <= $until AND
- `timestamp` >= $since
- ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit;
- )");
- }
-}
+ TKikimrWithGrpcAndRootSchema server(appConfig, {}, {}, false, &UdfFrFactory);
+
+ auto connection = ConnectToServer(server);
+
+ CreateTestTables(*server.ServerSettings, "log1");
+
+ // EnableDebugLogs(server);
+
+ // Run with empty tables first
+ CompareQueryResults(connection, "log1", query);
+
+ size_t batchCount = 100;
+        size_t batchSize = 50; // 500;
+ size_t rowCount = WriteTestRows(connection, "log1", 0, batchCount, batchSize);
+ UNIT_ASSERT_VALUES_EQUAL(rowCount, batchCount * batchSize);
+
+ // EnableDebugLogs(server);
+
+ CompareQueryResults(connection, "log1", query);
+ }
+
+ Y_UNIT_TEST(LogLast50) {
+ TestQuery(R"(
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogLast50ByResource) {
+ TestQuery(R"(
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+ WHERE resource_type == 'app' AND resource_id == 'resource_1'
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogGrepNonExisting) {
+ TestQuery(R"(
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+            WHERE message LIKE '%non-existing string%'
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogGrepExisting) {
+ TestQuery(R"(
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+ WHERE message LIKE '%message%'
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogNonExistingRequest) {
+ TestQuery(R"(
+ $request_id = '0xfaceb00c';
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `request_id`
+ FROM <TABLE>
+ WHERE request_id == $request_id
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogExistingRequest) {
+ TestQuery(R"(
+ $request_id = '1f';
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `request_id`
+ FROM <TABLE>
+ WHERE request_id == $request_id
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogNonExistingUserId) {
+ TestQuery(R"(
+ $user_id = '111';
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `json_payload`
+ FROM <TABLE>
+ WHERE JSON_VALUE(json_payload, '$.auth.user.id') == $user_id
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogExistingUserId) {
+ TestQuery(R"(
+ $user_id = '1000042';
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`, `json_payload`
+ FROM <TABLE>
+ WHERE JSON_VALUE(json_payload, '$.auth.user.id') == $user_id
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogPagingBefore) {
+ TestQuery(R"(
+ PRAGMA kikimr.OptEnablePredicateExtract = "true";
+
+ $ts = CAST(3000000 AS Timestamp);
+ $res_type = CAST('nginx' AS Utf8);
+ $res_id = CAST('resource_)' AS Utf8);
+ $uid = CAST('10' AS Utf8);
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+ WHERE resource_type == 'app' AND resource_id == 'resource_1'
+ AND (`timestamp`, `resource_type`, `resource_id`, `uid`) < ($ts, $res_type, $res_id, $uid)
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogPagingBetween) {
+ TestQuery(R"(
+ PRAGMA kikimr.OptEnablePredicateExtract = "true";
+
+ $ts1 = CAST(2500000 AS Timestamp);
+ $res_type1 = CAST('nginx' AS Utf8);
+ $res_id1 = CAST('resource_)' AS Utf8);
+ $uid1 = CAST('10' AS Utf8);
+
+ $ts2 = CAST(3500000 AS Timestamp);
+ $res_type2 = CAST('nginx' AS Utf8);
+ $res_id2 = CAST('resource_)' AS Utf8);
+ $uid2 = CAST('10' AS Utf8);
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM <TABLE>
+ WHERE
+ (`timestamp`, `resource_type`, `resource_id`, `uid`) > ($ts1, $res_type1, $res_id1, $uid1)
+ AND (`timestamp`, `resource_type`, `resource_id`, `uid`) < ($ts2, $res_type2, $res_id2, $uid2)
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogPagingAfter) {
+ TestQuery(R"(
+ PRAGMA kikimr.OptEnablePredicateExtract = "true";
+
+ $ts = CAST(3000000 AS Timestamp);
+ $res_type = CAST('nginx' AS Utf8);
+ $res_id = CAST('resource_)' AS Utf8);
+ $uid = CAST('10' AS Utf8);
+
+ $next50 = (
+ SELECT *
+ FROM <TABLE>
+ WHERE resource_type == 'app' AND resource_id == 'resource_1'
+ AND (`timestamp`, `resource_type`, `resource_id`, `uid`) > ($ts, $res_type, $res_id, $uid)
+ ORDER BY `timestamp`, `resource_type`, `resource_id`, `uid`
+ LIMIT 50
+ );
+
+ SELECT `timestamp`, `resource_type`, `resource_id`, `uid`, `level`, `message`
+ FROM $next50
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC;
+ )");
+ }
+
+ Y_UNIT_TEST(LogCountByResource) {
+ TestQuery(R"(
+ SELECT count(*)
+ FROM <TABLE>
+ WHERE resource_type == 'app' AND resource_id == 'resource_1'
+ LIMIT 50
+ )");
+ }
+
+ Y_UNIT_TEST(LogWithUnionAllAscending) {
+ TestQuery(R"(
+ PRAGMA AnsiInForEmptyOrNullableItemsCollections;
+
+ $until = CAST(4100000 AS Timestamp);
+ $uidUntil = CAST(3 AS Utf8);
+ $resourceTypeUntil = CAST('app' AS Utf8);
+ $resourceIDUntil = CAST('resource_5' AS Utf8);
+ $since = CAST(4000000 AS Timestamp);
+ $uidSince = CAST(1 AS Utf8);
+ $resourceTypeSince = CAST('app' AS Utf8);
+ $resourceIDSince = CAST('resource_2' AS Utf8);
+ $level0 = CAST(1 AS Int64);
+ $level1 = CAST(3 AS Int64);
+ $limit = 50;
+
+ $part0 = (SELECT * FROM <TABLE> WHERE `timestamp` < $until AND `timestamp` > $since AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part1 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` < $resourceTypeUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part2 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` < $resourceIDUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part3 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` = $resourceIDUntil AND `uid` < $uidUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part4 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` > $resourceTypeSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part5 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` > $resourceIDSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $part6 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` = $resourceIDSince AND `uid` > $uidSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit);
+ $data = (SELECT * FROM $part0 UNION ALL SELECT * FROM $part1 UNION ALL SELECT * FROM $part2 UNION ALL SELECT * FROM $part3 UNION ALL SELECT * FROM $part4 UNION ALL SELECT * FROM $part5 UNION ALL SELECT * FROM $part6);
+ SELECT * FROM $data ORDER BY `timestamp` ASC, `resource_type` ASC, `resource_id` ASC, `uid` ASC LIMIT $limit;
+ )");
+ }
+
+ Y_UNIT_TEST(LogWithUnionAllDescending) {
+ TestQuery(R"(
+ PRAGMA AnsiInForEmptyOrNullableItemsCollections;
+
+ $until = CAST(4093000 AS Timestamp);
+ $uidUntil = CAST(3 AS Utf8);
+ $resourceTypeUntil = CAST('app' AS Utf8);
+ $resourceIDUntil = CAST('resource_5' AS Utf8);
+ $since = CAST(4000000 AS Timestamp);
+ $uidSince = CAST(1 AS Utf8);
+ $resourceTypeSince = CAST('app' AS Utf8);
+ $resourceIDSince = CAST('resource_2' AS Utf8);
+ $level0 = CAST(1 AS Int64);
+ $level1 = CAST(3 AS Int64);
+ $limit = 50;
+
+ $part0 = (SELECT * FROM <TABLE> WHERE `timestamp` < $until AND `timestamp` > $since AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part1 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` < $resourceTypeUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part2 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` < $resourceIDUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part3 = (SELECT * FROM <TABLE> WHERE `timestamp` = $until AND `resource_type` = $resourceTypeUntil AND `resource_id` = $resourceIDUntil AND `uid` < $uidUntil AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part4 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` > $resourceTypeSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part5 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` > $resourceIDSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $part6 = (SELECT * FROM <TABLE> WHERE `timestamp` = $since AND `resource_type` = $resourceTypeSince AND `resource_id` = $resourceIDSince AND `uid` > $uidSince AND `level` IN ($level0, $level1) ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit);
+ $data = (SELECT * FROM $part0 UNION ALL SELECT * FROM $part1 UNION ALL SELECT * FROM $part2 UNION ALL SELECT * FROM $part3 UNION ALL SELECT * FROM $part4 UNION ALL SELECT * FROM $part5 UNION ALL SELECT * FROM $part6);
+ SELECT * FROM $data ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit;
+ )");
+ }
+
+ Y_UNIT_TEST(LogTsRangeDescending) {
+ TestQuery(R"(
+ --PRAGMA AnsiInForEmptyOrNullableItemsCollections;
+
+ $until = CAST(4093000 AS Timestamp);
+ $since = CAST(4000000 AS Timestamp);
+
+ $limit = 50;
+
+ SELECT *
+ FROM <TABLE>
+ WHERE
+ `timestamp` <= $until AND
+ `timestamp` >= $since
+ ORDER BY `timestamp` DESC, `resource_type` DESC, `resource_id` DESC, `uid` DESC LIMIT $limit;
+ )");
+ }
+}
diff --git a/ydb/services/ydb/ydb_operation.cpp b/ydb/services/ydb/ydb_operation.cpp
index cc56f597caf..602980027cb 100644
--- a/ydb/services/ydb/ydb_operation.cpp
+++ b/ydb/services/ydb/ydb_operation.cpp
@@ -39,9 +39,9 @@ void TGRpcOperationService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
MakeIntrusive<TGRpcRequest<Ydb::Operations::IN, Ydb::Operations::OUT, TGRpcOperationService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::Operation::V1::OperationService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::Operation::V1::OperationService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("operation", #NAME))->Run();
ADD_REQUEST(GetOperation, GetOperationRequest, GetOperationResponse, {
diff --git a/ydb/services/ydb/ydb_s3_internal.cpp b/ydb/services/ydb/ydb_s3_internal.cpp
index 676357384b6..5dca2c2fd95 100644
--- a/ydb/services/ydb/ydb_s3_internal.cpp
+++ b/ydb/services/ydb/ydb_s3_internal.cpp
@@ -1,55 +1,55 @@
-#include "ydb_s3_internal.h"
-
+#include "ydb_s3_internal.h"
+
#include <ydb/core/grpc_services/grpc_helper.h>
#include <ydb/core/grpc_services/grpc_request_proxy.h>
#include <ydb/core/grpc_services/rpc_calls.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-TGRpcYdbS3InternalService::TGRpcYdbS3InternalService(NActors::TActorSystem *system,
+
+namespace NKikimr {
+namespace NGRpcService {
+
+TGRpcYdbS3InternalService::TGRpcYdbS3InternalService(NActors::TActorSystem *system,
TIntrusivePtr<NMonitoring::TDynamicCounters> counters, NActors::TActorId id)
- : ActorSystem_(system)
- , Counters_(counters)
- , GRpcRequestProxyId_(id) {}
-
+ : ActorSystem_(system)
+ , Counters_(counters)
+ , GRpcRequestProxyId_(id) {}
+
void TGRpcYdbS3InternalService::InitService(grpc::ServerCompletionQueue *cq, NGrpc::TLoggerPtr logger) {
- CQ_ = cq;
+ CQ_ = cq;
SetupIncomingRequests(std::move(logger));
-}
-
+}
+
void TGRpcYdbS3InternalService::SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) {
- Limiter_ = limiter;
-}
-
-bool TGRpcYdbS3InternalService::IncRequest() {
- return Limiter_->Inc();
-}
-
-void TGRpcYdbS3InternalService::DecRequest() {
- Limiter_->Dec();
- Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
-}
-
+ Limiter_ = limiter;
+}
+
+bool TGRpcYdbS3InternalService::IncRequest() {
+ return Limiter_->Inc();
+}
+
+void TGRpcYdbS3InternalService::DecRequest() {
+ Limiter_->Dec();
+ Y_ASSERT(Limiter_->GetCurrentInFlight() >= 0);
+}
+
void TGRpcYdbS3InternalService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
- auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
-
-#ifdef ADD_REQUEST
-#error ADD_REQUEST macro already defined
-#endif
-#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
- MakeIntrusive<TGRpcRequest<Ydb::S3Internal::IN, Ydb::S3Internal::OUT, TGRpcYdbS3InternalService>>(this, &Service_, CQ_, \
+ auto getCounterBlock = CreateCounterCb(Counters_, ActorSystem_);
+
+#ifdef ADD_REQUEST
+#error ADD_REQUEST macro already defined
+#endif
+#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
+ MakeIntrusive<TGRpcRequest<Ydb::S3Internal::IN, Ydb::S3Internal::OUT, TGRpcYdbS3InternalService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::S3Internal::V1::S3InternalService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::S3Internal::V1::S3InternalService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("s3_internal", #NAME))->Run();
-
- ADD_REQUEST(S3Listing, S3ListingRequest, S3ListingResponse, {
- ActorSystem_->Send(GRpcRequestProxyId_, new TEvS3ListingRequest(ctx));
- })
-#undef ADD_REQUEST
-}
-
-} // namespace NGRpcService
-} // namespace NKikimr
+
+ ADD_REQUEST(S3Listing, S3ListingRequest, S3ListingResponse, {
+ ActorSystem_->Send(GRpcRequestProxyId_, new TEvS3ListingRequest(ctx));
+ })
+#undef ADD_REQUEST
+}
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_s3_internal.h b/ydb/services/ydb/ydb_s3_internal.h
index d75f6d1309e..e947a9b131f 100644
--- a/ydb/services/ydb/ydb_s3_internal.h
+++ b/ydb/services/ydb/ydb_s3_internal.h
@@ -1,34 +1,34 @@
-#pragma once
-
+#pragma once
+
#include <library/cpp/actors/core/actorsystem.h>
#include <library/cpp/grpc/server/grpc_server.h>
#include <ydb/public/api/grpc/draft/ydb_s3_internal_v1.grpc.pb.h>
-
-namespace NKikimr {
-namespace NGRpcService {
-
-class TGRpcYdbS3InternalService
+
+namespace NKikimr {
+namespace NGRpcService {
+
+class TGRpcYdbS3InternalService
: public NGrpc::TGrpcServiceBase<Ydb::S3Internal::V1::S3InternalService>
-{
-public:
- TGRpcYdbS3InternalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
+{
+public:
+ TGRpcYdbS3InternalService(NActors::TActorSystem* system, TIntrusivePtr<NMonitoring::TDynamicCounters> counters,
NActors::TActorId id);
-
+
void InitService(grpc::ServerCompletionQueue* cq, NGrpc::TLoggerPtr logger) override;
void SetGlobalLimiterHandle(NGrpc::TGlobalLimiter* limiter) override;
-
- bool IncRequest();
- void DecRequest();
-private:
+
+ bool IncRequest();
+ void DecRequest();
+private:
void SetupIncomingRequests(NGrpc::TLoggerPtr logger);
-
- NActors::TActorSystem* ActorSystem_;
+
+ NActors::TActorSystem* ActorSystem_;
grpc::ServerCompletionQueue* CQ_ = nullptr;
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
+
+ TIntrusivePtr<NMonitoring::TDynamicCounters> Counters_;
NActors::TActorId GRpcRequestProxyId_;
NGrpc::TGlobalLimiter* Limiter_ = nullptr;
-};
-
-} // namespace NGRpcService
-} // namespace NKikimr
+};
+
+} // namespace NGRpcService
+} // namespace NKikimr
diff --git a/ydb/services/ydb/ydb_s3_internal_ut.cpp b/ydb/services/ydb/ydb_s3_internal_ut.cpp
index 88d79f70599..94053711c2e 100644
--- a/ydb/services/ydb/ydb_s3_internal_ut.cpp
+++ b/ydb/services/ydb/ydb_s3_internal_ut.cpp
@@ -1,288 +1,288 @@
-#include "ydb_common_ut.h"
-
+#include "ydb_common_ut.h"
+
#include <ydb/public/lib/experimental/ydb_s3_internal.h>
#include <ydb/public/sdk/cpp/client/ydb_result/result.h>
#include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
-
-using namespace NYdb;
-
-Y_UNIT_TEST_SUITE(YdbS3Internal) {
-
- void PrepareData(TString location) {
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
-
- NYdb::NTable::TTableClient client(connection);
- auto session = client.GetSession().ExtractValueSync().GetSession();
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Hash", EPrimitiveType::Uint64)
- .AddNullableColumn("Name", EPrimitiveType::Utf8)
- .AddNullableColumn("Path", EPrimitiveType::Utf8)
- .AddNullableColumn("Version", EPrimitiveType::Uint64)
- .AddNullableColumn("Timestamp", EPrimitiveType::Uint64)
- .AddNullableColumn("Data", EPrimitiveType::String)
- .AddNullableColumn("ExtraData", EPrimitiveType::String)
- .AddNullableColumn("Unused1", EPrimitiveType::Uint32);
- tableBuilder.SetPrimaryKeyColumns({"Hash", "Name", "Path", "Version"});
- NYdb::NTable::TCreateTableSettings tableSettings;
- tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
- auto result = session.CreateTable("/Root/ListingObjects", tableBuilder.Build(), tableSettings).ExtractValueSync();
-
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- // Write some rows
- {
- auto res = session.ExecuteDataQuery(
- "REPLACE INTO [/Root/ListingObjects] (Hash, Name, Path, Version, Timestamp, Data) VALUES\n"
-                "(50, 'bucket50', '/home/Music/Bohemian Rhapsody.mp3', 1, 10, 'MP3'),\n"
- "(50, 'bucket50', '/home/.bashrc', 1, 10, '#bashrc')\n"
- ";",
- NYdb::NTable::TTxControl::BeginTx().CommitTx()
- ).ExtractValueSync();
-
-// Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
- }
- }
-
- Y_UNIT_TEST(TestS3Listing) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- PrepareData(location);
-
- // List
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
- NS3Internal::TS3InternalClient s3conn(connection);
-
- TValueBuilder keyPrefix;
- keyPrefix.BeginTuple()
- .AddElement().OptionalUint64(50)
- .AddElement().OptionalUtf8("bucket50")
- .EndTuple();
- TValueBuilder suffix;
- suffix.BeginTuple().EndTuple();
- auto res = s3conn.S3Listing("/Root/ListingObjects",
- keyPrefix.Build(),
- "/home/",
- "/",
- suffix.Build(),
- 100,
- {"Name", "Data", "Timestamp"}
- ).GetValueSync();
-
- Cerr << res.GetStatus() << Endl;
- UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
-
- {
- UNIT_ASSERT_VALUES_EQUAL(res.GetCommonPrefixes().RowsCount(), 1);
+
+using namespace NYdb;
+
+Y_UNIT_TEST_SUITE(YdbS3Internal) {
+
+ void PrepareData(TString location) {
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+
+ NYdb::NTable::TTableClient client(connection);
+ auto session = client.GetSession().ExtractValueSync().GetSession();
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Hash", EPrimitiveType::Uint64)
+ .AddNullableColumn("Name", EPrimitiveType::Utf8)
+ .AddNullableColumn("Path", EPrimitiveType::Utf8)
+ .AddNullableColumn("Version", EPrimitiveType::Uint64)
+ .AddNullableColumn("Timestamp", EPrimitiveType::Uint64)
+ .AddNullableColumn("Data", EPrimitiveType::String)
+ .AddNullableColumn("ExtraData", EPrimitiveType::String)
+ .AddNullableColumn("Unused1", EPrimitiveType::Uint32);
+ tableBuilder.SetPrimaryKeyColumns({"Hash", "Name", "Path", "Version"});
+ NYdb::NTable::TCreateTableSettings tableSettings;
+ tableSettings.PartitioningPolicy(NYdb::NTable::TPartitioningPolicy().UniformPartitions(32));
+ auto result = session.CreateTable("/Root/ListingObjects", tableBuilder.Build(), tableSettings).ExtractValueSync();
+
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ // Write some rows
+ {
+ auto res = session.ExecuteDataQuery(
+ "REPLACE INTO [/Root/ListingObjects] (Hash, Name, Path, Version, Timestamp, Data) VALUES\n"
+                "(50, 'bucket50', '/home/Music/Bohemian Rhapsody.mp3', 1, 10, 'MP3'),\n"
+ "(50, 'bucket50', '/home/.bashrc', 1, 10, '#bashrc')\n"
+ ";",
+ NYdb::NTable::TTxControl::BeginTx().CommitTx()
+ ).ExtractValueSync();
+
+// Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+ }
+ }
+
+ Y_UNIT_TEST(TestS3Listing) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ PrepareData(location);
+
+ // List
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+ NS3Internal::TS3InternalClient s3conn(connection);
+
+ TValueBuilder keyPrefix;
+ keyPrefix.BeginTuple()
+ .AddElement().OptionalUint64(50)
+ .AddElement().OptionalUtf8("bucket50")
+ .EndTuple();
+ TValueBuilder suffix;
+ suffix.BeginTuple().EndTuple();
+ auto res = s3conn.S3Listing("/Root/ListingObjects",
+ keyPrefix.Build(),
+ "/home/",
+ "/",
+ suffix.Build(),
+ 100,
+ {"Name", "Data", "Timestamp"}
+ ).GetValueSync();
+
+ Cerr << res.GetStatus() << Endl;
+ UNIT_ASSERT_EQUAL(res.GetStatus(), EStatus::SUCCESS);
+
+ {
+ UNIT_ASSERT_VALUES_EQUAL(res.GetCommonPrefixes().RowsCount(), 1);
TResultSetParser parser(res.GetCommonPrefixes());
- UNIT_ASSERT(parser.TryNextRow());
- UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Path").GetOptionalUtf8(), "/home/Music/");
- }
-
- {
- UNIT_ASSERT_VALUES_EQUAL(res.GetContents().RowsCount(), 1);
+ UNIT_ASSERT(parser.TryNextRow());
+ UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Path").GetOptionalUtf8(), "/home/Music/");
+ }
+
+ {
+ UNIT_ASSERT_VALUES_EQUAL(res.GetContents().RowsCount(), 1);
TResultSetParser parser(res.GetContents());
- UNIT_ASSERT(parser.TryNextRow());
- UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Name").GetOptionalUtf8(), "bucket50");
- UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Path").GetOptionalUtf8(), "/home/.bashrc");
- UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Timestamp").GetOptionalUint64(), 10);
- }
- }
-
- void SetPermissions(TString location) {
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
- auto scheme = NYdb::NScheme::TSchemeClient(connection);
- auto status = scheme.ModifyPermissions("/Root/ListingObjects",
- NYdb::NScheme::TModifyPermissionsSettings()
- .AddSetPermissions(
- NYdb::NScheme::TPermissions("reader@builtin", {"ydb.tables.read"})
- )
- .AddSetPermissions(
- NYdb::NScheme::TPermissions("generic_reader@builtin", {"ydb.generic.read"})
- )
- .AddSetPermissions(
- NYdb::NScheme::TPermissions("writer@builtin", {"ydb.tables.modify"})
- )
- .AddSetPermissions(
- NYdb::NScheme::TPermissions("generic_writer@builtin", {"ydb.generic.write"})
- )
- ).ExtractValueSync();
- UNIT_ASSERT_EQUAL(status.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(status.GetStatus(), EStatus::SUCCESS);
- }
-
- NYdb::EStatus MakeListingRequest(TString location, TString userToken) {
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetAuthToken(userToken));
- NS3Internal::TS3InternalClient s3conn(connection);
-
- TValueBuilder keyPrefix;
- keyPrefix.BeginTuple()
- .AddElement().OptionalUint64(50)
- .AddElement().OptionalUtf8("bucket50")
- .EndTuple();
- TValueBuilder suffix;
- suffix.BeginTuple().EndTuple();
- auto res = s3conn.S3Listing("/Root/ListingObjects",
- keyPrefix.Build(),
- "/home/",
- "/",
- suffix.Build(),
- 100,
- {"Name", "Data", "Timestamp"}
- ).GetValueSync();
-
-// Cerr << res.GetStatus() << Endl;
-// Cerr << res.GetIssues().ToString() << Endl;
- return res.GetStatus();
- }
-
-
- Y_UNIT_TEST(TestAccessCheck) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- PrepareData(location);
- SetPermissions(location);
- server.ResetSchemeCache("/Root/ListingObjects");
-
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, ""), EStatus::SUCCESS);
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "reader@builtin"), EStatus::SUCCESS);
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "generic_reader@builtin"), EStatus::SUCCESS);
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "root@builtin"), EStatus::SUCCESS);
-
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "writer@builtin"), EStatus::UNAUTHORIZED);
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "generic_writer@builtin"), EStatus::UNAUTHORIZED);
- UNIT_ASSERT_EQUAL(MakeListingRequest(location, "badguy@builtin"), EStatus::UNAUTHORIZED);
- }
-
- NYdb::EStatus TestRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keyPrefix, TValue&& suffix) {
- auto res = s3conn.S3Listing("/Root/ListingObjects",
- std::move(keyPrefix),
- "/home/",
- "/",
- std::move(suffix),
- 100,
- {"Name", "Data", "Timestamp"}
- ).GetValueSync();
-
-// Cerr << res.GetStatus() << Endl;
-// Cerr << res.GetIssues().ToString() << Endl;
- return res.GetStatus();
- }
-
- // Test request with good suffix
- NYdb::EStatus TestKeyPrefixRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keyPrefix) {
- return TestRequest(s3conn,
- std::move(keyPrefix),
- TValueBuilder().BeginTuple().EndTuple().Build());
- }
-
- // Test request with good keyPrefix
- NYdb::EStatus TestKeySuffixRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keySuffix) {
- return TestRequest(s3conn,
- TValueBuilder()
- .BeginTuple()
- .AddElement().OptionalUint64(1)
- .AddElement().OptionalUtf8("Bucket50")
- .EndTuple().Build(),
- std::move(keySuffix));
- }
-
- Y_UNIT_TEST(BadRequests) {
- TKikimrWithGrpcAndRootSchema server;
- ui16 grpc = server.GetPort();
- TString location = TStringBuilder() << "localhost:" << grpc;
-
- PrepareData(location);
-
- auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
- NS3Internal::TS3InternalClient s3conn(connection);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder()
- .BeginTuple()
- .AddElement().OptionalUint64(1)
- .AddElement().OptionalUtf8("Bucket50")
- .EndTuple().Build()),
- EStatus::SUCCESS);
-
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder()
- .BeginTuple()
- .AddElement().BeginList().EndList()
- .AddElement().OptionalUtf8("Bucket50")
- .EndTuple().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder().BeginStruct().EndStruct().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder().BeginList().EndList().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder()
- .BeginList()
- .AddListItem().OptionalUint64(1)
- .AddListItem().OptionalUint64(22)
- .EndList().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder().Uint64(50).Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
- TValueBuilder().OptionalUint64(50).Build()),
- EStatus::BAD_REQUEST);
-
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().BeginTuple().EndTuple().Build()),
- EStatus::SUCCESS);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().BeginStruct().EndStruct().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().BeginList().EndList().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder()
- .BeginList()
- .AddListItem().OptionalUint64(1)
- .AddListItem().OptionalUint64(22)
- .EndList().Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().Uint64(50).Build()),
- EStatus::BAD_REQUEST);
-
- UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
- TValueBuilder().OptionalUint64(50).Build()),
- EStatus::BAD_REQUEST);
- }
-}
+ UNIT_ASSERT(parser.TryNextRow());
+ UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Name").GetOptionalUtf8(), "bucket50");
+ UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Path").GetOptionalUtf8(), "/home/.bashrc");
+ UNIT_ASSERT_VALUES_EQUAL(*parser.ColumnParser("Timestamp").GetOptionalUint64(), 10);
+ }
+ }
+
+ void SetPermissions(TString location) {
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+ auto scheme = NYdb::NScheme::TSchemeClient(connection);
+ auto status = scheme.ModifyPermissions("/Root/ListingObjects",
+ NYdb::NScheme::TModifyPermissionsSettings()
+ .AddSetPermissions(
+ NYdb::NScheme::TPermissions("reader@builtin", {"ydb.tables.read"})
+ )
+ .AddSetPermissions(
+ NYdb::NScheme::TPermissions("generic_reader@builtin", {"ydb.generic.read"})
+ )
+ .AddSetPermissions(
+ NYdb::NScheme::TPermissions("writer@builtin", {"ydb.tables.modify"})
+ )
+ .AddSetPermissions(
+ NYdb::NScheme::TPermissions("generic_writer@builtin", {"ydb.generic.write"})
+ )
+ ).ExtractValueSync();
+ UNIT_ASSERT_EQUAL(status.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(status.GetStatus(), EStatus::SUCCESS);
+ }
+
+ NYdb::EStatus MakeListingRequest(TString location, TString userToken) {
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location).SetAuthToken(userToken));
+ NS3Internal::TS3InternalClient s3conn(connection);
+
+ TValueBuilder keyPrefix;
+ keyPrefix.BeginTuple()
+ .AddElement().OptionalUint64(50)
+ .AddElement().OptionalUtf8("bucket50")
+ .EndTuple();
+ TValueBuilder suffix;
+ suffix.BeginTuple().EndTuple();
+ auto res = s3conn.S3Listing("/Root/ListingObjects",
+ keyPrefix.Build(),
+ "/home/",
+ "/",
+ suffix.Build(),
+ 100,
+ {"Name", "Data", "Timestamp"}
+ ).GetValueSync();
+
+// Cerr << res.GetStatus() << Endl;
+// Cerr << res.GetIssues().ToString() << Endl;
+ return res.GetStatus();
+ }
+
+
+ Y_UNIT_TEST(TestAccessCheck) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ PrepareData(location);
+ SetPermissions(location);
+ server.ResetSchemeCache("/Root/ListingObjects");
+
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, ""), EStatus::SUCCESS);
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "reader@builtin"), EStatus::SUCCESS);
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "generic_reader@builtin"), EStatus::SUCCESS);
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "root@builtin"), EStatus::SUCCESS);
+
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "writer@builtin"), EStatus::UNAUTHORIZED);
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "generic_writer@builtin"), EStatus::UNAUTHORIZED);
+ UNIT_ASSERT_EQUAL(MakeListingRequest(location, "badguy@builtin"), EStatus::UNAUTHORIZED);
+ }
+
+ NYdb::EStatus TestRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keyPrefix, TValue&& suffix) {
+ auto res = s3conn.S3Listing("/Root/ListingObjects",
+ std::move(keyPrefix),
+ "/home/",
+ "/",
+ std::move(suffix),
+ 100,
+ {"Name", "Data", "Timestamp"}
+ ).GetValueSync();
+
+// Cerr << res.GetStatus() << Endl;
+// Cerr << res.GetIssues().ToString() << Endl;
+ return res.GetStatus();
+ }
+
+ // Test request with good suffix
+ NYdb::EStatus TestKeyPrefixRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keyPrefix) {
+ return TestRequest(s3conn,
+ std::move(keyPrefix),
+ TValueBuilder().BeginTuple().EndTuple().Build());
+ }
+
+ // Test request with good keyPrefix
+ NYdb::EStatus TestKeySuffixRequest(NS3Internal::TS3InternalClient s3conn, TValue&& keySuffix) {
+ return TestRequest(s3conn,
+ TValueBuilder()
+ .BeginTuple()
+ .AddElement().OptionalUint64(1)
+ .AddElement().OptionalUtf8("Bucket50")
+ .EndTuple().Build(),
+ std::move(keySuffix));
+ }
+
+ Y_UNIT_TEST(BadRequests) {
+ TKikimrWithGrpcAndRootSchema server;
+ ui16 grpc = server.GetPort();
+ TString location = TStringBuilder() << "localhost:" << grpc;
+
+ PrepareData(location);
+
+ auto connection = NYdb::TDriver(TDriverConfig().SetEndpoint(location));
+ NS3Internal::TS3InternalClient s3conn(connection);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder()
+ .BeginTuple()
+ .AddElement().OptionalUint64(1)
+ .AddElement().OptionalUtf8("Bucket50")
+ .EndTuple().Build()),
+ EStatus::SUCCESS);
+
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder()
+ .BeginTuple()
+ .AddElement().BeginList().EndList()
+ .AddElement().OptionalUtf8("Bucket50")
+ .EndTuple().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder().BeginStruct().EndStruct().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder().BeginList().EndList().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder()
+ .BeginList()
+ .AddListItem().OptionalUint64(1)
+ .AddListItem().OptionalUint64(22)
+ .EndList().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder().Uint64(50).Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeyPrefixRequest(s3conn,
+ TValueBuilder().OptionalUint64(50).Build()),
+ EStatus::BAD_REQUEST);
+
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().BeginTuple().EndTuple().Build()),
+ EStatus::SUCCESS);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().BeginStruct().EndStruct().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().BeginList().EndList().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder()
+ .BeginList()
+ .AddListItem().OptionalUint64(1)
+ .AddListItem().OptionalUint64(22)
+ .EndList().Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().Uint64(50).Build()),
+ EStatus::BAD_REQUEST);
+
+ UNIT_ASSERT_VALUES_EQUAL(TestKeySuffixRequest(s3conn,
+ TValueBuilder().OptionalUint64(50).Build()),
+ EStatus::BAD_REQUEST);
+ }
+}
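
Aside on the fragment above: the BadRequests test only accepts key prefixes and suffixes shaped as tuples of optional values. Below is a minimal sketch of the two accepted shapes, using only the TValueBuilder calls that appear in the test; the header path is an assumption and is not taken from this diff.

// Sketch only; header path assumed, builder calls taken from the test above.
#include <ydb/public/sdk/cpp/client/ydb_value/value.h>

using namespace NYdb;

// A well-formed key prefix: a tuple of optional values matching the leading key columns.
TValue MakeKeyPrefix() {
    return TValueBuilder()
        .BeginTuple()
            .AddElement().OptionalUint64(1)
            .AddElement().OptionalUtf8("Bucket50")
        .EndTuple()
        .Build();
}

// A well-formed (empty) key suffix; structs, lists and bare scalars are rejected as BAD_REQUEST.
TValue MakeEmptyKeySuffix() {
    return TValueBuilder().BeginTuple().EndTuple().Build();
}
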
diff --git a/ydb/services/ydb/ydb_table.cpp b/ydb/services/ydb/ydb_table.cpp
index 0e661ff698b..7189614b8e8 100644
--- a/ydb/services/ydb/ydb_table.cpp
+++ b/ydb/services/ydb/ydb_table.cpp
@@ -39,17 +39,17 @@ void TGRpcYdbTableService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
#define ADD_REQUEST(NAME, IN, OUT, ACTION) \
MakeIntrusive<TGRpcRequest<Ydb::Table::IN, Ydb::Table::OUT, TGRpcYdbTableService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::Table::V1::TableService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::Table::V1::TableService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("table", #NAME))->Run();
#define ADD_BYTES_REQUEST(NAME, IN, OUT, ACTION) \
MakeIntrusive<TGRpcRequest<Ydb::Table::IN, Ydb::Table::OUT, TGRpcYdbTableService>>(this, &Service_, CQ_, \
[this](NGrpc::IRequestContextBase *ctx) { \
- NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
- ACTION; \
- }, &Ydb::Table::V1::TableService::AsyncService::Request ## NAME, \
+ NGRpcService::ReportGrpcReqToMon(*ActorSystem_, ctx->GetPeer()); \
+ ACTION; \
+ }, &Ydb::Table::V1::TableService::AsyncService::Request ## NAME, \
#NAME, logger, getCounterBlock("table", #NAME))->Run();
ADD_REQUEST(CreateSession, CreateSessionRequest, CreateSessionResponse, {
@@ -109,9 +109,9 @@ void TGRpcYdbTableService::SetupIncomingRequests(NGrpc::TLoggerPtr logger) {
ADD_REQUEST(DescribeTableOptions, DescribeTableOptionsRequest, DescribeTableOptionsResponse, {
ActorSystem_->Send(GRpcRequestProxyId_, new TEvDescribeTableOptionsRequest(ctx));
})
- ADD_REQUEST(BulkUpsert, BulkUpsertRequest, BulkUpsertResponse, {
- ActorSystem_->Send(GRpcRequestProxyId_, new TEvBulkUpsertRequest(ctx));
- })
+ ADD_REQUEST(BulkUpsert, BulkUpsertRequest, BulkUpsertResponse, {
+ ActorSystem_->Send(GRpcRequestProxyId_, new TEvBulkUpsertRequest(ctx));
+ })
ADD_REQUEST(StreamExecuteScanQuery, ExecuteScanQueryRequest, ExecuteScanQueryPartialResponse, {
ActorSystem_->Send(GRpcRequestProxyId_, new TEvStreamExecuteScanQueryRequest(ctx));
})
diff --git a/ydb/services/ydb/ydb_table_split_ut.cpp b/ydb/services/ydb/ydb_table_split_ut.cpp
index b852def2aea..f492820f7a1 100644
--- a/ydb/services/ydb/ydb_table_split_ut.cpp
+++ b/ydb/services/ydb/ydb_table_split_ut.cpp
@@ -1,113 +1,113 @@
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
#include <ydb/public/sdk/cpp/client/ydb_params/params.h>
#include <ydb/public/sdk/cpp/client/ydb_types/status_codes.h>
-
+
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/client/flat_ut_client.h>
-
+
#include <ydb/library/yql/public/issue/yql_issue.h>
-
-#include "ydb_common_ut.h"
-
-#include <util/thread/factory.h>
-
+
+#include "ydb_common_ut.h"
+
+#include <util/thread/factory.h>
+
#include <ydb/public/api/grpc/ydb_table_v1.grpc.pb.h>
-
-using namespace NYdb;
-using namespace NYdb::NTable;
-
-void CreateTestTable(NYdb::NTable::TTableClient& client, const TString& name) {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- {
- auto query = TStringBuilder() << R"(
- --!syntax_v1
- CREATE TABLE `)" << name << R"(` (
- NameHash Uint32,
- Name Utf8,
- Version Uint32,
- `Timestamp` Int64,
- Data String,
- PRIMARY KEY (NameHash, Name)
- );)";
- auto result = session.ExecuteSchemeQuery(query).GetValueSync();
-
- Cerr << result.GetIssues().ToString();
- UNIT_ASSERT_VALUES_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-}
-
-void SetAutoSplitByLoad(NYdb::NTable::TTableClient& client, const TString& tableName, bool enabled) {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- {
- auto query = TStringBuilder()
- << "--!syntax_v1\n"
- << "ALTER TABLE `" << tableName << "` "
- << "SET AUTO_PARTITIONING_BY_LOAD "
- << (enabled ? "ENABLED" : "DISABLED") << ";";
- auto result = session.ExecuteSchemeQuery(query).GetValueSync();
-
- Cerr << result.GetIssues().ToString();
- UNIT_ASSERT_VALUES_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-}
-
-void RunQueryInLoop(NYdb::NTable::TTableClient& client, TString query, int keyCount, TAtomic& enough, TString namePrefix) {
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- TExecDataQuerySettings querySettings;
- querySettings.KeepInQueryCache(true);
-
- for (int key = 0 ; key < keyCount && !AtomicGet(enough); ++key) {
- TString name = namePrefix + ToString(key);
-
- auto paramsBuilder = client.GetParamsBuilder();
- auto params = paramsBuilder
- .AddParam("$name_hash")
- .Uint32(MurmurHash<ui32>(name.data(), name.size()))
- .Build()
- .AddParam("$name")
- .Utf8(name)
- .Build()
- .AddParam("$version")
- .Uint32(key%5)
- .Build()
- .AddParam("$timestamp")
- .Int64(key%10)
- .Build()
- .Build();
-
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(),
- std::move(params),
- querySettings)
- .ExtractValueSync();
-
- if (!result.IsSuccess() && result.GetStatus() != NYdb::EStatus::OVERLOADED) {
- TString err = result.GetIssues().ToString();
- Cerr << result.GetStatus() << ": " << err << Endl;
- }
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- }
-};
-
-Y_UNIT_TEST_SUITE(YdbTableSplit) {
+
+using namespace NYdb;
+using namespace NYdb::NTable;
+
+void CreateTestTable(NYdb::NTable::TTableClient& client, const TString& name) {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ {
+ auto query = TStringBuilder() << R"(
+ --!syntax_v1
+ CREATE TABLE `)" << name << R"(` (
+ NameHash Uint32,
+ Name Utf8,
+ Version Uint32,
+ `Timestamp` Int64,
+ Data String,
+ PRIMARY KEY (NameHash, Name)
+ );)";
+ auto result = session.ExecuteSchemeQuery(query).GetValueSync();
+
+ Cerr << result.GetIssues().ToString();
+ UNIT_ASSERT_VALUES_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+}
+
+void SetAutoSplitByLoad(NYdb::NTable::TTableClient& client, const TString& tableName, bool enabled) {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ {
+ auto query = TStringBuilder()
+ << "--!syntax_v1\n"
+ << "ALTER TABLE `" << tableName << "` "
+ << "SET AUTO_PARTITIONING_BY_LOAD "
+ << (enabled ? "ENABLED" : "DISABLED") << ";";
+ auto result = session.ExecuteSchemeQuery(query).GetValueSync();
+
+ Cerr << result.GetIssues().ToString();
+ UNIT_ASSERT_VALUES_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+}
+
+void RunQueryInLoop(NYdb::NTable::TTableClient& client, TString query, int keyCount, TAtomic& enough, TString namePrefix) {
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ TExecDataQuerySettings querySettings;
+ querySettings.KeepInQueryCache(true);
+
+ for (int key = 0 ; key < keyCount && !AtomicGet(enough); ++key) {
+ TString name = namePrefix + ToString(key);
+
+ auto paramsBuilder = client.GetParamsBuilder();
+ auto params = paramsBuilder
+ .AddParam("$name_hash")
+ .Uint32(MurmurHash<ui32>(name.data(), name.size()))
+ .Build()
+ .AddParam("$name")
+ .Utf8(name)
+ .Build()
+ .AddParam("$version")
+ .Uint32(key%5)
+ .Build()
+ .AddParam("$timestamp")
+ .Int64(key%10)
+ .Build()
+ .Build();
+
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(),
+ std::move(params),
+ querySettings)
+ .ExtractValueSync();
+
+ if (!result.IsSuccess() && result.GetStatus() != NYdb::EStatus::OVERLOADED) {
+ TString err = result.GetIssues().ToString();
+ Cerr << result.GetStatus() << ": " << err << Endl;
+ }
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ }
+};
+
+Y_UNIT_TEST_SUITE(YdbTableSplit) {
void DoTestSplitByLoad(TKikimrWithGrpcAndRootSchema& server, TString query, bool fillWithData = false, size_t minSplits = 1) {
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
- NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
-
- CreateTestTable(client, "/Root/Foo");
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+ NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
+
+ CreateTestTable(client, "/Root/Foo");
if (fillWithData) {
IThreadFactory* pool = SystemThreadFactory();
@@ -137,74 +137,74 @@ Y_UNIT_TEST_SUITE(YdbTableSplit) {
Cerr << "Table filled with data" << Endl;
}
- SetAutoSplitByLoad(client, "/Root/Foo", true);
-
- size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
- Cerr << "Table has " << shardsBefore << " shards" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
-
+ SetAutoSplitByLoad(client, "/Root/Foo", true);
+
+ size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
+ Cerr << "Table has " << shardsBefore << " shards" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
+
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_INFO);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_INFO);
-
- // Set low CPU usage threshold for robustness
- TAtomic unused;
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 5, unused);
-// server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_SplitByLoadEnabled", 1, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
-
- TAtomic enough = 0;
- TAtomic finished = 0;
-
- IThreadFactory* pool = SystemThreadFactory();
-
- TVector<TAutoPtr<IThreadFactory::IThread>> threads;
- threads.resize(10);
- for (size_t i = 0; i < threads.size(); i++) {
- TString namePrefix = ToString(i) + "_";
- threads[i] = pool->Run([&client, &query, &enough, namePrefix, &finished]() {
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_INFO);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_INFO);
+
+ // Set low CPU usage threshold for robustness
+ TAtomic unused;
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 5, unused);
+// server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_SplitByLoadEnabled", 1, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
+
+ TAtomic enough = 0;
+ TAtomic finished = 0;
+
+ IThreadFactory* pool = SystemThreadFactory();
+
+ TVector<TAutoPtr<IThreadFactory::IThread>> threads;
+ threads.resize(10);
+ for (size_t i = 0; i < threads.size(); i++) {
+ TString namePrefix = ToString(i) + "_";
+ threads[i] = pool->Run([&client, &query, &enough, namePrefix, &finished]() {
RunQueryInLoop(client, query, 100000, enough, namePrefix);
- AtomicIncrement(finished);
- });
- }
-
- // Wait for split to happen
- while (AtomicGet(finished) < (i64)threads.size()) {
- size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
+ AtomicIncrement(finished);
+ });
+ }
+
+ // Wait for split to happen
+ while (AtomicGet(finished) < (i64)threads.size()) {
+ size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
if (shardsAfter >= shardsBefore + minSplits) {
- AtomicSet(enough, 1);
- break;
- }
- Sleep(TDuration::Seconds(5));
- }
-
- for (size_t i = 0; i < threads.size(); i++) {
- threads[i]->Join();
- }
-
- int retries = 5;
- size_t shardsAfter = 0;
+ AtomicSet(enough, 1);
+ break;
+ }
+ Sleep(TDuration::Seconds(5));
+ }
+
+ for (size_t i = 0; i < threads.size(); i++) {
+ threads[i]->Join();
+ }
+
+ int retries = 5;
+ size_t shardsAfter = 0;
for (;retries > 0 && !(shardsAfter >= shardsBefore + minSplits); --retries, Sleep(TDuration::Seconds(1))) {
- shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
- }
- Cerr << "Table has " << shardsAfter << " shards" << Endl;
+ shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
+ }
+ Cerr << "Table has " << shardsAfter << " shards" << Endl;
UNIT_ASSERT_C(shardsAfter >= shardsBefore + minSplits, "Table didn't split!!11 O_O");
- }
-
- Y_UNIT_TEST(SplitByLoadWithReads) {
- TString query =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "SELECT * FROM [/Root/Foo] \n"
- "WHERE NameHash = $name_hash AND Name = $name";
-
- TKikimrWithGrpcAndRootSchema server;
- DoTestSplitByLoad(server, query);
- }
-
+ }
+
+ Y_UNIT_TEST(SplitByLoadWithReads) {
+ TString query =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "SELECT * FROM [/Root/Foo] \n"
+ "WHERE NameHash = $name_hash AND Name = $name";
+
+ TKikimrWithGrpcAndRootSchema server;
+ DoTestSplitByLoad(server, query);
+ }
+
Y_UNIT_TEST(SplitByLoadWithReadsMultipleSplitsWithData) {
TString query =
"DECLARE $name_hash AS Uint32;\n"
@@ -218,205 +218,205 @@ Y_UNIT_TEST_SUITE(YdbTableSplit) {
DoTestSplitByLoad(server, query, /* fill with data */ true, /* at least two splits */ 2);
}
- Y_UNIT_TEST(SplitByLoadWithUpdates) {
- TString query =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
- " VALUES ($name_hash, $name, $version, $timestamp);";
-
- TKikimrWithGrpcAndRootSchema server;
- DoTestSplitByLoad(server, query);
- }
-
- Y_UNIT_TEST(SplitByLoadWithDeletes) {
- TString query =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "DELETE FROM [/Root/Foo] \n"
- "WHERE NameHash = $name_hash AND Name = $name";
-
- TKikimrWithGrpcAndRootSchema server;
- DoTestSplitByLoad(server, query);
- }
-
- Y_UNIT_TEST(SplitByLoadWithNonEmptyRangeReads) {
- TKikimrWithGrpcAndRootSchema server;
-
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
- NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
-
- CreateTestTable(client, "/Root/Foo");
-
- // Fill the table with some data
- {
- TString upsertQuery =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
- " VALUES ($name_hash, $name, $version, $timestamp);";
-
- TAtomic enough = 0;
- RunQueryInLoop(client, upsertQuery, 2000, enough, "");
- }
-
- SetAutoSplitByLoad(client, "/Root/Foo", true);
-
- // Set low CPU usage threshold for robustness
- TAtomic unused;
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 5, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
-
- size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
- Cerr << "Table has " << shardsBefore << " shards" << Endl;
- UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
-
+ Y_UNIT_TEST(SplitByLoadWithUpdates) {
+ TString query =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
+ " VALUES ($name_hash, $name, $version, $timestamp);";
+
+ TKikimrWithGrpcAndRootSchema server;
+ DoTestSplitByLoad(server, query);
+ }
+
+ Y_UNIT_TEST(SplitByLoadWithDeletes) {
+ TString query =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "DELETE FROM [/Root/Foo] \n"
+ "WHERE NameHash = $name_hash AND Name = $name";
+
+ TKikimrWithGrpcAndRootSchema server;
+ DoTestSplitByLoad(server, query);
+ }
+
+ Y_UNIT_TEST(SplitByLoadWithNonEmptyRangeReads) {
+ TKikimrWithGrpcAndRootSchema server;
+
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+ NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
+
+ CreateTestTable(client, "/Root/Foo");
+
+ // Fill the table with some data
+ {
+ TString upsertQuery =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "UPSERT INTO [/Root/Foo] (NameHash, Name, Version, Timestamp) "
+ " VALUES ($name_hash, $name, $version, $timestamp);";
+
+ TAtomic enough = 0;
+ RunQueryInLoop(client, upsertQuery, 2000, enough, "");
+ }
+
+ SetAutoSplitByLoad(client, "/Root/Foo", true);
+
+ // Set low CPU usage threshold for robustness
+ TAtomic unused;
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 5, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportThreshlodPercent", 1, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("DataShardControls.CpuUsageReportIntervalSeconds", 3, unused);
+
+ size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
+ Cerr << "Table has " << shardsBefore << " shards" << Endl;
+ UNIT_ASSERT_VALUES_EQUAL(shardsBefore, 1);
+
NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_INFO);
- server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_INFO);
-
- TString rangeQuery =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "SELECT count(*) FROM [/Root/Foo] \n"
- "WHERE NameHash = $name_hash;";
-
- TAtomic enough = 0;
- TAtomic finished = 0;
-
- IThreadFactory* pool = SystemThreadFactory();
-
- TVector<TAutoPtr<IThreadFactory::IThread>> threads;
- threads.resize(10);
- for (size_t i = 0; i < threads.size(); i++) {
- threads[i] = pool->Run([&client, &rangeQuery, &enough, &finished]() {
- for (int i = 0; i < 10; ++i) {
- RunQueryInLoop(client, rangeQuery, 2000, enough, "");
- }
- AtomicIncrement(finished);
- });
- }
-
- // Wait for split to happen
- while (AtomicGet(finished) < (i64)threads.size()) {
- size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
- if (shardsAfter > shardsBefore) {
- AtomicSet(enough, 1);
- break;
- }
- Sleep(TDuration::Seconds(5));
- }
-
- for (size_t i = 0; i < threads.size(); i++) {
- threads[i]->Join();
- }
-
- int retries = 5;
- size_t shardsAfter = 0;
- for (;retries > 0 && shardsAfter <= shardsBefore; --retries, Sleep(TDuration::Seconds(1))) {
- shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
- }
- Cerr << "Table has " << shardsAfter << " shards" << Endl;
- UNIT_ASSERT_C(shardsAfter > shardsBefore, "Table didn't split!!11 O_O");
- }
-
- // Allows adjusting the time
- class TTestTimeProvider : public ITimeProvider {
- TIntrusivePtr<ITimeProvider> RealProvider;
- TAtomic Shift;
-
- public:
- explicit TTestTimeProvider(TIntrusivePtr<ITimeProvider> realProvider)
- : RealProvider(realProvider)
- , Shift(0)
- {}
-
- TInstant Now() override {
- return RealProvider->Now() + TDuration::MicroSeconds(AtomicGet(Shift));
- }
-
- void AddShift(TDuration delta) {
- AtomicAdd(Shift, delta.MicroSeconds());
- }
- };
-
- Y_UNIT_TEST(MergeByNoLoadAfterSplit) {
- // Create test table and read many rows and trigger split
- TString query =
- "DECLARE $name_hash AS Uint32;\n"
- "DECLARE $name AS Utf8;\n"
- "DECLARE $version AS Uint32;\n"
- "DECLARE $timestamp AS Int64;\n\n"
- "SELECT * FROM [/Root/Foo] \n"
- "WHERE NameHash = $name_hash AND Name = $name";
-
- TIntrusivePtr<ITimeProvider> originalTimeProvider = NKikimr::TAppData::TimeProvider;
- TIntrusivePtr<TTestTimeProvider> testTimeProvider = new TTestTimeProvider(originalTimeProvider);
- NKikimr::TAppData::TimeProvider = testTimeProvider;
-
- TKikimrWithGrpcAndRootSchema server;
-
- // Set min uptime (4h) and min low-load duration (10h) required before merge by load
- TAtomic unused;
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_MergeByLoadMinUptimeSec", 4*3600, unused);
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_MergeByLoadMinLowLoadDurationSec", 10*3600, unused);
-
- Cerr << "Triggering split by load" << Endl;
- DoTestSplitByLoad(server, query);
-
- // Set split threshold very high and run some more load on new shards
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 110, unused);
-
- Cerr << "Loading new shards" << Endl;
- {
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
- TInstant startTime = testTimeProvider->Now();
- while (testTimeProvider->Now() < startTime + TDuration::Seconds(20)) {
- TAtomic enough = 0;
- RunQueryInLoop(client, query, 1000, enough, "");
- }
- }
-
- // Set split threshold at 10% so that merge can be triggered after the high load goes away
- server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 10, unused);
-
- // Stop all load and see how many partitions the table has
- NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
- size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
- Cerr << "Table has " << shardsBefore << " shards" << Endl;
- UNIT_ASSERT_VALUES_UNEQUAL(shardsBefore, 1);
-
- // Fast forward time a bit multiple times and check that merge doesn't happen
- for (int i = 0; i < 8; ++i) {
- Cerr << "Fast forward 1h" << Endl;
- testTimeProvider->AddShift(TDuration::Hours(1));
- Sleep(TDuration::Seconds(3));
- UNIT_ASSERT_VALUES_EQUAL(oldClient.GetTablePartitions("/Root/Foo").size(), shardsBefore);
- }
-
- Cerr << "Fast forward > 10h to trigger the merge" << Endl;
- testTimeProvider->AddShift(TDuration::Hours(8));
-
- // Wait for merge to happen
- size_t shardsAfter = shardsBefore;
- for (int i = 0; i < 20; ++i) {
- size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
- if (shardsAfter < shardsBefore) {
- return;
- }
- Sleep(TDuration::Seconds(5));
- }
- UNIT_ASSERT_C(shardsAfter < shardsBefore, "Merge didn't happen!!11 O_O");
- }
-}
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_INFO);
+ server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::TX_DATASHARD, NActors::NLog::PRI_INFO);
+
+ TString rangeQuery =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "SELECT count(*) FROM [/Root/Foo] \n"
+ "WHERE NameHash = $name_hash;";
+
+ TAtomic enough = 0;
+ TAtomic finished = 0;
+
+ IThreadFactory* pool = SystemThreadFactory();
+
+ TVector<TAutoPtr<IThreadFactory::IThread>> threads;
+ threads.resize(10);
+ for (size_t i = 0; i < threads.size(); i++) {
+ threads[i] = pool->Run([&client, &rangeQuery, &enough, &finished]() {
+ for (int i = 0; i < 10; ++i) {
+ RunQueryInLoop(client, rangeQuery, 2000, enough, "");
+ }
+ AtomicIncrement(finished);
+ });
+ }
+
+ // Wait for split to happen
+ while (AtomicGet(finished) < (i64)threads.size()) {
+ size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
+ if (shardsAfter > shardsBefore) {
+ AtomicSet(enough, 1);
+ break;
+ }
+ Sleep(TDuration::Seconds(5));
+ }
+
+ for (size_t i = 0; i < threads.size(); i++) {
+ threads[i]->Join();
+ }
+
+ int retries = 5;
+ size_t shardsAfter = 0;
+ for (;retries > 0 && shardsAfter <= shardsBefore; --retries, Sleep(TDuration::Seconds(1))) {
+ shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
+ }
+ Cerr << "Table has " << shardsAfter << " shards" << Endl;
+ UNIT_ASSERT_C(shardsAfter > shardsBefore, "Table didn't split!!11 O_O");
+ }
+
+ // Allows adjusting the time
+ class TTestTimeProvider : public ITimeProvider {
+ TIntrusivePtr<ITimeProvider> RealProvider;
+ TAtomic Shift;
+
+ public:
+ explicit TTestTimeProvider(TIntrusivePtr<ITimeProvider> realProvider)
+ : RealProvider(realProvider)
+ , Shift(0)
+ {}
+
+ TInstant Now() override {
+ return RealProvider->Now() + TDuration::MicroSeconds(AtomicGet(Shift));
+ }
+
+ void AddShift(TDuration delta) {
+ AtomicAdd(Shift, delta.MicroSeconds());
+ }
+ };
+
+ Y_UNIT_TEST(MergeByNoLoadAfterSplit) {
+ // Create test table and read many rows and trigger split
+ TString query =
+ "DECLARE $name_hash AS Uint32;\n"
+ "DECLARE $name AS Utf8;\n"
+ "DECLARE $version AS Uint32;\n"
+ "DECLARE $timestamp AS Int64;\n\n"
+ "SELECT * FROM [/Root/Foo] \n"
+ "WHERE NameHash = $name_hash AND Name = $name";
+
+ TIntrusivePtr<ITimeProvider> originalTimeProvider = NKikimr::TAppData::TimeProvider;
+ TIntrusivePtr<TTestTimeProvider> testTimeProvider = new TTestTimeProvider(originalTimeProvider);
+ NKikimr::TAppData::TimeProvider = testTimeProvider;
+
+ TKikimrWithGrpcAndRootSchema server;
+
+ // Set min uptime (4h) and min low-load duration (10h) required before merge by load
+ TAtomic unused;
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_MergeByLoadMinUptimeSec", 4*3600, unused);
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_MergeByLoadMinLowLoadDurationSec", 10*3600, unused);
+
+ Cerr << "Triggering split by load" << Endl;
+ DoTestSplitByLoad(server, query);
+
+ // Set split threshold very high and run some more load on new shards
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 110, unused);
+
+ Cerr << "Loading new shards" << Endl;
+ {
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+ TInstant startTime = testTimeProvider->Now();
+ while (testTimeProvider->Now() < startTime + TDuration::Seconds(20)) {
+ TAtomic enough = 0;
+ RunQueryInLoop(client, query, 1000, enough, "");
+ }
+ }
+
+ // Set split threshold at 10% so that merge can be triggered after the high load goes away
+ server.Server_->GetRuntime()->GetAppData().Icb->SetValue("SchemeShard_FastSplitCpuPercentageThreshold", 10, unused);
+
+ // Stop all load and see how many partitions the table has
+ NFlatTests::TFlatMsgBusClient oldClient(server.ServerSettings->Port);
+ size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();
+ Cerr << "Table has " << shardsBefore << " shards" << Endl;
+ UNIT_ASSERT_VALUES_UNEQUAL(shardsBefore, 1);
+
+ // Fast forward time a bit multiple times and check that merge doesn't happen
+ for (int i = 0; i < 8; ++i) {
+ Cerr << "Fast forward 1h" << Endl;
+ testTimeProvider->AddShift(TDuration::Hours(1));
+ Sleep(TDuration::Seconds(3));
+ UNIT_ASSERT_VALUES_EQUAL(oldClient.GetTablePartitions("/Root/Foo").size(), shardsBefore);
+ }
+
+ Cerr << "Fast forward > 10h to trigger the merge" << Endl;
+ testTimeProvider->AddShift(TDuration::Hours(8));
+
+ // Wait for merge to happen
+ size_t shardsAfter = shardsBefore;
+ for (int i = 0; i < 20; ++i) {
+ size_t shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
+ if (shardsAfter < shardsBefore) {
+ return;
+ }
+ Sleep(TDuration::Seconds(5));
+ }
+ UNIT_ASSERT_C(shardsAfter < shardsBefore, "Merge didn't happen!!11 O_O");
+ }
+}
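
The split-by-load tests above all follow the same loop. The following condensed sketch restates that loop for readability; CreateTestTable, SetAutoSplitByLoad and RunQueryInLoop are the helpers defined earlier in this file, while the single-threaded load and the bounded polling are simplifications rather than the exact test code.

// Condensed restatement of DoTestSplitByLoad; not a drop-in replacement for the test.
void SplitByLoadSketch(NYdb::NTable::TTableClient& client,
                       NFlatTests::TFlatMsgBusClient& oldClient,
                       const TString& query) {
    CreateTestTable(client, "/Root/Foo");
    SetAutoSplitByLoad(client, "/Root/Foo", true);   // ALTER TABLE ... SET AUTO_PARTITIONING_BY_LOAD ENABLED

    const size_t shardsBefore = oldClient.GetTablePartitions("/Root/Foo").size();

    // Drive CPU load on the single shard; in the real test this runs in 10 threads.
    TAtomic enough = 0;
    RunQueryInLoop(client, query, 10000, enough, "load_");

    // Poll until SchemeShard reacts to the reported CPU usage and splits the table.
    size_t shardsAfter = shardsBefore;
    for (int retries = 0; retries < 20 && shardsAfter <= shardsBefore; ++retries) {
        Sleep(TDuration::Seconds(5));
        shardsAfter = oldClient.GetTablePartitions("/Root/Foo").size();
    }
    UNIT_ASSERT_C(shardsAfter > shardsBefore, "table did not split");
}
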
diff --git a/ydb/services/ydb/ydb_table_ut.cpp b/ydb/services/ydb/ydb_table_ut.cpp
index 571f31e5334..4861c06b92e 100644
--- a/ydb/services/ydb/ydb_table_ut.cpp
+++ b/ydb/services/ydb/ydb_table_ut.cpp
@@ -243,11 +243,11 @@ Y_UNIT_TEST_SUITE(YdbYqlClient) {
auto result = session.ExecuteDataQuery("SELECT 42;",
TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx()).ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), EStatus::SUCCESS);
UNIT_ASSERT_VALUES_EQUAL(result.GetEndpoint(), location);
}
// All requests used one session
- UNIT_ASSERT_VALUES_EQUAL(sids.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(sids.size(), 1);
// No more session captured by client
UNIT_ASSERT_VALUES_EQUAL(client.GetActiveSessionCount(), 0);
}
@@ -2465,7 +2465,7 @@ R"___(<main>: Error: Transaction not found: , code: 2015
UNIT_ASSERT(deferred.status() == Ydb::StatusIds::BAD_SESSION);
}
}
-
+
Y_UNIT_TEST(DeleteTableWithDeletedIndex) {
TKikimrWithGrpcAndRootSchema server;
@@ -2802,163 +2802,163 @@ R"___(<main>: Error: Transaction not found: , code: 2015
- Y_UNIT_TEST(QueryStats) {
- TKikimrWithGrpcAndRootSchema server;
-
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
- NYdb::NTable::TTableClient client(driver);
-
- auto sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- auto session = sessionResult.GetSession();
-
- const ui32 SHARD_COUNT = 4;
-
- {
- auto tableBuilder = client.GetTableBuilder();
- tableBuilder
- .AddNullableColumn("Key", EPrimitiveType::Uint32)
- .AddNullableColumn("Value", EPrimitiveType::Utf8);
- tableBuilder.SetPrimaryKeyColumn("Key");
-
- auto tableSettings = NYdb::NTable::TCreateTableSettings().PartitioningPolicy(
- NYdb::NTable::TPartitioningPolicy().UniformPartitions(SHARD_COUNT));
-
- auto result = session.CreateTable("/Root/Foo", tableBuilder.Build(), tableSettings).ExtractValueSync();
- UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
- UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
- }
-
- for (bool returnStats : {false, true}) {
- NYdb::NTable::TExecDataQuerySettings execSettings;
- if (returnStats) {
- execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
- }
- {
- auto query = "UPSERT INTO [/Root/Foo] (Key, Value) VALUES (0, 'aa');";
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
-
- if (!returnStats) {
- UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
- } else {
- // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
- auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).updates().rows(), 1);
- UNIT_ASSERT(stats.query_phases(0).table_access(0).updates().bytes() > 1);
- UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
+ Y_UNIT_TEST(QueryStats) {
+ TKikimrWithGrpcAndRootSchema server;
+
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+ NYdb::NTable::TTableClient client(driver);
+
+ auto sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ auto session = sessionResult.GetSession();
+
+ const ui32 SHARD_COUNT = 4;
+
+ {
+ auto tableBuilder = client.GetTableBuilder();
+ tableBuilder
+ .AddNullableColumn("Key", EPrimitiveType::Uint32)
+ .AddNullableColumn("Value", EPrimitiveType::Utf8);
+ tableBuilder.SetPrimaryKeyColumn("Key");
+
+ auto tableSettings = NYdb::NTable::TCreateTableSettings().PartitioningPolicy(
+ NYdb::NTable::TPartitioningPolicy().UniformPartitions(SHARD_COUNT));
+
+ auto result = session.CreateTable("/Root/Foo", tableBuilder.Build(), tableSettings).ExtractValueSync();
+ UNIT_ASSERT_EQUAL(result.IsTransportError(), false);
+ UNIT_ASSERT_EQUAL(result.GetStatus(), EStatus::SUCCESS);
+ }
+
+ for (bool returnStats : {false, true}) {
+ NYdb::NTable::TExecDataQuerySettings execSettings;
+ if (returnStats) {
+ execSettings.CollectQueryStats(ECollectQueryStatsMode::Basic);
+ }
+ {
+ auto query = "UPSERT INTO [/Root/Foo] (Key, Value) VALUES (0, 'aa');";
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
+
+ if (!returnStats) {
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
+ } else {
+ // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
+ auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).updates().rows(), 1);
+ UNIT_ASSERT(stats.query_phases(0).table_access(0).updates().bytes() > 1);
+ UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
UNIT_ASSERT_VALUES_EQUAL(stats.total_cpu_time_us(), stats.query_phases(0).cpu_time_us());
UNIT_ASSERT_VALUES_EQUAL(stats.total_duration_us(), stats.query_phases(0).duration_us());
- }
- }
-
- {
- auto query = "UPSERT INTO [/Root/Foo] (Key, Value) VALUES (1, Utf8('bb')), (0xffffffff, Utf8('cc'));";
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
-
- if (!returnStats) {
- UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
- } else {
- // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
- auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 2);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).updates().rows(), 2);
- UNIT_ASSERT(stats.query_phases(1).table_access(0).updates().bytes() > 1);
- UNIT_ASSERT(stats.query_phases(1).cpu_time_us() > 0);
+ }
+ }
+
+ {
+ auto query = "UPSERT INTO [/Root/Foo] (Key, Value) VALUES (1, Utf8('bb')), (0xffffffff, Utf8('cc'));";
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
+
+ if (!returnStats) {
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
+ } else {
+ // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
+ auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).updates().rows(), 2);
+ UNIT_ASSERT(stats.query_phases(1).table_access(0).updates().bytes() > 1);
+ UNIT_ASSERT(stats.query_phases(1).cpu_time_us() > 0);
UNIT_ASSERT_VALUES_EQUAL(stats.total_cpu_time_us(),
stats.query_phases(0).cpu_time_us() + stats.query_phases(1).cpu_time_us());
UNIT_ASSERT_VALUES_EQUAL(stats.total_duration_us(),
stats.query_phases(0).duration_us() + stats.query_phases(1).duration_us());
- }
- }
-
- {
- auto query = "SELECT * FROM [/Root/Foo];";
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
-
- if (!returnStats) {
- UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
- } else {
- // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
- auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 3);
- UNIT_ASSERT(stats.query_phases(0).table_access(0).reads().bytes() > 3);
- UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
+ }
+ }
+
+ {
+ auto query = "SELECT * FROM [/Root/Foo];";
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
+
+ if (!returnStats) {
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
+ } else {
+ // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
+ auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 3);
+ UNIT_ASSERT(stats.query_phases(0).table_access(0).reads().bytes() > 3);
+ UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
UNIT_ASSERT_VALUES_EQUAL(stats.total_cpu_time_us(), stats.query_phases(0).cpu_time_us());
UNIT_ASSERT_VALUES_EQUAL(stats.total_duration_us(), stats.query_phases(0).duration_us());
- }
- }
-
- {
- auto query = "SELECT * FROM [/Root/Foo] WHERE Key == 1;";
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
-
- if (!returnStats) {
- UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
- } else {
- // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
- auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 1);
- UNIT_ASSERT(stats.query_phases(0).table_access(0).reads().bytes() > 1);
- UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
+ }
+ }
+
+ {
+ auto query = "SELECT * FROM [/Root/Foo] WHERE Key == 1;";
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
+
+ if (!returnStats) {
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
+ } else {
+ // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
+ auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 1);
+ UNIT_ASSERT(stats.query_phases(0).table_access(0).reads().bytes() > 1);
+ UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
UNIT_ASSERT_VALUES_EQUAL(stats.total_cpu_time_us(), stats.query_phases(0).cpu_time_us());
UNIT_ASSERT_VALUES_EQUAL(stats.total_duration_us(), stats.query_phases(0).duration_us());
- }
- }
-
- {
- auto query = "DELETE FROM [/Root/Foo] WHERE Key > 0;";
- auto result = session.ExecuteDataQuery(
- query,
- TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
-
- if (!returnStats) {
- UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
- } else {
- // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
- auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 2);
-
- // 1st phase: find matching rows
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 2);
- UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
- // 2nd phase: delete found rows
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access().size(), 1);
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).name(), "/Root/Foo");
- UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).deletes().rows(), 2);
- UNIT_ASSERT(stats.query_phases(1).cpu_time_us() > 0);
+ }
+ }
+
+ {
+ auto query = "DELETE FROM [/Root/Foo] WHERE Key > 0;";
+ auto result = session.ExecuteDataQuery(
+ query,
+ TTxControl::BeginTx().CommitTx(), execSettings).ExtractValueSync();
+
+ if (!returnStats) {
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStats().Defined(), false);
+ } else {
+ // Cerr << "\nQUERY: " << query << "\nSTATS:\n" << result.GetStats()->ToString() << Endl;
+ auto& stats = NYdb::TProtoAccessor::GetProto(*result.GetStats());
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases().size(), 2);
+
+ // 1st phase: find matching rows
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(0).table_access(0).reads().rows(), 2);
+ UNIT_ASSERT(stats.query_phases(0).cpu_time_us() > 0);
+ // 2nd phase: delete found rows
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access().size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).name(), "/Root/Foo");
+ UNIT_ASSERT_VALUES_EQUAL(stats.query_phases(1).table_access(0).deletes().rows(), 2);
+ UNIT_ASSERT(stats.query_phases(1).cpu_time_us() > 0);
// Totals
UNIT_ASSERT_VALUES_EQUAL(stats.total_cpu_time_us(),
stats.query_phases(0).cpu_time_us() + stats.query_phases(1).cpu_time_us());
UNIT_ASSERT_VALUES_EQUAL(stats.total_duration_us(),
stats.query_phases(0).duration_us() + stats.query_phases(1).duration_us());
- }
- }
- }
-
- sessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
- }
+ }
+ }
+ }
+
+ sessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL(sessionResult.GetStatus(), EStatus::SUCCESS);
+ }
Y_UNIT_TEST(CopyTables) {
TKikimrWithGrpcAndRootSchemaNoSystemViews server;
@@ -3661,7 +3661,7 @@ R"___(<main>: Error: Transaction not found: , code: 2015
}
}
}
-
+
Y_UNIT_TEST(ColumnFamiliesWithStorageAndIndex) {
TKikimrWithGrpcAndRootSchema server;
server.Server_->GetRuntime()->SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_NOTICE);
@@ -3910,8 +3910,8 @@ R"___(<main>: Error: Transaction not found: , code: 2015
{
auto tableBuilder = client.GetTableBuilder()
- .AddNullableColumn("Value", EPrimitiveType::Utf8)
- .AddNullableColumn("SubKey", EPrimitiveType::Utf8)
+ .AddNullableColumn("Value", EPrimitiveType::Utf8)
+ .AddNullableColumn("SubKey", EPrimitiveType::Utf8)
.AddNullableColumn("Key", EPrimitiveType::Uint32)
.SetPrimaryKeyColumn("Key");
@@ -4234,102 +4234,102 @@ R"___(<main>: Error: Transaction not found: , code: 2015
UNIT_ASSERT(partSettings.GetPartitioningBySize().Defined());
UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningBySize().GetRef(), false);
}
- {
- auto settings = NYdb::NTable::TAlterTableSettings()
- .BeginAlterPartitioningSettings()
- .SetPartitioningByLoad(true)
- .EndAlterPartitioningSettings();
-
- auto result = session.AlterTable(tableName, settings).ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
- }
- {
- TDescribeTableResult describeResult = session.DescribeTable(tableName)
- .GetValueSync();
- UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
- const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
- UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
- UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), true);
- }
+ {
+ auto settings = NYdb::NTable::TAlterTableSettings()
+ .BeginAlterPartitioningSettings()
+ .SetPartitioningByLoad(true)
+ .EndAlterPartitioningSettings();
+
+ auto result = session.AlterTable(tableName, settings).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+ {
+ TDescribeTableResult describeResult = session.DescribeTable(tableName)
+ .GetValueSync();
+ UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
+ const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
+ UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
+ UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), true);
+ }
}
- Y_UNIT_TEST(CreateAndAltertTableWithPartitioningByLoad) {
- TKikimrWithGrpcAndRootSchema server;
-
- NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
-
- NYdb::NTable::TTableClient client(driver);
- auto getSessionResult = client.CreateSession().ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(getSessionResult.GetStatus(), EStatus::SUCCESS, getSessionResult.GetIssues().ToString());
- auto session = getSessionResult.GetSession();
- const TString tableName = "Root/Test";
-
- {
- auto builder = TTableBuilder()
- .AddNullableColumn("key", EPrimitiveType::Uint64)
- .AddNullableColumn("value", EPrimitiveType::Utf8)
- .SetPrimaryKeyColumn("key")
- .BeginPartitioningSettings()
- .SetPartitioningByLoad(true)
- .EndPartitioningSettings();
-
- auto desc = builder.Build();
-
- auto result = session.CreateTable(tableName, std::move(desc)).GetValueSync();
-
- UNIT_ASSERT_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
- }
- {
- TDescribeTableResult describeResult = session.DescribeTable(tableName)
- .GetValueSync();
- UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
- const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
- UNIT_ASSERT(partSettings.GetPartitioningBySize().Defined());
- UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningBySize().GetRef(), false);
- UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
- UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), true);
+ Y_UNIT_TEST(CreateAndAltertTableWithPartitioningByLoad) {
+ TKikimrWithGrpcAndRootSchema server;
+
+ NYdb::TDriver driver(TDriverConfig().SetEndpoint(TStringBuilder() << "localhost:" << server.GetPort()));
+
+ NYdb::NTable::TTableClient client(driver);
+ auto getSessionResult = client.CreateSession().ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(getSessionResult.GetStatus(), EStatus::SUCCESS, getSessionResult.GetIssues().ToString());
+ auto session = getSessionResult.GetSession();
+ const TString tableName = "Root/Test";
+
+ {
+ auto builder = TTableBuilder()
+ .AddNullableColumn("key", EPrimitiveType::Uint64)
+ .AddNullableColumn("value", EPrimitiveType::Utf8)
+ .SetPrimaryKeyColumn("key")
+ .BeginPartitioningSettings()
+ .SetPartitioningByLoad(true)
+ .EndPartitioningSettings();
+
+ auto desc = builder.Build();
+
+ auto result = session.CreateTable(tableName, std::move(desc)).GetValueSync();
+
+ UNIT_ASSERT_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+ {
+ TDescribeTableResult describeResult = session.DescribeTable(tableName)
+ .GetValueSync();
+ UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
+ const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
+ UNIT_ASSERT(partSettings.GetPartitioningBySize().Defined());
+ UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningBySize().GetRef(), false);
+ UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
+ UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), true);
UNIT_ASSERT_VALUES_EQUAL(partSettings.GetMinPartitionsCount(), 1);
- }
- {
- auto settings = NYdb::NTable::TAlterTableSettings()
- .BeginAlterPartitioningSettings()
- .SetPartitioningBySize(true)
- .EndAlterPartitioningSettings();
-
- auto result = session.AlterTable(tableName, settings).ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
- }
- {
- TDescribeTableResult describeResult = session.DescribeTable(tableName)
- .GetValueSync();
- UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
- const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
- UNIT_ASSERT(partSettings.GetPartitioningBySize().Defined());
- UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningBySize().GetRef(), true);
- UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
+ }
+ {
+ auto settings = NYdb::NTable::TAlterTableSettings()
+ .BeginAlterPartitioningSettings()
+ .SetPartitioningBySize(true)
+ .EndAlterPartitioningSettings();
+
+ auto result = session.AlterTable(tableName, settings).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+ {
+ TDescribeTableResult describeResult = session.DescribeTable(tableName)
+ .GetValueSync();
+ UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
+ const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
+ UNIT_ASSERT(partSettings.GetPartitioningBySize().Defined());
+ UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningBySize().GetRef(), true);
+ UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), true);
UNIT_ASSERT_VALUES_EQUAL(partSettings.GetMinPartitionsCount(), 1);
UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitionSizeMb(), 2048);
- }
- {
- auto settings = NYdb::NTable::TAlterTableSettings()
- .BeginAlterPartitioningSettings()
- .SetPartitioningByLoad(false)
- .EndAlterPartitioningSettings();
-
- auto result = session.AlterTable(tableName, settings).ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
- }
- {
- TDescribeTableResult describeResult = session.DescribeTable(tableName)
- .GetValueSync();
- UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
- const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
- UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
- UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), false);
- }
- }
-
+ }
+ {
+ auto settings = NYdb::NTable::TAlterTableSettings()
+ .BeginAlterPartitioningSettings()
+ .SetPartitioningByLoad(false)
+ .EndAlterPartitioningSettings();
+
+ auto result = session.AlterTable(tableName, settings).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+ {
+ TDescribeTableResult describeResult = session.DescribeTable(tableName)
+ .GetValueSync();
+ UNIT_ASSERT_EQUAL(describeResult.GetStatus(), EStatus::SUCCESS);
+ const auto& partSettings = describeResult.GetTableDescription().GetPartitioningSettings();
+ UNIT_ASSERT(partSettings.GetPartitioningByLoad().Defined());
+ UNIT_ASSERT_VALUES_EQUAL(partSettings.GetPartitioningByLoad().GetRef(), false);
+ }
+ }
+
Y_UNIT_TEST(CheckDefaultTableSettings1) {
TKikimrWithGrpcAndRootSchema server;
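Editor's note: as a quick reference for the re-added test hunk above, the sketch below condenses the same SDK calls into two helpers. The header path and the helper names (CreateTableWithLoadPartitioning, EnablePartitioningBySize) are illustrative assumptions, not part of this patch; every builder call mirrors what is visible in the diff.

// Sketch only, not introduced by this patch. Assumes the YDB C++ SDK table client header.
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>  // assumed header path

using namespace NYdb;
using namespace NYdb::NTable;

// Create a table whose partitioning is driven by load, as the test above does.
TStatus CreateTableWithLoadPartitioning(TSession& session, const TString& tableName) {
    auto builder = TTableBuilder()
        .AddNullableColumn("key", EPrimitiveType::Uint64)
        .AddNullableColumn("value", EPrimitiveType::Utf8)
        .SetPrimaryKeyColumn("key")
        .BeginPartitioningSettings()
            .SetPartitioningByLoad(true)
        .EndPartitioningSettings();
    return session.CreateTable(tableName, builder.Build()).GetValueSync();
}

// Later enable partitioning by size as well, via ALTER TABLE settings.
TStatus EnablePartitioningBySize(TSession& session, const TString& tableName) {
    auto settings = TAlterTableSettings()
        .BeginAlterPartitioningSettings()
            .SetPartitioningBySize(true)
        .EndAlterPartitioningSettings();
    return session.AlterTable(tableName, settings).ExtractValueSync();
}

A caller would invoke CreateTableWithLoadPartitioning first and EnablePartitioningBySize afterwards, then re-describe the table to inspect GetPartitioningSettings(), exactly as the assertions in the hunk above do.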
diff --git a/ydb/services/ydb/ydb_ut.cpp b/ydb/services/ydb/ydb_ut.cpp
index d52872122ae..2d370630799 100644
--- a/ydb/services/ydb/ydb_ut.cpp
+++ b/ydb/services/ydb/ydb_ut.cpp
@@ -4098,13 +4098,13 @@ Y_UNIT_TEST_SUITE(TTableProfileTests) {
{
auto tableBuilder = client.GetTableBuilder();
tableBuilder
- .AddNullableColumn("Data", EPrimitiveType::String)
- .AddNullableColumn("KeyHash", EPrimitiveType::Uint64)
- .AddNullableColumn("Version", EPrimitiveType::Uint32)
- .AddNullableColumn("Ratio", EPrimitiveType::Float)
- .AddNullableColumn("SubKey", EPrimitiveType::Int32)
- .AddNullableColumn("Key", EPrimitiveType::Utf8);
- tableBuilder.SetPrimaryKeyColumns({"KeyHash", "Key", "SubKey"});
+ .AddNullableColumn("Data", EPrimitiveType::String)
+ .AddNullableColumn("KeyHash", EPrimitiveType::Uint64)
+ .AddNullableColumn("Version", EPrimitiveType::Uint32)
+ .AddNullableColumn("Ratio", EPrimitiveType::Float)
+ .AddNullableColumn("SubKey", EPrimitiveType::Int32)
+ .AddNullableColumn("Key", EPrimitiveType::Utf8);
+ tableBuilder.SetPrimaryKeyColumns({"KeyHash", "Key", "SubKey"});
auto settings = TCreateTableSettings().PresetName("profile1");
auto res = session.CreateTable("/Root/ydb_ut_tenant/table-1", tableBuilder.Build(), settings).ExtractValueSync();
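Editor's note: the hunk above is a whitespace-only re-indentation; for context, the sketch below restates the create-with-profile-preset path it exercises. The wrapper name CreateProfiledTable and the header path are illustrative assumptions; the builder and settings calls are the ones shown in the diff.

// Sketch only, not introduced by this patch.
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>  // assumed header path

using namespace NYdb;
using namespace NYdb::NTable;

TStatus CreateProfiledTable(TTableClient& client, TSession& session, const TString& path) {
    auto tableBuilder = client.GetTableBuilder();
    tableBuilder
        .AddNullableColumn("Data", EPrimitiveType::String)
        .AddNullableColumn("KeyHash", EPrimitiveType::Uint64)
        .AddNullableColumn("Key", EPrimitiveType::Utf8)
        .AddNullableColumn("SubKey", EPrimitiveType::Int32);
    tableBuilder.SetPrimaryKeyColumns({"KeyHash", "Key", "SubKey"});
    // "profile1" refers to a server-side table profile preset, as in the test suite above.
    auto settings = TCreateTableSettings().PresetName("profile1");
    return session.CreateTable(path, tableBuilder.Build(), settings).ExtractValueSync();
}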
diff --git a/ydb/tests/library/common/types.py b/ydb/tests/library/common/types.py
index 01d5c1e2932..5ec564d65fa 100644
--- a/ydb/tests/library/common/types.py
+++ b/ydb/tests/library/common/types.py
@@ -19,9 +19,9 @@ class DeltaTypes(IntEnum):
UpdateExecutorInfo = 8,
SetCompactionPolicy = 9,
SetRoom = 10,
- SetFamily = 11,
- SetRedo = 12,
- SetTable = 13
+ SetFamily = 11,
+ SetRedo = 12,
+ SetTable = 13


@unique
diff --git a/ydb/tests/library/harness/util.py b/ydb/tests/library/harness/util.py
index 3a3d044c595..a5075ce0e8d 100644
--- a/ydb/tests/library/harness/util.py
+++ b/ydb/tests/library/harness/util.py
@@ -11,8 +11,8 @@ class LogLevels(enum.IntEnum):
WARN = 4,
NOTICE = 5,
INFO = 6,
- DEBUG = 7,
- TRACE = 8
+ DEBUG = 7,
+ TRACE = 8

@staticmethod
def from_string(val):