path: root/contrib/libs/grpc/src/cpp
author     Devtools Arcadia <arcadia-devtools@yandex-team.ru>    2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net>    2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/libs/grpc/src/cpp
download   ydb-1110808a9d39d4b808aef724c861a2e1a38d2a69.tar.gz
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/libs/grpc/src/cpp')
-rwxr-xr-x  contrib/libs/grpc/src/cpp/README.md | 180
-rw-r--r--  contrib/libs/grpc/src/cpp/client/channel_cc.cc | 252
-rw-r--r--  contrib/libs/grpc/src/cpp/client/client_callback.cc | 52
-rw-r--r--  contrib/libs/grpc/src/cpp/client/client_context.cc | 180
-rw-r--r--  contrib/libs/grpc/src/cpp/client/client_interceptor.cc | 44
-rw-r--r--  contrib/libs/grpc/src/cpp/client/create_channel.cc | 85
-rw-r--r--  contrib/libs/grpc/src/cpp/client/create_channel_internal.cc | 36
-rw-r--r--  contrib/libs/grpc/src/cpp/client/create_channel_internal.h | 40
-rw-r--r--  contrib/libs/grpc/src/cpp/client/create_channel_posix.cc | 77
-rw-r--r--  contrib/libs/grpc/src/cpp/client/credentials_cc.cc | 33
-rw-r--r--  contrib/libs/grpc/src/cpp/client/insecure_credentials.cc | 62
-rw-r--r--  contrib/libs/grpc/src/cpp/client/secure_credentials.cc | 513
-rw-r--r--  contrib/libs/grpc/src/cpp/client/secure_credentials.h | 114
-rw-r--r--  contrib/libs/grpc/src/cpp/codegen/codegen_init.cc | 30
-rw-r--r--  contrib/libs/grpc/src/cpp/common/.yandex_meta/licenses.list.txt | 28
-rw-r--r--  contrib/libs/grpc/src/cpp/common/alarm.cc | 161
-rw-r--r--  contrib/libs/grpc/src/cpp/common/alts_context.cc | 127
-rw-r--r--  contrib/libs/grpc/src/cpp/common/alts_util.cc | 82
-rw-r--r--  contrib/libs/grpc/src/cpp/common/auth_property_iterator.cc | 70
-rw-r--r--  contrib/libs/grpc/src/cpp/common/channel_arguments.cc | 217
-rw-r--r--  contrib/libs/grpc/src/cpp/common/channel_filter.cc | 98
-rw-r--r--  contrib/libs/grpc/src/cpp/common/channel_filter.h | 402
-rw-r--r--  contrib/libs/grpc/src/cpp/common/completion_queue_cc.cc | 99
-rw-r--r--  contrib/libs/grpc/src/cpp/common/core_codegen.cc | 240
-rw-r--r--  contrib/libs/grpc/src/cpp/common/insecure_create_auth_context.cc | 30
-rw-r--r--  contrib/libs/grpc/src/cpp/common/resource_quota_cc.cc | 40
-rw-r--r--  contrib/libs/grpc/src/cpp/common/rpc_method.cc | 21
-rw-r--r--  contrib/libs/grpc/src/cpp/common/secure_auth_context.cc | 97
-rw-r--r--  contrib/libs/grpc/src/cpp/common/secure_auth_context.h | 60
-rw-r--r--  contrib/libs/grpc/src/cpp/common/secure_channel_arguments.cc | 39
-rw-r--r--  contrib/libs/grpc/src/cpp/common/secure_create_auth_context.cc | 36
-rw-r--r--  contrib/libs/grpc/src/cpp/common/tls_credentials_options.cc | 343
-rw-r--r--  contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.cc | 149
-rw-r--r--  contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.h | 58
-rw-r--r--  contrib/libs/grpc/src/cpp/common/validate_service_config.cc | 40
-rw-r--r--  contrib/libs/grpc/src/cpp/common/version_cc.cc | 26
-rw-r--r--  contrib/libs/grpc/src/cpp/common/ya.make | 41
-rw-r--r--  contrib/libs/grpc/src/cpp/ext/proto_server_reflection.cc | 212
-rw-r--r--  contrib/libs/grpc/src/cpp/ext/proto_server_reflection.h | 80
-rw-r--r--  contrib/libs/grpc/src/cpp/ext/proto_server_reflection_plugin.cc | 83
-rw-r--r--  contrib/libs/grpc/src/cpp/server/async_generic_service.cc | 33
-rw-r--r--  contrib/libs/grpc/src/cpp/server/channel_argument_option.cc | 65
-rw-r--r--  contrib/libs/grpc/src/cpp/server/channelz/channelz_service.cc | 153
-rw-r--r--  contrib/libs/grpc/src/cpp/server/channelz/channelz_service.h | 64
-rw-r--r--  contrib/libs/grpc/src/cpp/server/channelz/channelz_service_plugin.cc | 88
-rw-r--r--  contrib/libs/grpc/src/cpp/server/create_default_thread_pool.cc | 44
-rw-r--r--  contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.cc | 124
-rw-r--r--  contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.h | 68
-rw-r--r--  contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.cc | 96
-rw-r--r--  contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.h | 71
-rw-r--r--  contrib/libs/grpc/src/cpp/server/health/default_health_check_service.cc | 504
-rw-r--r--  contrib/libs/grpc/src/cpp/server/health/default_health_check_service.h | 284
-rw-r--r--  contrib/libs/grpc/src/cpp/server/health/health_check_service.cc | 34
-rw-r--r--  contrib/libs/grpc/src/cpp/server/health/health_check_service_server_builder_option.cc | 35
-rw-r--r--  contrib/libs/grpc/src/cpp/server/insecure_server_credentials.cc | 44
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/constants.h | 81
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats.h | 36
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_linux.cc | 48
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_macos.cc | 45
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_unsupported.cc | 40
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_windows.cc | 55
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.cc | 338
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.h | 348
-rw-r--r--  contrib/libs/grpc/src/cpp/server/load_reporter/util.cc | 47
-rw-r--r--  contrib/libs/grpc/src/cpp/server/secure_server_credentials.cc | 155
-rw-r--r--  contrib/libs/grpc/src/cpp/server/secure_server_credentials.h | 79
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_builder.cc | 434
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_callback.cc | 84
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_cc.cc | 1340
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_context.cc | 361
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_credentials.cc | 25
-rw-r--r--  contrib/libs/grpc/src/cpp/server/server_posix.cc | 33
-rw-r--r--  contrib/libs/grpc/src/cpp/server/thread_pool_interface.h | 43
-rw-r--r--  contrib/libs/grpc/src/cpp/thread_manager/thread_manager.cc | 265
-rw-r--r--  contrib/libs/grpc/src/cpp/thread_manager/thread_manager.h | 181
-rw-r--r--  contrib/libs/grpc/src/cpp/util/byte_buffer_cc.cc | 46
-rw-r--r--  contrib/libs/grpc/src/cpp/util/error_details.cc | 50
-rw-r--r--  contrib/libs/grpc/src/cpp/util/status.cc | 26
-rw-r--r--  contrib/libs/grpc/src/cpp/util/string_ref.cc | 25
-rw-r--r--  contrib/libs/grpc/src/cpp/util/time_cc.cc | 75
80 files changed, 10544 insertions, 0 deletions
diff --git a/contrib/libs/grpc/src/cpp/README.md b/contrib/libs/grpc/src/cpp/README.md
new file mode 100755
index 0000000000..967a0a43b7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/README.md
@@ -0,0 +1,180 @@
+# gRPC C++
+
+This directory contains the C++ implementation of gRPC.
+
+# To start using gRPC C++
+
+This section describes how to add gRPC as a dependency to your C++ project.
+
+In the C++ world, there's no universally accepted standard for managing project dependencies.
+Therefore, gRPC supports several major build systems, which should satisfy most users.
+
+## Bazel
+
+Bazel is the primary build system used by the core gRPC development team. Bazel
+provides fast builds and easily handles dependencies that themselves support Bazel.
+
+To add gRPC as a dependency in Bazel:
+1. Determine the commit SHA for the gRPC release you want to use.
+2. Use the [http_archive](https://docs.bazel.build/versions/master/repo/http.html#http_archive) Bazel rule to include the gRPC source:
+ ```
+ http_archive(
+ name = "com_github_grpc_grpc",
+ urls = [
+ "https://github.com/grpc/grpc/archive/YOUR_GRPC_COMMIT_SHA.tar.gz",
+ ],
+ strip_prefix = "grpc-YOUR_GRPC_COMMIT_SHA",
+ )
+
+ load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
+
+ grpc_deps()
+ ```
+
+## CMake
+
+`cmake` is your best option if you cannot use Bazel. It supports building on Linux,
+macOS, and Windows (official support) but also has a good chance of working on
+other platforms (no promises!). `cmake` has good support for cross-compiling and
+can be used to target the Android platform.
+
+To build gRPC C++ from source, follow the [BUILDING guide](../../BUILDING.md).
+
+### find_package
+
+The canonical way to discover dependencies in CMake is the
+[`find_package` command](https://cmake.org/cmake/help/latest/command/find_package.html).
+
+```cmake
+find_package(gRPC CONFIG REQUIRED)
+add_executable(my_exe my_exe.cc)
+target_link_libraries(my_exe gRPC::grpc++)
+```
+[Full example](../../examples/cpp/helloworld/CMakeLists.txt)
+
+`find_package` can only find software that has already been installed on your
+system. In practice that means you'll need to install gRPC using cmake first.
+gRPC's cmake support provides the option to install gRPC either system-wide
+(not recommended) or under a directory prefix in a way that you can later
+easily use it with the `find_package(gRPC CONFIG REQUIRED)` command.
+
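+A minimal sketch of that workflow, assuming an install under a local prefix such
+as `$HOME/.local` (the option names and paths below are illustrative; the
+BUILDING guide is the authoritative reference):
+
+```sh
+# Build and install gRPC under a local prefix (sketch; adjust paths/options).
+cmake -S grpc -B grpc/cmake/build \
+      -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF \
+      -DCMAKE_INSTALL_PREFIX=$HOME/.local
+cmake --build grpc/cmake/build --target install
+
+# Configure your own project so find_package(gRPC CONFIG) can see that prefix.
+cmake -S . -B build -DCMAKE_PREFIX_PATH=$HOME/.local
+```
+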
+The following sections describe strategies to automatically build gRPC
+as part of your project.
+
+### FetchContent
+If you are using CMake v3.11 or newer, you should use CMake's
+[FetchContent module](https://cmake.org/cmake/help/latest/module/FetchContent.html).
+The first time you run CMake in a given build directory, FetchContent will
+clone the gRPC repository and its submodules. `FetchContent_MakeAvailable()`
+also sets up an `add_subdirectory()` rule for you. This causes gRPC to be
+built as part of your project.
+
+```cmake
+cmake_minimum_required(VERSION 3.15)
+project(my_project)
+
+include(FetchContent)
+FetchContent_Declare(
+ gRPC
+ GIT_REPOSITORY https://github.com/grpc/grpc
+  GIT_TAG        RELEASE_TAG_HERE # e.g. v1.28.0
+)
+set(FETCHCONTENT_QUIET OFF)
+FetchContent_MakeAvailable(gRPC)
+
+add_executable(my_exe my_exe.cc)
+target_link_libraries(my_exe grpc++)
+```
+
+Note that you need to
+[install the prerequisites](../../BUILDING.md#pre-requisites)
+before building gRPC.
+
+### git submodule
+If you cannot use FetchContent, another approach is to add the gRPC source tree
+to your project as a
+[git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules).
+You can then add it to your CMake project with `add_subdirectory()`.
+[Example](../../examples/cpp/helloworld/CMakeLists.txt)
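+
+A minimal sketch of the submodule approach (the `third_party/grpc` path and the
+`EXCLUDE_FROM_ALL` option are just illustrative choices):
+
+```sh
+# Add gRPC as a submodule and fetch its own submodules.
+git submodule add https://github.com/grpc/grpc third_party/grpc
+git submodule update --init --recursive
+
+# Then, in your CMakeLists.txt:
+#   add_subdirectory(third_party/grpc EXCLUDE_FROM_ALL)
+#   target_link_libraries(my_exe grpc++)
+```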
+
+### Support system-installed gRPC
+
+If your project builds gRPC itself, you should still consider the case where a
+user wants to build your software against a previously installed gRPC. Here's a
+code snippet showing how this is typically done.
+
+```cmake
+option(USE_SYSTEM_GRPC "Use system installed gRPC" OFF)
+if(USE_SYSTEM_GRPC)
+ # Find system-installed gRPC
+ find_package(gRPC CONFIG REQUIRED)
+else()
+ # Build gRPC using FetchContent or add_subdirectory
+endif()
+```
+
+[Full example](../../examples/cpp/helloworld/CMakeLists.txt)
+
+## pkg-config
+
+If your project does not use CMake (e.g. you're using `make` directly), you can
+first install gRPC C++ using CMake and have your non-CMake project rely on the
+`pkg-config` (`.pc`) files provided by the gRPC installation.
+[Example](../../test/distrib/cpp/run_distrib_test_cmake_pkgconfig.sh)
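+
+As a rough sketch, a non-CMake build might compile against an installed gRPC like
+this (assuming the install prefix is `$HOME/.local` and that it ships `grpc++`
+and `protobuf` `.pc` files; adjust names and paths to your installation):
+
+```sh
+# Point pkg-config at the install prefix, then compile and link directly.
+export PKG_CONFIG_PATH=$HOME/.local/lib/pkgconfig
+g++ -std=c++14 my_exe.cc -o my_exe \
+    $(pkg-config --cflags --libs grpc++ protobuf)
+```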
+
+## make (deprecated)
+
+The default choice for building on UNIX-based systems used to be `make`, but we no longer recommend it.
+You should use `bazel` or `cmake` instead.
+
+To install gRPC for C++ on your system using `make`, follow the [Building gRPC C++](../../BUILDING.md)
+instructions to build from source and then install locally using `make install`.
+This also installs the protocol buffer compiler `protoc` (if you don't have it already),
+and the C++ gRPC plugin for `protoc`.
+
+WARNING: After installing with `make install` there is no easy way to uninstall, which can cause issues
+if you later want to remove the gRPC and/or protobuf installation or upgrade to a newer version.
+
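+As a rough sketch of that deprecated path only (the tag and paths are
+placeholders; prefer `bazel` or `cmake` as noted above):
+
+```sh
+# Fetch the sources at a release tag, then build and install with make.
+git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
+cd grpc
+git submodule update --init
+make
+sudo make install
+```
+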
+## Packaging systems
+
+We do not officially support any packaging system for C++, but there are some community-maintained packages that are kept up-to-date
+and are known to work well. More contributions and support for popular packaging systems are welcome!
+
+### Install using vcpkg package
+gRPC is available using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+
+```
+# install vcpkg package manager on your system using the official instructions
+git clone https://github.com/Microsoft/vcpkg.git
+cd vcpkg
+./bootstrap-vcpkg.sh
+./vcpkg integrate install
+
+# install gRPC using vcpkg package manager
+vcpkg install grpc
+```
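+
+Once installed via vcpkg, a CMake project typically picks gRPC up through the
+vcpkg toolchain file (the path shown assumes the usual layout of a vcpkg
+checkout):
+
+```sh
+# Configure your project against the vcpkg-provided packages.
+cmake -S . -B build \
+      -DCMAKE_TOOLCHAIN_FILE=$(pwd)/vcpkg/scripts/buildsystems/vcpkg.cmake
+cmake --build build
+```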
+
+The gRPC port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
+
+
+## Examples & Additional Documentation
+
+You can find out how to build and run our simplest gRPC C++ example in our
+[C++ quick start](../../examples/cpp).
+
+For more detailed documentation on using gRPC in C++, see our main
+documentation site at [grpc.io](https://grpc.io), specifically:
+
+* [Overview](https://grpc.io/docs): An introduction to gRPC with a simple
+ Hello World example in all our supported languages, including C++.
+* [gRPC Basics - C++](https://grpc.io/docs/languages/cpp/basics):
+ A tutorial that steps you through creating a simple gRPC C++ example
+ application.
+* [Asynchronous Basics - C++](https://grpc.io/docs/languages/cpp/async):
+ A tutorial that shows you how to use gRPC C++'s asynchronous/non-blocking
+ APIs.
+
+
+# To start developing gRPC C++
+
+For instructions on how to build gRPC C++ from source, follow the [Building gRPC C++](../../BUILDING.md) instructions.
diff --git a/contrib/libs/grpc/src/cpp/client/channel_cc.cc b/contrib/libs/grpc/src/cpp/client/channel_cc.cc
new file mode 100644
index 0000000000..ac95c29efc
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/channel_cc.cc
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/channel.h>
+
+#include <cstring>
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpc/slice.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/completion_queue.h>
+#include <grpcpp/impl/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/impl/rpc_method.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/support/channel_arguments.h>
+#include <grpcpp/support/config.h>
+#include <grpcpp/support/status.h>
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+namespace grpc {
+
+static ::grpc::internal::GrpcLibraryInitializer g_gli_initializer;
+Channel::Channel(const TString& host, grpc_channel* channel,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators)
+ : host_(host), c_channel_(channel) {
+ interceptor_creators_ = std::move(interceptor_creators);
+ g_gli_initializer.summon();
+}
+
+Channel::~Channel() {
+ grpc_channel_destroy(c_channel_);
+ if (callback_cq_ != nullptr) {
+ callback_cq_->Shutdown();
+ }
+}
+
+namespace {
+
+inline grpc_slice SliceFromArray(const char* arr, size_t len) {
+ return g_core_codegen_interface->grpc_slice_from_copied_buffer(arr, len);
+}
+
+TString GetChannelInfoField(grpc_channel* channel,
+ grpc_channel_info* channel_info,
+ char*** channel_info_field) {
+ char* value = nullptr;
+ memset(channel_info, 0, sizeof(*channel_info));
+ *channel_info_field = &value;
+ grpc_channel_get_info(channel, channel_info);
+ if (value == nullptr) return "";
+ TString result = value;
+ gpr_free(value);
+ return result;
+}
+
+} // namespace
+
+TString Channel::GetLoadBalancingPolicyName() const {
+ grpc_channel_info channel_info;
+ return GetChannelInfoField(c_channel_, &channel_info,
+ &channel_info.lb_policy_name);
+}
+
+TString Channel::GetServiceConfigJSON() const {
+ grpc_channel_info channel_info;
+ return GetChannelInfoField(c_channel_, &channel_info,
+ &channel_info.service_config_json);
+}
+
+namespace experimental {
+
+void ChannelResetConnectionBackoff(Channel* channel) {
+ grpc_channel_reset_connect_backoff(channel->c_channel_);
+}
+
+} // namespace experimental
+
+::grpc::internal::Call Channel::CreateCallInternal(
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq, size_t interceptor_pos) {
+ const bool kRegistered = method.channel_tag() && context->authority().empty();
+ grpc_call* c_call = nullptr;
+ if (kRegistered) {
+ c_call = grpc_channel_create_registered_call(
+ c_channel_, context->propagate_from_call_,
+ context->propagation_options_.c_bitmask(), cq->cq(),
+ method.channel_tag(), context->raw_deadline(), nullptr);
+ } else {
+ const ::TString* host_str = nullptr;
+ if (!context->authority_.empty()) {
+ host_str = &context->authority_;
+ } else if (!host_.empty()) {
+ host_str = &host_;
+ }
+ grpc_slice method_slice =
+ SliceFromArray(method.name(), strlen(method.name()));
+ grpc_slice host_slice;
+ if (host_str != nullptr) {
+ host_slice = ::grpc::SliceFromCopiedString(*host_str);
+ }
+ c_call = grpc_channel_create_call(
+ c_channel_, context->propagate_from_call_,
+ context->propagation_options_.c_bitmask(), cq->cq(), method_slice,
+ host_str == nullptr ? nullptr : &host_slice, context->raw_deadline(),
+ nullptr);
+ grpc_slice_unref(method_slice);
+ if (host_str != nullptr) {
+ grpc_slice_unref(host_slice);
+ }
+ }
+ grpc_census_call_set_context(c_call, context->census_context());
+
+ // ClientRpcInfo should be set before call because set_call also checks
+ // whether the call has been cancelled, and if the call was cancelled, we
+ // should notify the interceptors too.
+ auto* info =
+ context->set_client_rpc_info(method.name(), method.method_type(), this,
+ interceptor_creators_, interceptor_pos);
+ context->set_call(c_call, shared_from_this());
+
+ return ::grpc::internal::Call(c_call, this, cq, info);
+}
+
+::grpc::internal::Call Channel::CreateCall(
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ CompletionQueue* cq) {
+ return CreateCallInternal(method, context, cq, 0);
+}
+
+void Channel::PerformOpsOnCall(::grpc::internal::CallOpSetInterface* ops,
+ ::grpc::internal::Call* call) {
+ ops->FillOps(
+ call); // Make a copy of call. It's fine since Call just has pointers
+}
+
+void* Channel::RegisterMethod(const char* method) {
+ return grpc_channel_register_call(
+ c_channel_, method, host_.empty() ? nullptr : host_.c_str(), nullptr);
+}
+
+grpc_connectivity_state Channel::GetState(bool try_to_connect) {
+ return grpc_channel_check_connectivity_state(c_channel_, try_to_connect);
+}
+
+namespace {
+
+class TagSaver final : public ::grpc::internal::CompletionQueueTag {
+ public:
+ explicit TagSaver(void* tag) : tag_(tag) {}
+ ~TagSaver() override {}
+ bool FinalizeResult(void** tag, bool* /*status*/) override {
+ *tag = tag_;
+ delete this;
+ return true;
+ }
+
+ private:
+ void* tag_;
+};
+
+} // namespace
+
+void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
+ gpr_timespec deadline,
+ ::grpc::CompletionQueue* cq, void* tag) {
+ TagSaver* tag_saver = new TagSaver(tag);
+ grpc_channel_watch_connectivity_state(c_channel_, last_observed, deadline,
+ cq->cq(), tag_saver);
+}
+
+bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
+ gpr_timespec deadline) {
+ ::grpc::CompletionQueue cq;
+ bool ok = false;
+ void* tag = nullptr;
+ NotifyOnStateChangeImpl(last_observed, deadline, &cq, nullptr);
+ cq.Next(&tag, &ok);
+ GPR_ASSERT(tag == nullptr);
+ return ok;
+}
+
+namespace {
+class ShutdownCallback : public grpc_experimental_completion_queue_functor {
+ public:
+ ShutdownCallback() {
+ functor_run = &ShutdownCallback::Run;
+ // Set inlineable to true since this callback is trivial and thus does not
+ // need to be run from the executor (triggering a thread hop). This should
+ // only be used by internal callbacks like this and not by user application
+ // code.
+ inlineable = true;
+ }
+ // TakeCQ takes ownership of the cq into the shutdown callback
+ // so that the shutdown callback will be responsible for destroying it
+ void TakeCQ(::grpc::CompletionQueue* cq) { cq_ = cq; }
+
+ // The Run function will get invoked by the completion queue library
+ // when the shutdown is actually complete
+ static void Run(grpc_experimental_completion_queue_functor* cb, int) {
+ auto* callback = static_cast<ShutdownCallback*>(cb);
+ delete callback->cq_;
+ delete callback;
+ }
+
+ private:
+ ::grpc::CompletionQueue* cq_ = nullptr;
+};
+} // namespace
+
+::grpc::CompletionQueue* Channel::CallbackCQ() {
+ // TODO(vjpai): Consider using a single global CQ for the default CQ
+ // if there is no explicit per-channel CQ registered
+ grpc::internal::MutexLock l(&mu_);
+ if (callback_cq_ == nullptr) {
+ auto* shutdown_callback = new ShutdownCallback;
+ callback_cq_ = new ::grpc::CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
+ shutdown_callback});
+
+ // Transfer ownership of the new cq to its own shutdown callback
+ shutdown_callback->TakeCQ(callback_cq_);
+ }
+ return callback_cq_;
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/client_callback.cc b/contrib/libs/grpc/src/cpp/client/client_callback.cc
new file mode 100644
index 0000000000..f4cbc97d34
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/client_callback.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/client_callback.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
+
+namespace grpc {
+namespace internal {
+
+void ClientReactor::InternalScheduleOnDone(grpc::Status s) {
+ // Unlike other uses of closure, do not Ref or Unref here since the reactor
+ // object's lifetime is controlled by user code.
+ grpc_core::ExecCtx exec_ctx;
+ struct ClosureWithArg {
+ grpc_closure closure;
+ ClientReactor* const reactor;
+ const grpc::Status status;
+ ClosureWithArg(ClientReactor* reactor_arg, grpc::Status s)
+ : reactor(reactor_arg), status(std::move(s)) {
+ GRPC_CLOSURE_INIT(&closure,
+ [](void* void_arg, grpc_error*) {
+ ClosureWithArg* arg =
+ static_cast<ClosureWithArg*>(void_arg);
+ arg->reactor->OnDone(arg->status);
+ delete arg;
+ },
+ this, grpc_schedule_on_exec_ctx);
+ }
+ };
+ ClosureWithArg* arg = new ClosureWithArg(this, std::move(s));
+ grpc_core::Executor::Run(&arg->closure, GRPC_ERROR_NONE);
+}
+
+} // namespace internal
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/client_context.cc b/contrib/libs/grpc/src/cpp/client/client_context.cc
new file mode 100644
index 0000000000..b75343d089
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/client_context.cc
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/client_context.h>
+
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include <grpcpp/impl/codegen/interceptor_common.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/time.h>
+
+namespace grpc {
+
+class Channel;
+
+class DefaultGlobalClientCallbacks final
+ : public ClientContext::GlobalCallbacks {
+ public:
+ ~DefaultGlobalClientCallbacks() override {}
+ void DefaultConstructor(ClientContext* /*context*/) override {}
+ void Destructor(ClientContext* /*context*/) override {}
+};
+
+static internal::GrpcLibraryInitializer g_gli_initializer;
+static DefaultGlobalClientCallbacks* g_default_client_callbacks =
+ new DefaultGlobalClientCallbacks();
+static ClientContext::GlobalCallbacks* g_client_callbacks =
+ g_default_client_callbacks;
+
+ClientContext::ClientContext()
+ : initial_metadata_received_(false),
+ wait_for_ready_(false),
+ wait_for_ready_explicitly_set_(false),
+ idempotent_(false),
+ cacheable_(false),
+ call_(nullptr),
+ call_canceled_(false),
+ deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)),
+ census_context_(nullptr),
+ propagate_from_call_(nullptr),
+ compression_algorithm_(GRPC_COMPRESS_NONE),
+ initial_metadata_corked_(false) {
+ g_client_callbacks->DefaultConstructor(this);
+}
+
+ClientContext::~ClientContext() {
+ if (call_) {
+ grpc_call_unref(call_);
+ }
+ g_client_callbacks->Destructor(this);
+}
+
+void ClientContext::set_credentials(
+ const std::shared_ptr<CallCredentials>& creds) {
+ creds_ = creds;
+ // If call_ is set, we have already created the call, and set the call
+ // credentials. This should only be done before we have started the batch
+ // for sending initial metadata.
+ if (creds_ != nullptr && call_ != nullptr) {
+ if (!creds_->ApplyToCall(call_)) {
+ SendCancelToInterceptors();
+ grpc_call_cancel_with_status(call_, GRPC_STATUS_CANCELLED,
+ "Failed to set credentials to rpc.",
+ nullptr);
+ }
+ }
+}
+
+std::unique_ptr<ClientContext> ClientContext::FromInternalServerContext(
+ const grpc::ServerContextBase& context, PropagationOptions options) {
+ std::unique_ptr<ClientContext> ctx(new ClientContext);
+ ctx->propagate_from_call_ = context.call_.call;
+ ctx->propagation_options_ = options;
+ return ctx;
+}
+
+std::unique_ptr<ClientContext> ClientContext::FromServerContext(
+ const grpc::ServerContext& server_context, PropagationOptions options) {
+ return FromInternalServerContext(server_context, options);
+}
+
+std::unique_ptr<ClientContext> ClientContext::FromCallbackServerContext(
+ const grpc::CallbackServerContext& server_context,
+ PropagationOptions options) {
+ return FromInternalServerContext(server_context, options);
+}
+
+void ClientContext::AddMetadata(const TString& meta_key,
+ const TString& meta_value) {
+ send_initial_metadata_.insert(std::make_pair(meta_key, meta_value));
+}
+
+void ClientContext::set_call(grpc_call* call,
+ const std::shared_ptr<Channel>& channel) {
+ internal::MutexLock lock(&mu_);
+ GPR_ASSERT(call_ == nullptr);
+ call_ = call;
+ channel_ = channel;
+ if (creds_ && !creds_->ApplyToCall(call_)) {
+ // TODO(yashykt): should interceptors also see this status?
+ SendCancelToInterceptors();
+ grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED,
+ "Failed to set credentials to rpc.", nullptr);
+ }
+ if (call_canceled_) {
+ SendCancelToInterceptors();
+ grpc_call_cancel(call_, nullptr);
+ }
+}
+
+void ClientContext::set_compression_algorithm(
+ grpc_compression_algorithm algorithm) {
+ compression_algorithm_ = algorithm;
+ const char* algorithm_name = nullptr;
+ if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
+ gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
+ algorithm);
+ abort();
+ }
+ GPR_ASSERT(algorithm_name != nullptr);
+ AddMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
+}
+
+void ClientContext::TryCancel() {
+ internal::MutexLock lock(&mu_);
+ if (call_) {
+ SendCancelToInterceptors();
+ grpc_call_cancel(call_, nullptr);
+ } else {
+ call_canceled_ = true;
+ }
+}
+
+void ClientContext::SendCancelToInterceptors() {
+ internal::CancelInterceptorBatchMethods cancel_methods;
+ for (size_t i = 0; i < rpc_info_.interceptors_.size(); i++) {
+ rpc_info_.RunInterceptor(&cancel_methods, i);
+ }
+}
+
+TString ClientContext::peer() const {
+ TString peer;
+ if (call_) {
+ char* c_peer = grpc_call_get_peer(call_);
+ peer = c_peer;
+ gpr_free(c_peer);
+ }
+ return peer;
+}
+
+void ClientContext::SetGlobalCallbacks(GlobalCallbacks* client_callbacks) {
+ GPR_ASSERT(g_client_callbacks == g_default_client_callbacks);
+ GPR_ASSERT(client_callbacks != nullptr);
+ GPR_ASSERT(client_callbacks != g_default_client_callbacks);
+ g_client_callbacks = client_callbacks;
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/client_interceptor.cc b/contrib/libs/grpc/src/cpp/client/client_interceptor.cc
new file mode 100644
index 0000000000..a91950cae2
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/client_interceptor.cc
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/client_interceptor.h>
+
+namespace grpc {
+
+namespace internal {
+experimental::ClientInterceptorFactoryInterface*
+ g_global_client_interceptor_factory = nullptr;
+}
+
+namespace experimental {
+void RegisterGlobalClientInterceptorFactory(
+ ClientInterceptorFactoryInterface* factory) {
+ if (internal::g_global_client_interceptor_factory != nullptr) {
+ GPR_ASSERT(false &&
+ "It is illegal to call RegisterGlobalClientInterceptorFactory "
+ "multiple times.");
+ }
+ internal::g_global_client_interceptor_factory = factory;
+}
+
+// For testing purposes only.
+void TestOnlyResetGlobalClientInterceptorFactory() {
+ internal::g_global_client_interceptor_factory = nullptr;
+}
+} // namespace experimental
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/create_channel.cc b/contrib/libs/grpc/src/cpp/client/create_channel.cc
new file mode 100644
index 0000000000..97327490ed
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/create_channel.cc
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/support/channel_arguments.h>
+
+#include "src/cpp/client/create_channel_internal.h"
+
+namespace grpc {
+std::shared_ptr<grpc::Channel> CreateChannel(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds) {
+ return CreateCustomChannel(target, creds, grpc::ChannelArguments());
+}
+
+std::shared_ptr<grpc::Channel> CreateCustomChannel(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args) {
+ grpc::GrpcLibraryCodegen
+ init_lib; // We need to call init in case of bad creds.
+ return creds ? creds->CreateChannelImpl(target, args)
+ : grpc::CreateChannelInternal(
+ "",
+ grpc_lame_client_channel_create(
+ nullptr, GRPC_STATUS_INVALID_ARGUMENT,
+ "Invalid credentials."),
+ std::vector<std::unique_ptr<
+ grpc::experimental::
+ ClientInterceptorFactoryInterface>>());
+}
+
+namespace experimental {
+/// Create a new \em custom \a Channel pointing to \a target with \a
+/// interceptors being invoked per call.
+///
+/// \warning For advanced use and testing ONLY. Override default channel
+/// arguments only if necessary.
+///
+/// \param target The URI of the endpoint to connect to.
+/// \param creds Credentials to use for the created channel. If it does not
+/// hold an object or is invalid, a lame channel (one on which all operations
+/// fail) is returned.
+/// \param args Options for channel creation.
+std::shared_ptr<grpc::Channel> CreateCustomChannelWithInterceptors(
+ const TString& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
+ grpc::GrpcLibraryCodegen
+ init_lib; // We need to call init in case of bad creds.
+ return creds ? creds->CreateChannelWithInterceptors(
+ target, args, std::move(interceptor_creators))
+ : grpc::CreateChannelInternal(
+ "",
+ grpc_lame_client_channel_create(
+ nullptr, GRPC_STATUS_INVALID_ARGUMENT,
+ "Invalid credentials."),
+ std::move(interceptor_creators));
+}
+} // namespace experimental
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/create_channel_internal.cc b/contrib/libs/grpc/src/cpp/client/create_channel_internal.cc
new file mode 100644
index 0000000000..da2a878a22
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/create_channel_internal.cc
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+
+#include <grpcpp/channel.h>
+
+struct grpc_channel;
+
+namespace grpc {
+
+std::shared_ptr<Channel> CreateChannelInternal(
+ const TString& host, grpc_channel* c_channel,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
+ return std::shared_ptr<Channel>(
+ new Channel(host, c_channel, std::move(interceptor_creators)));
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/create_channel_internal.h b/contrib/libs/grpc/src/cpp/client/create_channel_internal.h
new file mode 100644
index 0000000000..09d4e56b02
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/create_channel_internal.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
+#define GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
+
+#include <memory>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/support/config.h>
+
+struct grpc_channel;
+
+namespace grpc {
+
+std::shared_ptr<Channel> CreateChannelInternal(
+ const TString& host, grpc_channel* c_channel,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
diff --git a/contrib/libs/grpc/src/cpp/client/create_channel_posix.cc b/contrib/libs/grpc/src/cpp/client/create_channel_posix.cc
new file mode 100644
index 0000000000..db09eda8a6
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/create_channel_posix.cc
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/grpc_posix.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/support/channel_arguments.h>
+
+#include "src/cpp/client/create_channel_internal.h"
+
+namespace grpc {
+
+class ChannelArguments;
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+std::shared_ptr<Channel> CreateInsecureChannelFromFd(const TString& target,
+ int fd) {
+ grpc::internal::GrpcLibrary init_lib;
+ init_lib.init();
+ return CreateChannelInternal(
+ "", grpc_insecure_channel_create_from_fd(target.c_str(), fd, nullptr),
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
+}
+
+std::shared_ptr<Channel> CreateCustomInsecureChannelFromFd(
+ const TString& target, int fd, const grpc::ChannelArguments& args) {
+ internal::GrpcLibrary init_lib;
+ init_lib.init();
+ grpc_channel_args channel_args;
+ args.SetChannelArgs(&channel_args);
+ return CreateChannelInternal(
+ "",
+ grpc_insecure_channel_create_from_fd(target.c_str(), fd, &channel_args),
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
+}
+
+namespace experimental {
+
+std::shared_ptr<Channel> CreateCustomInsecureChannelWithInterceptorsFromFd(
+ const TString& target, int fd, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
+ grpc::internal::GrpcLibrary init_lib;
+ init_lib.init();
+ grpc_channel_args channel_args;
+ args.SetChannelArgs(&channel_args);
+ return CreateChannelInternal(
+ "",
+ grpc_insecure_channel_create_from_fd(target.c_str(), fd, &channel_args),
+ std::move(interceptor_creators));
+}
+
+} // namespace experimental
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/credentials_cc.cc b/contrib/libs/grpc/src/cpp/client/credentials_cc.cc
new file mode 100644
index 0000000000..9dfb2f491c
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/credentials_cc.cc
@@ -0,0 +1,33 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/security/credentials.h>
+
+namespace grpc {
+
+static grpc::internal::GrpcLibraryInitializer g_gli_initializer;
+ChannelCredentials::ChannelCredentials() { g_gli_initializer.summon(); }
+
+ChannelCredentials::~ChannelCredentials() {}
+
+CallCredentials::CallCredentials() { g_gli_initializer.summon(); }
+
+CallCredentials::~CallCredentials() {}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc b/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc
new file mode 100644
index 0000000000..e5bafff70a
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#include <grpcpp/security/credentials.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/support/channel_arguments.h>
+#include <grpcpp/support/config.h>
+#include "src/cpp/client/create_channel_internal.h"
+
+namespace grpc {
+
+namespace {
+class InsecureChannelCredentialsImpl final : public ChannelCredentials {
+ public:
+ std::shared_ptr<Channel> CreateChannelImpl(
+ const TString& target, const ChannelArguments& args) override {
+ return CreateChannelWithInterceptors(
+ target, args,
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>());
+ }
+
+ std::shared_ptr<Channel> CreateChannelWithInterceptors(
+ const TString& target, const ChannelArguments& args,
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) override {
+ grpc_channel_args channel_args;
+ args.SetChannelArgs(&channel_args);
+ return ::grpc::CreateChannelInternal(
+ "",
+ grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr),
+ std::move(interceptor_creators));
+ }
+
+ SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
+};
+} // namespace
+
+std::shared_ptr<ChannelCredentials> InsecureChannelCredentials() {
+ return std::shared_ptr<ChannelCredentials>(
+ new InsecureChannelCredentialsImpl());
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/client/secure_credentials.cc b/contrib/libs/grpc/src/cpp/client/secure_credentials.cc
new file mode 100644
index 0000000000..0f6db3caa5
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/secure_credentials.cc
@@ -0,0 +1,513 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/client/secure_credentials.h"
+
+#include <grpc/impl/codegen/slice.h>
+#include <grpc/slice.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/support/channel_arguments.h>
+
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/load_file.h"
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/security/transport/auth_filters.h"
+#include "src/core/lib/security/util/json_util.h"
+#include "src/cpp/client/create_channel_internal.h"
+#include "src/cpp/common/secure_auth_context.h"
+
+namespace grpc {
+
+static grpc::internal::GrpcLibraryInitializer g_gli_initializer;
+SecureChannelCredentials::SecureChannelCredentials(
+ grpc_channel_credentials* c_creds)
+ : c_creds_(c_creds) {
+ g_gli_initializer.summon();
+}
+
+std::shared_ptr<Channel> SecureChannelCredentials::CreateChannelImpl(
+ const TString& target, const ChannelArguments& args) {
+ return CreateChannelWithInterceptors(
+ target, args,
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>());
+}
+
+std::shared_ptr<Channel>
+SecureChannelCredentials::CreateChannelWithInterceptors(
+ const TString& target, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
+ grpc_channel_args channel_args;
+ args.SetChannelArgs(&channel_args);
+ return ::grpc::CreateChannelInternal(
+ args.GetSslTargetNameOverride(),
+ grpc_secure_channel_create(c_creds_, target.c_str(), &channel_args,
+ nullptr),
+ std::move(interceptor_creators));
+}
+
+SecureCallCredentials::SecureCallCredentials(grpc_call_credentials* c_creds)
+ : c_creds_(c_creds) {
+ g_gli_initializer.summon();
+}
+
+bool SecureCallCredentials::ApplyToCall(grpc_call* call) {
+ return grpc_call_set_credentials(call, c_creds_) == GRPC_CALL_OK;
+}
+
+namespace {
+std::shared_ptr<ChannelCredentials> WrapChannelCredentials(
+ grpc_channel_credentials* creds) {
+ return creds == nullptr ? nullptr
+ : std::shared_ptr<ChannelCredentials>(
+ new SecureChannelCredentials(creds));
+}
+
+std::shared_ptr<CallCredentials> WrapCallCredentials(
+ grpc_call_credentials* creds) {
+ return creds == nullptr ? nullptr
+ : std::shared_ptr<CallCredentials>(
+ new SecureCallCredentials(creds));
+}
+} // namespace
+
+std::shared_ptr<ChannelCredentials> GoogleDefaultCredentials() {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapChannelCredentials(
+ grpc_google_default_credentials_create(nullptr));
+}
+
+// Builds SSL Credentials given SSL specific options
+std::shared_ptr<ChannelCredentials> SslCredentials(
+ const SslCredentialsOptions& options) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {
+ options.pem_private_key.c_str(), options.pem_cert_chain.c_str()};
+
+ grpc_channel_credentials* c_creds = grpc_ssl_credentials_create(
+ options.pem_root_certs.empty() ? nullptr : options.pem_root_certs.c_str(),
+ options.pem_private_key.empty() ? nullptr : &pem_key_cert_pair, nullptr,
+ nullptr);
+ return WrapChannelCredentials(c_creds);
+}
+
+namespace experimental {
+
+namespace {
+
+void ClearStsCredentialsOptions(StsCredentialsOptions* options) {
+ if (options == nullptr) return;
+ options->token_exchange_service_uri.clear();
+ options->resource.clear();
+ options->audience.clear();
+ options->scope.clear();
+ options->requested_token_type.clear();
+ options->subject_token_path.clear();
+ options->subject_token_type.clear();
+ options->actor_token_path.clear();
+ options->actor_token_type.clear();
+}
+
+} // namespace
+
+// Builds STS credentials options from JSON.
+grpc::Status StsCredentialsOptionsFromJson(const TString& json_string,
+ StsCredentialsOptions* options) {
+ if (options == nullptr) {
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "options cannot be nullptr.");
+ }
+ ClearStsCredentialsOptions(options);
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_core::Json json = grpc_core::Json::Parse(json_string.c_str(), &error);
+ if (error != GRPC_ERROR_NONE ||
+ json.type() != grpc_core::Json::Type::OBJECT) {
+ GRPC_ERROR_UNREF(error);
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Invalid json.");
+ }
+
+ // Required fields.
+ const char* value = grpc_json_get_string_property(
+ json, "token_exchange_service_uri", nullptr);
+ if (value == nullptr) {
+ ClearStsCredentialsOptions(options);
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "token_exchange_service_uri must be specified.");
+ }
+ options->token_exchange_service_uri.assign(value);
+ value = grpc_json_get_string_property(json, "subject_token_path", nullptr);
+ if (value == nullptr) {
+ ClearStsCredentialsOptions(options);
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "subject_token_path must be specified.");
+ }
+ options->subject_token_path.assign(value);
+ value = grpc_json_get_string_property(json, "subject_token_type", nullptr);
+ if (value == nullptr) {
+ ClearStsCredentialsOptions(options);
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "subject_token_type must be specified.");
+ }
+ options->subject_token_type.assign(value);
+
+ // Optional fields.
+ value = grpc_json_get_string_property(json, "resource", nullptr);
+ if (value != nullptr) options->resource.assign(value);
+ value = grpc_json_get_string_property(json, "audience", nullptr);
+ if (value != nullptr) options->audience.assign(value);
+ value = grpc_json_get_string_property(json, "scope", nullptr);
+ if (value != nullptr) options->scope.assign(value);
+ value = grpc_json_get_string_property(json, "requested_token_type", nullptr);
+ if (value != nullptr) options->requested_token_type.assign(value);
+ value = grpc_json_get_string_property(json, "actor_token_path", nullptr);
+ if (value != nullptr) options->actor_token_path.assign(value);
+ value = grpc_json_get_string_property(json, "actor_token_type", nullptr);
+ if (value != nullptr) options->actor_token_type.assign(value);
+
+ return grpc::Status();
+}
+
+// Builds STS credentials Options from the $STS_CREDENTIALS env var.
+grpc::Status StsCredentialsOptionsFromEnv(StsCredentialsOptions* options) {
+ if (options == nullptr) {
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "options cannot be nullptr.");
+ }
+ ClearStsCredentialsOptions(options);
+ grpc_slice json_string = grpc_empty_slice();
+ char* sts_creds_path = gpr_getenv("STS_CREDENTIALS");
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc::Status status;
+ auto cleanup = [&json_string, &sts_creds_path, &error, &status]() {
+ grpc_slice_unref_internal(json_string);
+ gpr_free(sts_creds_path);
+ GRPC_ERROR_UNREF(error);
+ return status;
+ };
+
+ if (sts_creds_path == nullptr) {
+ status = grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "STS_CREDENTIALS environment variable not set.");
+ return cleanup();
+ }
+ error = grpc_load_file(sts_creds_path, 1, &json_string);
+ if (error != GRPC_ERROR_NONE) {
+ status =
+ grpc::Status(grpc::StatusCode::NOT_FOUND, grpc_error_string(error));
+ return cleanup();
+ }
+ status = StsCredentialsOptionsFromJson(
+ reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(json_string)),
+ options);
+ return cleanup();
+}
+
+// C++ to Core STS Credentials options.
+grpc_sts_credentials_options StsCredentialsCppToCoreOptions(
+ const StsCredentialsOptions& options) {
+ grpc_sts_credentials_options opts;
+ memset(&opts, 0, sizeof(opts));
+ opts.token_exchange_service_uri = options.token_exchange_service_uri.c_str();
+ opts.resource = options.resource.c_str();
+ opts.audience = options.audience.c_str();
+ opts.scope = options.scope.c_str();
+ opts.requested_token_type = options.requested_token_type.c_str();
+ opts.subject_token_path = options.subject_token_path.c_str();
+ opts.subject_token_type = options.subject_token_type.c_str();
+ opts.actor_token_path = options.actor_token_path.c_str();
+ opts.actor_token_type = options.actor_token_type.c_str();
+ return opts;
+}
+
+// Builds STS credentials.
+std::shared_ptr<CallCredentials> StsCredentials(
+ const StsCredentialsOptions& options) {
+ auto opts = StsCredentialsCppToCoreOptions(options);
+ return WrapCallCredentials(grpc_sts_credentials_create(&opts, nullptr));
+}
+
+std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin,
+ grpc_security_level min_security_level) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ const char* type = plugin->GetType();
+ grpc::MetadataCredentialsPluginWrapper* wrapper =
+ new grpc::MetadataCredentialsPluginWrapper(std::move(plugin));
+ grpc_metadata_credentials_plugin c_plugin = {
+ grpc::MetadataCredentialsPluginWrapper::GetMetadata,
+ grpc::MetadataCredentialsPluginWrapper::DebugString,
+ grpc::MetadataCredentialsPluginWrapper::Destroy, wrapper, type};
+ return WrapCallCredentials(grpc_metadata_credentials_create_from_plugin(
+ c_plugin, min_security_level, nullptr));
+}
+
+// Builds ALTS Credentials given ALTS specific options
+std::shared_ptr<ChannelCredentials> AltsCredentials(
+ const AltsCredentialsOptions& options) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ grpc_alts_credentials_options* c_options =
+ grpc_alts_credentials_client_options_create();
+ for (const auto& service_account : options.target_service_accounts) {
+ grpc_alts_credentials_client_options_add_target_service_account(
+ c_options, service_account.c_str());
+ }
+ grpc_channel_credentials* c_creds = grpc_alts_credentials_create(c_options);
+ grpc_alts_credentials_options_destroy(c_options);
+ return WrapChannelCredentials(c_creds);
+}
+
+// Builds Local Credentials
+std::shared_ptr<ChannelCredentials> LocalCredentials(
+ grpc_local_connect_type type) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapChannelCredentials(grpc_local_credentials_create(type));
+}
+
+// Builds TLS Credentials given TLS options.
+std::shared_ptr<ChannelCredentials> TlsCredentials(
+ const TlsCredentialsOptions& options) {
+ return WrapChannelCredentials(
+ grpc_tls_credentials_create(options.c_credentials_options()));
+}
+
+} // namespace experimental
+
+// Builds credentials for use when running in GCE
+std::shared_ptr<CallCredentials> GoogleComputeEngineCredentials() {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapCallCredentials(
+ grpc_google_compute_engine_credentials_create(nullptr));
+}
+
+// Builds JWT credentials.
+std::shared_ptr<CallCredentials> ServiceAccountJWTAccessCredentials(
+ const TString& json_key, long token_lifetime_seconds) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ if (token_lifetime_seconds <= 0) {
+ gpr_log(GPR_ERROR,
+ "Trying to create JWTCredentials with non-positive lifetime");
+ return WrapCallCredentials(nullptr);
+ }
+ gpr_timespec lifetime =
+ gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN);
+ return WrapCallCredentials(grpc_service_account_jwt_access_credentials_create(
+ json_key.c_str(), lifetime, nullptr));
+}
+
+// Builds refresh token credentials.
+std::shared_ptr<CallCredentials> GoogleRefreshTokenCredentials(
+ const TString& json_refresh_token) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapCallCredentials(grpc_google_refresh_token_credentials_create(
+ json_refresh_token.c_str(), nullptr));
+}
+
+// Builds access token credentials.
+std::shared_ptr<CallCredentials> AccessTokenCredentials(
+ const TString& access_token) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapCallCredentials(
+ grpc_access_token_credentials_create(access_token.c_str(), nullptr));
+}
+
+// Builds IAM credentials.
+std::shared_ptr<CallCredentials> GoogleIAMCredentials(
+ const TString& authorization_token,
+ const TString& authority_selector) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ return WrapCallCredentials(grpc_google_iam_credentials_create(
+ authorization_token.c_str(), authority_selector.c_str(), nullptr));
+}
+
+// Combines one channel credentials and one call credentials into a channel
+// composite credentials.
+std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
+ const std::shared_ptr<ChannelCredentials>& channel_creds,
+ const std::shared_ptr<CallCredentials>& call_creds) {
+  // Note that we are not saving shared_ptrs to the two credentials passed in
+  // here. This is OK because the underlying C objects (i.e., channel_creds and
+  // call_creds) passed into grpc_composite_credentials_create will have their
+  // refcounts incremented.
+ SecureChannelCredentials* s_channel_creds =
+ channel_creds->AsSecureCredentials();
+ SecureCallCredentials* s_call_creds = call_creds->AsSecureCredentials();
+ if (s_channel_creds && s_call_creds) {
+ return WrapChannelCredentials(grpc_composite_channel_credentials_create(
+ s_channel_creds->GetRawCreds(), s_call_creds->GetRawCreds(), nullptr));
+ }
+ return nullptr;
+}
+
+std::shared_ptr<CallCredentials> CompositeCallCredentials(
+ const std::shared_ptr<CallCredentials>& creds1,
+ const std::shared_ptr<CallCredentials>& creds2) {
+ SecureCallCredentials* s_creds1 = creds1->AsSecureCredentials();
+ SecureCallCredentials* s_creds2 = creds2->AsSecureCredentials();
+ if (s_creds1 != nullptr && s_creds2 != nullptr) {
+ return WrapCallCredentials(grpc_composite_call_credentials_create(
+ s_creds1->GetRawCreds(), s_creds2->GetRawCreds(), nullptr));
+ }
+ return nullptr;
+}
+
+std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin) {
+ grpc::GrpcLibraryCodegen init; // To call grpc_init().
+ const char* type = plugin->GetType();
+ grpc::MetadataCredentialsPluginWrapper* wrapper =
+ new grpc::MetadataCredentialsPluginWrapper(std::move(plugin));
+ grpc_metadata_credentials_plugin c_plugin = {
+ grpc::MetadataCredentialsPluginWrapper::GetMetadata,
+ grpc::MetadataCredentialsPluginWrapper::DebugString,
+ grpc::MetadataCredentialsPluginWrapper::Destroy, wrapper, type};
+ return WrapCallCredentials(grpc_metadata_credentials_create_from_plugin(
+ c_plugin, GRPC_PRIVACY_AND_INTEGRITY, nullptr));
+}
+
+namespace {
+void DeleteWrapper(void* wrapper, grpc_error* /*ignored*/) {
+ MetadataCredentialsPluginWrapper* w =
+ static_cast<MetadataCredentialsPluginWrapper*>(wrapper);
+ delete w;
+}
+} // namespace
+
+char* MetadataCredentialsPluginWrapper::DebugString(void* wrapper) {
+ GPR_ASSERT(wrapper);
+ MetadataCredentialsPluginWrapper* w =
+ static_cast<MetadataCredentialsPluginWrapper*>(wrapper);
+ return gpr_strdup(w->plugin_->DebugString().c_str());
+}
+
+void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
+ if (wrapper == nullptr) return;
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
+ grpc_core::Executor::Run(GRPC_CLOSURE_CREATE(DeleteWrapper, wrapper, nullptr),
+ GRPC_ERROR_NONE);
+}
+
+int MetadataCredentialsPluginWrapper::GetMetadata(
+ void* wrapper, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status,
+ const char** error_details) {
+ GPR_ASSERT(wrapper);
+ MetadataCredentialsPluginWrapper* w =
+ static_cast<MetadataCredentialsPluginWrapper*>(wrapper);
+ if (!w->plugin_) {
+ *num_creds_md = 0;
+ *status = GRPC_STATUS_OK;
+ *error_details = nullptr;
+ return 1;
+ }
+ if (w->plugin_->IsBlocking()) {
+ // The internals of context may be destroyed if GetMetadata is cancelled.
+ // Make a copy for InvokePlugin.
+ grpc_auth_metadata_context context_copy = grpc_auth_metadata_context();
+ grpc_auth_metadata_context_copy(&context, &context_copy);
+ // Asynchronous return.
+ w->thread_pool_->Add([w, context_copy, cb, user_data]() mutable {
+ w->MetadataCredentialsPluginWrapper::InvokePlugin(
+ context_copy, cb, user_data, nullptr, nullptr, nullptr, nullptr);
+ grpc_auth_metadata_context_reset(&context_copy);
+ });
+ return 0;
+ } else {
+ // Synchronous return.
+ w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status,
+ error_details);
+ return 1;
+ }
+}
+
+namespace {
+
+void UnrefMetadata(const std::vector<grpc_metadata>& md) {
+ for (const auto& metadatum : md) {
+ grpc_slice_unref(metadatum.key);
+ grpc_slice_unref(metadatum.value);
+ }
+}
+
+} // namespace
+
+void MetadataCredentialsPluginWrapper::InvokePlugin(
+ grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb,
+ void* user_data, grpc_metadata creds_md[4], size_t* num_creds_md,
+ grpc_status_code* status_code, const char** error_details) {
+ std::multimap<TString, TString> metadata;
+
+ // const_cast is safe since the SecureAuthContext only inc/dec the refcount
+ // and the object is passed as a const ref to plugin_->GetMetadata.
+ SecureAuthContext cpp_channel_auth_context(
+ const_cast<grpc_auth_context*>(context.channel_auth_context));
+
+ Status status = plugin_->GetMetadata(context.service_url, context.method_name,
+ cpp_channel_auth_context, &metadata);
+ std::vector<grpc_metadata> md;
+ for (auto& metadatum : metadata) {
+ grpc_metadata md_entry;
+ md_entry.key = SliceFromCopiedString(metadatum.first);
+ md_entry.value = SliceFromCopiedString(metadatum.second);
+ md_entry.flags = 0;
+ md.push_back(md_entry);
+ }
+ if (creds_md != nullptr) {
+ // Synchronous return.
+ if (md.size() > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
+ *num_creds_md = 0;
+ *status_code = GRPC_STATUS_INTERNAL;
+ *error_details = gpr_strdup(
+ "blocking plugin credentials returned too many metadata keys");
+ UnrefMetadata(md);
+ } else {
+ for (const auto& elem : md) {
+ creds_md[*num_creds_md].key = elem.key;
+ creds_md[*num_creds_md].value = elem.value;
+ creds_md[*num_creds_md].flags = elem.flags;
+ ++(*num_creds_md);
+ }
+ *status_code = static_cast<grpc_status_code>(status.error_code());
+ *error_details =
+ status.ok() ? nullptr : gpr_strdup(status.error_message().c_str());
+ }
+ } else {
+ // Asynchronous return.
+ cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
+ static_cast<grpc_status_code>(status.error_code()),
+ status.error_message().c_str());
+ UnrefMetadata(md);
+ }
+}
+
+MetadataCredentialsPluginWrapper::MetadataCredentialsPluginWrapper(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin)
+ : thread_pool_(CreateDefaultThreadPool()), plugin_(std::move(plugin)) {}
+
+} // namespace grpc
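The wrapper above bridges the public MetadataCredentialsPlugin interface to the C plugin callbacks (GetMetadata, DebugString, Destroy). A minimal client-side sketch of how that interface is typically used follows; HeaderTokenPlugin, the x-custom-token header and the target address are illustrative only and not part of this change, and grpc::string resolves to TString in this vendored tree.

    #include <memory>
    #include <utility>

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/security/credentials.h>

    // Hypothetical plugin that attaches a static token to every call.
    class HeaderTokenPlugin : public grpc::MetadataCredentialsPlugin {
     public:
      explicit HeaderTokenPlugin(grpc::string token) : token_(std::move(token)) {}
      bool IsBlocking() const override { return false; }  // runs inline, no thread pool
      const char* GetType() const override { return "HeaderTokenPlugin"; }
      grpc::Status GetMetadata(
          grpc::string_ref /*service_url*/, grpc::string_ref /*method_name*/,
          const grpc::AuthContext& /*channel_auth_context*/,
          std::multimap<grpc::string, grpc::string>* metadata) override {
        metadata->insert({"x-custom-token", token_});  // metadata keys must be lowercase
        return grpc::Status::OK;
      }

     private:
      grpc::string token_;
    };

    std::shared_ptr<grpc::Channel> MakeAuthenticatedChannel() {
      auto call_creds = grpc::MetadataCredentialsFromPlugin(
          std::unique_ptr<grpc::MetadataCredentialsPlugin>(
              new HeaderTokenPlugin("example-token")));
      auto channel_creds = grpc::CompositeChannelCredentials(
          grpc::SslCredentials(grpc::SslCredentialsOptions()), call_creds);
      return grpc::CreateChannel("myservice.example.com:443", channel_creds);
    }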
diff --git a/contrib/libs/grpc/src/cpp/client/secure_credentials.h b/contrib/libs/grpc/src/cpp/client/secure_credentials.h
new file mode 100644
index 0000000000..4fc79346bf
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/client/secure_credentials.h
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_CLIENT_SECURE_CREDENTIALS_H
+#define GRPC_INTERNAL_CPP_CLIENT_SECURE_CREDENTIALS_H
+
+#include <grpc/grpc_security.h>
+
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/tls_credentials_options.h>
+#include <grpcpp/support/config.h>
+
+#include "y_absl/strings/str_cat.h"
+#include "src/core/lib/security/credentials/credentials.h"
+#include "src/cpp/server/thread_pool_interface.h"
+
+namespace grpc {
+
+class Channel;
+
+class SecureChannelCredentials final : public ChannelCredentials {
+ public:
+ explicit SecureChannelCredentials(grpc_channel_credentials* c_creds);
+ ~SecureChannelCredentials() {
+ if (c_creds_ != nullptr) c_creds_->Unref();
+ }
+ grpc_channel_credentials* GetRawCreds() { return c_creds_; }
+
+ std::shared_ptr<Channel> CreateChannelImpl(
+ const TString& target, const ChannelArguments& args) override;
+
+ SecureChannelCredentials* AsSecureCredentials() override { return this; }
+
+ private:
+ std::shared_ptr<Channel> CreateChannelWithInterceptors(
+ const TString& target, const ChannelArguments& args,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) override;
+ grpc_channel_credentials* const c_creds_;
+};
+
+class SecureCallCredentials final : public CallCredentials {
+ public:
+ explicit SecureCallCredentials(grpc_call_credentials* c_creds);
+ ~SecureCallCredentials() {
+ if (c_creds_ != nullptr) c_creds_->Unref();
+ }
+ grpc_call_credentials* GetRawCreds() { return c_creds_; }
+
+ bool ApplyToCall(grpc_call* call) override;
+ SecureCallCredentials* AsSecureCredentials() override { return this; }
+ TString DebugString() override {
+ return y_absl::StrCat("SecureCallCredentials{",
+ TString(c_creds_->debug_string()), "}");
+ }
+
+ private:
+ grpc_call_credentials* const c_creds_;
+};
+
+namespace experimental {
+
+// Transforms C++ STS Credentials options to core options. The pointers of the
+// resulting core options point to memory held by the C++ options, so the C++
+// options need to be kept alive until after the core credentials creation.
+grpc_sts_credentials_options StsCredentialsCppToCoreOptions(
+ const StsCredentialsOptions& options);
+
+} // namespace experimental
+
+class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen {
+ public:
+ static void Destroy(void* wrapper);
+ static int GetMetadata(
+ void* wrapper, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status,
+ const char** error_details);
+ static char* DebugString(void* wrapper);
+
+ explicit MetadataCredentialsPluginWrapper(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin);
+
+ private:
+ void InvokePlugin(
+ grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status_code,
+ const char** error_details);
+ std::unique_ptr<ThreadPoolInterface> thread_pool_;
+ std::unique_ptr<MetadataCredentialsPlugin> plugin_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_CLIENT_SECURE_CREDENTIALS_H
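StsCredentialsCppToCoreOptions only borrows the string storage owned by the C++ options, as the comment above notes. Application code would normally just call grpc::experimental::StsCredentials() and never touch the conversion directly; the sketch below of the lifetime rule is for internal callers of this header, and the URI value is a placeholder.

    #include <grpc/grpc_security.h>
    #include <grpcpp/security/credentials.h>
    #include "src/cpp/client/secure_credentials.h"

    grpc_call_credentials* CreateCoreStsCreds() {
      grpc::experimental::StsCredentialsOptions cpp_options;
      cpp_options.token_exchange_service_uri = "https://sts.example.com/token";
      // ... fill the remaining fields as needed ...
      grpc_sts_credentials_options core_options =
          grpc::experimental::StsCredentialsCppToCoreOptions(cpp_options);
      // core_options points into cpp_options' strings, so cpp_options must stay
      // alive until grpc_sts_credentials_create() has returned.
      return grpc_sts_credentials_create(&core_options, nullptr);
    }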
diff --git a/contrib/libs/grpc/src/cpp/codegen/codegen_init.cc b/contrib/libs/grpc/src/cpp/codegen/codegen_init.cc
new file mode 100644
index 0000000000..e1e47cbb17
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/codegen/codegen_init.cc
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+
+/// Null-initializes the global gRPC variables for the codegen library. These
+/// stay null in the absence of the grpc++ library. In this case, no gRPC
+/// features such as the ability to perform calls will be available. Trying to
+/// perform them would result in a segmentation fault when trying to dereference
+/// the following nulled globals. These should be associated with actual
+/// implementations as part of the instantiation of a
+/// \a grpc::GrpcLibraryInitializer variable.
+
+grpc::CoreCodegenInterface* grpc::g_core_codegen_interface;
+grpc::GrpcLibraryInterface* grpc::g_glip;
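The globals above are populated by the grpc++ library; the same change uses the pattern below in alarm.cc and completion_queue_cc.cc to make that happen before the globals are dereferenced. A sketch, with EnsureGrpcCodegenBound being a hypothetical helper name:

    #include <grpcpp/impl/grpc_library.h>

    // Constructing a GrpcLibraryInitializer binds g_core_codegen_interface and
    // g_glip to the real implementations provided by grpc++.
    static grpc::internal::GrpcLibraryInitializer g_gli_initializer;

    void EnsureGrpcCodegenBound() {
      // summon() forces the static above to be constructed and referenced so it
      // is not dropped by the linker; after this, codegen calls are safe.
      g_gli_initializer.summon();
    }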
diff --git a/contrib/libs/grpc/src/cpp/common/.yandex_meta/licenses.list.txt b/contrib/libs/grpc/src/cpp/common/.yandex_meta/licenses.list.txt
new file mode 100644
index 0000000000..a5d42d5b53
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/.yandex_meta/licenses.list.txt
@@ -0,0 +1,28 @@
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================COPYRIGHT====================
+ * Copyright 2015 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2016 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2018 gRPC authors.
+
+
+====================COPYRIGHT====================
+# Copyright 2019 gRPC authors.
diff --git a/contrib/libs/grpc/src/cpp/common/alarm.cc b/contrib/libs/grpc/src/cpp/common/alarm.cc
new file mode 100644
index 0000000000..a2612874b2
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/alarm.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/alarm.h>
+
+#include <memory>
+
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+#include <grpcpp/completion_queue.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+#include "src/core/lib/debug/trace.h"
+
+namespace grpc {
+
+namespace internal {
+class AlarmImpl : public ::grpc::internal::CompletionQueueTag {
+ public:
+ AlarmImpl() : cq_(nullptr), tag_(nullptr) {
+ gpr_ref_init(&refs_, 1);
+ grpc_timer_init_unset(&timer_);
+ }
+ ~AlarmImpl() {}
+ bool FinalizeResult(void** tag, bool* /*status*/) override {
+ *tag = tag_;
+ Unref();
+ return true;
+ }
+ void Set(::grpc::CompletionQueue* cq, gpr_timespec deadline, void* tag) {
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
+ GRPC_CQ_INTERNAL_REF(cq->cq(), "alarm");
+ cq_ = cq->cq();
+ tag_ = tag;
+ GPR_ASSERT(grpc_cq_begin_op(cq_, this));
+ GRPC_CLOSURE_INIT(
+ &on_alarm_,
+ [](void* arg, grpc_error* error) {
+ // queue the op on the completion queue
+ AlarmImpl* alarm = static_cast<AlarmImpl*>(arg);
+ alarm->Ref();
+ // Preserve the cq and reset the cq_ so that the alarm
+ // can be reset when the alarm tag is delivered.
+ grpc_completion_queue* cq = alarm->cq_;
+ alarm->cq_ = nullptr;
+ grpc_cq_end_op(
+ cq, alarm, error,
+ [](void* /*arg*/, grpc_cq_completion* /*completion*/) {}, arg,
+ &alarm->completion_);
+ GRPC_CQ_INTERNAL_UNREF(cq, "alarm");
+ },
+ this, grpc_schedule_on_exec_ctx);
+ grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline),
+ &on_alarm_);
+ }
+ void Set(gpr_timespec deadline, std::function<void(bool)> f) {
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
+ // Don't use any CQ at all. Instead, just use the timer to fire the function.
+ callback_ = std::move(f);
+ Ref();
+ GRPC_CLOSURE_INIT(&on_alarm_,
+ [](void* arg, grpc_error* error) {
+ grpc_core::Executor::Run(
+ GRPC_CLOSURE_CREATE(
+ [](void* arg, grpc_error* error) {
+ AlarmImpl* alarm =
+ static_cast<AlarmImpl*>(arg);
+ alarm->callback_(error == GRPC_ERROR_NONE);
+ alarm->Unref();
+ },
+ arg, nullptr),
+ error);
+ },
+ this, grpc_schedule_on_exec_ctx);
+ grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline),
+ &on_alarm_);
+ }
+ void Cancel() {
+ grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
+ grpc_timer_cancel(&timer_);
+ }
+ void Destroy() {
+ Cancel();
+ Unref();
+ }
+
+ private:
+ void Ref() { gpr_ref(&refs_); }
+ void Unref() {
+ if (gpr_unref(&refs_)) {
+ delete this;
+ }
+ }
+
+ grpc_timer timer_;
+ gpr_refcount refs_;
+ grpc_closure on_alarm_;
+ grpc_cq_completion completion_;
+ // completion queue where events about this alarm will be posted
+ grpc_completion_queue* cq_;
+ void* tag_;
+ std::function<void(bool)> callback_;
+};
+} // namespace internal
+
+static ::grpc::internal::GrpcLibraryInitializer g_gli_initializer;
+
+Alarm::Alarm() : alarm_(new internal::AlarmImpl()) {
+ g_gli_initializer.summon();
+}
+
+void Alarm::SetInternal(::grpc::CompletionQueue* cq, gpr_timespec deadline,
+ void* tag) {
+ // Note that we know that alarm_ is actually an internal::AlarmImpl
+ // but we declared it as the base pointer to avoid a forward declaration
+ // or exposing core data structures in the C++ public headers.
+ // Thus it is safe to use a static_cast to the subclass here, and the
+ // C++ style guide allows us to do so in this case
+ static_cast<internal::AlarmImpl*>(alarm_)->Set(cq, deadline, tag);
+}
+
+void Alarm::SetInternal(gpr_timespec deadline, std::function<void(bool)> f) {
+ // Note that we know that alarm_ is actually an internal::AlarmImpl
+ // but we declared it as the base pointer to avoid a forward declaration
+ // or exposing core data structures in the C++ public headers.
+ // Thus it is safe to use a static_cast to the subclass here, and the
+ // C++ style guide allows us to do so in this case
+ static_cast<internal::AlarmImpl*>(alarm_)->Set(deadline, std::move(f));
+}
+
+Alarm::~Alarm() {
+ if (alarm_ != nullptr) {
+ static_cast<internal::AlarmImpl*>(alarm_)->Destroy();
+ }
+}
+
+void Alarm::Cancel() { static_cast<internal::AlarmImpl*>(alarm_)->Cancel(); }
+} // namespace grpc
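AlarmImpl above backs both the tag-based and the callback-based Set paths. A minimal tag-based usage sketch follows; the one-second deadline and the tag value are arbitrary.

    #include <chrono>

    #include <grpcpp/alarm.h>
    #include <grpcpp/completion_queue.h>

    void FireAlarmOnce() {
      grpc::CompletionQueue cq;
      grpc::Alarm alarm;
      void* tag = reinterpret_cast<void*>(1);  // any value that identifies the op
      alarm.Set(&cq, std::chrono::system_clock::now() + std::chrono::seconds(1), tag);

      void* got_tag = nullptr;
      bool ok = false;
      if (cq.Next(&got_tag, &ok) && got_tag == tag) {
        // ok is true when the deadline expired, false if Cancel() won the race.
      }
      cq.Shutdown();
      while (cq.Next(&got_tag, &ok)) {
      }  // drain remaining events before cq is destroyed
    }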
diff --git a/contrib/libs/grpc/src/cpp/common/alts_context.cc b/contrib/libs/grpc/src/cpp/common/alts_context.cc
new file mode 100644
index 0000000000..31f0f083ef
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/alts_context.cc
@@ -0,0 +1,127 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc_security.h>
+#include <grpcpp/security/alts_context.h>
+
+#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h"
+#include "src/proto/grpc/gcp/altscontext.upb.h"
+
+namespace grpc {
+namespace experimental {
+
+// A upb-generated grpc_gcp_AltsContext is passed in to construct an
+// AltsContext. Normal users should use GetAltsContextFromAuthContext to get
+// AltsContext, instead of constructing their own.
+AltsContext::AltsContext(const grpc_gcp_AltsContext* ctx) {
+ upb_strview application_protocol =
+ grpc_gcp_AltsContext_application_protocol(ctx);
+ if (application_protocol.data != nullptr && application_protocol.size > 0) {
+ application_protocol_ =
+ TString(application_protocol.data, application_protocol.size);
+ }
+ upb_strview record_protocol = grpc_gcp_AltsContext_record_protocol(ctx);
+ if (record_protocol.data != nullptr && record_protocol.size > 0) {
+ record_protocol_ = TString(record_protocol.data, record_protocol.size);
+ }
+ upb_strview peer_service_account =
+ grpc_gcp_AltsContext_peer_service_account(ctx);
+ if (peer_service_account.data != nullptr && peer_service_account.size > 0) {
+ peer_service_account_ =
+ TString(peer_service_account.data, peer_service_account.size);
+ }
+ upb_strview local_service_account =
+ grpc_gcp_AltsContext_local_service_account(ctx);
+ if (local_service_account.data != nullptr && local_service_account.size > 0) {
+ local_service_account_ =
+ TString(local_service_account.data, local_service_account.size);
+ }
+ const grpc_gcp_RpcProtocolVersions* versions =
+ grpc_gcp_AltsContext_peer_rpc_versions(ctx);
+ if (versions != nullptr) {
+ const grpc_gcp_RpcProtocolVersions_Version* max_version =
+ grpc_gcp_RpcProtocolVersions_max_rpc_version(versions);
+ if (max_version != nullptr) {
+ int max_version_major =
+ grpc_gcp_RpcProtocolVersions_Version_major(max_version);
+ int max_version_minor =
+ grpc_gcp_RpcProtocolVersions_Version_minor(max_version);
+ peer_rpc_versions_.max_rpc_version.major_version = max_version_major;
+ peer_rpc_versions_.max_rpc_version.minor_version = max_version_minor;
+ }
+ const grpc_gcp_RpcProtocolVersions_Version* min_version =
+ grpc_gcp_RpcProtocolVersions_min_rpc_version(versions);
+ if (min_version != nullptr) {
+ int min_version_major =
+ grpc_gcp_RpcProtocolVersions_Version_major(min_version);
+ int min_version_minor =
+ grpc_gcp_RpcProtocolVersions_Version_minor(min_version);
+ peer_rpc_versions_.min_rpc_version.major_version = min_version_major;
+ peer_rpc_versions_.min_rpc_version.minor_version = min_version_minor;
+ }
+ }
+ if (grpc_gcp_AltsContext_security_level(ctx) >= GRPC_SECURITY_MIN &&
+ grpc_gcp_AltsContext_security_level(ctx) <= GRPC_SECURITY_MAX) {
+ security_level_ = static_cast<grpc_security_level>(
+ grpc_gcp_AltsContext_security_level(ctx));
+ }
+ if (grpc_gcp_AltsContext_has_peer_attributes(ctx)) {
+ size_t iter = UPB_MAP_BEGIN;
+ const grpc_gcp_AltsContext_PeerAttributesEntry* peer_attributes_entry =
+ grpc_gcp_AltsContext_peer_attributes_next(ctx, &iter);
+ while (peer_attributes_entry != nullptr) {
+ upb_strview key =
+ grpc_gcp_AltsContext_PeerAttributesEntry_key(peer_attributes_entry);
+ upb_strview val =
+ grpc_gcp_AltsContext_PeerAttributesEntry_value(peer_attributes_entry);
+ peer_attributes_map_[TString(key.data, key.size)] =
+ TString(val.data, val.size);
+ peer_attributes_entry =
+ grpc_gcp_AltsContext_peer_attributes_next(ctx, &iter);
+ }
+ }
+}
+
+TString AltsContext::application_protocol() const {
+ return application_protocol_;
+}
+
+TString AltsContext::record_protocol() const { return record_protocol_; }
+
+TString AltsContext::peer_service_account() const {
+ return peer_service_account_;
+}
+
+TString AltsContext::local_service_account() const {
+ return local_service_account_;
+}
+
+grpc_security_level AltsContext::security_level() const {
+ return security_level_;
+}
+
+AltsContext::RpcProtocolVersions AltsContext::peer_rpc_versions() const {
+ return peer_rpc_versions_;
+}
+
+const std::map<TString, TString>& AltsContext::peer_attributes() const {
+ return peer_attributes_map_;
+}
+
+} // namespace experimental
+} // namespace grpc
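As the constructor comment says, callers are expected to obtain an AltsContext through GetAltsContextFromAuthContext (added in alts_util.cc below) rather than build one themselves. A server-side sketch, with LogAltsPeer being a hypothetical helper:

    #include <memory>

    #include <grpcpp/security/alts_context.h>
    #include <grpcpp/security/alts_util.h>
    #include <grpcpp/server_context.h>

    void LogAltsPeer(const grpc::ServerContext& ctx) {
      std::unique_ptr<grpc::experimental::AltsContext> alts =
          grpc::experimental::GetAltsContextFromAuthContext(ctx.auth_context());
      if (alts == nullptr) return;  // not an ALTS connection, or malformed context
      // The accessors defined above expose the parsed fields.
      const auto peer = alts->peer_service_account();
      const auto level = alts->security_level();
      (void)peer;
      (void)level;
    }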
diff --git a/contrib/libs/grpc/src/cpp/common/alts_util.cc b/contrib/libs/grpc/src/cpp/common/alts_util.cc
new file mode 100644
index 0000000000..4b955c621c
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/alts_util.cc
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "upb/upb.hpp"
+
+#include <grpc/grpc_security.h>
+#include <grpc/support/log.h>
+#include <grpcpp/security/alts_context.h>
+#include <grpcpp/security/alts_util.h>
+
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h"
+#include "src/cpp/common/secure_auth_context.h"
+#include "src/proto/grpc/gcp/altscontext.upb.h"
+
+namespace grpc {
+namespace experimental {
+
+std::unique_ptr<AltsContext> GetAltsContextFromAuthContext(
+ const std::shared_ptr<const AuthContext>& auth_context) {
+ if (auth_context == nullptr) {
+ gpr_log(GPR_ERROR, "auth_context is nullptr.");
+ return nullptr;
+ }
+ std::vector<string_ref> ctx_vector =
+ auth_context->FindPropertyValues(TSI_ALTS_CONTEXT);
+ if (ctx_vector.size() != 1) {
+ gpr_log(GPR_ERROR, "contains zero or more than one ALTS context.");
+ return nullptr;
+ }
+ upb::Arena context_arena;
+ grpc_gcp_AltsContext* ctx = grpc_gcp_AltsContext_parse(
+ ctx_vector[0].data(), ctx_vector[0].size(), context_arena.ptr());
+ if (ctx == nullptr) {
+ gpr_log(GPR_ERROR, "fails to parse ALTS context.");
+ return nullptr;
+ }
+ if (grpc_gcp_AltsContext_security_level(ctx) < GRPC_SECURITY_MIN ||
+ grpc_gcp_AltsContext_security_level(ctx) > GRPC_SECURITY_MAX) {
+ gpr_log(GPR_ERROR, "security_level is invalid.");
+ return nullptr;
+ }
+ return y_absl::make_unique<AltsContext>(AltsContext(ctx));
+}
+
+grpc::Status AltsClientAuthzCheck(
+ const std::shared_ptr<const AuthContext>& auth_context,
+ const std::vector<TString>& expected_service_accounts) {
+ std::unique_ptr<AltsContext> alts_ctx =
+ GetAltsContextFromAuthContext(auth_context);
+ if (alts_ctx == nullptr) {
+ return grpc::Status(grpc::StatusCode::PERMISSION_DENIED,
+ "fails to parse ALTS context.");
+ }
+ if (std::find(expected_service_accounts.begin(),
+ expected_service_accounts.end(),
+ alts_ctx->peer_service_account()) !=
+ expected_service_accounts.end()) {
+ return grpc::Status::OK;
+ }
+ return grpc::Status(
+ grpc::StatusCode::PERMISSION_DENIED,
+ "client " + alts_ctx->peer_service_account() + " is not authorized.");
+}
+
+} // namespace experimental
+} // namespace grpc
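AltsClientAuthzCheck above is meant to be called from a server handler to allow-list peer identities. A short sketch; the service-account string is a placeholder.

    #include <vector>

    #include <grpcpp/security/alts_util.h>
    #include <grpcpp/server_context.h>

    grpc::Status CheckCaller(const grpc::ServerContext& ctx) {
      static const std::vector<grpc::string> kAllowed = {
          "allowed-client@example-project.iam.gserviceaccount.com"};
      // Returns OK when the peer's ALTS service account is in the list,
      // PERMISSION_DENIED otherwise (including when ALTS parsing fails).
      return grpc::experimental::AltsClientAuthzCheck(ctx.auth_context(), kAllowed);
    }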
diff --git a/contrib/libs/grpc/src/cpp/common/auth_property_iterator.cc b/contrib/libs/grpc/src/cpp/common/auth_property_iterator.cc
new file mode 100644
index 0000000000..fbb18e9915
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/auth_property_iterator.cc
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/security/auth_context.h>
+
+#include <grpc/grpc_security.h>
+
+namespace grpc {
+
+AuthPropertyIterator::AuthPropertyIterator()
+ : property_(nullptr), ctx_(nullptr), index_(0), name_(nullptr) {}
+
+AuthPropertyIterator::AuthPropertyIterator(
+ const grpc_auth_property* property, const grpc_auth_property_iterator* iter)
+ : property_(property),
+ ctx_(iter->ctx),
+ index_(iter->index),
+ name_(iter->name) {}
+
+AuthPropertyIterator::~AuthPropertyIterator() {}
+
+AuthPropertyIterator& AuthPropertyIterator::operator++() {
+ grpc_auth_property_iterator iter = {ctx_, index_, name_};
+ property_ = grpc_auth_property_iterator_next(&iter);
+ ctx_ = iter.ctx;
+ index_ = iter.index;
+ name_ = iter.name;
+ return *this;
+}
+
+AuthPropertyIterator AuthPropertyIterator::operator++(int) {
+ AuthPropertyIterator tmp(*this);
+ operator++();
+ return tmp;
+}
+
+bool AuthPropertyIterator::operator==(const AuthPropertyIterator& rhs) const {
+ if (property_ == nullptr || rhs.property_ == nullptr) {
+ return property_ == rhs.property_;
+ } else {
+ return index_ == rhs.index_;
+ }
+}
+
+bool AuthPropertyIterator::operator!=(const AuthPropertyIterator& rhs) const {
+ return !operator==(rhs);
+}
+
+const AuthProperty AuthPropertyIterator::operator*() {
+ return std::pair<grpc::string_ref, grpc::string_ref>(
+ property_->name,
+ grpc::string_ref(property_->value, property_->value_length));
+}
+
+} // namespace grpc
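AuthPropertyIterator backs AuthContext::begin()/end(), so the properties can be walked with an ordinary loop. A sketch over a client call's auth context; note that the string_ref values are not NUL-terminated.

    #include <memory>
    #include <string>

    #include <grpcpp/client_context.h>
    #include <grpcpp/security/auth_context.h>

    void DumpAuthProperties(const grpc::ClientContext& ctx) {
      std::shared_ptr<const grpc::AuthContext> auth = ctx.auth_context();
      if (auth == nullptr) return;
      for (auto it = auth->begin(); it != auth->end(); ++it) {
        const grpc::AuthProperty prop = *it;  // pair<string_ref name, string_ref value>
        std::string name(prop.first.data(), prop.first.size());
        std::string value(prop.second.data(), prop.second.size());
        (void)name;
        (void)value;
      }
    }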
diff --git a/contrib/libs/grpc/src/cpp/common/channel_arguments.cc b/contrib/libs/grpc/src/cpp/common/channel_arguments.cc
new file mode 100644
index 0000000000..5a5dd91b5e
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/channel_arguments.cc
@@ -0,0 +1,217 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#include <grpcpp/support/channel_arguments.h>
+
+#include <sstream>
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/log.h>
+#include <grpcpp/grpcpp.h>
+#include <grpcpp/resource_quota.h>
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/socket_mutator.h"
+
+namespace grpc {
+
+ChannelArguments::ChannelArguments() {
+ // This will be ignored if used on the server side.
+ SetString(GRPC_ARG_PRIMARY_USER_AGENT_STRING, "grpc-c++/" + grpc::Version());
+}
+
+ChannelArguments::ChannelArguments(const ChannelArguments& other)
+ : strings_(other.strings_) {
+ args_.reserve(other.args_.size());
+ auto list_it_dst = strings_.begin();
+ auto list_it_src = other.strings_.begin();
+ for (const auto& a : other.args_) {
+ grpc_arg ap;
+ ap.type = a.type;
+ GPR_ASSERT(list_it_src->c_str() == a.key);
+ ap.key = const_cast<char*>(list_it_dst->c_str());
+ ++list_it_src;
+ ++list_it_dst;
+ switch (a.type) {
+ case GRPC_ARG_INTEGER:
+ ap.value.integer = a.value.integer;
+ break;
+ case GRPC_ARG_STRING:
+ GPR_ASSERT(list_it_src->c_str() == a.value.string);
+ ap.value.string = const_cast<char*>(list_it_dst->c_str());
+ ++list_it_src;
+ ++list_it_dst;
+ break;
+ case GRPC_ARG_POINTER:
+ ap.value.pointer = a.value.pointer;
+ ap.value.pointer.p = a.value.pointer.vtable->copy(ap.value.pointer.p);
+ break;
+ }
+ args_.push_back(ap);
+ }
+}
+
+ChannelArguments::~ChannelArguments() {
+ grpc_core::ExecCtx exec_ctx;
+ for (auto& arg : args_) {
+ if (arg.type == GRPC_ARG_POINTER) {
+ arg.value.pointer.vtable->destroy(arg.value.pointer.p);
+ }
+ }
+}
+
+void ChannelArguments::Swap(ChannelArguments& other) {
+ args_.swap(other.args_);
+ strings_.swap(other.strings_);
+}
+
+void ChannelArguments::SetCompressionAlgorithm(
+ grpc_compression_algorithm algorithm) {
+ SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
+}
+
+void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
+ SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
+}
+
+void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
+ if (!mutator) {
+ return;
+ }
+ grpc_arg mutator_arg = grpc_socket_mutator_to_arg(mutator);
+ bool replaced = false;
+ grpc_core::ExecCtx exec_ctx;
+ for (auto& arg : args_) {
+ if (arg.type == mutator_arg.type &&
+ TString(arg.key) == TString(mutator_arg.key)) {
+ GPR_ASSERT(!replaced);
+ arg.value.pointer.vtable->destroy(arg.value.pointer.p);
+ arg.value.pointer = mutator_arg.value.pointer;
+ replaced = true;
+ }
+ }
+
+ if (!replaced) {
+ strings_.push_back(TString(mutator_arg.key));
+ args_.push_back(mutator_arg);
+ args_.back().key = const_cast<char*>(strings_.back().c_str());
+ }
+}
+
+// Note: a second call to this will add its prefix in front of the result of
+// the first call. An example is calling this on a copy of ChannelArguments
+// which already has a prefix. The user can build up a prefix string by calling
+// this multiple times, each time with a more significant identifier.
+void ChannelArguments::SetUserAgentPrefix(
+ const TString& user_agent_prefix) {
+ if (user_agent_prefix.empty()) {
+ return;
+ }
+ bool replaced = false;
+ auto strings_it = strings_.begin();
+ for (auto& arg : args_) {
+ ++strings_it;
+ if (arg.type == GRPC_ARG_STRING) {
+ if (TString(arg.key) == GRPC_ARG_PRIMARY_USER_AGENT_STRING) {
+ GPR_ASSERT(arg.value.string == strings_it->c_str());
+ *(strings_it) = user_agent_prefix + " " + arg.value.string;
+ arg.value.string = const_cast<char*>(strings_it->c_str());
+ replaced = true;
+ break;
+ }
+ ++strings_it;
+ }
+ }
+ if (!replaced) {
+ SetString(GRPC_ARG_PRIMARY_USER_AGENT_STRING, user_agent_prefix);
+ }
+}
+
+void ChannelArguments::SetResourceQuota(
+ const grpc::ResourceQuota& resource_quota) {
+ SetPointerWithVtable(GRPC_ARG_RESOURCE_QUOTA,
+ resource_quota.c_resource_quota(),
+ grpc_resource_quota_arg_vtable());
+}
+
+void ChannelArguments::SetMaxReceiveMessageSize(int size) {
+ SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, size);
+}
+
+void ChannelArguments::SetMaxSendMessageSize(int size) {
+ SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, size);
+}
+
+void ChannelArguments::SetLoadBalancingPolicyName(
+ const TString& lb_policy_name) {
+ SetString(GRPC_ARG_LB_POLICY_NAME, lb_policy_name);
+}
+
+void ChannelArguments::SetServiceConfigJSON(
+ const TString& service_config_json) {
+ SetString(GRPC_ARG_SERVICE_CONFIG, service_config_json);
+}
+
+void ChannelArguments::SetInt(const TString& key, int value) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_INTEGER;
+ strings_.push_back(key);
+ arg.key = const_cast<char*>(strings_.back().c_str());
+ arg.value.integer = value;
+
+ args_.push_back(arg);
+}
+
+void ChannelArguments::SetPointer(const TString& key, void* value) {
+ static const grpc_arg_pointer_vtable vtable = {
+ &PointerVtableMembers::Copy, &PointerVtableMembers::Destroy,
+ &PointerVtableMembers::Compare};
+ SetPointerWithVtable(key, value, &vtable);
+}
+
+void ChannelArguments::SetPointerWithVtable(
+ const TString& key, void* value,
+ const grpc_arg_pointer_vtable* vtable) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ strings_.push_back(key);
+ arg.key = const_cast<char*>(strings_.back().c_str());
+ arg.value.pointer.p = vtable->copy(value);
+ arg.value.pointer.vtable = vtable;
+ args_.push_back(arg);
+}
+
+void ChannelArguments::SetString(const TString& key,
+ const TString& value) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_STRING;
+ strings_.push_back(key);
+ arg.key = const_cast<char*>(strings_.back().c_str());
+ strings_.push_back(value);
+ arg.value.string = const_cast<char*>(strings_.back().c_str());
+
+ args_.push_back(arg);
+}
+
+void ChannelArguments::SetChannelArgs(grpc_channel_args* channel_args) const {
+ channel_args->num_args = args_.size();
+ if (channel_args->num_args > 0) {
+ channel_args->args = const_cast<grpc_arg*>(&args_[0]);
+ }
+}
+
+} // namespace grpc
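The user-agent note above is easiest to see from the caller's side: each SetUserAgentPrefix call prepends its prefix, so the most significant identifier is set last. A sketch using CreateCustomChannel; the identifiers, message-size limit and LB policy are placeholders.

    #include <grpcpp/create_channel.h>
    #include <grpcpp/security/credentials.h>
    #include <grpcpp/support/channel_arguments.h>

    std::shared_ptr<grpc::Channel> MakeTunedChannel(const grpc::string& target) {
      grpc::ChannelArguments args;
      args.SetUserAgentPrefix("my-component/1.0");
      args.SetUserAgentPrefix("my-app/2.3");  // ends up in front of my-component/1.0
      args.SetMaxReceiveMessageSize(16 * 1024 * 1024);
      args.SetLoadBalancingPolicyName("round_robin");
      return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                       args);
    }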
diff --git a/contrib/libs/grpc/src/cpp/common/channel_filter.cc b/contrib/libs/grpc/src/cpp/common/channel_filter.cc
new file mode 100644
index 0000000000..8df6c7b98f
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/channel_filter.cc
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <string.h>
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/cpp/common/channel_filter.h"
+
+#include <grpcpp/impl/codegen/slice.h>
+
+namespace grpc {
+
+// MetadataBatch
+
+grpc_linked_mdelem* MetadataBatch::AddMetadata(const string& key,
+ const string& value) {
+ grpc_linked_mdelem* storage = new grpc_linked_mdelem;
+ storage->md = grpc_mdelem_from_slices(SliceFromCopiedString(key),
+ SliceFromCopiedString(value));
+ GRPC_LOG_IF_ERROR("MetadataBatch::AddMetadata",
+ grpc_metadata_batch_link_head(batch_, storage));
+ return storage;
+}
+
+// ChannelData
+
+void ChannelData::StartTransportOp(grpc_channel_element* elem,
+ TransportOp* op) {
+ grpc_channel_next_op(elem, op->op());
+}
+
+void ChannelData::GetInfo(grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {
+ grpc_channel_next_get_info(elem, channel_info);
+}
+
+// CallData
+
+void CallData::StartTransportStreamOpBatch(grpc_call_element* elem,
+ TransportStreamOpBatch* op) {
+ grpc_call_next_op(elem, op->op());
+}
+
+void CallData::SetPollsetOrPollsetSet(grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ grpc_call_stack_ignore_set_pollset_or_pollset_set(elem, pollent);
+}
+
+// internal code used by RegisterChannelFilter()
+
+namespace internal {
+
+// Note: Implicitly initialized to nullptr due to static lifetime.
+std::vector<FilterRecord>* channel_filters;
+
+namespace {
+
+bool MaybeAddFilter(grpc_channel_stack_builder* builder, void* arg) {
+ const FilterRecord& filter = *static_cast<FilterRecord*>(arg);
+ if (filter.include_filter) {
+ const grpc_channel_args* args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (!filter.include_filter(*args)) return true;
+ }
+ return grpc_channel_stack_builder_prepend_filter(builder, &filter.filter,
+ nullptr, nullptr);
+}
+
+} // namespace
+
+void ChannelFilterPluginInit() {
+ for (size_t i = 0; i < channel_filters->size(); ++i) {
+ FilterRecord& filter = (*channel_filters)[i];
+ grpc_channel_init_register_stage(filter.stack_type, filter.priority,
+ MaybeAddFilter, (void*)&filter);
+ }
+}
+
+void ChannelFilterPluginShutdown() {}
+
+} // namespace internal
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/channel_filter.h b/contrib/libs/grpc/src/cpp/common/channel_filter.h
new file mode 100644
index 0000000000..5ce720b307
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/channel_filter.h
@@ -0,0 +1,402 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCXX_CHANNEL_FILTER_H
+#define GRPCXX_CHANNEL_FILTER_H
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpcpp/impl/codegen/config.h>
+
+#include <functional>
+#include <vector>
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/transport/metadata_batch.h"
+
+/// An interface to define filters.
+///
+/// To define a filter, implement a subclass of each of \c CallData and
+/// \c ChannelData. Then register the filter using something like this:
+/// \code{.cpp}
+/// RegisterChannelFilter<MyChannelDataSubclass, MyCallDataSubclass>(
+/// "name-of-filter", GRPC_SERVER_CHANNEL, INT_MAX, nullptr);
+/// \endcode
+
+namespace grpc {
+
+/// A C++ wrapper for the \c grpc_metadata_batch struct.
+class MetadataBatch {
+ public:
+ /// Borrows a pointer to \a batch, but does NOT take ownership.
+ /// The caller must ensure that \a batch continues to exist for as
+ /// long as the MetadataBatch object does.
+ explicit MetadataBatch(grpc_metadata_batch* batch) : batch_(batch) {}
+
+ grpc_metadata_batch* batch() const { return batch_; }
+
+ /// Adds metadata and returns the newly allocated storage.
+ /// The caller takes ownership of the result, which must exist for the
+ /// lifetime of the gRPC call.
+ grpc_linked_mdelem* AddMetadata(const string& key, const string& value);
+
+ class const_iterator : public std::iterator<std::bidirectional_iterator_tag,
+ const grpc_mdelem> {
+ public:
+ const grpc_mdelem& operator*() const { return elem_->md; }
+ const grpc_mdelem operator->() const { return elem_->md; }
+
+ const_iterator& operator++() {
+ elem_ = elem_->next;
+ return *this;
+ }
+ const_iterator operator++(int) {
+ const_iterator tmp(*this);
+ operator++();
+ return tmp;
+ }
+ const_iterator& operator--() {
+ elem_ = elem_->prev;
+ return *this;
+ }
+ const_iterator operator--(int) {
+ const_iterator tmp(*this);
+ operator--();
+ return tmp;
+ }
+
+ bool operator==(const const_iterator& other) const {
+ return elem_ == other.elem_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return elem_ != other.elem_;
+ }
+
+ private:
+ friend class MetadataBatch;
+ explicit const_iterator(grpc_linked_mdelem* elem) : elem_(elem) {}
+
+ grpc_linked_mdelem* elem_;
+ };
+
+ const_iterator begin() const { return const_iterator(batch_->list.head); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ private:
+ grpc_metadata_batch* batch_; // Not owned.
+};
+
+/// A C++ wrapper for the \c grpc_transport_op struct.
+class TransportOp {
+ public:
+ /// Borrows a pointer to \a op, but does NOT take ownership.
+ /// The caller must ensure that \a op continues to exist for as
+ /// long as the TransportOp object does.
+ explicit TransportOp(grpc_transport_op* op) : op_(op) {}
+
+ grpc_transport_op* op() const { return op_; }
+
+ // TODO(roth): Add a C++ wrapper for grpc_error?
+ grpc_error* disconnect_with_error() const {
+ return op_->disconnect_with_error;
+ }
+ bool send_goaway() const { return op_->goaway_error != GRPC_ERROR_NONE; }
+
+ // TODO(roth): Add methods for additional fields as needed.
+
+ private:
+ grpc_transport_op* op_; // Not owned.
+};
+
+/// A C++ wrapper for the \c grpc_transport_stream_op_batch struct.
+class TransportStreamOpBatch {
+ public:
+ /// Borrows a pointer to \a op, but does NOT take ownership.
+ /// The caller must ensure that \a op continues to exist for as
+ /// long as the TransportStreamOpBatch object does.
+ explicit TransportStreamOpBatch(grpc_transport_stream_op_batch* op)
+ : op_(op),
+ send_initial_metadata_(
+ op->send_initial_metadata
+ ? op->payload->send_initial_metadata.send_initial_metadata
+ : nullptr),
+ send_trailing_metadata_(
+ op->send_trailing_metadata
+ ? op->payload->send_trailing_metadata.send_trailing_metadata
+ : nullptr),
+ recv_initial_metadata_(
+ op->recv_initial_metadata
+ ? op->payload->recv_initial_metadata.recv_initial_metadata
+ : nullptr),
+ recv_trailing_metadata_(
+ op->recv_trailing_metadata
+ ? op->payload->recv_trailing_metadata.recv_trailing_metadata
+ : nullptr) {}
+
+ grpc_transport_stream_op_batch* op() const { return op_; }
+
+ grpc_closure* on_complete() const { return op_->on_complete; }
+ void set_on_complete(grpc_closure* closure) { op_->on_complete = closure; }
+
+ MetadataBatch* send_initial_metadata() {
+ return op_->send_initial_metadata ? &send_initial_metadata_ : nullptr;
+ }
+ MetadataBatch* send_trailing_metadata() {
+ return op_->send_trailing_metadata ? &send_trailing_metadata_ : nullptr;
+ }
+ MetadataBatch* recv_initial_metadata() {
+ return op_->recv_initial_metadata ? &recv_initial_metadata_ : nullptr;
+ }
+ MetadataBatch* recv_trailing_metadata() {
+ return op_->recv_trailing_metadata ? &recv_trailing_metadata_ : nullptr;
+ }
+
+ uint32_t* send_initial_metadata_flags() const {
+ return op_->send_initial_metadata ? &op_->payload->send_initial_metadata
+ .send_initial_metadata_flags
+ : nullptr;
+ }
+
+ grpc_closure* recv_initial_metadata_ready() const {
+ return op_->recv_initial_metadata
+ ? op_->payload->recv_initial_metadata.recv_initial_metadata_ready
+ : nullptr;
+ }
+ void set_recv_initial_metadata_ready(grpc_closure* closure) {
+ op_->payload->recv_initial_metadata.recv_initial_metadata_ready = closure;
+ }
+
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* send_message() const {
+ return op_->send_message ? &op_->payload->send_message.send_message
+ : nullptr;
+ }
+ void set_send_message(
+ grpc_core::OrphanablePtr<grpc_core::ByteStream> send_message) {
+ op_->send_message = true;
+ op_->payload->send_message.send_message = std::move(send_message);
+ }
+
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message() const {
+ return op_->recv_message ? op_->payload->recv_message.recv_message
+ : nullptr;
+ }
+ void set_recv_message(
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message) {
+ op_->recv_message = true;
+ op_->payload->recv_message.recv_message = recv_message;
+ }
+
+ census_context* get_census_context() const {
+ return static_cast<census_context*>(
+ op_->payload->context[GRPC_CONTEXT_TRACING].value);
+ }
+
+ const gpr_atm* get_peer_string() const {
+ if (op_->send_initial_metadata &&
+ op_->payload->send_initial_metadata.peer_string != nullptr) {
+ return op_->payload->send_initial_metadata.peer_string;
+ } else if (op_->recv_initial_metadata &&
+ op_->payload->recv_initial_metadata.peer_string != nullptr) {
+ return op_->payload->recv_initial_metadata.peer_string;
+ } else {
+ return nullptr;
+ }
+ }
+
+ private:
+ grpc_transport_stream_op_batch* op_; // Not owned.
+ MetadataBatch send_initial_metadata_;
+ MetadataBatch send_trailing_metadata_;
+ MetadataBatch recv_initial_metadata_;
+ MetadataBatch recv_trailing_metadata_;
+};
+
+/// Represents channel data.
+class ChannelData {
+ public:
+ ChannelData() {}
+ virtual ~ChannelData() {}
+
+ // TODO(roth): Come up with a more C++-like API for the channel element.
+
+ /// Initializes the channel data.
+ virtual grpc_error* Init(grpc_channel_element* /*elem*/,
+ grpc_channel_element_args* /*args*/) {
+ return GRPC_ERROR_NONE;
+ }
+
+ // Called before destruction.
+ virtual void Destroy(grpc_channel_element* /*elem*/) {}
+
+ virtual void StartTransportOp(grpc_channel_element* elem, TransportOp* op);
+
+ virtual void GetInfo(grpc_channel_element* elem,
+ const grpc_channel_info* channel_info);
+};
+
+/// Represents call data.
+class CallData {
+ public:
+ CallData() {}
+ virtual ~CallData() {}
+
+ // TODO(roth): Come up with a more C++-like API for the call element.
+
+ /// Initializes the call data.
+ virtual grpc_error* Init(grpc_call_element* /*elem*/,
+ const grpc_call_element_args* /*args*/) {
+ return GRPC_ERROR_NONE;
+ }
+
+ // Called before destruction.
+ virtual void Destroy(grpc_call_element* /*elem*/,
+ const grpc_call_final_info* /*final_info*/,
+ grpc_closure* /*then_call_closure*/) {}
+
+ /// Starts a new stream operation.
+ virtual void StartTransportStreamOpBatch(grpc_call_element* elem,
+ TransportStreamOpBatch* op);
+
+ /// Sets a pollset or pollset set.
+ virtual void SetPollsetOrPollsetSet(grpc_call_element* elem,
+ grpc_polling_entity* pollent);
+};
+
+namespace internal {
+
+// Defines static members for passing to C core.
+// Members of this class correspond to the members of the C
+// grpc_channel_filter struct.
+template <typename ChannelDataType, typename CallDataType>
+class ChannelFilter final {
+ public:
+ static const size_t channel_data_size = sizeof(ChannelDataType);
+
+ static grpc_error* InitChannelElement(grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ // Construct the object in the already-allocated memory.
+ ChannelDataType* channel_data = new (elem->channel_data) ChannelDataType();
+ return channel_data->Init(elem, args);
+ }
+
+ static void DestroyChannelElement(grpc_channel_element* elem) {
+ ChannelDataType* channel_data =
+ static_cast<ChannelDataType*>(elem->channel_data);
+ channel_data->Destroy(elem);
+ channel_data->~ChannelDataType();
+ }
+
+ static void StartTransportOp(grpc_channel_element* elem,
+ grpc_transport_op* op) {
+ ChannelDataType* channel_data =
+ static_cast<ChannelDataType*>(elem->channel_data);
+ TransportOp op_wrapper(op);
+ channel_data->StartTransportOp(elem, &op_wrapper);
+ }
+
+ static void GetChannelInfo(grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {
+ ChannelDataType* channel_data =
+ static_cast<ChannelDataType*>(elem->channel_data);
+ channel_data->GetInfo(elem, channel_info);
+ }
+
+ static const size_t call_data_size = sizeof(CallDataType);
+
+ static grpc_error* InitCallElement(grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ // Construct the object in the already-allocated memory.
+ CallDataType* call_data = new (elem->call_data) CallDataType();
+ return call_data->Init(elem, args);
+ }
+
+ static void DestroyCallElement(grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_call_closure) {
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
+ call_data->Destroy(elem, final_info, then_call_closure);
+ call_data->~CallDataType();
+ }
+
+ static void StartTransportStreamOpBatch(grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
+ TransportStreamOpBatch op_wrapper(op);
+ call_data->StartTransportStreamOpBatch(elem, &op_wrapper);
+ }
+
+ static void SetPollsetOrPollsetSet(grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
+ call_data->SetPollsetOrPollsetSet(elem, pollent);
+ }
+};
+
+struct FilterRecord {
+ grpc_channel_stack_type stack_type;
+ int priority;
+ std::function<bool(const grpc_channel_args&)> include_filter;
+ grpc_channel_filter filter;
+};
+extern std::vector<FilterRecord>* channel_filters;
+
+void ChannelFilterPluginInit();
+void ChannelFilterPluginShutdown();
+
+} // namespace internal
+
+/// Registers a new filter.
+/// Must be called by only one thread at a time.
+/// The \a include_filter argument specifies a function that will be called
+/// to determine at run-time whether or not to add the filter. If the
+/// value is nullptr, the filter will be added unconditionally.
+/// If the channel stack type is GRPC_CLIENT_SUBCHANNEL, the caller should
+/// ensure that subchannels with different filter lists will always have
+/// different channel args. This requires setting a channel arg in case the
+/// registration function relies on some condition other than channel args to
+/// decide whether to add a filter or not.
+template <typename ChannelDataType, typename CallDataType>
+void RegisterChannelFilter(
+ const char* name, grpc_channel_stack_type stack_type, int priority,
+ std::function<bool(const grpc_channel_args&)> include_filter) {
+ // If we haven't been called before, initialize channel_filters and
+ // call grpc_register_plugin().
+ if (internal::channel_filters == nullptr) {
+ grpc_register_plugin(internal::ChannelFilterPluginInit,
+ internal::ChannelFilterPluginShutdown);
+ internal::channel_filters = new std::vector<internal::FilterRecord>();
+ }
+ // Add an entry to channel_filters. The filter will be added when the
+ // C-core initialization code calls ChannelFilterPluginInit().
+ typedef internal::ChannelFilter<ChannelDataType, CallDataType> FilterType;
+ internal::FilterRecord filter_record = {
+ stack_type,
+ priority,
+ include_filter,
+ {FilterType::StartTransportStreamOpBatch, FilterType::StartTransportOp,
+ FilterType::call_data_size, FilterType::InitCallElement,
+ FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement,
+ FilterType::channel_data_size, FilterType::InitChannelElement,
+ FilterType::DestroyChannelElement, FilterType::GetChannelInfo, name}};
+ internal::channel_filters->push_back(filter_record);
+}
+
+} // namespace grpc
+
+#endif // GRPCXX_CHANNEL_FILTER_H
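Putting the pieces of this header together, a filter is a ChannelData/CallData pair registered before grpc_init(). A sketch; CountingChannelData, CountingCallData and the filter name are ours and purely illustrative.

    #include <limits.h>

    #include "src/cpp/common/channel_filter.h"

    class CountingChannelData : public grpc::ChannelData {};

    class CountingCallData : public grpc::CallData {
     public:
      void StartTransportStreamOpBatch(grpc_call_element* elem,
                                       grpc::TransportStreamOpBatch* op) override {
        ++batches_;  // illustrative bookkeeping only
        grpc::CallData::StartTransportStreamOpBatch(elem, op);  // forward the batch
      }

     private:
      int batches_ = 0;
    };

    void RegisterCountingFilter() {
      // Passing nullptr as include_filter adds the filter unconditionally.
      // Must run before grpc_init(), since registration goes through
      // grpc_register_plugin().
      grpc::RegisterChannelFilter<CountingChannelData, CountingCallData>(
          "counting_filter", GRPC_CLIENT_CHANNEL, INT_MAX, nullptr);
    }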
diff --git a/contrib/libs/grpc/src/cpp/common/completion_queue_cc.cc b/contrib/libs/grpc/src/cpp/common/completion_queue_cc.cc
new file mode 100644
index 0000000000..96a7105eaf
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/completion_queue_cc.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/completion_queue.h>
+
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/support/time.h>
+
+namespace grpc {
+
+static internal::GrpcLibraryInitializer g_gli_initializer;
+
+// 'CompletionQueue' constructor can safely call GrpcLibraryCodegen(false) here
+// i.e., not have GrpcLibraryCodegen call grpc_init(). This is because, to create
+// a 'grpc_completion_queue' instance (which is being passed as the input to
+// this constructor), one must have already called grpc_init().
+CompletionQueue::CompletionQueue(grpc_completion_queue* take)
+ : GrpcLibraryCodegen(false), cq_(take) {
+ InitialAvalanching();
+}
+
+void CompletionQueue::Shutdown() {
+ g_gli_initializer.summon();
+#ifndef NDEBUG
+ if (!ServerListEmpty()) {
+ gpr_log(GPR_ERROR,
+ "CompletionQueue shutdown being shutdown before its server.");
+ }
+#endif
+ CompleteAvalanching();
+}
+
+CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
+ void** tag, bool* ok, gpr_timespec deadline) {
+ for (;;) {
+ auto ev = grpc_completion_queue_next(cq_, deadline, nullptr);
+ switch (ev.type) {
+ case GRPC_QUEUE_TIMEOUT:
+ return TIMEOUT;
+ case GRPC_QUEUE_SHUTDOWN:
+ return SHUTDOWN;
+ case GRPC_OP_COMPLETE:
+ auto core_cq_tag =
+ static_cast<::grpc::internal::CompletionQueueTag*>(ev.tag);
+ *ok = ev.success != 0;
+ *tag = core_cq_tag;
+ if (core_cq_tag->FinalizeResult(tag, ok)) {
+ return GOT_EVENT;
+ }
+ break;
+ }
+ }
+}
+
+CompletionQueue::CompletionQueueTLSCache::CompletionQueueTLSCache(
+ CompletionQueue* cq)
+ : cq_(cq), flushed_(false) {
+ grpc_completion_queue_thread_local_cache_init(cq_->cq_);
+}
+
+CompletionQueue::CompletionQueueTLSCache::~CompletionQueueTLSCache() {
+ GPR_ASSERT(flushed_);
+}
+
+bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
+ int res = 0;
+ void* res_tag;
+ flushed_ = true;
+ if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag,
+ &res)) {
+ auto core_cq_tag =
+ static_cast<::grpc::internal::CompletionQueueTag*>(res_tag);
+ *ok = res == 1;
+ if (core_cq_tag->FinalizeResult(tag, ok)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace grpc
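AsyncNextInternal above maps core completion-queue events onto the three NextStatus values; a typical caller loops on AsyncNext as sketched below (the 100 ms poll interval is arbitrary).

    #include <chrono>

    #include <grpcpp/completion_queue.h>

    void DrainUntilShutdown(grpc::CompletionQueue* cq) {
      void* tag = nullptr;
      bool ok = false;
      for (;;) {
        auto deadline =
            std::chrono::system_clock::now() + std::chrono::milliseconds(100);
        switch (cq->AsyncNext(&tag, &ok, deadline)) {
          case grpc::CompletionQueue::GOT_EVENT:
            // `tag` is the value handed to the async op; `ok` is its outcome.
            break;
          case grpc::CompletionQueue::TIMEOUT:
            break;  // nothing ready yet, poll again
          case grpc::CompletionQueue::SHUTDOWN:
            return;  // Shutdown() was called and the queue is fully drained
        }
      }
    }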
diff --git a/contrib/libs/grpc/src/cpp/common/core_codegen.cc b/contrib/libs/grpc/src/cpp/common/core_codegen.cc
new file mode 100644
index 0000000000..75383ed511
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/core_codegen.cc
@@ -0,0 +1,240 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/core_codegen.h>
+
+#include <stdlib.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/byte_buffer_reader.h>
+#include <grpc/grpc.h>
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+#include <grpc/support/sync.h>
+#include <grpcpp/support/config.h>
+
+#include "src/core/lib/profiling/timers.h"
+
+struct grpc_byte_buffer;
+
+namespace grpc {
+
+const grpc_completion_queue_factory*
+CoreCodegen::grpc_completion_queue_factory_lookup(
+ const grpc_completion_queue_attributes* attributes) {
+ return ::grpc_completion_queue_factory_lookup(attributes);
+}
+
+grpc_completion_queue* CoreCodegen::grpc_completion_queue_create(
+ const grpc_completion_queue_factory* factory,
+ const grpc_completion_queue_attributes* attributes, void* reserved) {
+ return ::grpc_completion_queue_create(factory, attributes, reserved);
+}
+
+grpc_completion_queue* CoreCodegen::grpc_completion_queue_create_for_next(
+ void* reserved) {
+ return ::grpc_completion_queue_create_for_next(reserved);
+}
+
+grpc_completion_queue* CoreCodegen::grpc_completion_queue_create_for_pluck(
+ void* reserved) {
+ return ::grpc_completion_queue_create_for_pluck(reserved);
+}
+
+void CoreCodegen::grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
+ ::grpc_completion_queue_shutdown(cq);
+}
+
+void CoreCodegen::grpc_completion_queue_destroy(grpc_completion_queue* cq) {
+ ::grpc_completion_queue_destroy(cq);
+}
+
+grpc_event CoreCodegen::grpc_completion_queue_pluck(grpc_completion_queue* cq,
+ void* tag,
+ gpr_timespec deadline,
+ void* reserved) {
+ return ::grpc_completion_queue_pluck(cq, tag, deadline, reserved);
+}
+
+void* CoreCodegen::gpr_malloc(size_t size) { return ::gpr_malloc(size); }
+
+void CoreCodegen::gpr_free(void* p) { return ::gpr_free(p); }
+
+void CoreCodegen::grpc_init() { ::grpc_init(); }
+void CoreCodegen::grpc_shutdown() { ::grpc_shutdown(); }
+
+void CoreCodegen::gpr_mu_init(gpr_mu* mu) { ::gpr_mu_init(mu); }
+void CoreCodegen::gpr_mu_destroy(gpr_mu* mu) { ::gpr_mu_destroy(mu); }
+void CoreCodegen::gpr_mu_lock(gpr_mu* mu) { ::gpr_mu_lock(mu); }
+void CoreCodegen::gpr_mu_unlock(gpr_mu* mu) { ::gpr_mu_unlock(mu); }
+void CoreCodegen::gpr_cv_init(gpr_cv* cv) { ::gpr_cv_init(cv); }
+void CoreCodegen::gpr_cv_destroy(gpr_cv* cv) { ::gpr_cv_destroy(cv); }
+int CoreCodegen::gpr_cv_wait(gpr_cv* cv, gpr_mu* mu,
+ gpr_timespec abs_deadline) {
+ return ::gpr_cv_wait(cv, mu, abs_deadline);
+}
+void CoreCodegen::gpr_cv_signal(gpr_cv* cv) { ::gpr_cv_signal(cv); }
+void CoreCodegen::gpr_cv_broadcast(gpr_cv* cv) { ::gpr_cv_broadcast(cv); }
+
+grpc_byte_buffer* CoreCodegen::grpc_byte_buffer_copy(grpc_byte_buffer* bb) {
+ return ::grpc_byte_buffer_copy(bb);
+}
+
+void CoreCodegen::grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
+ ::grpc_byte_buffer_destroy(bb);
+}
+
+size_t CoreCodegen::grpc_byte_buffer_length(grpc_byte_buffer* bb) {
+ return ::grpc_byte_buffer_length(bb);
+}
+
+grpc_call_error CoreCodegen::grpc_call_start_batch(grpc_call* call,
+ const grpc_op* ops,
+ size_t nops, void* tag,
+ void* reserved) {
+ return ::grpc_call_start_batch(call, ops, nops, tag, reserved);
+}
+
+grpc_call_error CoreCodegen::grpc_call_cancel_with_status(
+ grpc_call* call, grpc_status_code status, const char* description,
+ void* reserved) {
+ return ::grpc_call_cancel_with_status(call, status, description, reserved);
+}
+void CoreCodegen::grpc_call_ref(grpc_call* call) { ::grpc_call_ref(call); }
+void CoreCodegen::grpc_call_unref(grpc_call* call) { ::grpc_call_unref(call); }
+void* CoreCodegen::grpc_call_arena_alloc(grpc_call* call, size_t length) {
+ return ::grpc_call_arena_alloc(call, length);
+}
+const char* CoreCodegen::grpc_call_error_to_string(grpc_call_error error) {
+ return ::grpc_call_error_to_string(error);
+}
+
+int CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+ grpc_byte_buffer* buffer) {
+ return ::grpc_byte_buffer_reader_init(reader, buffer);
+}
+
+void CoreCodegen::grpc_byte_buffer_reader_destroy(
+ grpc_byte_buffer_reader* reader) {
+ ::grpc_byte_buffer_reader_destroy(reader);
+}
+
+int CoreCodegen::grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
+ grpc_slice* slice) {
+ return ::grpc_byte_buffer_reader_next(reader, slice);
+}
+
+int CoreCodegen::grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
+ grpc_slice** slice) {
+ return ::grpc_byte_buffer_reader_peek(reader, slice);
+}
+
+grpc_byte_buffer* CoreCodegen::grpc_raw_byte_buffer_create(grpc_slice* slice,
+ size_t nslices) {
+ return ::grpc_raw_byte_buffer_create(slice, nslices);
+}
+
+grpc_slice CoreCodegen::grpc_slice_new_with_user_data(void* p, size_t len,
+ void (*destroy)(void*),
+ void* user_data) {
+ return ::grpc_slice_new_with_user_data(p, len, destroy, user_data);
+}
+
+grpc_slice CoreCodegen::grpc_slice_new_with_len(void* p, size_t len,
+ void (*destroy)(void*,
+ size_t)) {
+ return ::grpc_slice_new_with_len(p, len, destroy);
+}
+
+grpc_slice CoreCodegen::grpc_empty_slice() { return ::grpc_empty_slice(); }
+
+grpc_slice CoreCodegen::grpc_slice_malloc(size_t length) {
+ return ::grpc_slice_malloc(length);
+}
+
+void CoreCodegen::grpc_slice_unref(grpc_slice slice) {
+ ::grpc_slice_unref(slice);
+}
+
+grpc_slice CoreCodegen::grpc_slice_ref(grpc_slice slice) {
+ return ::grpc_slice_ref(slice);
+}
+
+grpc_slice CoreCodegen::grpc_slice_split_tail(grpc_slice* s, size_t split) {
+ return ::grpc_slice_split_tail(s, split);
+}
+
+grpc_slice CoreCodegen::grpc_slice_split_head(grpc_slice* s, size_t split) {
+ return ::grpc_slice_split_head(s, split);
+}
+
+grpc_slice CoreCodegen::grpc_slice_sub(grpc_slice s, size_t begin, size_t end) {
+ return ::grpc_slice_sub(s, begin, end);
+}
+
+grpc_slice CoreCodegen::grpc_slice_from_static_buffer(const void* buffer,
+ size_t length) {
+ return ::grpc_slice_from_static_buffer(buffer, length);
+}
+
+grpc_slice CoreCodegen::grpc_slice_from_copied_buffer(const void* buffer,
+ size_t length) {
+ return ::grpc_slice_from_copied_buffer(static_cast<const char*>(buffer),
+ length);
+}
+
+void CoreCodegen::grpc_slice_buffer_add(grpc_slice_buffer* sb,
+ grpc_slice slice) {
+ ::grpc_slice_buffer_add(sb, slice);
+}
+
+void CoreCodegen::grpc_slice_buffer_pop(grpc_slice_buffer* sb) {
+ ::grpc_slice_buffer_pop(sb);
+}
+
+void CoreCodegen::grpc_metadata_array_init(grpc_metadata_array* array) {
+ ::grpc_metadata_array_init(array);
+}
+
+void CoreCodegen::grpc_metadata_array_destroy(grpc_metadata_array* array) {
+ ::grpc_metadata_array_destroy(array);
+}
+
+const Status& CoreCodegen::ok() { return grpc::Status::OK; }
+
+const Status& CoreCodegen::cancelled() { return grpc::Status::CANCELLED; }
+
+gpr_timespec CoreCodegen::gpr_inf_future(gpr_clock_type type) {
+ return ::gpr_inf_future(type);
+}
+
+gpr_timespec CoreCodegen::gpr_time_0(gpr_clock_type type) {
+ return ::gpr_time_0(type);
+}
+
+void CoreCodegen::assert_fail(const char* failed_assertion, const char* file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "assertion failed: %s",
+ failed_assertion);
+ abort();
+}
+
+} // namespace grpc
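
Note (not part of the diff): CoreCodegen is the concrete implementation of the codegen interface; header-only and generated code reaches the C core only through the g_core_codegen_interface pointer that codegen_init.cc points at an instance of this class. A minimal sketch, assuming the codegen headers of this gRPC revision:

    #include <grpcpp/impl/codegen/core_codegen_interface.h>

    // Builds a slice the way generated code would: via the codegen interface
    // rather than by calling ::grpc_slice_from_copied_buffer directly.
    grpc_slice MakeSliceViaCodegen(const char* data, size_t len) {
      return grpc::g_core_codegen_interface->grpc_slice_from_copied_buffer(data,
                                                                           len);
    }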
diff --git a/contrib/libs/grpc/src/cpp/common/insecure_create_auth_context.cc b/contrib/libs/grpc/src/cpp/common/insecure_create_auth_context.cc
new file mode 100644
index 0000000000..4e5cbd0372
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/insecure_create_auth_context.cc
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpcpp/security/auth_context.h>
+
+namespace grpc {
+
+std::shared_ptr<const AuthContext> CreateAuthContext(grpc_call* call) {
+ (void)call;
+ return std::shared_ptr<const AuthContext>();
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/resource_quota_cc.cc b/contrib/libs/grpc/src/cpp/common/resource_quota_cc.cc
new file mode 100644
index 0000000000..64abff9633
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/resource_quota_cc.cc
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpcpp/resource_quota.h>
+
+namespace grpc {
+
+ResourceQuota::ResourceQuota() : impl_(grpc_resource_quota_create(nullptr)) {}
+
+ResourceQuota::ResourceQuota(const TString& name)
+ : impl_(grpc_resource_quota_create(name.c_str())) {}
+
+ResourceQuota::~ResourceQuota() { grpc_resource_quota_unref(impl_); }
+
+ResourceQuota& ResourceQuota::Resize(size_t new_size) {
+ grpc_resource_quota_resize(impl_, new_size);
+ return *this;
+}
+
+ResourceQuota& ResourceQuota::SetMaxThreads(int new_max_threads) {
+ grpc_resource_quota_set_max_threads(impl_, new_max_threads);
+ return *this;
+}
+} // namespace grpc
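
Usage sketch (not part of the diff): a ResourceQuota is sized and then attached to a server (or to ChannelArguments); the limits below are arbitrary example values.

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/resource_quota.h>

    void ConfigureServerQuota(grpc::ServerBuilder* builder) {
      grpc::ResourceQuota quota("example_quota");
      quota.Resize(64 * 1024 * 1024);    // cap buffer memory at ~64 MiB
      quota.SetMaxThreads(32);           // cap threads created on its behalf
      builder->SetResourceQuota(quota);  // the builder keeps its own reference
    }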
diff --git a/contrib/libs/grpc/src/cpp/common/rpc_method.cc b/contrib/libs/grpc/src/cpp/common/rpc_method.cc
new file mode 100644
index 0000000000..a47dd3e444
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/rpc_method.cc
@@ -0,0 +1,21 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/rpc_method.h>
+
+namespace grpc {} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/secure_auth_context.cc b/contrib/libs/grpc/src/cpp/common/secure_auth_context.cc
new file mode 100644
index 0000000000..e1f97889c8
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/secure_auth_context.cc
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/common/secure_auth_context.h"
+
+#include <grpc/grpc_security.h>
+
+namespace grpc {
+
+std::vector<grpc::string_ref> SecureAuthContext::GetPeerIdentity() const {
+ if (ctx_ == nullptr) {
+ return std::vector<grpc::string_ref>();
+ }
+ grpc_auth_property_iterator iter =
+ grpc_auth_context_peer_identity(ctx_.get());
+ std::vector<grpc::string_ref> identity;
+ const grpc_auth_property* property = nullptr;
+ while ((property = grpc_auth_property_iterator_next(&iter))) {
+ identity.push_back(
+ grpc::string_ref(property->value, property->value_length));
+ }
+ return identity;
+}
+
+TString SecureAuthContext::GetPeerIdentityPropertyName() const {
+ if (ctx_ == nullptr) {
+ return "";
+ }
+ const char* name = grpc_auth_context_peer_identity_property_name(ctx_.get());
+ return name == nullptr ? "" : name;
+}
+
+std::vector<grpc::string_ref> SecureAuthContext::FindPropertyValues(
+ const TString& name) const {
+ if (ctx_ == nullptr) {
+ return std::vector<grpc::string_ref>();
+ }
+ grpc_auth_property_iterator iter =
+ grpc_auth_context_find_properties_by_name(ctx_.get(), name.c_str());
+ const grpc_auth_property* property = nullptr;
+ std::vector<grpc::string_ref> values;
+ while ((property = grpc_auth_property_iterator_next(&iter))) {
+ values.push_back(grpc::string_ref(property->value, property->value_length));
+ }
+ return values;
+}
+
+AuthPropertyIterator SecureAuthContext::begin() const {
+ if (ctx_ != nullptr) {
+ grpc_auth_property_iterator iter =
+ grpc_auth_context_property_iterator(ctx_.get());
+ const grpc_auth_property* property =
+ grpc_auth_property_iterator_next(&iter);
+ return AuthPropertyIterator(property, &iter);
+ } else {
+ return end();
+ }
+}
+
+AuthPropertyIterator SecureAuthContext::end() const {
+ return AuthPropertyIterator();
+}
+
+void SecureAuthContext::AddProperty(const TString& key,
+ const grpc::string_ref& value) {
+ if (ctx_ == nullptr) return;
+ grpc_auth_context_add_property(ctx_.get(), key.c_str(), value.data(),
+ value.size());
+}
+
+bool SecureAuthContext::SetPeerIdentityPropertyName(const TString& name) {
+ if (ctx_ == nullptr) return false;
+ return grpc_auth_context_set_peer_identity_property_name(ctx_.get(),
+ name.c_str()) != 0;
+}
+
+bool SecureAuthContext::IsPeerAuthenticated() const {
+ if (ctx_ == nullptr) return false;
+ return grpc_auth_context_peer_is_authenticated(ctx_.get()) != 0;
+}
+
+} // namespace grpc
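
Usage sketch (not part of the diff): server handlers reach this implementation through ServerContext::auth_context(); LogPeerIdentity is a hypothetical helper.

    #include <grpcpp/grpcpp.h>

    void LogPeerIdentity(grpc::ServerContext* ctx) {
      std::shared_ptr<const grpc::AuthContext> auth = ctx->auth_context();
      if (auth == nullptr || !auth->IsPeerAuthenticated()) return;
      for (const grpc::string_ref& id : auth->GetPeerIdentity()) {
        // string_ref is not NUL-terminated; copy (data, size) explicitly.
        TString identity(id.data(), id.size());
        // ... log or authorize based on 'identity'
      }
    }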
diff --git a/contrib/libs/grpc/src/cpp/common/secure_auth_context.h b/contrib/libs/grpc/src/cpp/common/secure_auth_context.h
new file mode 100644
index 0000000000..51013efac7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/secure_auth_context.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_COMMON_SECURE_AUTH_CONTEXT_H
+#define GRPC_INTERNAL_CPP_COMMON_SECURE_AUTH_CONTEXT_H
+
+#include <grpcpp/security/auth_context.h>
+
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/security/context/security_context.h"
+
+namespace grpc {
+
+class SecureAuthContext final : public AuthContext {
+ public:
+ explicit SecureAuthContext(grpc_auth_context* ctx)
+ : ctx_(ctx != nullptr ? ctx->Ref() : nullptr) {}
+
+ ~SecureAuthContext() override = default;
+
+ bool IsPeerAuthenticated() const override;
+
+ std::vector<grpc::string_ref> GetPeerIdentity() const override;
+
+ TString GetPeerIdentityPropertyName() const override;
+
+ std::vector<grpc::string_ref> FindPropertyValues(
+ const TString& name) const override;
+
+ AuthPropertyIterator begin() const override;
+
+ AuthPropertyIterator end() const override;
+
+ void AddProperty(const TString& key,
+ const grpc::string_ref& value) override;
+
+ virtual bool SetPeerIdentityPropertyName(const TString& name) override;
+
+ private:
+ grpc_core::RefCountedPtr<grpc_auth_context> ctx_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_COMMON_SECURE_AUTH_CONTEXT_H
diff --git a/contrib/libs/grpc/src/cpp/common/secure_channel_arguments.cc b/contrib/libs/grpc/src/cpp/common/secure_channel_arguments.cc
new file mode 100644
index 0000000000..844bc627ab
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/secure_channel_arguments.cc
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/support/channel_arguments.h>
+
+#include <grpc/grpc_security.h>
+#include "src/core/lib/channel/channel_args.h"
+
+namespace grpc {
+
+void ChannelArguments::SetSslTargetNameOverride(const TString& name) {
+ SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, name);
+}
+
+TString ChannelArguments::GetSslTargetNameOverride() const {
+ for (unsigned int i = 0; i < args_.size(); i++) {
+ if (TString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == args_[i].key) {
+ return args_[i].value.string;
+ }
+ }
+ return "";
+}
+
+} // namespace grpc
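
Usage sketch (not part of the diff): the override is mostly used in tests where the dialed address does not match the certificate's subject name; the host name and helper below are placeholders.

    #include <grpcpp/grpcpp.h>

    std::shared_ptr<grpc::Channel> MakeTestChannel(
        const TString& address,
        const std::shared_ptr<grpc::ChannelCredentials>& creds) {
      grpc::ChannelArguments args;
      args.SetSslTargetNameOverride("foo.test.example.com");
      return grpc::CreateCustomChannel(address, creds, args);
    }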
diff --git a/contrib/libs/grpc/src/cpp/common/secure_create_auth_context.cc b/contrib/libs/grpc/src/cpp/common/secure_create_auth_context.cc
new file mode 100644
index 0000000000..908c46629e
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/secure_create_auth_context.cc
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpc/grpc_security.h>
+#include <grpcpp/security/auth_context.h>
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/cpp/common/secure_auth_context.h"
+
+namespace grpc {
+
+std::shared_ptr<const AuthContext> CreateAuthContext(grpc_call* call) {
+ if (call == nullptr) {
+ return std::shared_ptr<const AuthContext>();
+ }
+ grpc_core::RefCountedPtr<grpc_auth_context> ctx(grpc_call_auth_context(call));
+ return std::make_shared<SecureAuthContext>(ctx.get());
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/tls_credentials_options.cc b/contrib/libs/grpc/src/cpp/common/tls_credentials_options.cc
new file mode 100644
index 0000000000..7e435ac1de
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/tls_credentials_options.cc
@@ -0,0 +1,343 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpcpp/security/tls_credentials_options.h>
+
+#include "y_absl/container/inlined_vector.h"
+#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
+#include "src/cpp/common/tls_credentials_options_util.h"
+
+namespace grpc {
+namespace experimental {
+
+/** TLS key materials config API implementation **/
+void TlsKeyMaterialsConfig::set_pem_root_certs(
+ const TString& pem_root_certs) {
+ pem_root_certs_ = pem_root_certs;
+}
+
+void TlsKeyMaterialsConfig::add_pem_key_cert_pair(
+ const PemKeyCertPair& pem_key_cert_pair) {
+ pem_key_cert_pair_list_.push_back(pem_key_cert_pair);
+}
+
+void TlsKeyMaterialsConfig::set_key_materials(
+ const TString& pem_root_certs,
+ const std::vector<PemKeyCertPair>& pem_key_cert_pair_list) {
+ pem_key_cert_pair_list_ = pem_key_cert_pair_list;
+ pem_root_certs_ = pem_root_certs;
+}
+
+/** TLS credential reload arg API implementation **/
+TlsCredentialReloadArg::TlsCredentialReloadArg(
+ grpc_tls_credential_reload_arg* arg)
+ : c_arg_(arg) {
+ if (c_arg_ != nullptr && c_arg_->context != nullptr) {
+ gpr_log(GPR_ERROR, "c_arg context has already been set");
+ }
+ c_arg_->context = static_cast<void*>(this);
+ c_arg_->destroy_context = &TlsCredentialReloadArgDestroyContext;
+}
+
+TlsCredentialReloadArg::~TlsCredentialReloadArg() {}
+
+void* TlsCredentialReloadArg::cb_user_data() const {
+ return c_arg_->cb_user_data;
+}
+bool TlsCredentialReloadArg::is_pem_key_cert_pair_list_empty() const {
+ return c_arg_->key_materials_config->pem_key_cert_pair_list().empty();
+}
+
+grpc_ssl_certificate_config_reload_status TlsCredentialReloadArg::status()
+ const {
+ return c_arg_->status;
+}
+
+TString TlsCredentialReloadArg::error_details() const {
+ return c_arg_->error_details->error_details();
+}
+
+void TlsCredentialReloadArg::set_cb_user_data(void* cb_user_data) {
+ c_arg_->cb_user_data = cb_user_data;
+}
+
+void TlsCredentialReloadArg::set_pem_root_certs(
+ const TString& pem_root_certs) {
+ ::grpc_core::UniquePtr<char> c_pem_root_certs(
+ gpr_strdup(pem_root_certs.c_str()));
+ c_arg_->key_materials_config->set_pem_root_certs(std::move(c_pem_root_certs));
+}
+
+namespace {
+
+::grpc_core::PemKeyCertPair ConvertToCorePemKeyCertPair(
+ const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair) {
+ grpc_ssl_pem_key_cert_pair* ssl_pair =
+ (grpc_ssl_pem_key_cert_pair*)gpr_malloc(
+ sizeof(grpc_ssl_pem_key_cert_pair));
+ ssl_pair->private_key = gpr_strdup(pem_key_cert_pair.private_key.c_str());
+ ssl_pair->cert_chain = gpr_strdup(pem_key_cert_pair.cert_chain.c_str());
+ return ::grpc_core::PemKeyCertPair(ssl_pair);
+}
+
+} // namespace
+
+void TlsCredentialReloadArg::add_pem_key_cert_pair(
+ const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair) {
+ c_arg_->key_materials_config->add_pem_key_cert_pair(
+ ConvertToCorePemKeyCertPair(pem_key_cert_pair));
+}
+
+void TlsCredentialReloadArg::set_key_materials(
+ const TString& pem_root_certs,
+ std::vector<TlsKeyMaterialsConfig::PemKeyCertPair> pem_key_cert_pair_list) {
+ /** Initialize the |key_materials_config| field of |c_arg_|, if it has not
+ * already been done. **/
+ if (c_arg_->key_materials_config == nullptr) {
+ c_arg_->key_materials_config = grpc_tls_key_materials_config_create();
+ }
+ /** Convert |pem_key_cert_pair_list| to an inlined vector of ssl pairs. **/
+ ::y_absl::InlinedVector<::grpc_core::PemKeyCertPair, 1>
+ c_pem_key_cert_pair_list;
+ for (const auto& key_cert_pair : pem_key_cert_pair_list) {
+ c_pem_key_cert_pair_list.emplace_back(
+ ConvertToCorePemKeyCertPair(key_cert_pair));
+ }
+ /** Populate the key materials config field of |c_arg_|. **/
+ c_arg_->key_materials_config->set_key_materials(pem_root_certs.c_str(),
+ c_pem_key_cert_pair_list);
+}
+
+void TlsCredentialReloadArg::set_key_materials_config(
+ const std::shared_ptr<TlsKeyMaterialsConfig>& key_materials_config) {
+ if (key_materials_config == nullptr) {
+ c_arg_->key_materials_config = nullptr;
+ return;
+ }
+ ::y_absl::InlinedVector<::grpc_core::PemKeyCertPair, 1>
+ c_pem_key_cert_pair_list;
+ for (const auto& key_cert_pair :
+ key_materials_config->pem_key_cert_pair_list()) {
+ grpc_ssl_pem_key_cert_pair* ssl_pair =
+ (grpc_ssl_pem_key_cert_pair*)gpr_malloc(
+ sizeof(grpc_ssl_pem_key_cert_pair));
+ ssl_pair->private_key = gpr_strdup(key_cert_pair.private_key.c_str());
+ ssl_pair->cert_chain = gpr_strdup(key_cert_pair.cert_chain.c_str());
+ ::grpc_core::PemKeyCertPair c_pem_key_cert_pair =
+ ::grpc_core::PemKeyCertPair(ssl_pair);
+ c_pem_key_cert_pair_list.emplace_back(std::move(c_pem_key_cert_pair));
+ }
+ ::grpc_core::UniquePtr<char> c_pem_root_certs(
+ gpr_strdup(key_materials_config->pem_root_certs().c_str()));
+ if (c_arg_->key_materials_config == nullptr) {
+ c_arg_->key_materials_config = grpc_tls_key_materials_config_create();
+ }
+ c_arg_->key_materials_config->set_key_materials(
+ key_materials_config->pem_root_certs().c_str(), c_pem_key_cert_pair_list);
+ c_arg_->key_materials_config->set_version(key_materials_config->version());
+}
+
+void TlsCredentialReloadArg::set_status(
+ grpc_ssl_certificate_config_reload_status status) {
+ c_arg_->status = status;
+}
+
+void TlsCredentialReloadArg::set_error_details(
+ const TString& error_details) {
+ c_arg_->error_details->set_error_details(error_details.c_str());
+}
+
+void TlsCredentialReloadArg::OnCredentialReloadDoneCallback() {
+ if (c_arg_->cb == nullptr) {
+ gpr_log(GPR_ERROR, "credential reload arg callback API is nullptr");
+ return;
+ }
+ c_arg_->cb(c_arg_);
+}
+
+/** gRPC TLS credential reload config API implementation **/
+TlsCredentialReloadConfig::TlsCredentialReloadConfig(
+ std::shared_ptr<TlsCredentialReloadInterface> credential_reload_interface)
+ : credential_reload_interface_(std::move(credential_reload_interface)) {
+ c_config_ = grpc_tls_credential_reload_config_create(
+ nullptr, &TlsCredentialReloadConfigCSchedule,
+ &TlsCredentialReloadConfigCCancel, nullptr);
+ c_config_->set_context(static_cast<void*>(this));
+}
+
+TlsCredentialReloadConfig::~TlsCredentialReloadConfig() {}
+
+/** gRPC TLS server authorization check arg API implementation **/
+TlsServerAuthorizationCheckArg::TlsServerAuthorizationCheckArg(
+ grpc_tls_server_authorization_check_arg* arg)
+ : c_arg_(arg) {
+ if (c_arg_ != nullptr && c_arg_->context != nullptr) {
+ gpr_log(GPR_ERROR, "c_arg context has already been set");
+ }
+ c_arg_->context = static_cast<void*>(this);
+ c_arg_->destroy_context = &TlsServerAuthorizationCheckArgDestroyContext;
+}
+
+TlsServerAuthorizationCheckArg::~TlsServerAuthorizationCheckArg() {}
+
+void* TlsServerAuthorizationCheckArg::cb_user_data() const {
+ return c_arg_->cb_user_data;
+}
+
+int TlsServerAuthorizationCheckArg::success() const { return c_arg_->success; }
+
+TString TlsServerAuthorizationCheckArg::target_name() const {
+ TString cpp_target_name(c_arg_->target_name);
+ return cpp_target_name;
+}
+
+TString TlsServerAuthorizationCheckArg::peer_cert() const {
+ TString cpp_peer_cert(c_arg_->peer_cert);
+ return cpp_peer_cert;
+}
+
+TString TlsServerAuthorizationCheckArg::peer_cert_full_chain() const {
+ TString cpp_peer_cert_full_chain(c_arg_->peer_cert_full_chain);
+ return cpp_peer_cert_full_chain;
+}
+
+grpc_status_code TlsServerAuthorizationCheckArg::status() const {
+ return c_arg_->status;
+}
+
+TString TlsServerAuthorizationCheckArg::error_details() const {
+ return c_arg_->error_details->error_details();
+}
+
+void TlsServerAuthorizationCheckArg::set_cb_user_data(void* cb_user_data) {
+ c_arg_->cb_user_data = cb_user_data;
+}
+
+void TlsServerAuthorizationCheckArg::set_success(int success) {
+ c_arg_->success = success;
+}
+
+void TlsServerAuthorizationCheckArg::set_target_name(
+ const TString& target_name) {
+ c_arg_->target_name = gpr_strdup(target_name.c_str());
+}
+
+void TlsServerAuthorizationCheckArg::set_peer_cert(
+ const TString& peer_cert) {
+ c_arg_->peer_cert = gpr_strdup(peer_cert.c_str());
+}
+
+void TlsServerAuthorizationCheckArg::set_peer_cert_full_chain(
+ const TString& peer_cert_full_chain) {
+ c_arg_->peer_cert_full_chain = gpr_strdup(peer_cert_full_chain.c_str());
+}
+
+void TlsServerAuthorizationCheckArg::set_status(grpc_status_code status) {
+ c_arg_->status = status;
+}
+
+void TlsServerAuthorizationCheckArg::set_error_details(
+ const TString& error_details) {
+ c_arg_->error_details->set_error_details(error_details.c_str());
+}
+
+void TlsServerAuthorizationCheckArg::OnServerAuthorizationCheckDoneCallback() {
+ if (c_arg_->cb == nullptr) {
+ gpr_log(GPR_ERROR, "server authorizaton check arg callback API is nullptr");
+ return;
+ }
+ c_arg_->cb(c_arg_);
+}
+
+/** gRPC TLS server authorization check config API implementation. **/
+TlsServerAuthorizationCheckConfig::TlsServerAuthorizationCheckConfig(
+ std::shared_ptr<TlsServerAuthorizationCheckInterface>
+ server_authorization_check_interface)
+ : server_authorization_check_interface_(
+ std::move(server_authorization_check_interface)) {
+ c_config_ = grpc_tls_server_authorization_check_config_create(
+ nullptr, &TlsServerAuthorizationCheckConfigCSchedule,
+ &TlsServerAuthorizationCheckConfigCCancel, nullptr);
+ c_config_->set_context(static_cast<void*>(this));
+}
+
+TlsServerAuthorizationCheckConfig::~TlsServerAuthorizationCheckConfig() {}
+
+/** gRPC TLS credential options API implementation **/
+TlsCredentialsOptions::TlsCredentialsOptions(
+ grpc_tls_server_verification_option server_verification_option,
+ std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
+ std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config,
+ std::shared_ptr<TlsServerAuthorizationCheckConfig>
+ server_authorization_check_config)
+ : TlsCredentialsOptions(
+ GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, server_verification_option,
+ std::move(key_materials_config), std::move(credential_reload_config),
+ std::move(server_authorization_check_config)) {}
+
+TlsCredentialsOptions::TlsCredentialsOptions(
+ grpc_ssl_client_certificate_request_type cert_request_type,
+ std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
+ std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config)
+ : TlsCredentialsOptions(cert_request_type, GRPC_TLS_SERVER_VERIFICATION,
+ std::move(key_materials_config),
+ std::move(credential_reload_config), nullptr) {}
+
+TlsCredentialsOptions::TlsCredentialsOptions(
+ grpc_ssl_client_certificate_request_type cert_request_type,
+ grpc_tls_server_verification_option server_verification_option,
+ std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
+ std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config,
+ std::shared_ptr<TlsServerAuthorizationCheckConfig>
+ server_authorization_check_config)
+ : cert_request_type_(cert_request_type),
+ server_verification_option_(server_verification_option),
+ key_materials_config_(std::move(key_materials_config)),
+ credential_reload_config_(std::move(credential_reload_config)),
+ server_authorization_check_config_(
+ std::move(server_authorization_check_config)) {
+ c_credentials_options_ = grpc_tls_credentials_options_create();
+ grpc_tls_credentials_options_set_cert_request_type(c_credentials_options_,
+ cert_request_type_);
+ if (key_materials_config_ != nullptr) {
+ grpc_tls_credentials_options_set_key_materials_config(
+ c_credentials_options_,
+ ConvertToCKeyMaterialsConfig(key_materials_config_));
+ }
+ if (credential_reload_config_ != nullptr) {
+ grpc_tls_credentials_options_set_credential_reload_config(
+ c_credentials_options_, credential_reload_config_->c_config());
+ }
+ if (server_authorization_check_config_ != nullptr) {
+ grpc_tls_credentials_options_set_server_authorization_check_config(
+ c_credentials_options_, server_authorization_check_config_->c_config());
+ }
+ grpc_tls_credentials_options_set_server_verification_option(
+ c_credentials_options_, server_verification_option);
+}
+
+/** Whenever a TlsCredentialsOptions instance is created, the caller takes
+ * ownership of the c_credentials_options_ pointer (see e.g. the implementation
+ * of the TlsCredentials API in secure_credentials.cc). For this reason, the
+ * TlsCredentialsOptions destructor is not responsible for freeing
+ * c_credentials_options_. **/
+TlsCredentialsOptions::~TlsCredentialsOptions() {}
+
+} // namespace experimental
+} // namespace grpc
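
Usage sketch (not part of the diff): wiring a custom server authorization check into these experimental options. ApproveAllCheck is an example name, skipping hostname verification is for illustration only, and TlsCredentials() (implemented in secure_credentials.cc) is assumed to accept the options; this is the 1.33-era experimental surface, which changed in later releases.

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/security/credentials.h>
    #include <grpcpp/security/tls_credentials_options.h>

    // Accepts every server certificate; a real check would inspect
    // arg->peer_cert() / arg->target_name() before deciding.
    class ApproveAllCheck
        : public grpc::experimental::TlsServerAuthorizationCheckInterface {
     public:
      int Schedule(
          grpc::experimental::TlsServerAuthorizationCheckArg* arg) override {
        arg->set_success(1);
        arg->set_status(GRPC_STATUS_OK);
        return 0;  // 0 => the check completed synchronously
      }
    };

    std::shared_ptr<grpc::ChannelCredentials> MakeClientTlsCredentials() {
      auto check_config = std::make_shared<
          grpc::experimental::TlsServerAuthorizationCheckConfig>(
          std::make_shared<ApproveAllCheck>());
      grpc::experimental::TlsCredentialsOptions options(
          GRPC_TLS_SKIP_HOSTNAME_VERIFICATION,
          /*key_materials_config=*/nullptr,
          /*credential_reload_config=*/nullptr, check_config);
      return grpc::experimental::TlsCredentials(options);
    }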
diff --git a/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.cc b/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.cc
new file mode 100644
index 0000000000..ed84003212
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.cc
@@ -0,0 +1,149 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "y_absl/container/inlined_vector.h"
+
+#include <grpcpp/security/tls_credentials_options.h>
+#include "src/cpp/common/tls_credentials_options_util.h"
+
+namespace grpc {
+namespace experimental {
+
+/** Converts the C++ key materials to C key materials; this allocates memory
+ * for the C key materials. Note that the user must free the underlying
+ * pointers to the private-key and cert-chain duplicates; they are not freed
+ * when the grpc_core::UniquePtr<char> member variables of PemKeyCertPair are
+ * unused. Similarly, the user must free the underlying pointer to
+ * c_pem_root_certs. **/
+grpc_tls_key_materials_config* ConvertToCKeyMaterialsConfig(
+ const std::shared_ptr<TlsKeyMaterialsConfig>& config) {
+ if (config == nullptr) {
+ return nullptr;
+ }
+ grpc_tls_key_materials_config* c_config =
+ grpc_tls_key_materials_config_create();
+ ::y_absl::InlinedVector<::grpc_core::PemKeyCertPair, 1>
+ c_pem_key_cert_pair_list;
+ for (const auto& key_cert_pair : config->pem_key_cert_pair_list()) {
+ grpc_ssl_pem_key_cert_pair* ssl_pair =
+ (grpc_ssl_pem_key_cert_pair*)gpr_malloc(
+ sizeof(grpc_ssl_pem_key_cert_pair));
+ ssl_pair->private_key = gpr_strdup(key_cert_pair.private_key.c_str());
+ ssl_pair->cert_chain = gpr_strdup(key_cert_pair.cert_chain.c_str());
+ ::grpc_core::PemKeyCertPair c_pem_key_cert_pair =
+ ::grpc_core::PemKeyCertPair(ssl_pair);
+ c_pem_key_cert_pair_list.push_back(::std::move(c_pem_key_cert_pair));
+ }
+ c_config->set_key_materials(config->pem_root_certs().c_str(),
+ c_pem_key_cert_pair_list);
+ c_config->set_version(config->version());
+ return c_config;
+}
+
+/** The C schedule and cancel functions for the credential reload config.
+ * They populate a C credential reload arg with the result of a C++ credential
+ * reload schedule/cancel API. **/
+int TlsCredentialReloadConfigCSchedule(void* /*config_user_data*/,
+ grpc_tls_credential_reload_arg* arg) {
+ if (arg == nullptr || arg->config == nullptr ||
+ arg->config->context() == nullptr) {
+ gpr_log(GPR_ERROR, "credential reload arg was not properly initialized");
+ return 1;
+ }
+ TlsCredentialReloadConfig* cpp_config =
+ static_cast<TlsCredentialReloadConfig*>(arg->config->context());
+ TlsCredentialReloadArg* cpp_arg = new TlsCredentialReloadArg(arg);
+ int schedule_result = cpp_config->Schedule(cpp_arg);
+ return schedule_result;
+}
+
+void TlsCredentialReloadConfigCCancel(void* /*config_user_data*/,
+ grpc_tls_credential_reload_arg* arg) {
+ if (arg == nullptr || arg->config == nullptr ||
+ arg->config->context() == nullptr) {
+ gpr_log(GPR_ERROR, "credential reload arg was not properly initialized");
+ return;
+ }
+ if (arg->context == nullptr) {
+ gpr_log(GPR_ERROR, "credential reload arg schedule has already completed");
+ return;
+ }
+ TlsCredentialReloadConfig* cpp_config =
+ static_cast<TlsCredentialReloadConfig*>(arg->config->context());
+ TlsCredentialReloadArg* cpp_arg =
+ static_cast<TlsCredentialReloadArg*>(arg->context);
+ cpp_config->Cancel(cpp_arg);
+}
+
+void TlsCredentialReloadArgDestroyContext(void* context) {
+ if (context != nullptr) {
+ TlsCredentialReloadArg* cpp_arg =
+ static_cast<TlsCredentialReloadArg*>(context);
+ delete cpp_arg;
+ }
+}
+
+/** The C schedule and cancel functions for the server authorization check
+ * config. They populate a C server authorization check arg with the result
+ * of a C++ server authorization check schedule/cancel API. **/
+int TlsServerAuthorizationCheckConfigCSchedule(
+ void* /*config_user_data*/, grpc_tls_server_authorization_check_arg* arg) {
+ if (arg == nullptr || arg->config == nullptr ||
+ arg->config->context() == nullptr) {
+ gpr_log(GPR_ERROR,
+ "server authorization check arg was not properly initialized");
+ return 1;
+ }
+ TlsServerAuthorizationCheckConfig* cpp_config =
+ static_cast<TlsServerAuthorizationCheckConfig*>(arg->config->context());
+ TlsServerAuthorizationCheckArg* cpp_arg =
+ new TlsServerAuthorizationCheckArg(arg);
+ int schedule_result = cpp_config->Schedule(cpp_arg);
+ return schedule_result;
+}
+
+void TlsServerAuthorizationCheckConfigCCancel(
+ void* /*config_user_data*/, grpc_tls_server_authorization_check_arg* arg) {
+ if (arg == nullptr || arg->config == nullptr ||
+ arg->config->context() == nullptr) {
+ gpr_log(GPR_ERROR,
+ "server authorization check arg was not properly initialized");
+ return;
+ }
+ if (arg->context == nullptr) {
+ gpr_log(GPR_ERROR,
+ "server authorization check arg schedule has already completed");
+ return;
+ }
+ TlsServerAuthorizationCheckConfig* cpp_config =
+ static_cast<TlsServerAuthorizationCheckConfig*>(arg->config->context());
+ TlsServerAuthorizationCheckArg* cpp_arg =
+ static_cast<TlsServerAuthorizationCheckArg*>(arg->context);
+ cpp_config->Cancel(cpp_arg);
+}
+
+void TlsServerAuthorizationCheckArgDestroyContext(void* context) {
+ if (context != nullptr) {
+ TlsServerAuthorizationCheckArg* cpp_arg =
+ static_cast<TlsServerAuthorizationCheckArg*>(context);
+ delete cpp_arg;
+ }
+}
+
+} // namespace experimental
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.h b/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.h
new file mode 100644
index 0000000000..4ee04d15d7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/tls_credentials_options_util.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_COMMON_TLS_CREDENTIALS_OPTIONS_UTIL_H
+#define GRPC_INTERNAL_CPP_COMMON_TLS_CREDENTIALS_OPTIONS_UTIL_H
+
+#include <grpc/grpc_security.h>
+#include <grpcpp/security/tls_credentials_options.h>
+
+#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
+
+namespace grpc {
+namespace experimental {
+
+/** The following function is exposed for testing purposes. **/
+grpc_tls_key_materials_config* ConvertToCKeyMaterialsConfig(
+ const std::shared_ptr<TlsKeyMaterialsConfig>& config);
+
+/** The following 4 functions convert the user-provided schedule or cancel
+ * functions into C style schedule or cancel functions. These are internal
+ * functions, not meant to be accessed by the user. **/
+int TlsCredentialReloadConfigCSchedule(void* config_user_data,
+ grpc_tls_credential_reload_arg* arg);
+
+void TlsCredentialReloadConfigCCancel(void* config_user_data,
+ grpc_tls_credential_reload_arg* arg);
+
+int TlsServerAuthorizationCheckConfigCSchedule(
+ void* config_user_data, grpc_tls_server_authorization_check_arg* arg);
+
+void TlsServerAuthorizationCheckConfigCCancel(
+ void* config_user_data, grpc_tls_server_authorization_check_arg* arg);
+
+/** The following 2 functions cleanup data created in the above C schedule
+ * functions. **/
+void TlsCredentialReloadArgDestroyContext(void* context);
+
+void TlsServerAuthorizationCheckArgDestroyContext(void* context);
+
+} // namespace experimental
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_COMMON_TLS_CREDENTIALS_OPTIONS_UTIL_H
diff --git a/contrib/libs/grpc/src/cpp/common/validate_service_config.cc b/contrib/libs/grpc/src/cpp/common/validate_service_config.cc
new file mode 100644
index 0000000000..f63cfbc68c
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/validate_service_config.cc
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpcpp/support/validate_service_config.h>
+
+#include "src/core/ext/filters/client_channel/service_config.h"
+
+namespace grpc {
+namespace experimental {
+TString ValidateServiceConfigJSON(const TString& service_config_json) {
+ grpc_init();
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_core::ServiceConfig::Create(/*args=*/nullptr,
+ service_config_json.c_str(), &error);
+ TString return_value;
+ if (error != GRPC_ERROR_NONE) {
+ return_value = grpc_error_string(error);
+ GRPC_ERROR_UNREF(error);
+ }
+ grpc_shutdown();
+ return return_value;
+}
+} // namespace experimental
+} // namespace grpc
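
Usage sketch (not part of the diff): the validator returns an empty string for a well-formed service config and a human-readable error otherwise; IsServiceConfigValid is a hypothetical wrapper.

    #include <grpcpp/support/validate_service_config.h>

    bool IsServiceConfigValid(const TString& service_config_json) {
      TString error =
          grpc::experimental::ValidateServiceConfigJSON(service_config_json);
      return error.empty();  // non-empty => description of the parse error
    }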
diff --git a/contrib/libs/grpc/src/cpp/common/version_cc.cc b/contrib/libs/grpc/src/cpp/common/version_cc.cc
new file mode 100644
index 0000000000..7f4228346a
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/version_cc.cc
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* This file is autogenerated from:
+ templates/src/core/surface/version.c.template */
+
+#include <grpcpp/grpcpp.h>
+
+namespace grpc {
+TString Version() { return "1.33.2"; }
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/common/ya.make b/contrib/libs/grpc/src/cpp/common/ya.make
new file mode 100644
index 0000000000..f1966dce37
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/common/ya.make
@@ -0,0 +1,41 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(g:cpp-contrib)
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/grpc/grpc
+ contrib/libs/grpc/grpc++
+ contrib/libs/grpc/src/core/lib
+ contrib/libs/grpc/third_party/address_sorting
+ contrib/libs/grpc/third_party/upb
+ contrib/libs/openssl
+)
+
+ADDINCL(
+ GLOBAL contrib/libs/grpc/include
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ contrib/libs/grpc
+ contrib/libs/grpc/src/core/ext/upb-generated
+ contrib/libs/grpc/third_party/upb
+)
+
+NO_COMPILER_WARNINGS()
+
+IF (OS_LINUX OR OS_DARWIN)
+ CFLAGS(
+ -DGRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK=1
+ )
+ENDIF()
+
+SRCS(
+ alts_context.cc
+ alts_util.cc
+)
+
+END()
diff --git a/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.cc b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.cc
new file mode 100644
index 0000000000..1b388210c0
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.cc
@@ -0,0 +1,212 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <unordered_set>
+#include <vector>
+
+#include <grpcpp/grpcpp.h>
+
+#include "src/cpp/ext/proto_server_reflection.h"
+
+using grpc::Status;
+using grpc::StatusCode;
+using grpc::reflection::v1alpha::ErrorResponse;
+using grpc::reflection::v1alpha::ExtensionNumberResponse;
+using grpc::reflection::v1alpha::ExtensionRequest;
+using grpc::reflection::v1alpha::FileDescriptorResponse;
+using grpc::reflection::v1alpha::ListServiceResponse;
+using grpc::reflection::v1alpha::ServerReflectionRequest;
+using grpc::reflection::v1alpha::ServerReflectionResponse;
+using grpc::reflection::v1alpha::ServiceResponse;
+
+namespace grpc {
+
+ProtoServerReflection::ProtoServerReflection()
+ : descriptor_pool_(protobuf::DescriptorPool::generated_pool()) {}
+
+void ProtoServerReflection::SetServiceList(
+ const std::vector<TString>* services) {
+ services_ = services;
+}
+
+Status ProtoServerReflection::ServerReflectionInfo(
+ ServerContext* context,
+ ServerReaderWriter<ServerReflectionResponse, ServerReflectionRequest>*
+ stream) {
+ ServerReflectionRequest request;
+ ServerReflectionResponse response;
+ Status status;
+ while (stream->Read(&request)) {
+ switch (request.message_request_case()) {
+ case ServerReflectionRequest::MessageRequestCase::kFileByFilename:
+ status = GetFileByName(context, request.file_by_filename(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::kFileContainingSymbol:
+ status = GetFileContainingSymbol(
+ context, request.file_containing_symbol(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::
+ kFileContainingExtension:
+ status = GetFileContainingExtension(
+ context, &request.file_containing_extension(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::
+ kAllExtensionNumbersOfType:
+ status = GetAllExtensionNumbers(
+ context, request.all_extension_numbers_of_type(),
+ response.mutable_all_extension_numbers_response());
+ break;
+ case ServerReflectionRequest::MessageRequestCase::kListServices:
+ status =
+ ListService(context, response.mutable_list_services_response());
+ break;
+ default:
+ status = Status(StatusCode::UNIMPLEMENTED, "");
+ }
+
+ if (!status.ok()) {
+ FillErrorResponse(status, response.mutable_error_response());
+ }
+ response.set_valid_host(request.host());
+ response.set_allocated_original_request(
+ new ServerReflectionRequest(request));
+ stream->Write(response);
+ }
+
+ return Status::OK;
+}
+
+void ProtoServerReflection::FillErrorResponse(const Status& status,
+ ErrorResponse* error_response) {
+ error_response->set_error_code(status.error_code());
+ error_response->set_error_message(TProtoStringType(status.error_message()));
+}
+
+Status ProtoServerReflection::ListService(ServerContext* /*context*/,
+ ListServiceResponse* response) {
+ if (services_ == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Services not found.");
+ }
+ for (const auto& value : *services_) {
+ ServiceResponse* service_response = response->add_service();
+ service_response->set_name(TProtoStringType(value));
+ }
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileByName(
+ ServerContext* /*context*/, const TString& filename,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::FileDescriptor* file_desc =
+ descriptor_pool_->FindFileByName(TProtoStringType(filename));
+ if (file_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "File not found.");
+ }
+ std::unordered_set<TString> seen_files;
+ FillFileDescriptorResponse(file_desc, response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileContainingSymbol(
+ ServerContext* /*context*/, const TString& symbol,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::FileDescriptor* file_desc =
+ descriptor_pool_->FindFileContainingSymbol(TProtoStringType(symbol));
+ if (file_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Symbol not found.");
+ }
+ std::unordered_set<TString> seen_files;
+ FillFileDescriptorResponse(file_desc, response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileContainingExtension(
+ ServerContext* /*context*/, const ExtensionRequest* request,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::Descriptor* desc =
+ descriptor_pool_->FindMessageTypeByName(request->containing_type());
+ if (desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Type not found.");
+ }
+
+ const protobuf::FieldDescriptor* field_desc =
+ descriptor_pool_->FindExtensionByNumber(desc,
+ request->extension_number());
+ if (field_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Extension not found.");
+ }
+ std::unordered_set<TString> seen_files;
+ FillFileDescriptorResponse(field_desc->file(), response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetAllExtensionNumbers(
+ ServerContext* /*context*/, const TString& type,
+ ExtensionNumberResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::Descriptor* desc =
+ descriptor_pool_->FindMessageTypeByName(TProtoStringType(type));
+ if (desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Type not found.");
+ }
+
+ std::vector<const protobuf::FieldDescriptor*> extensions;
+ descriptor_pool_->FindAllExtensions(desc, &extensions);
+ for (const auto& value : extensions) {
+ response->add_extension_number(value->number());
+ }
+ response->set_base_type_name(TProtoStringType(type));
+ return Status::OK;
+}
+
+void ProtoServerReflection::FillFileDescriptorResponse(
+ const protobuf::FileDescriptor* file_desc,
+ ServerReflectionResponse* response,
+ std::unordered_set<TString>* seen_files) {
+ if (seen_files->find(file_desc->name()) != seen_files->end()) {
+ return;
+ }
+ seen_files->insert(file_desc->name());
+
+ protobuf::FileDescriptorProto file_desc_proto;
+ TProtoStringType data;
+ file_desc->CopyTo(&file_desc_proto);
+ file_desc_proto.SerializeToString(&data);
+ response->mutable_file_descriptor_response()->add_file_descriptor_proto(data);
+
+ for (int i = 0; i < file_desc->dependency_count(); ++i) {
+ FillFileDescriptorResponse(file_desc->dependency(i), response, seen_files);
+ }
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.h b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.h
new file mode 100644
index 0000000000..2d17eed95a
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
+#define GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
+
+#include <unordered_set>
+#include <vector>
+
+#include <grpcpp/grpcpp.h>
+#include "src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h"
+
+namespace grpc {
+
+class ProtoServerReflection final
+ : public reflection::v1alpha::ServerReflection::Service {
+ public:
+ ProtoServerReflection();
+
+ // Add the full names of registered services
+ void SetServiceList(const std::vector<TString>* services);
+
+ // implementation of ServerReflectionInfo(stream ServerReflectionRequest) rpc
+ // in ServerReflection service
+ Status ServerReflectionInfo(
+ ServerContext* context,
+ ServerReaderWriter<reflection::v1alpha::ServerReflectionResponse,
+ reflection::v1alpha::ServerReflectionRequest>* stream)
+ override;
+
+ private:
+ Status ListService(ServerContext* context,
+ reflection::v1alpha::ListServiceResponse* response);
+
+ Status GetFileByName(ServerContext* context, const TString& file_name,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetFileContainingSymbol(
+ ServerContext* context, const TString& symbol,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetFileContainingExtension(
+ ServerContext* context,
+ const reflection::v1alpha::ExtensionRequest* request,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetAllExtensionNumbers(
+ ServerContext* context, const TString& type,
+ reflection::v1alpha::ExtensionNumberResponse* response);
+
+ void FillFileDescriptorResponse(
+ const protobuf::FileDescriptor* file_desc,
+ reflection::v1alpha::ServerReflectionResponse* response,
+ std::unordered_set<TString>* seen_files);
+
+ void FillErrorResponse(const Status& status,
+ reflection::v1alpha::ErrorResponse* error_response);
+
+ const protobuf::DescriptorPool* descriptor_pool_;
+ const std::vector<string>* services_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
diff --git a/contrib/libs/grpc/src/cpp/ext/proto_server_reflection_plugin.cc b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection_plugin.cc
new file mode 100644
index 0000000000..007193d7f7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/ext/proto_server_reflection_plugin.cc
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/ext/proto_server_reflection_plugin.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/impl/server_initializer.h>
+#include <grpcpp/server.h>
+
+#include "src/cpp/ext/proto_server_reflection.h"
+
+namespace grpc {
+namespace reflection {
+
+ProtoServerReflectionPlugin::ProtoServerReflectionPlugin()
+ : reflection_service_(new grpc::ProtoServerReflection()) {}
+
+TString ProtoServerReflectionPlugin::name() {
+ return "proto_server_reflection";
+}
+
+void ProtoServerReflectionPlugin::InitServer(grpc::ServerInitializer* si) {
+ si->RegisterService(reflection_service_);
+}
+
+void ProtoServerReflectionPlugin::Finish(grpc::ServerInitializer* si) {
+ reflection_service_->SetServiceList(si->GetServiceList());
+}
+
+void ProtoServerReflectionPlugin::ChangeArguments(const TString& /*name*/,
+ void* /*value*/) {}
+
+bool ProtoServerReflectionPlugin::has_sync_methods() const {
+ if (reflection_service_) {
+ return reflection_service_->has_synchronous_methods();
+ }
+ return false;
+}
+
+bool ProtoServerReflectionPlugin::has_async_methods() const {
+ if (reflection_service_) {
+ return reflection_service_->has_async_methods();
+ }
+ return false;
+}
+
+static std::unique_ptr< ::grpc::ServerBuilderPlugin> CreateProtoReflection() {
+ return std::unique_ptr< ::grpc::ServerBuilderPlugin>(
+ new ProtoServerReflectionPlugin());
+}
+
+void InitProtoReflectionServerBuilderPlugin() {
+ static struct Initialize {
+ Initialize() {
+ ::grpc::ServerBuilder::InternalAddPluginFactory(&CreateProtoReflection);
+ }
+ } initializer;
+}
+
+// Force InitProtoReflectionServerBuilderPlugin() to be called at static
+// initialization time.
+struct StaticProtoReflectionPluginInitializer {
+ StaticProtoReflectionPluginInitializer() {
+ InitProtoReflectionServerBuilderPlugin();
+ }
+} static_proto_reflection_plugin_initializer;
+
+} // namespace reflection
+} // namespace grpc
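
Usage sketch (not part of the diff): because of the static initializer above, linking this translation unit in is normally enough, but calling the init function explicitly keeps the dependency visible; the commented-out service registration is a hypothetical placeholder.

    #include <grpcpp/ext/proto_server_reflection_plugin.h>
    #include <grpcpp/grpcpp.h>

    void AddReflectionAndServe(grpc::ServerBuilder* builder) {
      grpc::reflection::InitProtoReflectionServerBuilderPlugin();
      // builder->RegisterService(&my_service);  // hypothetical application service
      std::unique_ptr<grpc::Server> server = builder->BuildAndStart();
      server->Wait();
    }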
diff --git a/contrib/libs/grpc/src/cpp/server/async_generic_service.cc b/contrib/libs/grpc/src/cpp/server/async_generic_service.cc
new file mode 100644
index 0000000000..07697a52d1
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/async_generic_service.cc
@@ -0,0 +1,33 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/generic/async_generic_service.h>
+
+#include <grpcpp/server.h>
+
+namespace grpc {
+
+void AsyncGenericService::RequestCall(
+ GenericServerContext* ctx, GenericServerAsyncReaderWriter* reader_writer,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ server_->RequestAsyncGenericCall(ctx, reader_writer, call_cq, notification_cq,
+ tag);
+}
+
+} // namespace grpc
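
RequestCall() simply forwards to Server::RequestAsyncGenericCall(), so a generic handler is driven from a server completion queue like any other async call. A rough sketch follows, assuming a single polling loop and an illustrative address; real code would keep re-requesting calls and exchange ByteBuffers on the stream.

    #include <memory>

    #include <grpcpp/generic/async_generic_service.h>
    #include <grpcpp/grpcpp.h>

    int main() {
      grpc::AsyncGenericService generic_service;
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      builder.RegisterAsyncGenericService(&generic_service);
      std::unique_ptr<grpc::ServerCompletionQueue> cq = builder.AddCompletionQueue();
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

      grpc::GenericServerContext ctx;
      grpc::GenericServerAsyncReaderWriter stream(&ctx);
      generic_service.RequestCall(&ctx, &stream, cq.get(), cq.get(),
                                  reinterpret_cast<void*>(1));

      void* tag;
      bool ok;
      if (cq->Next(&tag, &ok) && ok) {
        // At this point ctx.method() holds the full method name; raw
        // ByteBuffers can be read from and written to `stream`.
      }
      server->Shutdown();
      cq->Shutdown();
      while (cq->Next(&tag, &ok)) {
      }  // drain the queue before destroying it
      return 0;
    }
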
diff --git a/contrib/libs/grpc/src/cpp/server/channel_argument_option.cc b/contrib/libs/grpc/src/cpp/server/channel_argument_option.cc
new file mode 100644
index 0000000000..9aad932429
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/channel_argument_option.cc
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/channel_argument_option.h>
+
+namespace grpc {
+
+std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
+ const TString& name, const TString& value) {
+ class StringOption final : public ServerBuilderOption {
+ public:
+ StringOption(const TString& name, const TString& value)
+ : name_(name), value_(value) {}
+
+ virtual void UpdateArguments(ChannelArguments* args) override {
+ args->SetString(name_, value_);
+ }
+ virtual void UpdatePlugins(
+ std::vector<std::unique_ptr<ServerBuilderPlugin>>* /*plugins*/)
+ override {}
+
+ private:
+ const TString name_;
+ const TString value_;
+ };
+ return std::unique_ptr<ServerBuilderOption>(new StringOption(name, value));
+}
+
+std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
+ const TString& name, int value) {
+ class IntOption final : public ServerBuilderOption {
+ public:
+ IntOption(const TString& name, int value)
+ : name_(name), value_(value) {}
+
+ virtual void UpdateArguments(ChannelArguments* args) override {
+ args->SetInt(name_, value_);
+ }
+ virtual void UpdatePlugins(
+ std::vector<std::unique_ptr<ServerBuilderPlugin>>* /*plugins*/)
+ override {}
+
+ private:
+ const TString name_;
+ const int value_;
+ };
+ return std::unique_ptr<ServerBuilderOption>(new IntOption(name, value));
+}
+
+} // namespace grpc
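
Both overloads return a ServerBuilderOption, so they plug straight into ServerBuilder::SetOption(). A small sketch: the argument keys are standard gRPC channel-argument macros, and the values are illustrative.

    #include <grpc/grpc.h>
    #include <grpcpp/impl/channel_argument_option.h>
    #include <grpcpp/server_builder.h>

    void ConfigureBuilder(grpc::ServerBuilder* builder) {
      builder->SetOption(
          grpc::MakeChannelArgumentOption(GRPC_ARG_MAX_CONCURRENT_STREAMS, 100));
      builder->SetOption(grpc::MakeChannelArgumentOption(
          GRPC_ARG_PRIMARY_USER_AGENT_STRING, "example-server/0.1"));
    }
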
diff --git a/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.cc b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.cc
new file mode 100644
index 0000000000..6dcf84bf40
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.cc
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/cpp/server/channelz/channelz_service.h"
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+namespace grpc {
+
+namespace {
+
+grpc::protobuf::util::Status ParseJson(const char* json_str,
+ grpc::protobuf::Message* message) {
+ grpc::protobuf::json::JsonParseOptions options;
+ options.case_insensitive_enum_parsing = true;
+ return grpc::protobuf::json::JsonStringToMessage(json_str, message, options);
+}
+
+} // namespace
+
+Status ChannelzService::GetTopChannels(
+ ServerContext* /*unused*/,
+ const channelz::v1::GetTopChannelsRequest* request,
+ channelz::v1::GetTopChannelsResponse* response) {
+ char* json_str = grpc_channelz_get_top_channels(request->start_channel_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::INTERNAL,
+ "grpc_channelz_get_top_channels returned null");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetServers(
+ ServerContext* /*unused*/, const channelz::v1::GetServersRequest* request,
+ channelz::v1::GetServersResponse* response) {
+ char* json_str = grpc_channelz_get_servers(request->start_server_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::INTERNAL,
+ "grpc_channelz_get_servers returned null");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetServer(ServerContext* /*unused*/,
+ const channelz::v1::GetServerRequest* request,
+ channelz::v1::GetServerResponse* response) {
+ char* json_str = grpc_channelz_get_server(request->server_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::INTERNAL,
+ "grpc_channelz_get_server returned null");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetServerSockets(
+ ServerContext* /*unused*/,
+ const channelz::v1::GetServerSocketsRequest* request,
+ channelz::v1::GetServerSocketsResponse* response) {
+ char* json_str = grpc_channelz_get_server_sockets(
+ request->server_id(), request->start_socket_id(), request->max_results());
+ if (json_str == nullptr) {
+ return Status(StatusCode::INTERNAL,
+ "grpc_channelz_get_server_sockets returned null");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetChannel(
+ ServerContext* /*unused*/, const channelz::v1::GetChannelRequest* request,
+ channelz::v1::GetChannelResponse* response) {
+ char* json_str = grpc_channelz_get_channel(request->channel_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "No object found for that ChannelId");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetSubchannel(
+ ServerContext* /*unused*/,
+ const channelz::v1::GetSubchannelRequest* request,
+ channelz::v1::GetSubchannelResponse* response) {
+ char* json_str = grpc_channelz_get_subchannel(request->subchannel_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::NOT_FOUND,
+ "No object found for that SubchannelId");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+Status ChannelzService::GetSocket(ServerContext* /*unused*/,
+ const channelz::v1::GetSocketRequest* request,
+ channelz::v1::GetSocketResponse* response) {
+ char* json_str = grpc_channelz_get_socket(request->socket_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "No object found for that SocketId");
+ }
+ grpc::protobuf::util::Status s = ParseJson(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
+} // namespace grpc
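
Each handler above wraps a grpc_channelz_get_* core call and re-parses its JSON output into the channelz protos, so the service can be queried like any other RPC service. A hedged client-side sketch, assuming the in-tree generated stubs and an illustrative target address:

    #include <memory>

    #include <grpcpp/grpcpp.h>

    #include "src/proto/grpc/channelz/channelz.grpc.pb.h"

    grpc::Status ListTopChannels() {
      auto channel = grpc::CreateChannel("localhost:50051",
                                         grpc::InsecureChannelCredentials());
      std::unique_ptr<grpc::channelz::v1::Channelz::Stub> stub =
          grpc::channelz::v1::Channelz::NewStub(channel);

      grpc::channelz::v1::GetTopChannelsRequest request;
      request.set_start_channel_id(0);
      grpc::channelz::v1::GetTopChannelsResponse response;
      grpc::ClientContext context;
      grpc::Status status = stub->GetTopChannels(&context, request, &response);
      // On success, response.channel() lists the root channels known to the
      // queried process.
      return status;
    }
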
diff --git a/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.h b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.h
new file mode 100644
index 0000000000..b4a66ba1c6
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
+#define GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpcpp/grpcpp.h>
+#include "src/proto/grpc/channelz/channelz.grpc.pb.h"
+
+namespace grpc {
+
+class ChannelzService final : public channelz::v1::Channelz::Service {
+ private:
+ // implementation of GetTopChannels rpc
+ Status GetTopChannels(
+ ServerContext* unused, const channelz::v1::GetTopChannelsRequest* request,
+ channelz::v1::GetTopChannelsResponse* response) override;
+ // implementation of GetServers rpc
+ Status GetServers(ServerContext* unused,
+ const channelz::v1::GetServersRequest* request,
+ channelz::v1::GetServersResponse* response) override;
+ // implementation of GetServer rpc
+ Status GetServer(ServerContext* unused,
+ const channelz::v1::GetServerRequest* request,
+ channelz::v1::GetServerResponse* response) override;
+ // implementation of GetServerSockets rpc
+ Status GetServerSockets(
+ ServerContext* unused,
+ const channelz::v1::GetServerSocketsRequest* request,
+ channelz::v1::GetServerSocketsResponse* response) override;
+ // implementation of GetChannel rpc
+ Status GetChannel(ServerContext* unused,
+ const channelz::v1::GetChannelRequest* request,
+ channelz::v1::GetChannelResponse* response) override;
+ // implementation of GetSubchannel rpc
+ Status GetSubchannel(ServerContext* unused,
+ const channelz::v1::GetSubchannelRequest* request,
+ channelz::v1::GetSubchannelResponse* response) override;
+ // implementation of GetSocket rpc
+ Status GetSocket(ServerContext* unused,
+ const channelz::v1::GetSocketRequest* request,
+ channelz::v1::GetSocketResponse* response) override;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_SERVER_CHANNELZ_SERVICE_H
diff --git a/contrib/libs/grpc/src/cpp/server/channelz/channelz_service_plugin.cc b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service_plugin.cc
new file mode 100644
index 0000000000..ae26a447ab
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/channelz/channelz_service_plugin.cc
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <grpcpp/ext/channelz_service_plugin.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/impl/server_initializer.h>
+#include <grpcpp/server.h>
+
+#include "src/cpp/server/channelz/channelz_service.h"
+
+namespace grpc {
+namespace channelz {
+namespace experimental {
+
+class ChannelzServicePlugin : public ::grpc::ServerBuilderPlugin {
+ public:
+ ChannelzServicePlugin() : channelz_service_(new grpc::ChannelzService()) {}
+
+ TString name() override { return "channelz_service"; }
+
+ void InitServer(grpc::ServerInitializer* si) override {
+ si->RegisterService(channelz_service_);
+ }
+
+ void Finish(grpc::ServerInitializer* /*si*/) override {}
+
+ void ChangeArguments(const TString& /*name*/, void* /*value*/) override {}
+
+ bool has_sync_methods() const override {
+ if (channelz_service_) {
+ return channelz_service_->has_synchronous_methods();
+ }
+ return false;
+ }
+
+ bool has_async_methods() const override {
+ if (channelz_service_) {
+ return channelz_service_->has_async_methods();
+ }
+ return false;
+ }
+
+ private:
+ std::shared_ptr<grpc::ChannelzService> channelz_service_;
+};
+
+static std::unique_ptr< ::grpc::ServerBuilderPlugin>
+CreateChannelzServicePlugin() {
+ return std::unique_ptr< ::grpc::ServerBuilderPlugin>(
+ new ChannelzServicePlugin());
+}
+
+} // namespace experimental
+} // namespace channelz
+} // namespace grpc
+namespace grpc_impl {
+namespace channelz {
+namespace experimental {
+
+void InitChannelzService() {
+ static struct Initializer {
+ Initializer() {
+ ::grpc::ServerBuilder::InternalAddPluginFactory(
+ &grpc::channelz::experimental::CreateChannelzServicePlugin);
+ }
+ } initialize;
+}
+
+} // namespace experimental
+} // namespace channelz
+} // namespace grpc_impl
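
Unlike the reflection plugin, this one has no static initializer: a server opts in by calling InitChannelzService() before it builds. A short sketch with an illustrative address:

    #include <memory>

    #include <grpcpp/ext/channelz_service_plugin.h>
    #include <grpcpp/grpcpp.h>

    int main() {
      // Must run before the ServerBuilder is constructed so the plugin factory
      // is already registered when the builder snapshots the factory list.
      grpc::channelz::experimental::InitChannelzService();

      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      // The server now also serves grpc.channelz.v1.Channelz on the same port.
      server->Wait();
      return 0;
    }
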
diff --git a/contrib/libs/grpc/src/cpp/server/create_default_thread_pool.cc b/contrib/libs/grpc/src/cpp/server/create_default_thread_pool.cc
new file mode 100644
index 0000000000..8ca3e32c2f
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/create_default_thread_pool.cc
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/cpu.h>
+
+#include "src/cpp/server/dynamic_thread_pool.h"
+
+#ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL
+
+namespace grpc {
+namespace {
+
+ThreadPoolInterface* CreateDefaultThreadPoolImpl() {
+ int cores = gpr_cpu_num_cores();
+ if (!cores) cores = 4;
+ return new DynamicThreadPool(cores);
+}
+
+CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl;
+
+} // namespace
+
+ThreadPoolInterface* CreateDefaultThreadPool() { return g_ctp_impl(); }
+
+void SetCreateThreadPool(CreateThreadPoolFunc func) { g_ctp_impl = func; }
+
+} // namespace grpc
+
+#endif // !GRPC_CUSTOM_DEFAULT_THREAD_POOL
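
The default pool can be replaced at build time through the GRPC_CUSTOM_DEFAULT_THREAD_POOL macro guard above or, inside the tree, through SetCreateThreadPool(). A hedged, internal-only sketch; these headers are not installed publicly and the pool size is illustrative.

    #include "src/cpp/server/dynamic_thread_pool.h"
    #include "src/cpp/server/thread_pool_interface.h"

    void UseLargerSyncThreadPool() {
      // A capture-less lambda converts to the CreateThreadPoolFunc function
      // pointer expected by SetCreateThreadPool().
      grpc::SetCreateThreadPool([]() -> grpc::ThreadPoolInterface* {
        return new grpc::DynamicThreadPool(/*reserve_threads=*/16);
      });
    }
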
diff --git a/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.cc b/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.cc
new file mode 100644
index 0000000000..77c5d6a263
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.cc
@@ -0,0 +1,124 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/server/dynamic_thread_pool.h"
+
+#include <grpc/support/log.h>
+#include <grpcpp/impl/codegen/sync.h>
+
+#include "src/core/lib/gprpp/thd.h"
+
+namespace grpc {
+
+DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
+ : pool_(pool),
+ thd_("grpcpp_dynamic_pool",
+ [](void* th) {
+ static_cast<DynamicThreadPool::DynamicThread*>(th)->ThreadFunc();
+ },
+ this) {
+ thd_.Start();
+}
+DynamicThreadPool::DynamicThread::~DynamicThread() { thd_.Join(); }
+
+void DynamicThreadPool::DynamicThread::ThreadFunc() {
+ pool_->ThreadFunc();
+ // Now that we have killed ourselves, we should reduce the thread count
+ grpc_core::MutexLock lock(&pool_->mu_);
+ pool_->nthreads_--;
+ // Move ourselves to dead list
+ pool_->dead_threads_.push_back(this);
+
+ if ((pool_->shutdown_) && (pool_->nthreads_ == 0)) {
+ pool_->shutdown_cv_.Signal();
+ }
+}
+
+void DynamicThreadPool::ThreadFunc() {
+ for (;;) {
+ // Wait until work is available or we are shutting down.
+ grpc_core::ReleasableMutexLock lock(&mu_);
+ if (!shutdown_ && callbacks_.empty()) {
+ // If there are too many threads waiting, then quit this thread
+ if (threads_waiting_ >= reserve_threads_) {
+ break;
+ }
+ threads_waiting_++;
+ cv_.Wait(&mu_);
+ threads_waiting_--;
+ }
+ // Drain callbacks before considering shutdown to ensure all work
+ // gets completed.
+ if (!callbacks_.empty()) {
+ auto cb = callbacks_.front();
+ callbacks_.pop();
+ lock.Unlock();
+ cb();
+ } else if (shutdown_) {
+ break;
+ }
+ }
+}
+
+DynamicThreadPool::DynamicThreadPool(int reserve_threads)
+ : shutdown_(false),
+ reserve_threads_(reserve_threads),
+ nthreads_(0),
+ threads_waiting_(0) {
+ for (int i = 0; i < reserve_threads_; i++) {
+ grpc_core::MutexLock lock(&mu_);
+ nthreads_++;
+ new DynamicThread(this);
+ }
+}
+
+void DynamicThreadPool::ReapThreads(std::list<DynamicThread*>* tlist) {
+ for (auto t = tlist->begin(); t != tlist->end(); t = tlist->erase(t)) {
+ delete *t;
+ }
+}
+
+DynamicThreadPool::~DynamicThreadPool() {
+ grpc_core::MutexLock lock(&mu_);
+ shutdown_ = true;
+ cv_.Broadcast();
+ while (nthreads_ != 0) {
+ shutdown_cv_.Wait(&mu_);
+ }
+ ReapThreads(&dead_threads_);
+}
+
+void DynamicThreadPool::Add(const std::function<void()>& callback) {
+ grpc_core::MutexLock lock(&mu_);
+  // Add work to the callbacks list
+ callbacks_.push(callback);
+ // Increase pool size or notify as needed
+ if (threads_waiting_ == 0) {
+ // Kick off a new thread
+ nthreads_++;
+ new DynamicThread(this);
+ } else {
+ cv_.Signal();
+ }
+ // Also use this chance to harvest dead threads
+ if (!dead_threads_.empty()) {
+ ReapThreads(&dead_threads_);
+ }
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.h b/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.h
new file mode 100644
index 0000000000..6f9f943bc3
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/dynamic_thread_pool.h
@@ -0,0 +1,68 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
+#define GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
+
+#include <list>
+#include <memory>
+#include <queue>
+
+#include <grpcpp/support/config.h>
+
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/gprpp/thd.h"
+#include "src/cpp/server/thread_pool_interface.h"
+
+namespace grpc {
+
+class DynamicThreadPool final : public ThreadPoolInterface {
+ public:
+ explicit DynamicThreadPool(int reserve_threads);
+ ~DynamicThreadPool();
+
+ void Add(const std::function<void()>& callback) override;
+
+ private:
+ class DynamicThread {
+ public:
+ DynamicThread(DynamicThreadPool* pool);
+ ~DynamicThread();
+
+ private:
+ DynamicThreadPool* pool_;
+ grpc_core::Thread thd_;
+ void ThreadFunc();
+ };
+ grpc_core::Mutex mu_;
+ grpc_core::CondVar cv_;
+ grpc_core::CondVar shutdown_cv_;
+ bool shutdown_;
+ std::queue<std::function<void()>> callbacks_;
+ int reserve_threads_;
+ int nthreads_;
+ int threads_waiting_;
+ std::list<DynamicThread*> dead_threads_;
+
+ void ThreadFunc();
+ static void ReapThreads(std::list<DynamicThread*>* tlist);
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
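
A small internal-only sketch of the pool's contract, assuming the in-tree headers are visible: Add() queues the callback and grows the pool when no worker is idle, and the destructor waits for every queued callback to finish before reaping the threads.

    #include <atomic>

    #include "src/cpp/server/dynamic_thread_pool.h"

    void RunBatch() {
      std::atomic<int> completed{0};
      {
        grpc::DynamicThreadPool pool(/*reserve_threads=*/4);
        for (int i = 0; i < 100; ++i) {
          pool.Add([&completed] { completed.fetch_add(1); });
        }
        // ~DynamicThreadPool() broadcasts shutdown, lets the workers drain the
        // remaining callbacks, and joins them.
      }
      // completed == 100 here.
    }
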
diff --git a/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.cc b/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.cc
new file mode 100644
index 0000000000..09d2a9d3b5
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.cc
@@ -0,0 +1,96 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/server/external_connection_acceptor_impl.h"
+
+#include <memory>
+
+#include <grpcpp/server_builder.h>
+#include <grpcpp/support/channel_arguments.h>
+
+namespace grpc {
+namespace internal {
+namespace {
+// The actual type returned to the user. It co-owns the internal impl object
+// with the server.
+class AcceptorWrapper : public experimental::ExternalConnectionAcceptor {
+ public:
+ explicit AcceptorWrapper(std::shared_ptr<ExternalConnectionAcceptorImpl> impl)
+ : impl_(std::move(impl)) {}
+ void HandleNewConnection(NewConnectionParameters* p) override {
+ impl_->HandleNewConnection(p);
+ }
+
+ private:
+ std::shared_ptr<ExternalConnectionAcceptorImpl> impl_;
+};
+} // namespace
+
+ExternalConnectionAcceptorImpl::ExternalConnectionAcceptorImpl(
+ const TString& name,
+ ServerBuilder::experimental_type::ExternalConnectionType type,
+ std::shared_ptr<ServerCredentials> creds)
+ : name_(name), creds_(std::move(creds)) {
+ GPR_ASSERT(type ==
+ ServerBuilder::experimental_type::ExternalConnectionType::FROM_FD);
+}
+
+std::unique_ptr<experimental::ExternalConnectionAcceptor>
+ExternalConnectionAcceptorImpl::GetAcceptor() {
+ grpc_core::MutexLock lock(&mu_);
+ GPR_ASSERT(!has_acceptor_);
+ has_acceptor_ = true;
+ return std::unique_ptr<experimental::ExternalConnectionAcceptor>(
+ new AcceptorWrapper(shared_from_this()));
+}
+
+void ExternalConnectionAcceptorImpl::HandleNewConnection(
+ experimental::ExternalConnectionAcceptor::NewConnectionParameters* p) {
+ grpc_core::MutexLock lock(&mu_);
+ if (shutdown_ || !started_) {
+ // TODO(yangg) clean up.
+ gpr_log(
+ GPR_ERROR,
+ "NOT handling external connection with fd %d, started %d, shutdown %d",
+ p->fd, started_, shutdown_);
+ return;
+ }
+ if (handler_) {
+ handler_->Handle(p->listener_fd, p->fd, p->read_buffer.c_buffer());
+ }
+}
+
+void ExternalConnectionAcceptorImpl::Shutdown() {
+ grpc_core::MutexLock lock(&mu_);
+ shutdown_ = true;
+}
+
+void ExternalConnectionAcceptorImpl::Start() {
+ grpc_core::MutexLock lock(&mu_);
+ GPR_ASSERT(!started_);
+ GPR_ASSERT(has_acceptor_);
+ GPR_ASSERT(!shutdown_);
+ started_ = true;
+}
+
+void ExternalConnectionAcceptorImpl::SetToChannelArgs(ChannelArguments* args) {
+ args->SetPointer(name_.c_str(), &handler_);
+}
+
+} // namespace internal
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.h b/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.h
new file mode 100644
index 0000000000..430c72862e
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/external_connection_acceptor_impl.h
@@ -0,0 +1,71 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef SRC_CPP_SERVER_EXTERNAL_CONNECTION_ACCEPTOR_IMPL_H_
+#define SRC_CPP_SERVER_EXTERNAL_CONNECTION_ACCEPTOR_IMPL_H_
+
+#include <memory>
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/support/channel_arguments.h>
+
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+
+namespace grpc {
+namespace internal {
+
+class ExternalConnectionAcceptorImpl
+ : public std::enable_shared_from_this<ExternalConnectionAcceptorImpl> {
+ public:
+ ExternalConnectionAcceptorImpl(
+ const TString& name,
+ ServerBuilder::experimental_type::ExternalConnectionType type,
+ std::shared_ptr<ServerCredentials> creds);
+ // Should only be called once.
+ std::unique_ptr<experimental::ExternalConnectionAcceptor> GetAcceptor();
+
+ void HandleNewConnection(
+ experimental::ExternalConnectionAcceptor::NewConnectionParameters* p);
+
+ void Shutdown();
+
+ void Start();
+
+ const char* name() { return name_.c_str(); }
+
+ ServerCredentials* GetCredentials() { return creds_.get(); }
+
+ void SetToChannelArgs(::grpc::ChannelArguments* args);
+
+ private:
+ const TString name_;
+ std::shared_ptr<ServerCredentials> creds_;
+ grpc_core::TcpServerFdHandler* handler_ = nullptr; // not owned
+ grpc_core::Mutex mu_;
+ bool has_acceptor_ = false;
+ bool started_ = false;
+ bool shutdown_ = false;
+};
+
+} // namespace internal
+} // namespace grpc
+
+#endif // SRC_CPP_SERVER_EXTERNAL_CONNECTION_ACCEPTOR_IMPL_H_
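
The impl object is reached from user code through ServerBuilder's experimental API, which returns the AcceptorWrapper defined in the .cc file. A heavily hedged sketch: the fd is assumed to be a socket already accepted outside gRPC, the address is illustrative, and error handling is omitted.

    #include <memory>

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/server_builder.h>

    void ServeExternallyAcceptedFd(int connection_fd) {
      grpc::ServerBuilder builder;
      std::unique_ptr<grpc::experimental::ExternalConnectionAcceptor> acceptor =
          builder.experimental().AddExternalConnectionAcceptor(
              grpc::ServerBuilder::experimental_type::ExternalConnectionType::
                  FROM_FD,
              grpc::InsecureServerCredentials());
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

      grpc::experimental::ExternalConnectionAcceptor::NewConnectionParameters p;
      p.fd = connection_fd;  // hand the already-accepted socket to gRPC
      acceptor->HandleNewConnection(&p);
      server->Wait();
    }
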
diff --git a/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.cc b/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.cc
new file mode 100644
index 0000000000..3cc508d0cb
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.cc
@@ -0,0 +1,504 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+
+#include "upb/upb.hpp"
+
+#include <grpc/slice.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/impl/codegen/method_handler.h>
+
+#include "src/cpp/server/health/default_health_check_service.h"
+#include "src/proto/grpc/health/v1/health.upb.h"
+#include "upb/upb.hpp"
+
+#define MAX_SERVICE_NAME_LENGTH 200
+
+namespace grpc {
+
+//
+// DefaultHealthCheckService
+//
+
+DefaultHealthCheckService::DefaultHealthCheckService() {
+ services_map_[""].SetServingStatus(SERVING);
+}
+
+void DefaultHealthCheckService::SetServingStatus(
+ const TString& service_name, bool serving) {
+ grpc_core::MutexLock lock(&mu_);
+ if (shutdown_) {
+    // The service is shutting down: report NOT_SERVING, even if service_name
+    // is not yet in the map.
+ serving = false;
+ }
+ services_map_[service_name].SetServingStatus(serving ? SERVING : NOT_SERVING);
+}
+
+void DefaultHealthCheckService::SetServingStatus(bool serving) {
+ const ServingStatus status = serving ? SERVING : NOT_SERVING;
+ grpc_core::MutexLock lock(&mu_);
+ if (shutdown_) {
+ return;
+ }
+ for (auto& p : services_map_) {
+ ServiceData& service_data = p.second;
+ service_data.SetServingStatus(status);
+ }
+}
+
+void DefaultHealthCheckService::Shutdown() {
+ grpc_core::MutexLock lock(&mu_);
+ if (shutdown_) {
+ return;
+ }
+ shutdown_ = true;
+ for (auto& p : services_map_) {
+ ServiceData& service_data = p.second;
+ service_data.SetServingStatus(NOT_SERVING);
+ }
+}
+
+DefaultHealthCheckService::ServingStatus
+DefaultHealthCheckService::GetServingStatus(
+ const TString& service_name) const {
+ grpc_core::MutexLock lock(&mu_);
+ auto it = services_map_.find(service_name);
+ if (it == services_map_.end()) {
+ return NOT_FOUND;
+ }
+ const ServiceData& service_data = it->second;
+ return service_data.GetServingStatus();
+}
+
+void DefaultHealthCheckService::RegisterCallHandler(
+ const TString& service_name,
+ std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
+ grpc_core::MutexLock lock(&mu_);
+ ServiceData& service_data = services_map_[service_name];
+ service_data.AddCallHandler(handler /* copies ref */);
+ HealthCheckServiceImpl::CallHandler* h = handler.get();
+ h->SendHealth(std::move(handler), service_data.GetServingStatus());
+}
+
+void DefaultHealthCheckService::UnregisterCallHandler(
+ const TString& service_name,
+ const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
+ grpc_core::MutexLock lock(&mu_);
+ auto it = services_map_.find(service_name);
+ if (it == services_map_.end()) return;
+ ServiceData& service_data = it->second;
+ service_data.RemoveCallHandler(handler);
+ if (service_data.Unused()) {
+ services_map_.erase(it);
+ }
+}
+
+DefaultHealthCheckService::HealthCheckServiceImpl*
+DefaultHealthCheckService::GetHealthCheckService(
+ std::unique_ptr<ServerCompletionQueue> cq) {
+ GPR_ASSERT(impl_ == nullptr);
+ impl_.reset(new HealthCheckServiceImpl(this, std::move(cq)));
+ return impl_.get();
+}
+
+//
+// DefaultHealthCheckService::ServiceData
+//
+
+void DefaultHealthCheckService::ServiceData::SetServingStatus(
+ ServingStatus status) {
+ status_ = status;
+ for (auto& call_handler : call_handlers_) {
+ call_handler->SendHealth(call_handler /* copies ref */, status);
+ }
+}
+
+void DefaultHealthCheckService::ServiceData::AddCallHandler(
+ std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
+ call_handlers_.insert(std::move(handler));
+}
+
+void DefaultHealthCheckService::ServiceData::RemoveCallHandler(
+ const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
+ call_handlers_.erase(handler);
+}
+
+//
+// DefaultHealthCheckService::HealthCheckServiceImpl
+//
+
+namespace {
+const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
+const char kHealthWatchMethodName[] = "/grpc.health.v1.Health/Watch";
+} // namespace
+
+DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
+ DefaultHealthCheckService* database,
+ std::unique_ptr<ServerCompletionQueue> cq)
+ : database_(database), cq_(std::move(cq)) {
+ // Add Check() method.
+ AddMethod(new internal::RpcServiceMethod(
+ kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, nullptr));
+ // Add Watch() method.
+ AddMethod(new internal::RpcServiceMethod(
+ kHealthWatchMethodName, internal::RpcMethod::SERVER_STREAMING, nullptr));
+ // Create serving thread.
+ thread_ = std::unique_ptr<::grpc_core::Thread>(
+ new ::grpc_core::Thread("grpc_health_check_service", Serve, this));
+}
+
+DefaultHealthCheckService::HealthCheckServiceImpl::~HealthCheckServiceImpl() {
+ // We will reach here after the server starts shutting down.
+ shutdown_ = true;
+ {
+ grpc_core::MutexLock lock(&cq_shutdown_mu_);
+ cq_->Shutdown();
+ }
+ thread_->Join();
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::StartServingThread() {
+ // Request the calls we're interested in.
+ // We do this before starting the serving thread, so that we know it's
+ // done before server startup is complete.
+ CheckCallHandler::CreateAndStart(cq_.get(), database_, this);
+ WatchCallHandler::CreateAndStart(cq_.get(), database_, this);
+ // Start serving thread.
+ thread_->Start();
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::Serve(void* arg) {
+ HealthCheckServiceImpl* service = static_cast<HealthCheckServiceImpl*>(arg);
+ void* tag;
+ bool ok;
+ while (true) {
+ if (!service->cq_->Next(&tag, &ok)) {
+ // The completion queue is shutting down.
+ GPR_ASSERT(service->shutdown_);
+ break;
+ }
+ auto* next_step = static_cast<CallableTag*>(tag);
+ next_step->Run(ok);
+ }
+}
+
+bool DefaultHealthCheckService::HealthCheckServiceImpl::DecodeRequest(
+ const ByteBuffer& request, TString* service_name) {
+ std::vector<Slice> slices;
+ if (!request.Dump(&slices).ok()) return false;
+ uint8_t* request_bytes = nullptr;
+ size_t request_size = 0;
+ if (slices.size() == 1) {
+ request_bytes = const_cast<uint8_t*>(slices[0].begin());
+ request_size = slices[0].size();
+ } else if (slices.size() > 1) {
+ request_bytes = static_cast<uint8_t*>(gpr_malloc(request.Length()));
+ uint8_t* copy_to = request_bytes;
+ for (size_t i = 0; i < slices.size(); i++) {
+ memcpy(copy_to, slices[i].begin(), slices[i].size());
+ copy_to += slices[i].size();
+ }
+ }
+ upb::Arena arena;
+ grpc_health_v1_HealthCheckRequest* request_struct =
+ grpc_health_v1_HealthCheckRequest_parse(
+ reinterpret_cast<char*>(request_bytes), request_size, arena.ptr());
+ if (slices.size() > 1) {
+ gpr_free(request_bytes);
+ }
+ if (request_struct == nullptr) {
+ return false;
+ }
+ upb_strview service =
+ grpc_health_v1_HealthCheckRequest_service(request_struct);
+ if (service.size > MAX_SERVICE_NAME_LENGTH) {
+ return false;
+ }
+ service_name->assign(service.data, service.size);
+ return true;
+}
+
+bool DefaultHealthCheckService::HealthCheckServiceImpl::EncodeResponse(
+ ServingStatus status, ByteBuffer* response) {
+ upb::Arena arena;
+ grpc_health_v1_HealthCheckResponse* response_struct =
+ grpc_health_v1_HealthCheckResponse_new(arena.ptr());
+ grpc_health_v1_HealthCheckResponse_set_status(
+ response_struct,
+ status == NOT_FOUND
+ ? grpc_health_v1_HealthCheckResponse_SERVICE_UNKNOWN
+ : status == SERVING ? grpc_health_v1_HealthCheckResponse_SERVING
+ : grpc_health_v1_HealthCheckResponse_NOT_SERVING);
+ size_t buf_length;
+ char* buf = grpc_health_v1_HealthCheckResponse_serialize(
+ response_struct, arena.ptr(), &buf_length);
+ if (buf == nullptr) {
+ return false;
+ }
+ grpc_slice response_slice = grpc_slice_from_copied_buffer(buf, buf_length);
+ Slice encoded_response(response_slice, Slice::STEAL_REF);
+ ByteBuffer response_buffer(&encoded_response, 1);
+ response->Swap(&response_buffer);
+ return true;
+}
+
+//
+// DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler
+//
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
+ CreateAndStart(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service) {
+ std::shared_ptr<CallHandler> self =
+ std::make_shared<CheckCallHandler>(cq, database, service);
+ CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get());
+ {
+ grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
+ if (service->shutdown_) return;
+ // Request a Check() call.
+ handler->next_ =
+ CallableTag(std::bind(&CheckCallHandler::OnCallReceived, handler,
+ std::placeholders::_1, std::placeholders::_2),
+ std::move(self));
+ service->RequestAsyncUnary(0, &handler->ctx_, &handler->request_,
+ &handler->writer_, cq, cq, &handler->next_);
+ }
+}
+
+DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
+ CheckCallHandler(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service)
+ : cq_(cq), database_(database), service_(service), writer_(&ctx_) {}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
+ OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
+ if (!ok) {
+ // The value of ok being false means that the server is shutting down.
+ return;
+ }
+ // Spawn a new handler instance to serve the next new client. Every handler
+ // instance will deallocate itself when it's done.
+ CreateAndStart(cq_, database_, service_);
+ // Process request.
+ gpr_log(GPR_DEBUG, "[HCS %p] Health check started for handler %p", service_,
+ this);
+ TString service_name;
+ grpc::Status status = Status::OK;
+ ByteBuffer response;
+ if (!service_->DecodeRequest(request_, &service_name)) {
+ status = Status(StatusCode::INVALID_ARGUMENT, "could not parse request");
+ } else {
+ ServingStatus serving_status = database_->GetServingStatus(service_name);
+ if (serving_status == NOT_FOUND) {
+ status = Status(StatusCode::NOT_FOUND, "service name unknown");
+ } else if (!service_->EncodeResponse(serving_status, &response)) {
+ status = Status(StatusCode::INTERNAL, "could not encode response");
+ }
+ }
+ // Send response.
+ {
+ grpc_core::MutexLock lock(&service_->cq_shutdown_mu_);
+ if (!service_->shutdown_) {
+ next_ =
+ CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this,
+ std::placeholders::_1, std::placeholders::_2),
+ std::move(self));
+ if (status.ok()) {
+ writer_.Finish(response, status, &next_);
+ } else {
+ writer_.FinishWithError(status, &next_);
+ }
+ }
+ }
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
+ OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
+ if (ok) {
+ gpr_log(GPR_DEBUG, "[HCS %p] Health check call finished for handler %p",
+ service_, this);
+ }
+ self.reset(); // To appease clang-tidy.
+}
+
+//
+// DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler
+//
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ CreateAndStart(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service) {
+ std::shared_ptr<CallHandler> self =
+ std::make_shared<WatchCallHandler>(cq, database, service);
+ WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get());
+ {
+ grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
+ if (service->shutdown_) return;
+ // Request AsyncNotifyWhenDone().
+ handler->on_done_notified_ =
+ CallableTag(std::bind(&WatchCallHandler::OnDoneNotified, handler,
+ std::placeholders::_1, std::placeholders::_2),
+ self /* copies ref */);
+ handler->ctx_.AsyncNotifyWhenDone(&handler->on_done_notified_);
+ // Request a Watch() call.
+ handler->next_ =
+ CallableTag(std::bind(&WatchCallHandler::OnCallReceived, handler,
+ std::placeholders::_1, std::placeholders::_2),
+ std::move(self));
+ service->RequestAsyncServerStreaming(1, &handler->ctx_, &handler->request_,
+ &handler->stream_, cq, cq,
+ &handler->next_);
+ }
+}
+
+DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ WatchCallHandler(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service)
+ : cq_(cq), database_(database), service_(service), stream_(&ctx_) {}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
+ if (!ok) {
+ // Server shutting down.
+ //
+ // AsyncNotifyWhenDone() needs to be called before the call starts, but the
+ // tag will not pop out if the call never starts (
+ // https://github.com/grpc/grpc/issues/10136). So we need to manually
+ // release the ownership of the handler in this case.
+ GPR_ASSERT(on_done_notified_.ReleaseHandler() != nullptr);
+ return;
+ }
+ // Spawn a new handler instance to serve the next new client. Every handler
+ // instance will deallocate itself when it's done.
+ CreateAndStart(cq_, database_, service_);
+ // Parse request.
+ if (!service_->DecodeRequest(request_, &service_name_)) {
+ SendFinish(std::move(self),
+ Status(StatusCode::INVALID_ARGUMENT, "could not parse request"));
+ return;
+ }
+ // Register the call for updates to the service.
+ gpr_log(GPR_DEBUG,
+ "[HCS %p] Health watch started for service \"%s\" (handler: %p)",
+ service_, service_name_.c_str(), this);
+ database_->RegisterCallHandler(service_name_, std::move(self));
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) {
+ grpc_core::MutexLock lock(&send_mu_);
+ // If there's already a send in flight, cache the new status, and
+ // we'll start a new send for it when the one in flight completes.
+ if (send_in_flight_) {
+ pending_status_ = status;
+ return;
+ }
+ // Start a send.
+ SendHealthLocked(std::move(self), status);
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ SendHealthLocked(std::shared_ptr<CallHandler> self, ServingStatus status) {
+ send_in_flight_ = true;
+ // Construct response.
+ ByteBuffer response;
+ bool success = service_->EncodeResponse(status, &response);
+ // Grab shutdown lock and send response.
+ grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
+ if (service_->shutdown_) {
+ SendFinishLocked(std::move(self), Status::CANCELLED);
+ return;
+ }
+ if (!success) {
+ SendFinishLocked(std::move(self),
+ Status(StatusCode::INTERNAL, "could not encode response"));
+ return;
+ }
+ next_ = CallableTag(std::bind(&WatchCallHandler::OnSendHealthDone, this,
+ std::placeholders::_1, std::placeholders::_2),
+ std::move(self));
+ stream_.Write(response, &next_);
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ OnSendHealthDone(std::shared_ptr<CallHandler> self, bool ok) {
+ if (!ok) {
+ SendFinish(std::move(self), Status::CANCELLED);
+ return;
+ }
+ grpc_core::MutexLock lock(&send_mu_);
+ send_in_flight_ = false;
+ // If we got a new status since we started the last send, start a
+ // new send for it.
+ if (pending_status_ != NOT_FOUND) {
+ auto status = pending_status_;
+ pending_status_ = NOT_FOUND;
+ SendHealthLocked(std::move(self), status);
+ }
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ SendFinish(std::shared_ptr<CallHandler> self, const Status& status) {
+ if (finish_called_) return;
+ grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
+ if (service_->shutdown_) return;
+ SendFinishLocked(std::move(self), status);
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ SendFinishLocked(std::shared_ptr<CallHandler> self, const Status& status) {
+ on_finish_done_ =
+ CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
+ std::placeholders::_1, std::placeholders::_2),
+ std::move(self));
+ stream_.Finish(status, &on_finish_done_);
+ finish_called_ = true;
+}
+
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
+ if (ok) {
+ gpr_log(GPR_DEBUG,
+ "[HCS %p] Health watch call finished (service_name: \"%s\", "
+ "handler: %p).",
+ service_, service_name_.c_str(), this);
+ }
+ self.reset(); // To appease clang-tidy.
+}
+
+// TODO(roth): This method currently assumes that there will be only one
+// thread polling the cq and invoking the corresponding callbacks. If
+// that changes, we will need to add synchronization here.
+void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
+ OnDoneNotified(std::shared_ptr<CallHandler> self, bool ok) {
+ GPR_ASSERT(ok);
+ gpr_log(GPR_DEBUG,
+ "[HCS %p] Health watch call is notified done (handler: %p, "
+ "is_cancelled: %d).",
+ service_, this, static_cast<int>(ctx_.IsCancelled()));
+ database_->UnregisterCallHandler(service_name_, self);
+ SendFinish(std::move(self), Status::CANCELLED);
+}
+
+} // namespace grpc
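
The Check and Watch handlers above speak the standard grpc.health.v1 protocol, so they can be exercised with the generated client stubs. A hedged client-side sketch using the in-tree generated header and an illustrative target:

    #include <memory>

    #include <grpcpp/grpcpp.h>

    #include "src/proto/grpc/health/v1/health.grpc.pb.h"

    grpc::health::v1::HealthCheckResponse::ServingStatus QueryHealth(
        const grpc::string& service_name) {
      auto channel = grpc::CreateChannel("localhost:50051",
                                         grpc::InsecureChannelCredentials());
      std::unique_ptr<grpc::health::v1::Health::Stub> stub =
          grpc::health::v1::Health::NewStub(channel);

      grpc::health::v1::HealthCheckRequest request;
      request.set_service(service_name);
      grpc::health::v1::HealthCheckResponse response;
      grpc::ClientContext context;
      grpc::Status status = stub->Check(&context, request, &response);
      return status.ok() ? response.status()
                         : grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN;
    }
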
diff --git a/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.h b/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.h
new file mode 100644
index 0000000000..9da1dfc15f
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/health/default_health_check_service.h
@@ -0,0 +1,284 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
+#define GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
+
+#include <atomic>
+#include <set>
+
+#include <grpc/support/log.h>
+#include <grpcpp/grpcpp.h>
+#include <grpcpp/health_check_service_interface.h>
+#include <grpcpp/impl/codegen/async_generic_service.h>
+#include <grpcpp/impl/codegen/async_unary_call.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/support/byte_buffer.h>
+
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/gprpp/thd.h"
+
+namespace grpc {
+
+// Default implementation of HealthCheckServiceInterface. The server creates
+// and owns it.
+class DefaultHealthCheckService final : public HealthCheckServiceInterface {
+ public:
+ enum ServingStatus { NOT_FOUND, SERVING, NOT_SERVING };
+
+ // The service impl to register with the server.
+ class HealthCheckServiceImpl : public Service {
+ public:
+ // Base class for call handlers.
+ class CallHandler {
+ public:
+ virtual ~CallHandler() = default;
+ virtual void SendHealth(std::shared_ptr<CallHandler> self,
+ ServingStatus status) = 0;
+ };
+
+ HealthCheckServiceImpl(DefaultHealthCheckService* database,
+ std::unique_ptr<ServerCompletionQueue> cq);
+
+ ~HealthCheckServiceImpl();
+
+ void StartServingThread();
+
+ private:
+    // A tag that can be called with a bool argument. It is tailored for
+    // CallHandler's use: it is constructed from a CallHandler method and a
+    // shared pointer to the handler, and it may be invoked only once. The
+    // shared pointer is moved into the invoked function, which keeps the
+    // handler's reference counting simple: the reference is released when the
+    // invoked function returns, unless the function has passed it on.
+ class CallableTag {
+ public:
+ using HandlerFunction =
+ std::function<void(std::shared_ptr<CallHandler>, bool)>;
+
+ CallableTag() {}
+
+ CallableTag(HandlerFunction func, std::shared_ptr<CallHandler> handler)
+ : handler_function_(std::move(func)), handler_(std::move(handler)) {
+ GPR_ASSERT(handler_function_ != nullptr);
+ GPR_ASSERT(handler_ != nullptr);
+ }
+
+ // Runs the tag. This should be called only once. The handler is no
+ // longer owned by this tag after this method is invoked.
+ void Run(bool ok) {
+ GPR_ASSERT(handler_function_ != nullptr);
+ GPR_ASSERT(handler_ != nullptr);
+ handler_function_(std::move(handler_), ok);
+ }
+
+ // Releases and returns the shared pointer to the handler.
+ std::shared_ptr<CallHandler> ReleaseHandler() {
+ return std::move(handler_);
+ }
+
+ private:
+ HandlerFunction handler_function_ = nullptr;
+ std::shared_ptr<CallHandler> handler_;
+ };
+
+ // Call handler for Check method.
+ // Each handler takes care of one call. It contains per-call data and it
+ // will access the members of the parent class (i.e.,
+ // DefaultHealthCheckService) for per-service health data.
+ class CheckCallHandler : public CallHandler {
+ public:
+      // Instantiates a CheckCallHandler and requests the next health check
+      // call. The handler object manages its own lifetime, so the caller does
+      // not need to take any further action on it.
+ static void CreateAndStart(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service);
+
+ // This ctor is public because we want to use std::make_shared<> in
+ // CreateAndStart(). This ctor shouldn't be used elsewhere.
+ CheckCallHandler(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service);
+
+ // Not used for Check.
+ void SendHealth(std::shared_ptr<CallHandler> /*self*/,
+ ServingStatus /*status*/) override {}
+
+ private:
+ // Called when we receive a call.
+ // Spawns a new handler so that we can keep servicing future calls.
+ void OnCallReceived(std::shared_ptr<CallHandler> self, bool ok);
+
+ // Called when Finish() is done.
+ void OnFinishDone(std::shared_ptr<CallHandler> self, bool ok);
+
+ // The members passed down from HealthCheckServiceImpl.
+ ServerCompletionQueue* cq_;
+ DefaultHealthCheckService* database_;
+ HealthCheckServiceImpl* service_;
+
+ ByteBuffer request_;
+ GenericServerAsyncResponseWriter writer_;
+ ServerContext ctx_;
+
+ CallableTag next_;
+ };
+
+ // Call handler for Watch method.
+ // Each handler takes care of one call. It contains per-call data and it
+ // will access the members of the parent class (i.e.,
+ // DefaultHealthCheckService) for per-service health data.
+ class WatchCallHandler : public CallHandler {
+ public:
+      // Instantiates a WatchCallHandler and requests the next health check
+      // call. The handler object manages its own lifetime, so the caller does
+      // not need to take any further action on it.
+ static void CreateAndStart(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service);
+
+ // This ctor is public because we want to use std::make_shared<> in
+ // CreateAndStart(). This ctor shouldn't be used elsewhere.
+ WatchCallHandler(ServerCompletionQueue* cq,
+ DefaultHealthCheckService* database,
+ HealthCheckServiceImpl* service);
+
+ void SendHealth(std::shared_ptr<CallHandler> self,
+ ServingStatus status) override;
+
+ private:
+ // Called when we receive a call.
+ // Spawns a new handler so that we can keep servicing future calls.
+ void OnCallReceived(std::shared_ptr<CallHandler> self, bool ok);
+
+ // Requires holding send_mu_.
+ void SendHealthLocked(std::shared_ptr<CallHandler> self,
+ ServingStatus status);
+
+ // When sending a health result finishes.
+ void OnSendHealthDone(std::shared_ptr<CallHandler> self, bool ok);
+
+ void SendFinish(std::shared_ptr<CallHandler> self, const Status& status);
+
+ // Requires holding service_->cq_shutdown_mu_.
+ void SendFinishLocked(std::shared_ptr<CallHandler> self,
+ const Status& status);
+
+ // Called when Finish() is done.
+ void OnFinishDone(std::shared_ptr<CallHandler> self, bool ok);
+
+ // Called when AsyncNotifyWhenDone() notifies us.
+ void OnDoneNotified(std::shared_ptr<CallHandler> self, bool ok);
+
+ // The members passed down from HealthCheckServiceImpl.
+ ServerCompletionQueue* cq_;
+ DefaultHealthCheckService* database_;
+ HealthCheckServiceImpl* service_;
+
+ ByteBuffer request_;
+ TString service_name_;
+ GenericServerAsyncWriter stream_;
+ ServerContext ctx_;
+
+ grpc_core::Mutex send_mu_;
+    bool send_in_flight_ = false;               // Guarded by send_mu_.
+    ServingStatus pending_status_ = NOT_FOUND;  // Guarded by send_mu_.
+
+ bool finish_called_ = false;
+ CallableTag next_;
+ CallableTag on_done_notified_;
+ CallableTag on_finish_done_;
+ };
+
+ // Handles the incoming requests and drives the completion queue in a loop.
+ static void Serve(void* arg);
+
+ // Returns true on success.
+ static bool DecodeRequest(const ByteBuffer& request,
+ TString* service_name);
+ static bool EncodeResponse(ServingStatus status, ByteBuffer* response);
+
+ // Needed to appease Windows compilers, which don't seem to allow
+ // nested classes to access protected members in the parent's
+ // superclass.
+ using Service::RequestAsyncServerStreaming;
+ using Service::RequestAsyncUnary;
+
+ DefaultHealthCheckService* database_;
+ std::unique_ptr<ServerCompletionQueue> cq_;
+
+ // To synchronize the operations related to shutdown state of cq_, so that
+ // we don't enqueue new tags into cq_ after it is already shut down.
+ grpc_core::Mutex cq_shutdown_mu_;
+ std::atomic_bool shutdown_{false};
+ std::unique_ptr<::grpc_core::Thread> thread_;
+ };
+
+ DefaultHealthCheckService();
+
+ void SetServingStatus(const TString& service_name, bool serving) override;
+ void SetServingStatus(bool serving) override;
+
+ void Shutdown() override;
+
+ ServingStatus GetServingStatus(const TString& service_name) const;
+
+ HealthCheckServiceImpl* GetHealthCheckService(
+ std::unique_ptr<ServerCompletionQueue> cq);
+
+ private:
+ // Stores the current serving status of a service and any call
+ // handlers registered for updates when the service's status changes.
+ class ServiceData {
+ public:
+ void SetServingStatus(ServingStatus status);
+ ServingStatus GetServingStatus() const { return status_; }
+ void AddCallHandler(
+ std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
+ void RemoveCallHandler(
+ const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler);
+ bool Unused() const {
+ return call_handlers_.empty() && status_ == NOT_FOUND;
+ }
+
+ private:
+ ServingStatus status_ = NOT_FOUND;
+ std::set<std::shared_ptr<HealthCheckServiceImpl::CallHandler>>
+ call_handlers_;
+ };
+
+ void RegisterCallHandler(
+ const TString& service_name,
+ std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
+
+ void UnregisterCallHandler(
+ const TString& service_name,
+ const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler);
+
+ mutable grpc_core::Mutex mu_;
+ bool shutdown_ = false; // Guarded by mu_.
+ std::map<TString, ServiceData> services_map_; // Guarded by mu_.
+ std::unique_ptr<HealthCheckServiceImpl> impl_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
diff --git a/contrib/libs/grpc/src/cpp/server/health/health_check_service.cc b/contrib/libs/grpc/src/cpp/server/health/health_check_service.cc
new file mode 100644
index 0000000000..a0fa2d62f5
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/health/health_check_service.cc
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/health_check_service_interface.h>
+
+namespace grpc {
+namespace {
+bool g_grpc_default_health_check_service_enabled = false;
+} // namespace
+
+bool DefaultHealthCheckServiceEnabled() {
+ return g_grpc_default_health_check_service_enabled;
+}
+
+void EnableDefaultHealthCheckService(bool enable) {
+ g_grpc_default_health_check_service_enabled = enable;
+}
+
+} // namespace grpc
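
These two flags are all that user code normally touches: enable the default service before building the server, then flip per-service status through the interface the server hands back. A short sketch with an illustrative address and service name:

    #include <memory>

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/health_check_service_interface.h>

    int main() {
      grpc::EnableDefaultHealthCheckService(true);  // must precede BuildAndStart()

      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

      grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
      if (health != nullptr) {
        health->SetServingStatus("example.package.ExampleService", true);
      }
      server->Wait();
      return 0;
    }
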
diff --git a/contrib/libs/grpc/src/cpp/server/health/health_check_service_server_builder_option.cc b/contrib/libs/grpc/src/cpp/server/health/health_check_service_server_builder_option.cc
new file mode 100644
index 0000000000..3fa384ace9
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/health/health_check_service_server_builder_option.cc
@@ -0,0 +1,35 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/ext/health_check_service_server_builder_option.h>
+
+namespace grpc {
+
+HealthCheckServiceServerBuilderOption::HealthCheckServiceServerBuilderOption(
+ std::unique_ptr<HealthCheckServiceInterface> hc)
+ : hc_(std::move(hc)) {}
+// Hand over hc_ to the server.
+void HealthCheckServiceServerBuilderOption::UpdateArguments(
+ ChannelArguments* args) {
+ args->SetPointer(kHealthCheckServiceInterfaceArg, hc_.release());
+}
+
+void HealthCheckServiceServerBuilderOption::UpdatePlugins(
+ std::vector<std::unique_ptr<ServerBuilderPlugin>>* /*plugins*/) {}
+
+} // namespace grpc
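
The builder option above simply transfers ownership of the supplied implementation into the channel arguments under kHealthCheckServiceInterfaceArg. A sketch of wiring in a custom implementation; NoopHealthCheckService is an illustrative stand-in, not part of the patch:

    #include <memory>
    #include <utility>

    #include <grpcpp/ext/health_check_service_server_builder_option.h>
    #include <grpcpp/grpcpp.h>
    #include <grpcpp/health_check_service_interface.h>

    // Trivial stand-in that ignores all status updates.
    class NoopHealthCheckService : public grpc::HealthCheckServiceInterface {
     public:
      void SetServingStatus(const TString& /*service_name*/,
                            bool /*serving*/) override {}
      void SetServingStatus(bool /*serving*/) override {}
    };

    void InstallCustomHealthService(grpc::ServerBuilder* builder) {
      std::unique_ptr<grpc::HealthCheckServiceInterface> service(
          new NoopHealthCheckService());
      builder->SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
          new grpc::HealthCheckServiceServerBuilderOption(std::move(service))));
    }
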
diff --git a/contrib/libs/grpc/src/cpp/server/insecure_server_credentials.cc b/contrib/libs/grpc/src/cpp/server/insecure_server_credentials.cc
new file mode 100644
index 0000000000..3f33f4e045
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/insecure_server_credentials.cc
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/security/server_credentials.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+
+namespace grpc {
+namespace {
+class InsecureServerCredentialsImpl final : public ServerCredentials {
+ public:
+ int AddPortToServer(const TString& addr, grpc_server* server) override {
+ return grpc_server_add_insecure_http2_port(server, addr.c_str());
+ }
+ void SetAuthMetadataProcessor(
+ const std::shared_ptr<grpc::AuthMetadataProcessor>& processor) override {
+ (void)processor;
+ GPR_ASSERT(0); // Should not be called on InsecureServerCredentials.
+ }
+};
+} // namespace
+
+std::shared_ptr<ServerCredentials> InsecureServerCredentials() {
+ return std::shared_ptr<ServerCredentials>(
+ new InsecureServerCredentialsImpl());
+}
+
+} // namespace grpc
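
For reference, a plaintext server is built by handing the credentials object above to ServerBuilder::AddListeningPort(); the address below is a placeholder, and port 0 asks the OS for a free port:

    #include <memory>

    #include <grpcpp/grpcpp.h>

    std::unique_ptr<grpc::Server> StartPlaintextServer(grpc::Service* service) {
      grpc::ServerBuilder builder;
      int bound_port = 0;
      // The chosen port is written to bound_port once BuildAndStart() succeeds.
      builder.AddListeningPort("127.0.0.1:0", grpc::InsecureServerCredentials(),
                               &bound_port);
      builder.RegisterService(service);
      return builder.BuildAndStart();
    }
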
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/constants.h b/contrib/libs/grpc/src/cpp/server/load_reporter/constants.h
new file mode 100644
index 0000000000..00ad794a04
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/constants.h
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_SRC_CPP_SERVER_LOAD_REPORTER_CONSTANTS_H
+#define GRPC_SRC_CPP_SERVER_LOAD_REPORTER_CONSTANTS_H
+
+#include <grpc/impl/codegen/port_platform.h>
+
+namespace grpc {
+namespace load_reporter {
+
+// TODO(juanlishen): Update the version number with the PR number every time
+// there is any change to the server load reporter.
+constexpr uint32_t kVersion = 15853;
+
+// TODO(juanlishen): This window size is from the internal spec for the load
+// reporter. Need to ask the gRPC LB team whether we should make this and the
+// fetching interval configurable.
+constexpr uint32_t kFeedbackSampleWindowSeconds = 10;
+constexpr uint32_t kFetchAndSampleIntervalSeconds = 1;
+
+constexpr size_t kLbIdLength = 8;
+constexpr size_t kIpv4AddressLength = 8;
+constexpr size_t kIpv6AddressLength = 32;
+
+constexpr char kInvalidLbId[] = "<INVALID_LBID_238dsb234890rb>";
+
+// Call statuses.
+
+constexpr char kCallStatusOk[] = "OK";
+constexpr char kCallStatusServerError[] = "5XX";
+constexpr char kCallStatusClientError[] = "4XX";
+
+// Tag keys.
+
+constexpr char kTagKeyToken[] = "token";
+constexpr char kTagKeyHost[] = "host";
+constexpr char kTagKeyUserId[] = "user_id";
+constexpr char kTagKeyStatus[] = "status";
+constexpr char kTagKeyMetricName[] = "metric_name";
+
+// Measure names.
+
+constexpr char kMeasureStartCount[] = "grpc.io/lb/start_count";
+constexpr char kMeasureEndCount[] = "grpc.io/lb/end_count";
+constexpr char kMeasureEndBytesSent[] = "grpc.io/lb/bytes_sent";
+constexpr char kMeasureEndBytesReceived[] = "grpc.io/lb/bytes_received";
+constexpr char kMeasureEndLatencyMs[] = "grpc.io/lb/latency_ms";
+constexpr char kMeasureOtherCallMetric[] = "grpc.io/lb/other_call_metric";
+
+// View names.
+
+constexpr char kViewStartCount[] = "grpc.io/lb_view/start_count";
+constexpr char kViewEndCount[] = "grpc.io/lb_view/end_count";
+constexpr char kViewEndBytesSent[] = "grpc.io/lb_view/bytes_sent";
+constexpr char kViewEndBytesReceived[] = "grpc.io/lb_view/bytes_received";
+constexpr char kViewEndLatencyMs[] = "grpc.io/lb_view/latency_ms";
+constexpr char kViewOtherCallMetricCount[] =
+ "grpc.io/lb_view/other_call_metric_count";
+constexpr char kViewOtherCallMetricValue[] =
+ "grpc.io/lb_view/other_call_metric_value";
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif  // GRPC_SRC_CPP_SERVER_LOAD_REPORTER_CONSTANTS_H
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats.h b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats.h
new file mode 100644
index 0000000000..f514b0752f
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats.h
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_SRC_CPP_SERVER_LOAD_REPORTER_GET_CPU_STATS_H
+#define GRPC_SRC_CPP_SERVER_LOAD_REPORTER_GET_CPU_STATS_H
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <utility>
+
+namespace grpc {
+namespace load_reporter {
+
+// Reads the CPU stats (in a pair of busy and total numbers) from the system.
+// The units of the stats should be the same.
+std::pair<uint64_t, uint64_t> GetCpuStatsImpl();
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // GRPC_SRC_CPP_SERVER_LOAD_REPORTER_GET_CPU_STATS_H
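
GetCpuStatsImpl() returns cumulative (busy, total) tick counters, so a utilization figure is derived from the deltas between two samples. A small sketch under that assumption:

    #include <cstdint>
    #include <utility>

    #include "src/cpp/server/load_reporter/get_cpu_stats.h"

    // Fractional CPU utilization between two cumulative samples; returns 0 if
    // the total counter has not advanced.
    double CpuUtilizationBetween(std::pair<uint64_t, uint64_t> prev,
                                 std::pair<uint64_t, uint64_t> curr) {
      const uint64_t busy_delta = curr.first - prev.first;
      const uint64_t total_delta = curr.second - prev.second;
      if (total_delta == 0) return 0.0;
      return static_cast<double>(busy_delta) / static_cast<double>(total_delta);
    }
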
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_linux.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_linux.cc
new file mode 100644
index 0000000000..561d4f5048
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_linux.cc
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_LINUX
+
+#include <cstdio>
+
+#include "src/cpp/server/load_reporter/get_cpu_stats.h"
+
+namespace grpc {
+namespace load_reporter {
+
+std::pair<uint64_t, uint64_t> GetCpuStatsImpl() {
+ uint64_t busy = 0, total = 0;
+  FILE* fp = fopen("/proc/stat", "r");
+  if (fp == nullptr) {
+    // /proc/stat is unavailable; report zeroed counters.
+    return std::make_pair(busy, total);
+  }
+  uint64_t user, nice, system, idle;
+  if (fscanf(fp, "cpu %lu %lu %lu %lu", &user, &nice, &system, &idle) != 4) {
+    // The line could not be parsed, so treat all the readings as invalid.
+    user = nice = system = idle = 0;
+  }
+  fclose(fp);
+ busy = user + nice + system;
+ total = busy + idle;
+ return std::make_pair(busy, total);
+}
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // GPR_LINUX
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_macos.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_macos.cc
new file mode 100644
index 0000000000..dbdde304c2
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_macos.cc
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_APPLE
+
+#include <mach/mach.h>
+
+#include "src/cpp/server/load_reporter/get_cpu_stats.h"
+
+namespace grpc {
+namespace load_reporter {
+
+std::pair<uint64_t, uint64_t> GetCpuStatsImpl() {
+ uint64_t busy = 0, total = 0;
+ host_cpu_load_info_data_t cpuinfo;
+ mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
+ if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO,
+ (host_info_t)&cpuinfo, &count) == KERN_SUCCESS) {
+ for (int i = 0; i < CPU_STATE_MAX; i++) total += cpuinfo.cpu_ticks[i];
+ busy = total - cpuinfo.cpu_ticks[CPU_STATE_IDLE];
+ }
+ return std::make_pair(busy, total);
+}
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // GPR_APPLE
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_unsupported.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_unsupported.cc
new file mode 100644
index 0000000000..80fb8b6da1
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_unsupported.cc
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#if !defined(GPR_LINUX) && !defined(GPR_WINDOWS) && !defined(GPR_APPLE)
+
+#include <grpc/support/log.h>
+
+#include "src/cpp/server/load_reporter/get_cpu_stats.h"
+
+namespace grpc {
+namespace load_reporter {
+
+std::pair<uint64_t, uint64_t> GetCpuStatsImpl() {
+ uint64_t busy = 0, total = 0;
+ gpr_log(GPR_ERROR,
+ "Platforms other than Linux, Windows, and MacOS are not supported.");
+ return std::make_pair(busy, total);
+}
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // !defined(GPR_LINUX) && !defined(GPR_WINDOWS) && !defined(GPR_APPLE)
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_windows.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_windows.cc
new file mode 100644
index 0000000000..0a98e848a2
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/get_cpu_stats_windows.cc
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_WINDOWS
+
+#include <windows.h>
+#include <cstdint>
+
+#include "src/cpp/server/load_reporter/get_cpu_stats.h"
+
+namespace grpc {
+namespace load_reporter {
+
+namespace {
+
+uint64_t FiletimeToInt(const FILETIME& ft) {
+ ULARGE_INTEGER i;
+ i.LowPart = ft.dwLowDateTime;
+ i.HighPart = ft.dwHighDateTime;
+ return i.QuadPart;
+}
+
+} // namespace
+
+std::pair<uint64_t, uint64_t> GetCpuStatsImpl() {
+ uint64_t busy = 0, total = 0;
+ FILETIME idle, kernel, user;
+ if (GetSystemTimes(&idle, &kernel, &user) != 0) {
+ total = FiletimeToInt(kernel) + FiletimeToInt(user);
+ busy = total - FiletimeToInt(idle);
+ }
+ return std::make_pair(busy, total);
+}
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // GPR_WINDOWS
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.cc
new file mode 100644
index 0000000000..f07fa812a7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.cc
@@ -0,0 +1,338 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <stdio.h>
+#include <cstdlib>
+#include <set>
+#include <unordered_map>
+#include <vector>
+
+#include "src/core/lib/iomgr/socket_utils.h"
+#include "src/cpp/server/load_reporter/load_data_store.h"
+
+namespace grpc {
+namespace load_reporter {
+
+// Some helper functions.
+namespace {
+
+// Given a map from type K to a set of value type V, finds the set associated
+// with the given key and erases the value from the set. If the set becomes
+// empty, also erases the key-set pair. Returns true if the value is erased
+// successfully.
+template <typename K, typename V>
+bool UnorderedMapOfSetEraseKeyValue(std::unordered_map<K, std::set<V>>& map,
+ const K& key, const V& value) {
+ auto it = map.find(key);
+ if (it != map.end()) {
+ size_t erased = it->second.erase(value);
+ if (it->second.size() == 0) {
+ map.erase(it);
+ }
+ return erased;
+ }
+ return false;
+}
+
+// Given a map from type K to a set of value type V, removes the given key and
+// the associated set, and returns the set. Returns an empty set if the key is
+// not found.
+template <typename K, typename V>
+std::set<V> UnorderedMapOfSetExtract(std::unordered_map<K, std::set<V>>& map,
+ const K& key) {
+ auto it = map.find(key);
+ if (it != map.end()) {
+ auto set = std::move(it->second);
+ map.erase(it);
+ return set;
+ }
+ return {};
+}
+
+// From a non-empty container, returns a pointer to a random element.
+template <typename C>
+const typename C::value_type* RandomElement(const C& container) {
+ GPR_ASSERT(!container.empty());
+ auto it = container.begin();
+ std::advance(it, std::rand() % container.size());
+ return &(*it);
+}
+
+} // namespace
+
+LoadRecordKey::LoadRecordKey(const TString& client_ip_and_token,
+ TString user_id)
+ : user_id_(std::move(user_id)) {
+ GPR_ASSERT(client_ip_and_token.size() >= 2);
+ int ip_hex_size;
+ GPR_ASSERT(sscanf(client_ip_and_token.substr(0, 2).c_str(), "%d",
+ &ip_hex_size) == 1);
+ GPR_ASSERT(ip_hex_size == 0 || ip_hex_size == kIpv4AddressLength ||
+ ip_hex_size == kIpv6AddressLength);
+ size_t cur_pos = 2;
+ client_ip_hex_ = client_ip_and_token.substr(cur_pos, ip_hex_size);
+ cur_pos += ip_hex_size;
+ if (client_ip_and_token.size() - cur_pos < kLbIdLength) {
+ lb_id_ = kInvalidLbId;
+ lb_tag_ = "";
+ } else {
+ lb_id_ = client_ip_and_token.substr(cur_pos, kLbIdLength);
+ lb_tag_ = client_ip_and_token.substr(cur_pos + kLbIdLength);
+ }
+}
+
+TString LoadRecordKey::GetClientIpBytes() const {
+ if (client_ip_hex_.empty()) {
+ return "";
+ } else if (client_ip_hex_.size() == kIpv4AddressLength) {
+ uint32_t ip_bytes;
+ if (sscanf(client_ip_hex_.c_str(), "%x", &ip_bytes) != 1) {
+ gpr_log(GPR_ERROR,
+ "Can't parse client IP (%s) from a hex string to an integer.",
+ client_ip_hex_.c_str());
+ return "";
+ }
+ ip_bytes = grpc_htonl(ip_bytes);
+ return TString(reinterpret_cast<const char*>(&ip_bytes),
+ sizeof(ip_bytes));
+ } else if (client_ip_hex_.size() == kIpv6AddressLength) {
+ uint32_t ip_bytes[4];
+ for (size_t i = 0; i < 4; ++i) {
+      if (sscanf(client_ip_hex_.substr(i * 8, 8).c_str(), "%x",
+ ip_bytes + i) != 1) {
+ gpr_log(
+ GPR_ERROR,
+ "Can't parse client IP part (%s) from a hex string to an integer.",
+            client_ip_hex_.substr(i * 8, 8).c_str());
+ return "";
+ }
+ ip_bytes[i] = grpc_htonl(ip_bytes[i]);
+ }
+ return TString(reinterpret_cast<const char*>(ip_bytes),
+ sizeof(ip_bytes));
+ } else {
+ GPR_UNREACHABLE_CODE(return "");
+ }
+}
+
+LoadRecordValue::LoadRecordValue(TString metric_name, uint64_t num_calls,
+ double total_metric_value) {
+ call_metrics_.emplace(std::move(metric_name),
+ CallMetricValue(num_calls, total_metric_value));
+}
+
+void PerBalancerStore::MergeRow(const LoadRecordKey& key,
+ const LoadRecordValue& value) {
+ // During suspension, the load data received will be dropped.
+ if (!suspended_) {
+ load_record_map_[key].MergeFrom(value);
+ gpr_log(GPR_DEBUG,
+ "[PerBalancerStore %p] Load data merged (Key: %s, Value: %s).",
+ this, key.ToString().c_str(), value.ToString().c_str());
+ } else {
+ gpr_log(GPR_DEBUG,
+ "[PerBalancerStore %p] Load data dropped (Key: %s, Value: %s).",
+ this, key.ToString().c_str(), value.ToString().c_str());
+ }
+ // We always keep track of num_calls_in_progress_, so that when this
+ // store is resumed, we still have a correct value of
+ // num_calls_in_progress_.
+ GPR_ASSERT(static_cast<int64_t>(num_calls_in_progress_) +
+ value.GetNumCallsInProgressDelta() >=
+ 0);
+ num_calls_in_progress_ += value.GetNumCallsInProgressDelta();
+}
+
+void PerBalancerStore::Suspend() {
+ suspended_ = true;
+ load_record_map_.clear();
+ gpr_log(GPR_DEBUG, "[PerBalancerStore %p] Suspended.", this);
+}
+
+void PerBalancerStore::Resume() {
+ suspended_ = false;
+ gpr_log(GPR_DEBUG, "[PerBalancerStore %p] Resumed.", this);
+}
+
+uint64_t PerBalancerStore::GetNumCallsInProgressForReport() {
+ GPR_ASSERT(!suspended_);
+ last_reported_num_calls_in_progress_ = num_calls_in_progress_;
+ return num_calls_in_progress_;
+}
+
+void PerHostStore::ReportStreamCreated(const TString& lb_id,
+ const TString& load_key) {
+ GPR_ASSERT(lb_id != kInvalidLbId);
+ SetUpForNewLbId(lb_id, load_key);
+  // Prior to this stream, there was no load balancer receiving reports, so we
+  // may have unassigned orphaned stores to assign to this new balancer.
+ // TODO(juanlishen): If the load key of this new stream is the same with
+ // some previously adopted orphan store, we may want to take the orphan to
+ // this stream. Need to discuss with LB team.
+ if (assigned_stores_.size() == 1) {
+ for (const auto& p : per_balancer_stores_) {
+ const TString& other_lb_id = p.first;
+ const std::unique_ptr<PerBalancerStore>& orphaned_store = p.second;
+ if (other_lb_id != lb_id) {
+ orphaned_store->Resume();
+ AssignOrphanedStore(orphaned_store.get(), lb_id);
+ }
+ }
+ }
+ // The first connected balancer will adopt the kInvalidLbId.
+ if (per_balancer_stores_.size() == 1) {
+ SetUpForNewLbId(kInvalidLbId, "");
+ ReportStreamClosed(kInvalidLbId);
+ }
+}
+
+void PerHostStore::ReportStreamClosed(const TString& lb_id) {
+ auto it_store_for_gone_lb = per_balancer_stores_.find(lb_id);
+ GPR_ASSERT(it_store_for_gone_lb != per_balancer_stores_.end());
+ // Remove this closed stream from our records.
+ GPR_ASSERT(UnorderedMapOfSetEraseKeyValue(
+ load_key_to_receiving_lb_ids_, it_store_for_gone_lb->second->load_key(),
+ lb_id));
+ std::set<PerBalancerStore*> orphaned_stores =
+ UnorderedMapOfSetExtract(assigned_stores_, lb_id);
+ // The stores that were assigned to this balancer are orphaned now. They
+ // should be re-assigned to other balancers which are still receiving reports.
+ for (PerBalancerStore* orphaned_store : orphaned_stores) {
+ const TString* new_receiver = nullptr;
+ auto it = load_key_to_receiving_lb_ids_.find(orphaned_store->load_key());
+ if (it != load_key_to_receiving_lb_ids_.end()) {
+ // First, try to pick from the active balancers with the same load key.
+ new_receiver = RandomElement(it->second);
+ } else if (!assigned_stores_.empty()) {
+ // If failed, pick from all the remaining active balancers.
+ new_receiver = &(RandomElement(assigned_stores_)->first);
+ }
+ if (new_receiver != nullptr) {
+ AssignOrphanedStore(orphaned_store, *new_receiver);
+ } else {
+ // Load data for an LB ID that can't be assigned to any stream should
+ // be dropped.
+ orphaned_store->Suspend();
+ }
+ }
+}
+
+PerBalancerStore* PerHostStore::FindPerBalancerStore(
+ const TString& lb_id) const {
+ return per_balancer_stores_.find(lb_id) != per_balancer_stores_.end()
+ ? per_balancer_stores_.find(lb_id)->second.get()
+ : nullptr;
+}
+
+const std::set<PerBalancerStore*>* PerHostStore::GetAssignedStores(
+ const TString& lb_id) const {
+ auto it = assigned_stores_.find(lb_id);
+ if (it == assigned_stores_.end()) return nullptr;
+ return &(it->second);
+}
+
+void PerHostStore::AssignOrphanedStore(PerBalancerStore* orphaned_store,
+ const TString& new_receiver) {
+ auto it = assigned_stores_.find(new_receiver);
+ GPR_ASSERT(it != assigned_stores_.end());
+ it->second.insert(orphaned_store);
+ gpr_log(GPR_INFO,
+ "[PerHostStore %p] Re-assigned orphaned store (%p) with original LB"
+ " ID of %s to new receiver %s",
+ this, orphaned_store, orphaned_store->lb_id().c_str(),
+ new_receiver.c_str());
+}
+
+void PerHostStore::SetUpForNewLbId(const TString& lb_id,
+ const TString& load_key) {
+ // The top-level caller (i.e., LoadReportService) should guarantee the
+ // lb_id is unique for each reporting stream.
+ GPR_ASSERT(per_balancer_stores_.find(lb_id) == per_balancer_stores_.end());
+ GPR_ASSERT(assigned_stores_.find(lb_id) == assigned_stores_.end());
+ load_key_to_receiving_lb_ids_[load_key].insert(lb_id);
+ std::unique_ptr<PerBalancerStore> per_balancer_store(
+ new PerBalancerStore(lb_id, load_key));
+ assigned_stores_[lb_id] = {per_balancer_store.get()};
+ per_balancer_stores_[lb_id] = std::move(per_balancer_store);
+}
+
+PerBalancerStore* LoadDataStore::FindPerBalancerStore(
+ const string& hostname, const string& lb_id) const {
+ auto it = per_host_stores_.find(hostname);
+ if (it != per_host_stores_.end()) {
+ const PerHostStore& per_host_store = it->second;
+ return per_host_store.FindPerBalancerStore(lb_id);
+ } else {
+ return nullptr;
+ }
+}
+
+void LoadDataStore::MergeRow(const TString& hostname,
+ const LoadRecordKey& key,
+ const LoadRecordValue& value) {
+ PerBalancerStore* per_balancer_store =
+ FindPerBalancerStore(hostname, key.lb_id());
+ if (per_balancer_store != nullptr) {
+ per_balancer_store->MergeRow(key, value);
+ return;
+ }
+ // Unknown LB ID. Track it until its number of in-progress calls drops to
+ // zero.
+ int64_t in_progress_delta = value.GetNumCallsInProgressDelta();
+ if (in_progress_delta != 0) {
+ auto it_tracker = unknown_balancer_id_trackers_.find(key.lb_id());
+ if (it_tracker == unknown_balancer_id_trackers_.end()) {
+ gpr_log(
+ GPR_DEBUG,
+ "[LoadDataStore %p] Start tracking unknown balancer (lb_id_: %s).",
+ this, key.lb_id().c_str());
+ unknown_balancer_id_trackers_.insert(
+ {key.lb_id(), static_cast<uint64_t>(in_progress_delta)});
+ } else if ((it_tracker->second += in_progress_delta) == 0) {
+ unknown_balancer_id_trackers_.erase(it_tracker);
+ gpr_log(GPR_DEBUG,
+ "[LoadDataStore %p] Stop tracking unknown balancer (lb_id_: %s).",
+ this, key.lb_id().c_str());
+ }
+ }
+}
+
+const std::set<PerBalancerStore*>* LoadDataStore::GetAssignedStores(
+ const TString& hostname, const TString& lb_id) {
+ auto it = per_host_stores_.find(hostname);
+ if (it == per_host_stores_.end()) return nullptr;
+ return it->second.GetAssignedStores(lb_id);
+}
+
+void LoadDataStore::ReportStreamCreated(const TString& hostname,
+ const TString& lb_id,
+ const TString& load_key) {
+ per_host_stores_[hostname].ReportStreamCreated(lb_id, load_key);
+}
+
+void LoadDataStore::ReportStreamClosed(const TString& hostname,
+ const TString& lb_id) {
+ auto it_per_host_store = per_host_stores_.find(hostname);
+ GPR_ASSERT(it_per_host_store != per_host_stores_.end());
+ it_per_host_store->second.ReportStreamClosed(lb_id);
+}
+
+} // namespace load_reporter
+} // namespace grpc
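
The two-argument LoadRecordKey constructor above expects client_ip_and_token laid out as a two-digit decimal length of the IP hex string ("00", "08" or "32"), followed by the IP in hex, an 8-character LB ID, and finally the LB tag. An illustrative value (all fields here are made up):

    #include <grpc/support/log.h>

    #include "src/cpp/server/load_reporter/load_data_store.h"

    void LoadRecordKeyLayoutExample() {
      // "08"       -> the client IP is encoded as 8 hex characters (IPv4),
      // "7f000001" -> 127.0.0.1 in hex,
      // "lbid0001" -> the 8-character LB ID,
      // "tag-a"    -> everything after the LB ID becomes the LB tag.
      grpc::load_reporter::LoadRecordKey key("087f000001lbid0001tag-a",
                                             /*user_id=*/"user-42");
      GPR_ASSERT(key.client_ip_hex() == "7f000001");
      GPR_ASSERT(key.lb_id() == "lbid0001");
      GPR_ASSERT(key.lb_tag() == "tag-a");
    }
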
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.h b/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.h
new file mode 100644
index 0000000000..61ba618331
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/load_data_store.h
@@ -0,0 +1,348 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_SRC_CPP_SERVER_LOAD_REPORTER_LOAD_DATA_STORE_H
+#define GRPC_SRC_CPP_SERVER_LOAD_REPORTER_LOAD_DATA_STORE_H
+
+#include <grpc/support/port_platform.h>
+
+#include <memory>
+#include <set>
+#include <unordered_map>
+
+#include <grpc/support/log.h>
+#include <grpcpp/impl/codegen/config.h>
+
+#include "src/cpp/server/load_reporter/constants.h"
+
+#include <util/string/cast.h>
+
+namespace grpc {
+namespace load_reporter {
+
+// The load data storage is organized in a hierarchy. The LoadDataStore is the
+// top-level data store. In LoadDataStore, for each host we keep a
+// PerHostStore, in which for each balancer we keep a PerBalancerStore. Each
+// PerBalancerStore maintains a map of load records, mapping from LoadRecordKey
+// to LoadRecordValue. The LoadRecordValue contains a map of customized call
+// metrics, mapping from a call metric name to the CallMetricValue.
+
+// The value of a customized call metric.
+class CallMetricValue {
+ public:
+ explicit CallMetricValue(uint64_t num_calls = 0,
+ double total_metric_value = 0)
+ : num_calls_(num_calls), total_metric_value_(total_metric_value) {}
+
+ void MergeFrom(CallMetricValue other) {
+ num_calls_ += other.num_calls_;
+ total_metric_value_ += other.total_metric_value_;
+ }
+
+ // Getters.
+ uint64_t num_calls() const { return num_calls_; }
+ double total_metric_value() const { return total_metric_value_; }
+
+ private:
+ // The number of calls that finished with this metric.
+ uint64_t num_calls_ = 0;
+ // The sum of metric values across all the calls that finished with this
+ // metric.
+ double total_metric_value_ = 0;
+};
+
+// The key of a load record.
+class LoadRecordKey {
+ public:
+ LoadRecordKey(TString lb_id, TString lb_tag, TString user_id,
+ TString client_ip_hex)
+ : lb_id_(std::move(lb_id)),
+ lb_tag_(std::move(lb_tag)),
+ user_id_(std::move(user_id)),
+ client_ip_hex_(std::move(client_ip_hex)) {}
+
+ // Parses the input client_ip_and_token to set client IP, LB ID, and LB tag.
+ LoadRecordKey(const TString& client_ip_and_token, TString user_id);
+
+ TString ToString() const {
+ return "[lb_id_=" + lb_id_ + ", lb_tag_=" + lb_tag_ +
+ ", user_id_=" + user_id_ + ", client_ip_hex_=" + client_ip_hex_ +
+ "]";
+ }
+
+ bool operator==(const LoadRecordKey& other) const {
+ return lb_id_ == other.lb_id_ && lb_tag_ == other.lb_tag_ &&
+ user_id_ == other.user_id_ && client_ip_hex_ == other.client_ip_hex_;
+ }
+
+ // Gets the client IP bytes in network order (i.e., big-endian).
+ TString GetClientIpBytes() const;
+
+ // Getters.
+ const TString& lb_id() const { return lb_id_; }
+ const TString& lb_tag() const { return lb_tag_; }
+ const TString& user_id() const { return user_id_; }
+ const TString& client_ip_hex() const { return client_ip_hex_; }
+
+ struct Hasher {
+ void hash_combine(size_t* seed, const TString& k) const {
+ *seed ^= std::hash<TString>()(k) + 0x9e3779b9 + (*seed << 6) +
+ (*seed >> 2);
+ }
+
+ size_t operator()(const LoadRecordKey& k) const {
+ size_t h = 0;
+ hash_combine(&h, k.lb_id_);
+ hash_combine(&h, k.lb_tag_);
+ hash_combine(&h, k.user_id_);
+ hash_combine(&h, k.client_ip_hex_);
+ return h;
+ }
+ };
+
+ private:
+ TString lb_id_;
+ TString lb_tag_;
+ TString user_id_;
+ TString client_ip_hex_;
+};
+
+// The value of a load record.
+class LoadRecordValue {
+ public:
+ explicit LoadRecordValue(uint64_t start_count = 0, uint64_t ok_count = 0,
+ uint64_t error_count = 0, uint64_t bytes_sent = 0,
+ uint64_t bytes_recv = 0, uint64_t latency_ms = 0)
+ : start_count_(start_count),
+ ok_count_(ok_count),
+ error_count_(error_count),
+ bytes_sent_(bytes_sent),
+ bytes_recv_(bytes_recv),
+ latency_ms_(latency_ms) {}
+
+ LoadRecordValue(TString metric_name, uint64_t num_calls,
+ double total_metric_value);
+
+ void MergeFrom(const LoadRecordValue& other) {
+ start_count_ += other.start_count_;
+ ok_count_ += other.ok_count_;
+ error_count_ += other.error_count_;
+ bytes_sent_ += other.bytes_sent_;
+ bytes_recv_ += other.bytes_recv_;
+ latency_ms_ += other.latency_ms_;
+ for (const auto& p : other.call_metrics_) {
+ const TString& key = p.first;
+ const CallMetricValue& value = p.second;
+ call_metrics_[key].MergeFrom(value);
+ }
+ }
+
+ int64_t GetNumCallsInProgressDelta() const {
+ return static_cast<int64_t>(start_count_ - ok_count_ - error_count_);
+ }
+
+ TString ToString() const {
+ return "[start_count_=" + ::ToString(start_count_) +
+ ", ok_count_=" + ::ToString(ok_count_) +
+ ", error_count_=" + ::ToString(error_count_) +
+ ", bytes_sent_=" + ::ToString(bytes_sent_) +
+ ", bytes_recv_=" + ::ToString(bytes_recv_) +
+ ", latency_ms_=" + ::ToString(latency_ms_) + ", " +
+ ::ToString(call_metrics_.size()) + " other call metric(s)]";
+ }
+
+ bool InsertCallMetric(const TString& metric_name,
+ const CallMetricValue& metric_value) {
+ return call_metrics_.insert({metric_name, metric_value}).second;
+ }
+
+ // Getters.
+ uint64_t start_count() const { return start_count_; }
+ uint64_t ok_count() const { return ok_count_; }
+ uint64_t error_count() const { return error_count_; }
+ uint64_t bytes_sent() const { return bytes_sent_; }
+ uint64_t bytes_recv() const { return bytes_recv_; }
+ uint64_t latency_ms() const { return latency_ms_; }
+ const std::unordered_map<TString, CallMetricValue>& call_metrics() const {
+ return call_metrics_;
+ }
+
+ private:
+ uint64_t start_count_ = 0;
+ uint64_t ok_count_ = 0;
+ uint64_t error_count_ = 0;
+ uint64_t bytes_sent_ = 0;
+ uint64_t bytes_recv_ = 0;
+ uint64_t latency_ms_ = 0;
+ std::unordered_map<TString, CallMetricValue> call_metrics_;
+};
+
+// Stores the data associated with a particular LB ID.
+class PerBalancerStore {
+ public:
+ using LoadRecordMap =
+ std::unordered_map<LoadRecordKey, LoadRecordValue, LoadRecordKey::Hasher>;
+
+ PerBalancerStore(TString lb_id, TString load_key)
+ : lb_id_(std::move(lb_id)), load_key_(std::move(load_key)) {}
+
+ // Merge a load record with the given key and value if the store is not
+ // suspended.
+ void MergeRow(const LoadRecordKey& key, const LoadRecordValue& value);
+
+ // Suspend this store, so that no detailed load data will be recorded.
+ void Suspend();
+ // Resume this store from suspension.
+ void Resume();
+ // Is this store suspended or not?
+ bool IsSuspended() const { return suspended_; }
+
+ bool IsNumCallsInProgressChangedSinceLastReport() const {
+ return num_calls_in_progress_ != last_reported_num_calls_in_progress_;
+ }
+
+ uint64_t GetNumCallsInProgressForReport();
+
+ TString ToString() {
+ return "[PerBalancerStore lb_id_=" + lb_id_ + " load_key_=" + load_key_ +
+ "]";
+ }
+
+ void ClearLoadRecordMap() { load_record_map_.clear(); }
+
+ // Getters.
+ const TString& lb_id() const { return lb_id_; }
+ const TString& load_key() const { return load_key_; }
+ const LoadRecordMap& load_record_map() const { return load_record_map_; }
+
+ private:
+ TString lb_id_;
+ // TODO(juanlishen): Use bytestring protobuf type?
+ TString load_key_;
+ LoadRecordMap load_record_map_;
+ uint64_t num_calls_in_progress_ = 0;
+ uint64_t last_reported_num_calls_in_progress_ = 0;
+ bool suspended_ = false;
+};
+
+// Stores the data associated with a particular host.
+class PerHostStore {
+ public:
+ // When a report stream is created, a PerBalancerStore is created for the
+ // LB ID (guaranteed unique) associated with that stream. If it is the only
+ // active store, adopt all the orphaned stores. If it is the first created
+ // store, adopt the store of kInvalidLbId.
+ void ReportStreamCreated(const TString& lb_id,
+ const TString& load_key);
+
+ // When a report stream is closed, the PerBalancerStores assigned to the
+  // associated LB ID need to be re-assigned to other active balancers,
+ // ideally with the same load key. If there is no active balancer, we have
+ // to suspend those stores and drop the incoming load data until they are
+ // resumed.
+ void ReportStreamClosed(const TString& lb_id);
+
+ // Returns null if not found. Caller doesn't own the returned store.
+ PerBalancerStore* FindPerBalancerStore(const TString& lb_id) const;
+
+ // Returns null if lb_id is not found. The returned pointer points to the
+ // underlying data structure, which is not owned by the caller.
+ const std::set<PerBalancerStore*>* GetAssignedStores(
+ const TString& lb_id) const;
+
+ private:
+ // Creates a PerBalancerStore for the given LB ID, assigns the store to
+ // itself, and records the LB ID to the load key.
+ void SetUpForNewLbId(const TString& lb_id, const TString& load_key);
+
+ void AssignOrphanedStore(PerBalancerStore* orphaned_store,
+ const TString& new_receiver);
+
+ std::unordered_map<TString, std::set<TString>>
+ load_key_to_receiving_lb_ids_;
+
+ // Key: LB ID. The key set includes all the LB IDs that have been
+ // allocated for reporting streams so far.
+ // Value: the unique pointer to the PerBalancerStore of the LB ID.
+ std::unordered_map<TString, std::unique_ptr<PerBalancerStore>>
+ per_balancer_stores_;
+
+ // Key: LB ID. The key set includes the LB IDs of the balancers that are
+  // currently receiving reports.
+ // Value: the set of raw pointers to the PerBalancerStores assigned to the LB
+ // ID. Note that the sets in assigned_stores_ form a division of the value set
+ // of per_balancer_stores_.
+ std::unordered_map<TString, std::set<PerBalancerStore*>> assigned_stores_;
+};
+
+// Thread-unsafe two-level bookkeeper of all the load data.
+// Note: We never remove any store objects from this class, as per the
+// current spec. That's because premature removal of the store objects
+// may lead to loss of critical information, e.g., mapping from lb_id to
+// load_key, and the number of in-progress calls. Such loss will cause
+// information inconsistency when the balancer is re-connected. Keeping
+// all the stores should be fine for PerHostStore, since we assume there
+// should only be a few hostnames. But it's a potential problem for
+// PerBalancerStore.
+class LoadDataStore {
+ public:
+ // Returns null if not found. Caller doesn't own the returned store.
+ PerBalancerStore* FindPerBalancerStore(const TString& hostname,
+ const TString& lb_id) const;
+
+ // Returns null if hostname or lb_id is not found. The returned pointer points
+ // to the underlying data structure, which is not owned by the caller.
+ const std::set<PerBalancerStore*>* GetAssignedStores(const string& hostname,
+ const string& lb_id);
+
+ // If a PerBalancerStore can be found by the hostname and LB ID in
+ // LoadRecordKey, the load data will be merged to that store. Otherwise,
+ // only track the number of the in-progress calls for this unknown LB ID.
+ void MergeRow(const TString& hostname, const LoadRecordKey& key,
+ const LoadRecordValue& value);
+
+ // Is the given lb_id a tracked unknown LB ID (i.e., the LB ID was associated
+ // with some received load data but unknown to this load data store)?
+ bool IsTrackedUnknownBalancerId(const TString& lb_id) const {
+ return unknown_balancer_id_trackers_.find(lb_id) !=
+ unknown_balancer_id_trackers_.end();
+ }
+
+ // Wrapper around PerHostStore::ReportStreamCreated.
+ void ReportStreamCreated(const TString& hostname,
+ const TString& lb_id,
+ const TString& load_key);
+
+ // Wrapper around PerHostStore::ReportStreamClosed.
+ void ReportStreamClosed(const TString& hostname,
+ const TString& lb_id);
+
+ private:
+  // Buffered data that was fetched from Census but hasn't been sent to the
+  // balancer. We need to keep this data ourselves because Census will
+ // delete the data once it's returned.
+ std::unordered_map<TString, PerHostStore> per_host_stores_;
+
+ // Tracks the number of in-progress calls for each unknown LB ID.
+ std::unordered_map<TString, uint64_t> unknown_balancer_id_trackers_;
+};
+
+} // namespace load_reporter
+} // namespace grpc
+
+#endif // GRPC_SRC_CPP_SERVER_LOAD_REPORTER_LOAD_DATA_STORE_H
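
To see how the pieces fit together, here is a rough usage sketch; the hostname, LB ID and load key are placeholders, and callers are responsible for locking since the class is documented as thread-unsafe:

    #include <set>

    #include "src/cpp/server/load_reporter/load_data_store.h"

    void LoadDataStoreSketch() {
      grpc::load_reporter::LoadDataStore store;

      // A balancer opened a reporting stream for this host.
      store.ReportStreamCreated("example-host", /*lb_id=*/"lbid0001",
                                /*load_key=*/"key-a");

      // Merge one finished call, keyed by (LB ID, tag, user, client IP).
      grpc::load_reporter::LoadRecordKey key(
          /*lb_id=*/"lbid0001", /*lb_tag=*/"tag-a", /*user_id=*/"user-42",
          /*client_ip_hex=*/"7f000001");
      grpc::load_reporter::LoadRecordValue value(/*start_count=*/1,
                                                 /*ok_count=*/1);
      store.MergeRow("example-host", key, value);

      // When building a report, fetch the stores assigned to this balancer;
      // the returned set stays owned by the LoadDataStore.
      const std::set<grpc::load_reporter::PerBalancerStore*>* assigned =
          store.GetAssignedStores("example-host", "lbid0001");
      (void)assigned;

      // The stream went away: its stores are re-assigned or suspended.
      store.ReportStreamClosed("example-host", "lbid0001");
    }
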
diff --git a/contrib/libs/grpc/src/cpp/server/load_reporter/util.cc b/contrib/libs/grpc/src/cpp/server/load_reporter/util.cc
new file mode 100644
index 0000000000..24ad9f3f24
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/load_reporter/util.cc
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpcpp/ext/server_load_reporting.h>
+
+#include <cmath>
+
+#include <grpc/support/log.h>
+
+namespace grpc {
+namespace load_reporter {
+namespace experimental {
+
+void AddLoadReportingCost(grpc::ServerContext* ctx,
+ const TString& cost_name, double cost_value) {
+ if (std::isnormal(cost_value)) {
+ TString buf;
+ buf.resize(sizeof(cost_value) + cost_name.size());
+ memcpy(&(*buf.begin()), &cost_value, sizeof(cost_value));
+ memcpy(&(*buf.begin()) + sizeof(cost_value), cost_name.data(),
+ cost_name.size());
+ ctx->AddTrailingMetadata(GRPC_LB_COST_MD_KEY, buf);
+ } else {
+ gpr_log(GPR_ERROR, "Call metric value is not normal.");
+ }
+}
+
+} // namespace experimental
+} // namespace load_reporter
+} // namespace grpc
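
A server handler that wants to attach a custom call metric would invoke the helper above before finishing the RPC. In this sketch the metric name is arbitrary; values that are not normal floating-point numbers (0, NaN, infinity) are only logged and dropped:

    #include <grpcpp/ext/server_load_reporting.h>
    #include <grpcpp/server_context.h>

    void RecordQueryCost(grpc::ServerContext* ctx, double cpu_seconds) {
      // Encodes the (value, name) pair into the call's trailing metadata so
      // the load reporter can pick it up when the call finishes.
      grpc::load_reporter::experimental::AddLoadReportingCost(
          ctx, /*cost_name=*/"query_cpu_seconds", cpu_seconds);
    }
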
diff --git a/contrib/libs/grpc/src/cpp/server/secure_server_credentials.cc b/contrib/libs/grpc/src/cpp/server/secure_server_credentials.cc
new file mode 100644
index 0000000000..732602bcb7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/secure_server_credentials.cc
@@ -0,0 +1,155 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <functional>
+#include <map>
+#include <memory>
+
+#include <grpcpp/impl/codegen/slice.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/security/auth_metadata_processor.h>
+
+#include "src/cpp/common/secure_auth_context.h"
+#include "src/cpp/server/secure_server_credentials.h"
+
+namespace grpc {
+
+void AuthMetadataProcessorAyncWrapper::Destroy(void* wrapper) {
+ auto* w = static_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
+ delete w;
+}
+
+void AuthMetadataProcessorAyncWrapper::Process(
+ void* wrapper, grpc_auth_context* context, const grpc_metadata* md,
+ size_t num_md, grpc_process_auth_metadata_done_cb cb, void* user_data) {
+ auto* w = static_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
+ if (!w->processor_) {
+ // Early exit.
+ cb(user_data, nullptr, 0, nullptr, 0, GRPC_STATUS_OK, nullptr);
+ return;
+ }
+ if (w->processor_->IsBlocking()) {
+ w->thread_pool_->Add([w, context, md, num_md, cb, user_data] {
+ w->AuthMetadataProcessorAyncWrapper::InvokeProcessor(context, md, num_md,
+ cb, user_data);
+ });
+ } else {
+    // Non-blocking processor: invoke it directly on this thread.
+ w->InvokeProcessor(context, md, num_md, cb, user_data);
+ }
+}
+
+void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
+ grpc_auth_context* ctx, const grpc_metadata* md, size_t num_md,
+ grpc_process_auth_metadata_done_cb cb, void* user_data) {
+ AuthMetadataProcessor::InputMetadata metadata;
+ for (size_t i = 0; i < num_md; i++) {
+ metadata.insert(std::make_pair(StringRefFromSlice(&md[i].key),
+ StringRefFromSlice(&md[i].value)));
+ }
+ SecureAuthContext context(ctx);
+ AuthMetadataProcessor::OutputMetadata consumed_metadata;
+ AuthMetadataProcessor::OutputMetadata response_metadata;
+
+ Status status = processor_->Process(metadata, &context, &consumed_metadata,
+ &response_metadata);
+
+ std::vector<grpc_metadata> consumed_md;
+ for (const auto& consumed : consumed_metadata) {
+ grpc_metadata md_entry;
+ md_entry.key = SliceReferencingString(consumed.first);
+ md_entry.value = SliceReferencingString(consumed.second);
+ md_entry.flags = 0;
+ consumed_md.push_back(md_entry);
+ }
+ std::vector<grpc_metadata> response_md;
+ for (const auto& response : response_metadata) {
+ grpc_metadata md_entry;
+ md_entry.key = SliceReferencingString(response.first);
+ md_entry.value = SliceReferencingString(response.second);
+ md_entry.flags = 0;
+ response_md.push_back(md_entry);
+ }
+ auto consumed_md_data = consumed_md.empty() ? nullptr : &consumed_md[0];
+ auto response_md_data = response_md.empty() ? nullptr : &response_md[0];
+ cb(user_data, consumed_md_data, consumed_md.size(), response_md_data,
+ response_md.size(), static_cast<grpc_status_code>(status.error_code()),
+ status.error_message().c_str());
+}
+
+int SecureServerCredentials::AddPortToServer(const TString& addr,
+ grpc_server* server) {
+ return grpc_server_add_secure_http2_port(server, addr.c_str(), creds_);
+}
+
+void SecureServerCredentials::SetAuthMetadataProcessor(
+ const std::shared_ptr<grpc::AuthMetadataProcessor>& processor) {
+ auto* wrapper = new grpc::AuthMetadataProcessorAyncWrapper(processor);
+ grpc_server_credentials_set_auth_metadata_processor(
+ creds_, {grpc::AuthMetadataProcessorAyncWrapper::Process,
+ grpc::AuthMetadataProcessorAyncWrapper::Destroy, wrapper});
+}
+
+std::shared_ptr<ServerCredentials> SslServerCredentials(
+ const grpc::SslServerCredentialsOptions& options) {
+ std::vector<grpc_ssl_pem_key_cert_pair> pem_key_cert_pairs;
+ for (const auto& key_cert_pair : options.pem_key_cert_pairs) {
+ grpc_ssl_pem_key_cert_pair p = {key_cert_pair.private_key.c_str(),
+ key_cert_pair.cert_chain.c_str()};
+ pem_key_cert_pairs.push_back(p);
+ }
+ grpc_server_credentials* c_creds = grpc_ssl_server_credentials_create_ex(
+ options.pem_root_certs.empty() ? nullptr : options.pem_root_certs.c_str(),
+ pem_key_cert_pairs.empty() ? nullptr : &pem_key_cert_pairs[0],
+ pem_key_cert_pairs.size(),
+ options.force_client_auth
+ ? GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+ : options.client_certificate_request,
+ nullptr);
+ return std::shared_ptr<ServerCredentials>(
+ new SecureServerCredentials(c_creds));
+}
+
+namespace experimental {
+
+std::shared_ptr<ServerCredentials> AltsServerCredentials(
+ const AltsServerCredentialsOptions& /* options */) {
+ grpc_alts_credentials_options* c_options =
+ grpc_alts_credentials_server_options_create();
+ grpc_server_credentials* c_creds =
+ grpc_alts_server_credentials_create(c_options);
+ grpc_alts_credentials_options_destroy(c_options);
+ return std::shared_ptr<ServerCredentials>(
+ new SecureServerCredentials(c_creds));
+}
+
+std::shared_ptr<ServerCredentials> LocalServerCredentials(
+ grpc_local_connect_type type) {
+ return std::shared_ptr<ServerCredentials>(
+ new SecureServerCredentials(grpc_local_server_credentials_create(type)));
+}
+
+std::shared_ptr<ServerCredentials> TlsServerCredentials(
+ const grpc::experimental::TlsCredentialsOptions& options) {
+ grpc::GrpcLibraryCodegen init;
+ return std::shared_ptr<ServerCredentials>(new SecureServerCredentials(
+ grpc_tls_server_credentials_create(options.c_credentials_options())));
+}
+
+} // namespace experimental
+} // namespace grpc
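
A sketch of how SslServerCredentials() above is typically fed; the PEM strings are placeholders that a real server would load from disk:

    #include <memory>

    #include <grpcpp/security/server_credentials.h>

    std::shared_ptr<grpc::ServerCredentials> MakeSslServerCreds(
        const TString& server_key_pem, const TString& server_cert_pem,
        const TString& client_root_ca_pem) {
      // Require and verify client certificates signed by the given root.
      grpc::SslServerCredentialsOptions options(
          GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY);
      options.pem_root_certs = client_root_ca_pem;
      options.pem_key_cert_pairs.push_back({server_key_pem, server_cert_pem});
      return grpc::SslServerCredentials(options);
    }
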
diff --git a/contrib/libs/grpc/src/cpp/server/secure_server_credentials.h b/contrib/libs/grpc/src/cpp/server/secure_server_credentials.h
new file mode 100644
index 0000000000..9e3fb3f9eb
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/secure_server_credentials.h
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_SERVER_SECURE_SERVER_CREDENTIALS_H
+#define GRPC_INTERNAL_CPP_SERVER_SECURE_SERVER_CREDENTIALS_H
+
+#include <memory>
+
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/security/tls_credentials_options.h>
+
+#include <grpc/grpc_security.h>
+
+#include "src/cpp/server/thread_pool_interface.h"
+
+namespace grpc {
+
+class SecureServerCredentials;
+
+class AuthMetadataProcessorAyncWrapper final {
+ public:
+ static void Destroy(void* wrapper);
+
+ static void Process(void* wrapper, grpc_auth_context* context,
+ const grpc_metadata* md, size_t num_md,
+ grpc_process_auth_metadata_done_cb cb, void* user_data);
+
+ AuthMetadataProcessorAyncWrapper(
+ const std::shared_ptr<AuthMetadataProcessor>& processor)
+ : processor_(processor) {
+ if (processor && processor->IsBlocking()) {
+ thread_pool_.reset(CreateDefaultThreadPool());
+ }
+ }
+
+ private:
+ void InvokeProcessor(grpc_auth_context* context, const grpc_metadata* md,
+ size_t num_md, grpc_process_auth_metadata_done_cb cb,
+ void* user_data);
+ std::unique_ptr<ThreadPoolInterface> thread_pool_;
+ std::shared_ptr<AuthMetadataProcessor> processor_;
+};
+
+class SecureServerCredentials final : public ServerCredentials {
+ public:
+ explicit SecureServerCredentials(grpc_server_credentials* creds)
+ : creds_(creds) {}
+ ~SecureServerCredentials() override {
+ grpc_server_credentials_release(creds_);
+ }
+
+ int AddPortToServer(const TString& addr, grpc_server* server) override;
+
+ void SetAuthMetadataProcessor(
+ const std::shared_ptr<grpc::AuthMetadataProcessor>& processor) override;
+
+ private:
+ grpc_server_credentials* creds_;
+ std::unique_ptr<grpc::AuthMetadataProcessorAyncWrapper> processor_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_SERVER_SECURE_SERVER_CREDENTIALS_H
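
The wrapper above adapts a user-supplied grpc::AuthMetadataProcessor to the C core callback. A sketch of such a processor gating calls on a hypothetical "x-api-key" header (the key name and expected value are made up):

    #include <grpcpp/security/auth_metadata_processor.h>

    class ApiKeyAuthProcessor : public grpc::AuthMetadataProcessor {
     public:
      // Cheap check: run inline instead of on the wrapper's thread pool.
      bool IsBlocking() const override { return false; }

      grpc::Status Process(const InputMetadata& auth_metadata,
                           grpc::AuthContext* /*context*/,
                           OutputMetadata* consumed_auth_metadata,
                           OutputMetadata* /*response_metadata*/) override {
        auto it = auth_metadata.find("x-api-key");
        if (it == auth_metadata.end()) {
          return grpc::Status(grpc::StatusCode::UNAUTHENTICATED,
                              "missing x-api-key metadata");
        }
        if (it->second != grpc::string_ref("expected-key")) {
          return grpc::Status(grpc::StatusCode::PERMISSION_DENIED,
                              "bad x-api-key");
        }
        // Consume the header so it is not visible to the handler.
        consumed_auth_metadata->insert(
            {TString(it->first.data(), it->first.size()),
             TString(it->second.data(), it->second.size())});
        return grpc::Status::OK;
      }
    };

Such a processor would then be installed with SetAuthMetadataProcessor() on a secure credentials object (for example the one returned by SslServerCredentials()) before the server is built.
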
diff --git a/contrib/libs/grpc/src/cpp/server/server_builder.cc b/contrib/libs/grpc/src/cpp/server/server_builder.cc
new file mode 100644
index 0000000000..0cc00b365f
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_builder.cc
@@ -0,0 +1,434 @@
+/*
+ *
+ * Copyright 2015-2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/server_builder.h>
+
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpcpp/impl/service_type.h>
+#include <grpcpp/resource_quota.h>
+#include <grpcpp/server.h>
+
+#include <utility>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gpr/useful.h"
+#include "src/cpp/server/external_connection_acceptor_impl.h"
+#include "src/cpp/server/thread_pool_interface.h"
+
+namespace grpc {
+
+static std::vector<std::unique_ptr<ServerBuilderPlugin> (*)()>*
+ g_plugin_factory_list;
+static gpr_once once_init_plugin_list = GPR_ONCE_INIT;
+
+static void do_plugin_list_init(void) {
+ g_plugin_factory_list =
+ new std::vector<std::unique_ptr<ServerBuilderPlugin> (*)()>();
+}
+
+ServerBuilder::ServerBuilder()
+ : max_receive_message_size_(INT_MIN),
+ max_send_message_size_(INT_MIN),
+ sync_server_settings_(SyncServerSettings()),
+ resource_quota_(nullptr) {
+ gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
+ for (const auto& value : *g_plugin_factory_list) {
+ plugins_.emplace_back(value());
+ }
+
+  // All compression algorithms are enabled by default.
+ enabled_compression_algorithms_bitset_ =
+ (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ memset(&maybe_default_compression_level_, 0,
+ sizeof(maybe_default_compression_level_));
+ memset(&maybe_default_compression_algorithm_, 0,
+ sizeof(maybe_default_compression_algorithm_));
+}
+
+ServerBuilder::~ServerBuilder() {
+ if (resource_quota_ != nullptr) {
+ grpc_resource_quota_unref(resource_quota_);
+ }
+}
+
+std::unique_ptr<grpc::ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
+ bool is_frequently_polled) {
+ grpc::ServerCompletionQueue* cq = new grpc::ServerCompletionQueue(
+ GRPC_CQ_NEXT,
+ is_frequently_polled ? GRPC_CQ_DEFAULT_POLLING : GRPC_CQ_NON_LISTENING,
+ nullptr);
+ cqs_.push_back(cq);
+ return std::unique_ptr<grpc::ServerCompletionQueue>(cq);
+}
+
+ServerBuilder& ServerBuilder::RegisterService(Service* service) {
+ services_.emplace_back(new NamedService(service));
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::RegisterService(const TString& addr,
+ Service* service) {
+ services_.emplace_back(new NamedService(addr, service));
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
+ AsyncGenericService* service) {
+ if (generic_service_ || callback_generic_service_) {
+ gpr_log(GPR_ERROR,
+ "Adding multiple generic services is unsupported for now. "
+ "Dropping the service %p",
+ (void*)service);
+ } else {
+ generic_service_ = service;
+ }
+ return *this;
+}
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ServerBuilder& ServerBuilder::RegisterCallbackGenericService(
+ CallbackGenericService* service) {
+ if (generic_service_ || callback_generic_service_) {
+ gpr_log(GPR_ERROR,
+ "Adding multiple generic services is unsupported for now. "
+ "Dropping the service %p",
+ (void*)service);
+ } else {
+ callback_generic_service_ = service;
+ }
+ return *this;
+}
+#else
+ServerBuilder& ServerBuilder::experimental_type::RegisterCallbackGenericService(
+ experimental::CallbackGenericService* service) {
+ if (builder_->generic_service_ || builder_->callback_generic_service_) {
+ gpr_log(GPR_ERROR,
+ "Adding multiple generic services is unsupported for now. "
+ "Dropping the service %p",
+ (void*)service);
+ } else {
+ builder_->callback_generic_service_ = service;
+ }
+ return *builder_;
+}
+#endif
+
+std::unique_ptr<grpc::experimental::ExternalConnectionAcceptor>
+ServerBuilder::experimental_type::AddExternalConnectionAcceptor(
+ experimental_type::ExternalConnectionType type,
+ std::shared_ptr<ServerCredentials> creds) {
+ TString name_prefix("external:");
+ char count_str[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(static_cast<long>(builder_->acceptors_.size()), count_str);
+ builder_->acceptors_.emplace_back(
+ std::make_shared<grpc::internal::ExternalConnectionAcceptorImpl>(
+ name_prefix.append(count_str), type, creds));
+ return builder_->acceptors_.back()->GetAcceptor();
+}
+
+ServerBuilder& ServerBuilder::SetOption(
+ std::unique_ptr<ServerBuilderOption> option) {
+ options_.push_back(std::move(option));
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetSyncServerOption(
+ ServerBuilder::SyncServerOption option, int val) {
+ switch (option) {
+ case NUM_CQS:
+ sync_server_settings_.num_cqs = val;
+ break;
+ case MIN_POLLERS:
+ sync_server_settings_.min_pollers = val;
+ break;
+ case MAX_POLLERS:
+ sync_server_settings_.max_pollers = val;
+ break;
+ case CQ_TIMEOUT_MSEC:
+ sync_server_settings_.cq_timeout_msec = val;
+ break;
+ }
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetCompressionAlgorithmSupportStatus(
+ grpc_compression_algorithm algorithm, bool enabled) {
+ if (enabled) {
+ GPR_BITSET(&enabled_compression_algorithms_bitset_, algorithm);
+ } else {
+ GPR_BITCLEAR(&enabled_compression_algorithms_bitset_, algorithm);
+ }
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetDefaultCompressionLevel(
+ grpc_compression_level level) {
+ maybe_default_compression_level_.is_set = true;
+ maybe_default_compression_level_.level = level;
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm(
+ grpc_compression_algorithm algorithm) {
+ maybe_default_compression_algorithm_.is_set = true;
+ maybe_default_compression_algorithm_.algorithm = algorithm;
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetResourceQuota(
+ const grpc::ResourceQuota& resource_quota) {
+ if (resource_quota_ != nullptr) {
+ grpc_resource_quota_unref(resource_quota_);
+ }
+ resource_quota_ = resource_quota.c_resource_quota();
+ grpc_resource_quota_ref(resource_quota_);
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::AddListeningPort(
+ const TString& addr_uri, std::shared_ptr<ServerCredentials> creds,
+ int* selected_port) {
+ const TString uri_scheme = "dns:";
+ TString addr = addr_uri;
+ if (addr_uri.compare(0, uri_scheme.size(), uri_scheme) == 0) {
+ size_t pos = uri_scheme.size();
+ while (addr_uri[pos] == '/') ++pos; // Skip slashes.
+ addr = addr_uri.substr(pos);
+ }
+ Port port = {addr, std::move(creds), selected_port};
+ ports_.push_back(port);
+ return *this;
+}
+
+std::unique_ptr<grpc::Server> ServerBuilder::BuildAndStart() {
+ grpc::ChannelArguments args;
+ if (max_receive_message_size_ >= -1) {
+ args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_);
+ }
+ if (max_send_message_size_ >= -1) {
+ args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, max_send_message_size_);
+ }
+ for (const auto& option : options_) {
+ option->UpdateArguments(&args);
+ option->UpdatePlugins(&plugins_);
+ }
+ args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
+ enabled_compression_algorithms_bitset_);
+ if (maybe_default_compression_level_.is_set) {
+ args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL,
+ maybe_default_compression_level_.level);
+ }
+ if (maybe_default_compression_algorithm_.is_set) {
+ args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
+ maybe_default_compression_algorithm_.algorithm);
+ }
+
+ if (resource_quota_ != nullptr) {
+ args.SetPointerWithVtable(GRPC_ARG_RESOURCE_QUOTA, resource_quota_,
+ grpc_resource_quota_arg_vtable());
+ }
+
+ for (const auto& plugin : plugins_) {
+ plugin->UpdateServerBuilder(this);
+ plugin->UpdateChannelArguments(&args);
+ }
+
+  // == Determine if the server has any synchronous methods ==
+ bool has_sync_methods = false;
+ for (const auto& value : services_) {
+ if (value->service->has_synchronous_methods()) {
+ has_sync_methods = true;
+ break;
+ }
+ }
+
+ if (!has_sync_methods) {
+ for (const auto& value : plugins_) {
+ if (value->has_sync_methods()) {
+ has_sync_methods = true;
+ break;
+ }
+ }
+ }
+
+  // If this is a Sync server, i.e. a server exposing the sync API, then the
+  // server needs to create some completion queues to listen for incoming
+  // requests. 'sync_server_cqs' are those internal completion queues.
+  //
+  // This is different from the completion queues added to the server via
+  // ServerBuilder's AddCompletionQueue() method (those completion queues
+  // are in the 'cqs_' member variable of the ServerBuilder object).
+ std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
+ sync_server_cqs(
+ std::make_shared<
+ std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>());
+
+ bool has_frequently_polled_cqs = false;
+ for (const auto& cq : cqs_) {
+ if (cq->IsFrequentlyPolled()) {
+ has_frequently_polled_cqs = true;
+ break;
+ }
+ }
+
+ // == Determine if the server has any callback methods ==
+ bool has_callback_methods = false;
+ for (const auto& service : services_) {
+ if (service->service->has_callback_methods()) {
+ has_callback_methods = true;
+ has_frequently_polled_cqs = true;
+ break;
+ }
+ }
+
+ const bool is_hybrid_server = has_sync_methods && has_frequently_polled_cqs;
+
+ if (has_sync_methods) {
+ grpc_cq_polling_type polling_type =
+ is_hybrid_server ? GRPC_CQ_NON_POLLING : GRPC_CQ_DEFAULT_POLLING;
+
+ // Create completion queues to listen to incoming rpc requests
+ for (int i = 0; i < sync_server_settings_.num_cqs; i++) {
+ sync_server_cqs->emplace_back(
+ new grpc::ServerCompletionQueue(GRPC_CQ_NEXT, polling_type, nullptr));
+ }
+ }
+
+ // TODO(vjpai): Add a section here for plugins once they can support callback
+ // methods
+
+ if (has_sync_methods) {
+ // This is a Sync server
+ gpr_log(GPR_INFO,
+ "Synchronous server. Num CQs: %d, Min pollers: %d, Max Pollers: "
+ "%d, CQ timeout (msec): %d",
+ sync_server_settings_.num_cqs, sync_server_settings_.min_pollers,
+ sync_server_settings_.max_pollers,
+ sync_server_settings_.cq_timeout_msec);
+ }
+
+ if (has_callback_methods) {
+ gpr_log(GPR_INFO, "Callback server.");
+ }
+
+ std::unique_ptr<grpc::Server> server(new grpc::Server(
+ &args, sync_server_cqs, sync_server_settings_.min_pollers,
+ sync_server_settings_.max_pollers, sync_server_settings_.cq_timeout_msec,
+ std::move(acceptors_), resource_quota_,
+ std::move(interceptor_creators_)));
+
+ ServerInitializer* initializer = server->initializer();
+
+  // Register all the completion queues with the server, i.e.:
+  // 1. sync_server_cqs: internal completion queues created if this is a sync
+  //    server
+  // 2. cqs_: completion queues added via the AddCompletionQueue() call
+
+ for (const auto& cq : *sync_server_cqs) {
+ grpc_server_register_completion_queue(server->server_, cq->cq(), nullptr);
+ has_frequently_polled_cqs = true;
+ }
+
+ if (has_callback_methods || callback_generic_service_ != nullptr) {
+ auto* cq = server->CallbackCQ();
+ grpc_server_register_completion_queue(server->server_, cq->cq(), nullptr);
+ }
+
+  // cqs_ contains the completion queues added by calling the ServerBuilder's
+  // AddCompletionQueue() API. Some of them may not be frequently polled (i.e.
+  // by calling Next() or AsyncNext()) and hence are not safe to be used for
+  // listening to incoming channels. Such completion queues must be registered
+  // as non-listening queues. In debug mode, these should have their server
+  // list tracked since they are provided to the user and must be shut down by
+  // the user after the server is shut down.
+ for (const auto& cq : cqs_) {
+ grpc_server_register_completion_queue(server->server_, cq->cq(), nullptr);
+ cq->RegisterServer(server.get());
+ }
+
+ if (!has_frequently_polled_cqs) {
+ gpr_log(GPR_ERROR,
+ "At least one of the completion queues must be frequently polled");
+ return nullptr;
+ }
+
+ for (const auto& value : services_) {
+ if (!server->RegisterService(value->host.get(), value->service)) {
+ return nullptr;
+ }
+ }
+
+ for (const auto& value : plugins_) {
+ value->InitServer(initializer);
+ }
+
+ if (generic_service_) {
+ server->RegisterAsyncGenericService(generic_service_);
+ } else if (callback_generic_service_) {
+ server->RegisterCallbackGenericService(callback_generic_service_);
+ } else {
+ for (const auto& value : services_) {
+ if (value->service->has_generic_methods()) {
+ gpr_log(GPR_ERROR,
+ "Some methods were marked generic but there is no "
+ "generic service registered.");
+ return nullptr;
+ }
+ }
+ }
+
+ for (auto& port : ports_) {
+ int r = server->AddListeningPort(port.addr, port.creds.get());
+ if (!r) {
+ server->Shutdown();
+ return nullptr;
+ }
+ if (port.selected_port != nullptr) {
+ *port.selected_port = r;
+ }
+ }
+
+ auto cqs_data = cqs_.empty() ? nullptr : &cqs_[0];
+ server->Start(cqs_data, cqs_.size());
+
+ for (const auto& value : plugins_) {
+ value->Finish(initializer);
+ }
+
+ return server;
+}
+
+void ServerBuilder::InternalAddPluginFactory(
+ std::unique_ptr<ServerBuilderPlugin> (*CreatePlugin)()) {
+ gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
+ (*g_plugin_factory_list).push_back(CreatePlugin);
+}
+
+ServerBuilder& ServerBuilder::EnableWorkaround(grpc_workaround_list id) {
+ switch (id) {
+ case GRPC_WORKAROUND_ID_CRONET_COMPRESSION:
+ return AddChannelArgument(GRPC_ARG_WORKAROUND_CRONET_COMPRESSION, 1);
+ default:
+ gpr_log(GPR_ERROR, "Workaround %u does not exist or is obsolete.", id);
+ return *this;
+ }
+}
+
+} // namespace grpc
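A minimal sketch of how the ServerBuilder surface above is typically driven by
application code, assuming a hypothetical MyServiceImpl implementation of a
generated grpc::Service; BuildAndStart() returns nullptr on failure (for
example, when no frequently polled completion queue is available or a port
cannot be bound), so the result is checked before use:

  #include <memory>
  #include <grpcpp/grpcpp.h>

  // MyServiceImpl is a hypothetical generated-service subclass.
  void RunServer(MyServiceImpl* service) {
    grpc::ServerBuilder builder;
    int selected_port = 0;
    // AddListeningPort() strips an optional "dns:///" prefix; port 0 asks the
    // OS to pick a free port, which is reported back via selected_port.
    builder.AddListeningPort("dns:///0.0.0.0:0",
                             grpc::InsecureServerCredentials(),
                             &selected_port);
    builder.RegisterService(service);
    builder.SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, 2);
    grpc::ResourceQuota quota("server-rq");
    quota.SetMaxThreads(64);  // bounds the sync server thread pool
    builder.SetResourceQuota(quota);
    std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
    if (server != nullptr) {
      server->Wait();
    }
  }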
diff --git a/contrib/libs/grpc/src/cpp/server/server_callback.cc b/contrib/libs/grpc/src/cpp/server/server_callback.cc
new file mode 100644
index 0000000000..40aef8e735
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_callback.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/server_callback.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
+
+namespace grpc {
+namespace internal {
+
+void ServerCallbackCall::ScheduleOnDone(bool inline_ondone) {
+ if (inline_ondone) {
+ CallOnDone();
+ } else {
+ // Unlike other uses of closure, do not Ref or Unref here since at this
+ // point, all the Ref'fing and Unref'fing is done for this call.
+ grpc_core::ExecCtx exec_ctx;
+ struct ClosureWithArg {
+ grpc_closure closure;
+ ServerCallbackCall* call;
+ explicit ClosureWithArg(ServerCallbackCall* call_arg) : call(call_arg) {
+ GRPC_CLOSURE_INIT(&closure,
+ [](void* void_arg, grpc_error*) {
+ ClosureWithArg* arg =
+ static_cast<ClosureWithArg*>(void_arg);
+ arg->call->CallOnDone();
+ delete arg;
+ },
+ this, grpc_schedule_on_exec_ctx);
+ }
+ };
+ ClosureWithArg* arg = new ClosureWithArg(this);
+ grpc_core::Executor::Run(&arg->closure, GRPC_ERROR_NONE);
+ }
+}
+
+void ServerCallbackCall::CallOnCancel(ServerReactor* reactor) {
+ if (reactor->InternalInlineable()) {
+ reactor->OnCancel();
+ } else {
+ // Ref to make sure that the closure executes before the whole call gets
+ // destructed, and Unref within the closure.
+ Ref();
+ grpc_core::ExecCtx exec_ctx;
+ struct ClosureWithArg {
+ grpc_closure closure;
+ ServerCallbackCall* call;
+ ServerReactor* reactor;
+ ClosureWithArg(ServerCallbackCall* call_arg, ServerReactor* reactor_arg)
+ : call(call_arg), reactor(reactor_arg) {
+ GRPC_CLOSURE_INIT(&closure,
+ [](void* void_arg, grpc_error*) {
+ ClosureWithArg* arg =
+ static_cast<ClosureWithArg*>(void_arg);
+ arg->reactor->OnCancel();
+ arg->call->MaybeDone();
+ delete arg;
+ },
+ this, grpc_schedule_on_exec_ctx);
+ }
+ };
+ ClosureWithArg* arg = new ClosureWithArg(this, reactor);
+ grpc_core::Executor::Run(&arg->closure, GRPC_ERROR_NONE);
+ }
+}
+
+} // namespace internal
+} // namespace grpc
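On the application side, these hooks surface as a reactor's OnCancel() and
OnDone() overrides: CallOnCancel() above is what eventually invokes OnCancel(),
and ScheduleOnDone() invokes OnDone() once all activity on the RPC is over. A
minimal sketch, assuming the de-experimentalized spellings
(grpc::ServerUnaryReactor, CallbackService) and hypothetical generated types
echo::Echo, EchoRequest, and EchoResponse:

  #include <grpcpp/grpcpp.h>

  // echo::Echo::CallbackService and its messages are hypothetical generated
  // code; only the reactor hooks are the point of the sketch.
  class EchoServiceImpl final : public echo::Echo::CallbackService {
    grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* ctx,
                                   const echo::EchoRequest* /*request*/,
                                   echo::EchoResponse* /*response*/) override {
      class Reactor : public grpc::ServerUnaryReactor {
        void OnCancel() override {
          // Reached via ServerCallbackCall::CallOnCancel() when the client
          // cancels the RPC.
        }
        void OnDone() override {
          // Reached via ServerCallbackCall::ScheduleOnDone() once all activity
          // on the RPC has finished; safe to release the reactor here.
          delete this;
        }
      };
      auto* reactor = new Reactor;
      reactor->Finish(grpc::Status::OK);
      return reactor;
    }
  };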
diff --git a/contrib/libs/grpc/src/cpp/server/server_cc.cc b/contrib/libs/grpc/src/cpp/server/server_cc.cc
new file mode 100644
index 0000000000..c2a911c7f7
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_cc.cc
@@ -0,0 +1,1340 @@
+/*
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/server.h>
+
+#include <cstdlib>
+#include <sstream>
+#include <type_traits>
+#include <utility>
+
+#include <grpc/grpc.h>
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/completion_queue.h>
+#include <grpcpp/generic/async_generic_service.h>
+#include <grpcpp/impl/codegen/async_unary_call.h>
+#include <grpcpp/impl/codegen/byte_buffer.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/method_handler.h>
+#include <grpcpp/impl/codegen/server_interceptor.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/impl/rpc_service_method.h>
+#include <grpcpp/impl/server_initializer.h>
+#include <grpcpp/impl/service_type.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/time.h>
+
+#include "src/core/ext/transport/inproc/inproc_transport.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/surface/server.h"
+#include "src/cpp/client/create_channel_internal.h"
+#include "src/cpp/server/external_connection_acceptor_impl.h"
+#include "src/cpp/server/health/default_health_check_service.h"
+#include "src/cpp/thread_manager/thread_manager.h"
+
+#include <util/stream/str.h>
+
+namespace grpc {
+namespace {
+
+// The default value for maximum number of threads that can be created in the
+// sync server. This value of INT_MAX is chosen to match the default behavior if
+// no ResourceQuota is set. To modify the max number of threads in a sync
+// server, pass a custom ResourceQuota object (with the desired number of
+// max-threads set) to the server builder.
+#define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX
+
+class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
+ public:
+ ~DefaultGlobalCallbacks() override {}
+ void PreSynchronousRequest(ServerContext* /*context*/) override {}
+ void PostSynchronousRequest(ServerContext* /*context*/) override {}
+};
+
+std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
+gpr_once g_once_init_callbacks = GPR_ONCE_INIT;
+
+void InitGlobalCallbacks() {
+ if (!g_callbacks) {
+ g_callbacks.reset(new DefaultGlobalCallbacks());
+ }
+}
+
+class ShutdownTag : public internal::CompletionQueueTag {
+ public:
+ bool FinalizeResult(void** /*tag*/, bool* /*status*/) { return false; }
+};
+
+class DummyTag : public internal::CompletionQueueTag {
+ public:
+ bool FinalizeResult(void** /*tag*/, bool* /*status*/) { return true; }
+};
+
+class UnimplementedAsyncRequestContext {
+ protected:
+ UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}
+
+ GenericServerContext server_context_;
+ GenericServerAsyncReaderWriter generic_stream_;
+};
+
+// TODO(vjpai): Just for this file, use some contents of the experimental
+// namespace here to make the code easier to read below. Remove this when
+// de-experimentalized fully.
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+using ::grpc::experimental::CallbackGenericService;
+using ::grpc::experimental::CallbackServerContext;
+using ::grpc::experimental::GenericCallbackServerContext;
+#endif
+
+} // namespace
+
+ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
+ ServerInterface* server, ServerContext* context,
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
+ : server_(server),
+ context_(context),
+ stream_(stream),
+ call_cq_(call_cq),
+ notification_cq_(notification_cq),
+ tag_(tag),
+ delete_on_finalize_(delete_on_finalize),
+ call_(nullptr),
+ done_intercepting_(false) {
+ /* Set up interception state partially for the receive ops. call_wrapper_ is
+ * not filled at this point, but it will be filled before the interceptors are
+ * run. */
+ interceptor_methods_.SetCall(&call_wrapper_);
+ interceptor_methods_.SetReverse();
+ call_cq_->RegisterAvalanching(); // This op will trigger more ops
+}
+
+ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
+ call_cq_->CompleteAvalanching();
+}
+
+bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
+ bool* status) {
+ if (done_intercepting_) {
+ *tag = tag_;
+ if (delete_on_finalize_) {
+ delete this;
+ }
+ return true;
+ }
+ context_->set_call(call_);
+ context_->cq_ = call_cq_;
+ if (call_wrapper_.call() == nullptr) {
+ // Fill it since it is empty.
+ call_wrapper_ = internal::Call(
+ call_, server_, call_cq_, server_->max_receive_message_size(), nullptr);
+ }
+
+  // Just the pointers inside the call are copied here.
+ stream_->BindCall(&call_wrapper_);
+
+ if (*status && call_ && call_wrapper_.server_rpc_info()) {
+ done_intercepting_ = true;
+ // Set interception point for RECV INITIAL METADATA
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
+ interceptor_methods_.SetRecvInitialMetadata(&context_->client_metadata_);
+ if (interceptor_methods_.RunInterceptors(
+ [this]() { ContinueFinalizeResultAfterInterception(); })) {
+ // There are no interceptors to run. Continue
+ } else {
+ // There were interceptors to be run, so
+ // ContinueFinalizeResultAfterInterception will be run when interceptors
+ // are done.
+ return false;
+ }
+ }
+ if (*status && call_) {
+ context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
+ }
+ *tag = tag_;
+ if (delete_on_finalize_) {
+ delete this;
+ }
+ return true;
+}
+
+void ServerInterface::BaseAsyncRequest::
+ ContinueFinalizeResultAfterInterception() {
+ context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
+ // Queue a tag which will be returned immediately
+ grpc_core::ExecCtx exec_ctx;
+ grpc_cq_begin_op(notification_cq_->cq(), this);
+ grpc_cq_end_op(
+ notification_cq_->cq(), this, GRPC_ERROR_NONE,
+ [](void* /*arg*/, grpc_cq_completion* completion) { delete completion; },
+ nullptr, new grpc_cq_completion());
+}
+
+ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
+ ServerInterface* server, ServerContext* context,
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ ServerCompletionQueue* notification_cq, void* tag, const char* name,
+ internal::RpcMethod::RpcType type)
+ : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
+ true),
+ name_(name),
+ type_(type) {}
+
+void ServerInterface::RegisteredAsyncRequest::IssueRequest(
+ void* registered_method, grpc_byte_buffer** payload,
+ ServerCompletionQueue* notification_cq) {
+ // The following call_start_batch is internally-generated so no need for an
+ // explanatory log on failure.
+ GPR_ASSERT(grpc_server_request_registered_call(
+ server_->server(), registered_method, &call_,
+ &context_->deadline_, context_->client_metadata_.arr(),
+ payload, call_cq_->cq(), notification_cq->cq(),
+ this) == GRPC_CALL_OK);
+}
+
+ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
+ ServerInterface* server, GenericServerContext* context,
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
+ : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
+ delete_on_finalize) {
+ grpc_call_details_init(&call_details_);
+ GPR_ASSERT(notification_cq);
+ GPR_ASSERT(call_cq);
+ // The following call_start_batch is internally-generated so no need for an
+ // explanatory log on failure.
+ GPR_ASSERT(grpc_server_request_call(server->server(), &call_, &call_details_,
+ context->client_metadata_.arr(),
+ call_cq->cq(), notification_cq->cq(),
+ this) == GRPC_CALL_OK);
+}
+
+bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
+ bool* status) {
+ // If we are done intercepting, there is nothing more for us to do
+ if (done_intercepting_) {
+ return BaseAsyncRequest::FinalizeResult(tag, status);
+ }
+ // TODO(yangg) remove the copy here.
+ if (*status) {
+ static_cast<GenericServerContext*>(context_)->method_ =
+ StringFromCopiedSlice(call_details_.method);
+ static_cast<GenericServerContext*>(context_)->host_ =
+ StringFromCopiedSlice(call_details_.host);
+ context_->deadline_ = call_details_.deadline;
+ }
+ grpc_slice_unref(call_details_.method);
+ grpc_slice_unref(call_details_.host);
+ call_wrapper_ = internal::Call(
+ call_, server_, call_cq_, server_->max_receive_message_size(),
+ context_->set_server_rpc_info(
+ static_cast<GenericServerContext*>(context_)->method_.c_str(),
+ internal::RpcMethod::BIDI_STREAMING,
+ *server_->interceptor_creators()));
+ return BaseAsyncRequest::FinalizeResult(tag, status);
+}
+
+namespace {
+class ShutdownCallback : public grpc_experimental_completion_queue_functor {
+ public:
+ ShutdownCallback() {
+ functor_run = &ShutdownCallback::Run;
+ // Set inlineable to true since this callback is trivial and thus does not
+ // need to be run from the executor (triggering a thread hop). This should
+ // only be used by internal callbacks like this and not by user application
+ // code.
+ inlineable = true;
+ }
+ // TakeCQ takes ownership of the cq into the shutdown callback
+ // so that the shutdown callback will be responsible for destroying it
+ void TakeCQ(CompletionQueue* cq) { cq_ = cq; }
+
+ // The Run function will get invoked by the completion queue library
+ // when the shutdown is actually complete
+ static void Run(grpc_experimental_completion_queue_functor* cb, int) {
+ auto* callback = static_cast<ShutdownCallback*>(cb);
+ delete callback->cq_;
+ delete callback;
+ }
+
+ private:
+ CompletionQueue* cq_ = nullptr;
+};
+} // namespace
+
+/// Use private inheritance rather than composition only to establish order
+/// of construction, since the public base class should be constructed after the
+/// elements belonging to the private base class are constructed. This is not
+/// possible using true composition.
+class Server::UnimplementedAsyncRequest final
+ : private grpc::UnimplementedAsyncRequestContext,
+ public GenericAsyncRequest {
+ public:
+ UnimplementedAsyncRequest(ServerInterface* server,
+ grpc::ServerCompletionQueue* cq)
+ : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
+ nullptr, false) {}
+
+ bool FinalizeResult(void** tag, bool* status) override;
+
+ grpc::ServerContext* context() { return &server_context_; }
+ grpc::GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
+};
+
+/// UnimplementedAsyncResponse should not post user-visible completions to the
+/// C++ completion queue, but is generated as a CQ event by the core
+class Server::UnimplementedAsyncResponse final
+ : public grpc::internal::CallOpSet<
+ grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpServerSendStatus> {
+ public:
+ UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
+ ~UnimplementedAsyncResponse() { delete request_; }
+
+ bool FinalizeResult(void** tag, bool* status) override {
+ if (grpc::internal::CallOpSet<
+ grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpServerSendStatus>::FinalizeResult(tag,
+ status)) {
+ delete this;
+ } else {
+ // The tag was swallowed due to interception. We will see it again.
+ }
+ return false;
+ }
+
+ private:
+ UnimplementedAsyncRequest* const request_;
+};
+
+class Server::SyncRequest final : public grpc::internal::CompletionQueueTag {
+ public:
+ SyncRequest(grpc::internal::RpcServiceMethod* method, void* method_tag)
+ : method_(method),
+ method_tag_(method_tag),
+ in_flight_(false),
+ has_request_payload_(method->method_type() ==
+ grpc::internal::RpcMethod::NORMAL_RPC ||
+ method->method_type() ==
+ grpc::internal::RpcMethod::SERVER_STREAMING),
+ call_details_(nullptr),
+ cq_(nullptr) {
+ grpc_metadata_array_init(&request_metadata_);
+ }
+
+ ~SyncRequest() {
+ if (call_details_) {
+ delete call_details_;
+ }
+ grpc_metadata_array_destroy(&request_metadata_);
+ }
+
+ void SetupRequest() { cq_ = grpc_completion_queue_create_for_pluck(nullptr); }
+
+ void TeardownRequest() {
+ grpc_completion_queue_destroy(cq_);
+ cq_ = nullptr;
+ }
+
+ void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
+ GPR_ASSERT(cq_ && !in_flight_);
+ in_flight_ = true;
+ if (method_tag_) {
+ if (grpc_server_request_registered_call(
+ server, method_tag_, &call_, &deadline_, &request_metadata_,
+ has_request_payload_ ? &request_payload_ : nullptr, cq_,
+ notify_cq, this) != GRPC_CALL_OK) {
+ TeardownRequest();
+ return;
+ }
+ } else {
+ if (!call_details_) {
+ call_details_ = new grpc_call_details;
+ grpc_call_details_init(call_details_);
+ }
+ if (grpc_server_request_call(server, &call_, call_details_,
+ &request_metadata_, cq_, notify_cq,
+ this) != GRPC_CALL_OK) {
+ TeardownRequest();
+ return;
+ }
+ }
+ }
+
+ void PostShutdownCleanup() {
+ if (call_) {
+ grpc_call_unref(call_);
+ call_ = nullptr;
+ }
+ if (cq_) {
+ grpc_completion_queue_destroy(cq_);
+ cq_ = nullptr;
+ }
+ }
+
+ bool FinalizeResult(void** /*tag*/, bool* status) override {
+ if (!*status) {
+ grpc_completion_queue_destroy(cq_);
+ cq_ = nullptr;
+ }
+ if (call_details_) {
+ deadline_ = call_details_->deadline;
+ grpc_call_details_destroy(call_details_);
+ grpc_call_details_init(call_details_);
+ }
+ return true;
+ }
+
+ // The CallData class represents a call that is "active" as opposed
+ // to just being requested. It wraps and takes ownership of the cq from
+ // the call request
+ class CallData final {
+ public:
+ explicit CallData(Server* server, SyncRequest* mrd)
+ : cq_(mrd->cq_),
+ ctx_(mrd->deadline_, &mrd->request_metadata_),
+ has_request_payload_(mrd->has_request_payload_),
+ request_payload_(has_request_payload_ ? mrd->request_payload_
+ : nullptr),
+ request_(nullptr),
+ method_(mrd->method_),
+ call_(
+ mrd->call_, server, &cq_, server->max_receive_message_size(),
+ ctx_.set_server_rpc_info(method_->name(), method_->method_type(),
+ server->interceptor_creators_)),
+ server_(server),
+ global_callbacks_(nullptr),
+ resources_(false) {
+ ctx_.set_call(mrd->call_);
+ ctx_.cq_ = &cq_;
+ GPR_ASSERT(mrd->in_flight_);
+ mrd->in_flight_ = false;
+ mrd->request_metadata_.count = 0;
+ }
+
+ ~CallData() {
+ if (has_request_payload_ && request_payload_) {
+ grpc_byte_buffer_destroy(request_payload_);
+ }
+ }
+
+ void Run(const std::shared_ptr<GlobalCallbacks>& global_callbacks,
+ bool resources) {
+ global_callbacks_ = global_callbacks;
+ resources_ = resources;
+
+ interceptor_methods_.SetCall(&call_);
+ interceptor_methods_.SetReverse();
+ // Set interception point for RECV INITIAL METADATA
+ interceptor_methods_.AddInterceptionHookPoint(
+ grpc::experimental::InterceptionHookPoints::
+ POST_RECV_INITIAL_METADATA);
+ interceptor_methods_.SetRecvInitialMetadata(&ctx_.client_metadata_);
+
+ if (has_request_payload_) {
+ // Set interception point for RECV MESSAGE
+ auto* handler = resources_ ? method_->handler()
+ : server_->resource_exhausted_handler_.get();
+ request_ = handler->Deserialize(call_.call(), request_payload_,
+ &request_status_, nullptr);
+
+ request_payload_ = nullptr;
+ interceptor_methods_.AddInterceptionHookPoint(
+ grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
+ interceptor_methods_.SetRecvMessage(request_, nullptr);
+ }
+
+ if (interceptor_methods_.RunInterceptors(
+ [this]() { ContinueRunAfterInterception(); })) {
+ ContinueRunAfterInterception();
+ } else {
+ // There were interceptors to be run, so ContinueRunAfterInterception
+ // will be run when interceptors are done.
+ }
+ }
+
+ void ContinueRunAfterInterception() {
+ {
+ ctx_.BeginCompletionOp(&call_, nullptr, nullptr);
+ global_callbacks_->PreSynchronousRequest(&ctx_);
+ auto* handler = resources_ ? method_->handler()
+ : server_->resource_exhausted_handler_.get();
+ handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
+ &call_, &ctx_, request_, request_status_, nullptr, nullptr));
+ request_ = nullptr;
+ global_callbacks_->PostSynchronousRequest(&ctx_);
+
+ cq_.Shutdown();
+
+ grpc::internal::CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
+ cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
+
+ /* Ensure the cq_ is shutdown */
+ grpc::DummyTag ignored_tag;
+ GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);
+ }
+ delete this;
+ }
+
+ private:
+ grpc::CompletionQueue cq_;
+ grpc::ServerContext ctx_;
+ const bool has_request_payload_;
+ grpc_byte_buffer* request_payload_;
+ void* request_;
+ grpc::Status request_status_;
+ grpc::internal::RpcServiceMethod* const method_;
+ grpc::internal::Call call_;
+ Server* server_;
+ std::shared_ptr<GlobalCallbacks> global_callbacks_;
+ bool resources_;
+ grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
+ };
+
+ private:
+ grpc::internal::RpcServiceMethod* const method_;
+ void* const method_tag_;
+ bool in_flight_;
+ const bool has_request_payload_;
+ grpc_call* call_;
+ grpc_call_details* call_details_;
+ gpr_timespec deadline_;
+ grpc_metadata_array request_metadata_;
+ grpc_byte_buffer* request_payload_;
+ grpc_completion_queue* cq_;
+};
+
+template <class ServerContextType>
+class Server::CallbackRequest final
+ : public grpc::internal::CompletionQueueTag {
+ public:
+ static_assert(
+ std::is_base_of<grpc::CallbackServerContext, ServerContextType>::value,
+ "ServerContextType must be derived from CallbackServerContext");
+
+ // For codegen services, the value of method represents the defined
+ // characteristics of the method being requested. For generic services, method
+ // is nullptr since these services don't have pre-defined methods.
+ CallbackRequest(Server* server, grpc::internal::RpcServiceMethod* method,
+ grpc::CompletionQueue* cq,
+ grpc_core::Server::RegisteredCallAllocation* data)
+ : server_(server),
+ method_(method),
+ has_request_payload_(method->method_type() ==
+ grpc::internal::RpcMethod::NORMAL_RPC ||
+ method->method_type() ==
+ grpc::internal::RpcMethod::SERVER_STREAMING),
+ cq_(cq),
+ tag_(this) {
+ CommonSetup(server, data);
+ data->deadline = &deadline_;
+ data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
+ }
+
+ // For generic services, method is nullptr since these services don't have
+ // pre-defined methods.
+ CallbackRequest(Server* server, grpc::CompletionQueue* cq,
+ grpc_core::Server::BatchCallAllocation* data)
+ : server_(server),
+ method_(nullptr),
+ has_request_payload_(false),
+ call_details_(new grpc_call_details),
+ cq_(cq),
+ tag_(this) {
+ CommonSetup(server, data);
+ grpc_call_details_init(call_details_);
+ data->details = call_details_;
+ }
+
+ ~CallbackRequest() {
+ delete call_details_;
+ grpc_metadata_array_destroy(&request_metadata_);
+ if (has_request_payload_ && request_payload_) {
+ grpc_byte_buffer_destroy(request_payload_);
+ }
+ server_->UnrefWithPossibleNotify();
+ }
+
+ // Needs specialization to account for different processing of metadata
+ // in generic API
+ bool FinalizeResult(void** tag, bool* status) override;
+
+ private:
+ // method_name needs to be specialized between named method and generic
+ const char* method_name() const;
+
+ class CallbackCallTag : public grpc_experimental_completion_queue_functor {
+ public:
+ CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
+ : req_(req) {
+ functor_run = &CallbackCallTag::StaticRun;
+ // Set inlineable to true since this callback is internally-controlled
+ // without taking any locks, and thus does not need to be run from the
+ // executor (which triggers a thread hop). This should only be used by
+ // internal callbacks like this and not by user application code. The work
+ // here is actually non-trivial, but there is no chance of having user
+ // locks conflict with each other so it's ok to run inlined.
+ inlineable = true;
+ }
+
+ // force_run can not be performed on a tag if operations using this tag
+ // have been sent to PerformOpsOnCall. It is intended for error conditions
+ // that are detected before the operations are internally processed.
+ void force_run(bool ok) { Run(ok); }
+
+ private:
+ Server::CallbackRequest<ServerContextType>* req_;
+ grpc::internal::Call* call_;
+
+ static void StaticRun(grpc_experimental_completion_queue_functor* cb,
+ int ok) {
+ static_cast<CallbackCallTag*>(cb)->Run(static_cast<bool>(ok));
+ }
+ void Run(bool ok) {
+ void* ignored = req_;
+ bool new_ok = ok;
+ GPR_ASSERT(!req_->FinalizeResult(&ignored, &new_ok));
+ GPR_ASSERT(ignored == req_);
+
+ if (!ok) {
+ // The call has been shutdown.
+ // Delete its contents to free up the request.
+ delete req_;
+ return;
+ }
+
+ // Bind the call, deadline, and metadata from what we got
+ req_->ctx_.set_call(req_->call_);
+ req_->ctx_.cq_ = req_->cq_;
+ req_->ctx_.BindDeadlineAndMetadata(req_->deadline_,
+ &req_->request_metadata_);
+ req_->request_metadata_.count = 0;
+
+ // Create a C++ Call to control the underlying core call
+ call_ =
+ new (grpc_call_arena_alloc(req_->call_, sizeof(grpc::internal::Call)))
+ grpc::internal::Call(
+ req_->call_, req_->server_, req_->cq_,
+ req_->server_->max_receive_message_size(),
+ req_->ctx_.set_server_rpc_info(
+ req_->method_name(),
+ (req_->method_ != nullptr)
+ ? req_->method_->method_type()
+ : grpc::internal::RpcMethod::BIDI_STREAMING,
+ req_->server_->interceptor_creators_));
+
+ req_->interceptor_methods_.SetCall(call_);
+ req_->interceptor_methods_.SetReverse();
+ // Set interception point for RECV INITIAL METADATA
+ req_->interceptor_methods_.AddInterceptionHookPoint(
+ grpc::experimental::InterceptionHookPoints::
+ POST_RECV_INITIAL_METADATA);
+ req_->interceptor_methods_.SetRecvInitialMetadata(
+ &req_->ctx_.client_metadata_);
+
+ if (req_->has_request_payload_) {
+ // Set interception point for RECV MESSAGE
+ req_->request_ = req_->method_->handler()->Deserialize(
+ req_->call_, req_->request_payload_, &req_->request_status_,
+ &req_->handler_data_);
+ req_->request_payload_ = nullptr;
+ req_->interceptor_methods_.AddInterceptionHookPoint(
+ grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
+ req_->interceptor_methods_.SetRecvMessage(req_->request_, nullptr);
+ }
+
+ if (req_->interceptor_methods_.RunInterceptors(
+ [this] { ContinueRunAfterInterception(); })) {
+ ContinueRunAfterInterception();
+ } else {
+ // There were interceptors to be run, so ContinueRunAfterInterception
+ // will be run when interceptors are done.
+ }
+ }
+ void ContinueRunAfterInterception() {
+ auto* handler = (req_->method_ != nullptr)
+ ? req_->method_->handler()
+ : req_->server_->generic_handler_.get();
+ handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
+ call_, &req_->ctx_, req_->request_, req_->request_status_,
+ req_->handler_data_, [this] { delete req_; }));
+ }
+ };
+
+ template <class CallAllocation>
+ void CommonSetup(Server* server, CallAllocation* data) {
+ server->Ref();
+ grpc_metadata_array_init(&request_metadata_);
+ data->tag = &tag_;
+ data->call = &call_;
+ data->initial_metadata = &request_metadata_;
+ }
+
+ Server* const server_;
+ grpc::internal::RpcServiceMethod* const method_;
+ const bool has_request_payload_;
+ grpc_byte_buffer* request_payload_ = nullptr;
+ void* request_ = nullptr;
+ void* handler_data_ = nullptr;
+ grpc::Status request_status_;
+ grpc_call_details* const call_details_ = nullptr;
+ grpc_call* call_;
+ gpr_timespec deadline_;
+ grpc_metadata_array request_metadata_;
+ grpc::CompletionQueue* const cq_;
+ CallbackCallTag tag_;
+ ServerContextType ctx_;
+ grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
+};
+
+template <>
+bool Server::CallbackRequest<grpc::CallbackServerContext>::FinalizeResult(
+ void** /*tag*/, bool* /*status*/) {
+ return false;
+}
+
+template <>
+bool Server::CallbackRequest<
+ grpc::GenericCallbackServerContext>::FinalizeResult(void** /*tag*/,
+ bool* status) {
+ if (*status) {
+ deadline_ = call_details_->deadline;
+ // TODO(yangg) remove the copy here
+ ctx_.method_ = grpc::StringFromCopiedSlice(call_details_->method);
+ ctx_.host_ = grpc::StringFromCopiedSlice(call_details_->host);
+ }
+ grpc_slice_unref(call_details_->method);
+ grpc_slice_unref(call_details_->host);
+ return false;
+}
+
+template <>
+const char* Server::CallbackRequest<grpc::CallbackServerContext>::method_name()
+ const {
+ return method_->name();
+}
+
+template <>
+const char* Server::CallbackRequest<
+ grpc::GenericCallbackServerContext>::method_name() const {
+ return ctx_.method().c_str();
+}
+
+// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
+// manages a pool of threads that poll for incoming Sync RPCs and call the
+// appropriate RPC handlers
+class Server::SyncRequestThreadManager : public grpc::ThreadManager {
+ public:
+ SyncRequestThreadManager(Server* server, grpc::CompletionQueue* server_cq,
+ std::shared_ptr<GlobalCallbacks> global_callbacks,
+ grpc_resource_quota* rq, int min_pollers,
+ int max_pollers, int cq_timeout_msec)
+ : ThreadManager("SyncServer", rq, min_pollers, max_pollers),
+ server_(server),
+ server_cq_(server_cq),
+ cq_timeout_msec_(cq_timeout_msec),
+ global_callbacks_(std::move(global_callbacks)) {}
+
+ WorkStatus PollForWork(void** tag, bool* ok) override {
+ *tag = nullptr;
+ // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
+ // right now
+ gpr_timespec deadline =
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));
+
+ switch (server_cq_->AsyncNext(tag, ok, deadline)) {
+ case grpc::CompletionQueue::TIMEOUT:
+ return TIMEOUT;
+ case grpc::CompletionQueue::SHUTDOWN:
+ return SHUTDOWN;
+ case grpc::CompletionQueue::GOT_EVENT:
+ return WORK_FOUND;
+ }
+
+ GPR_UNREACHABLE_CODE(return TIMEOUT);
+ }
+
+ void DoWork(void* tag, bool ok, bool resources) override {
+ SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
+
+ if (!sync_req) {
+      // No tag. Nothing to work on. This is an unlikely scenario and possibly
+      // a bug in the RPC Manager implementation.
+ gpr_log(GPR_ERROR, "Sync server. DoWork() was called with NULL tag");
+ return;
+ }
+
+ if (ok) {
+      // CallData takes ownership of the completion queue and interceptors
+      // inside sync_req.
+ auto* cd = new SyncRequest::CallData(server_, sync_req);
+ // Prepare for the next request
+ if (!IsShutdown()) {
+ sync_req->SetupRequest(); // Create new completion queue for sync_req
+ sync_req->Request(server_->c_server(), server_cq_->cq());
+ }
+
+ GPR_TIMER_SCOPE("cd.Run()", 0);
+ cd->Run(global_callbacks_, resources);
+ }
+ // TODO (sreek) If ok is false here (which it isn't in case of
+ // grpc_request_registered_call), we should still re-queue the request
+ // object
+ }
+
+ void AddSyncMethod(grpc::internal::RpcServiceMethod* method, void* tag) {
+ sync_requests_.emplace_back(new SyncRequest(method, tag));
+ }
+
+ void AddUnknownSyncMethod() {
+ if (!sync_requests_.empty()) {
+ unknown_method_.reset(new grpc::internal::RpcServiceMethod(
+ "unknown", grpc::internal::RpcMethod::BIDI_STREAMING,
+ new grpc::internal::UnknownMethodHandler));
+ sync_requests_.emplace_back(
+ new SyncRequest(unknown_method_.get(), nullptr));
+ }
+ }
+
+ void Shutdown() override {
+ ThreadManager::Shutdown();
+ server_cq_->Shutdown();
+ }
+
+ void Wait() override {
+ ThreadManager::Wait();
+ // Drain any pending items from the queue
+ void* tag;
+ bool ok;
+ while (server_cq_->Next(&tag, &ok)) {
+ if (ok) {
+ // If a request was pulled off the queue, it means that the thread
+ // handling the request added it to the completion queue after shutdown
+ // was called - because the thread had already started and checked the
+ // shutdown flag before shutdown was called. In this case, we simply
+ // clean it up here, *after* calling wait on all the worker threads, at
+ // which point we are certain no in-flight requests will add more to the
+ // queue. This fixes an intermittent memory leak on shutdown.
+ SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
+ sync_req->PostShutdownCleanup();
+ }
+ }
+ }
+
+ void Start() {
+ if (!sync_requests_.empty()) {
+ for (const auto& value : sync_requests_) {
+ value->SetupRequest();
+ value->Request(server_->c_server(), server_cq_->cq());
+ }
+
+ Initialize(); // ThreadManager's Initialize()
+ }
+ }
+
+ private:
+ Server* server_;
+ grpc::CompletionQueue* server_cq_;
+ int cq_timeout_msec_;
+ std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
+ std::unique_ptr<grpc::internal::RpcServiceMethod> unknown_method_;
+ std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
+};
+
+static grpc::internal::GrpcLibraryInitializer g_gli_initializer;
+Server::Server(
+ grpc::ChannelArguments* args,
+ std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
+ sync_server_cqs,
+ int min_pollers, int max_pollers, int sync_cq_timeout_msec,
+ std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
+ acceptors,
+ grpc_resource_quota* server_rq,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators)
+ : acceptors_(std::move(acceptors)),
+ interceptor_creators_(std::move(interceptor_creators)),
+ max_receive_message_size_(INT_MIN),
+ sync_server_cqs_(std::move(sync_server_cqs)),
+ started_(false),
+ shutdown_(false),
+ shutdown_notified_(false),
+ server_(nullptr),
+ server_initializer_(new ServerInitializer(this)),
+ health_check_service_disabled_(false) {
+ g_gli_initializer.summon();
+ gpr_once_init(&grpc::g_once_init_callbacks, grpc::InitGlobalCallbacks);
+ global_callbacks_ = grpc::g_callbacks;
+ global_callbacks_->UpdateArguments(args);
+
+ if (sync_server_cqs_ != nullptr) {
+ bool default_rq_created = false;
+ if (server_rq == nullptr) {
+ server_rq = grpc_resource_quota_create("SyncServer-default-rq");
+ grpc_resource_quota_set_max_threads(server_rq,
+ DEFAULT_MAX_SYNC_SERVER_THREADS);
+ default_rq_created = true;
+ }
+
+ for (const auto& it : *sync_server_cqs_) {
+ sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
+ this, it.get(), global_callbacks_, server_rq, min_pollers,
+ max_pollers, sync_cq_timeout_msec));
+ }
+
+ if (default_rq_created) {
+ grpc_resource_quota_unref(server_rq);
+ }
+ }
+
+ for (auto& acceptor : acceptors_) {
+ acceptor->SetToChannelArgs(args);
+ }
+
+ grpc_channel_args channel_args;
+ args->SetChannelArgs(&channel_args);
+
+ for (size_t i = 0; i < channel_args.num_args; i++) {
+ if (0 == strcmp(channel_args.args[i].key,
+ grpc::kHealthCheckServiceInterfaceArg)) {
+ if (channel_args.args[i].value.pointer.p == nullptr) {
+ health_check_service_disabled_ = true;
+ } else {
+ health_check_service_.reset(
+ static_cast<grpc::HealthCheckServiceInterface*>(
+ channel_args.args[i].value.pointer.p));
+ }
+ }
+ if (0 ==
+ strcmp(channel_args.args[i].key, GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)) {
+ max_receive_message_size_ = channel_args.args[i].value.integer;
+ }
+ }
+ server_ = grpc_server_create(&channel_args, nullptr);
+}
+
+Server::~Server() {
+ {
+ grpc::internal::ReleasableMutexLock lock(&mu_);
+ if (started_ && !shutdown_) {
+ lock.Unlock();
+ Shutdown();
+ } else if (!started_) {
+ // Shutdown the completion queues
+ for (const auto& value : sync_req_mgrs_) {
+ value->Shutdown();
+ }
+ if (callback_cq_ != nullptr) {
+ callback_cq_->Shutdown();
+ callback_cq_ = nullptr;
+ }
+ }
+ }
+ // Destroy health check service before we destroy the C server so that
+ // it does not call grpc_server_request_registered_call() after the C
+ // server has been destroyed.
+ health_check_service_.reset();
+ grpc_server_destroy(server_);
+}
+
+void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
+ GPR_ASSERT(!grpc::g_callbacks);
+ GPR_ASSERT(callbacks);
+ grpc::g_callbacks.reset(callbacks);
+}
+
+grpc_server* Server::c_server() { return server_; }
+
+std::shared_ptr<grpc::Channel> Server::InProcessChannel(
+ const grpc::ChannelArguments& args) {
+ grpc_channel_args channel_args = args.c_channel_args();
+ return grpc::CreateChannelInternal(
+ "inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr),
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>());
+}
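A sketch of exercising the in-process channel returned above, assuming a
hypothetical generated Echo service and stub; the channel avoids the network
stack but still flows through the normal server code paths:

  // Issues one unary call over the in-process channel; echo::Echo and its
  // request/response types are hypothetical generated code.
  grpc::Status CallOverInProcessChannel(grpc::Server* server) {
    std::shared_ptr<grpc::Channel> channel =
        server->InProcessChannel(grpc::ChannelArguments());
    std::unique_ptr<echo::Echo::Stub> stub = echo::Echo::NewStub(channel);
    grpc::ClientContext ctx;
    echo::EchoRequest request;
    echo::EchoResponse response;
    return stub->Echo(&ctx, request, &response);
  }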
+
+std::shared_ptr<grpc::Channel>
+Server::experimental_type::InProcessChannelWithInterceptors(
+ const grpc::ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
+ grpc_channel_args channel_args = args.c_channel_args();
+ return grpc::CreateChannelInternal(
+ "inproc",
+ grpc_inproc_channel_create(server_->server_, &channel_args, nullptr),
+ std::move(interceptor_creators));
+}
+
+static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
+ grpc::internal::RpcServiceMethod* method) {
+ switch (method->method_type()) {
+ case grpc::internal::RpcMethod::NORMAL_RPC:
+ case grpc::internal::RpcMethod::SERVER_STREAMING:
+ return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
+ case grpc::internal::RpcMethod::CLIENT_STREAMING:
+ case grpc::internal::RpcMethod::BIDI_STREAMING:
+ return GRPC_SRM_PAYLOAD_NONE;
+ }
+ GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
+}
+
+bool Server::RegisterService(const TString* host, grpc::Service* service) {
+ bool has_async_methods = service->has_async_methods();
+ if (has_async_methods) {
+ GPR_ASSERT(service->server_ == nullptr &&
+ "Can only register an asynchronous service against one server.");
+ service->server_ = this;
+ }
+
+ const char* method_name = nullptr;
+
+ for (const auto& method : service->methods_) {
+ if (method.get() == nullptr) { // Handled by generic service if any.
+ continue;
+ }
+
+ void* method_registration_tag = grpc_server_register_method(
+ server_, method->name(), host ? host->c_str() : nullptr,
+ PayloadHandlingForMethod(method.get()), 0);
+ if (method_registration_tag == nullptr) {
+ gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
+ method->name());
+ return false;
+ }
+
+ if (method->handler() == nullptr) { // Async method without handler
+ method->set_server_tag(method_registration_tag);
+ } else if (method->api_type() ==
+ grpc::internal::RpcServiceMethod::ApiType::SYNC) {
+ for (const auto& value : sync_req_mgrs_) {
+ value->AddSyncMethod(method.get(), method_registration_tag);
+ }
+ } else {
+ has_callback_methods_ = true;
+ grpc::internal::RpcServiceMethod* method_value = method.get();
+ grpc::CompletionQueue* cq = CallbackCQ();
+ server_->core_server->SetRegisteredMethodAllocator(
+ cq->cq(), method_registration_tag, [this, cq, method_value] {
+ grpc_core::Server::RegisteredCallAllocation result;
+ new CallbackRequest<grpc::CallbackServerContext>(this, method_value,
+ cq, &result);
+ return result;
+ });
+ }
+
+ method_name = method->name();
+ }
+
+ // Parse service name.
+ if (method_name != nullptr) {
+ std::stringstream ss(method_name);
+ std::string service_name;
+ if (std::getline(ss, service_name, '/') &&
+ std::getline(ss, service_name, '/')) {
+ services_.push_back(service_name.c_str());
+ }
+ }
+ return true;
+}
+
+void Server::RegisterAsyncGenericService(grpc::AsyncGenericService* service) {
+ GPR_ASSERT(service->server_ == nullptr &&
+ "Can only register an async generic service against one server.");
+ service->server_ = this;
+ has_async_generic_service_ = true;
+}
+
+void Server::RegisterCallbackGenericService(
+ grpc::CallbackGenericService* service) {
+ GPR_ASSERT(
+ service->server_ == nullptr &&
+ "Can only register a callback generic service against one server.");
+ service->server_ = this;
+ has_callback_generic_service_ = true;
+ generic_handler_.reset(service->Handler());
+
+ grpc::CompletionQueue* cq = CallbackCQ();
+ server_->core_server->SetBatchMethodAllocator(cq->cq(), [this, cq] {
+ grpc_core::Server::BatchCallAllocation result;
+ new CallbackRequest<grpc::GenericCallbackServerContext>(this, cq, &result);
+ return result;
+ });
+}
+
+int Server::AddListeningPort(const TString& addr,
+ grpc::ServerCredentials* creds) {
+ GPR_ASSERT(!started_);
+ int port = creds->AddPortToServer(addr, server_);
+ global_callbacks_->AddPort(this, addr, creds, port);
+ return port;
+}
+
+void Server::Ref() {
+ shutdown_refs_outstanding_.fetch_add(1, std::memory_order_relaxed);
+}
+
+void Server::UnrefWithPossibleNotify() {
+ if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ // No refs outstanding means that shutdown has been initiated and no more
+ // callback requests are outstanding.
+ grpc::internal::MutexLock lock(&mu_);
+ GPR_ASSERT(shutdown_);
+ shutdown_done_ = true;
+ shutdown_done_cv_.Signal();
+ }
+}
+
+void Server::UnrefAndWaitLocked() {
+ if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ shutdown_done_ = true;
+ return; // no need to wait on CV since done condition already set
+ }
+ shutdown_done_cv_.WaitUntil(&mu_, [this] { return shutdown_done_; });
+}
+
+void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
+ GPR_ASSERT(!started_);
+ global_callbacks_->PreServerStart(this);
+ started_ = true;
+
+  // Only create the default health check service when the user did not
+  // provide an explicit one.
+ grpc::ServerCompletionQueue* health_check_cq = nullptr;
+ grpc::DefaultHealthCheckService::HealthCheckServiceImpl*
+ default_health_check_service_impl = nullptr;
+ if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
+ grpc::DefaultHealthCheckServiceEnabled()) {
+ auto* default_hc_service = new grpc::DefaultHealthCheckService;
+ health_check_service_.reset(default_hc_service);
+ // We create a non-polling CQ to avoid impacting application
+ // performance. This ensures that we don't introduce thread hops
+ // for application requests that wind up on this CQ, which is polled
+ // in its own thread.
+ health_check_cq = new grpc::ServerCompletionQueue(
+ GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING, nullptr);
+ grpc_server_register_completion_queue(server_, health_check_cq->cq(),
+ nullptr);
+ default_health_check_service_impl =
+ default_hc_service->GetHealthCheckService(
+ std::unique_ptr<grpc::ServerCompletionQueue>(health_check_cq));
+ RegisterService(nullptr, default_health_check_service_impl);
+ }
+
+ for (auto& acceptor : acceptors_) {
+ acceptor->GetCredentials()->AddPortToServer(acceptor->name(), server_);
+ }
+
+ // If this server uses callback methods, then create a callback generic
+ // service to handle any unimplemented methods using the default reactor
+ // creator
+ if (has_callback_methods_ && !has_callback_generic_service_) {
+ unimplemented_service_.reset(new grpc::CallbackGenericService);
+ RegisterCallbackGenericService(unimplemented_service_.get());
+ }
+
+#ifndef NDEBUG
+ for (size_t i = 0; i < num_cqs; i++) {
+ cq_list_.push_back(cqs[i]);
+ }
+#endif
+
+ grpc_server_start(server_);
+
+ if (!has_async_generic_service_ && !has_callback_generic_service_) {
+ for (const auto& value : sync_req_mgrs_) {
+ value->AddUnknownSyncMethod();
+ }
+
+ for (size_t i = 0; i < num_cqs; i++) {
+ if (cqs[i]->IsFrequentlyPolled()) {
+ new UnimplementedAsyncRequest(this, cqs[i]);
+ }
+ }
+ if (health_check_cq != nullptr) {
+ new UnimplementedAsyncRequest(this, health_check_cq);
+ }
+ }
+
+ // If this server has any support for synchronous methods (has any sync
+ // server CQs), make sure that we have a ResourceExhausted handler
+ // to deal with the case of thread exhaustion
+ if (sync_server_cqs_ != nullptr && !sync_server_cqs_->empty()) {
+ resource_exhausted_handler_.reset(
+ new grpc::internal::ResourceExhaustedHandler);
+ }
+
+ for (const auto& value : sync_req_mgrs_) {
+ value->Start();
+ }
+
+ if (default_health_check_service_impl != nullptr) {
+ default_health_check_service_impl->StartServingThread();
+ }
+
+ for (auto& acceptor : acceptors_) {
+ acceptor->Start();
+ }
+}
+
+void Server::ShutdownInternal(gpr_timespec deadline) {
+ grpc::internal::MutexLock lock(&mu_);
+ if (shutdown_) {
+ return;
+ }
+
+ shutdown_ = true;
+
+ for (auto& acceptor : acceptors_) {
+ acceptor->Shutdown();
+ }
+
+ /// The completion queue to use for server shutdown completion notification
+ grpc::CompletionQueue shutdown_cq;
+ grpc::ShutdownTag shutdown_tag; // Dummy shutdown tag
+ grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);
+
+ shutdown_cq.Shutdown();
+
+ void* tag;
+ bool ok;
+ grpc::CompletionQueue::NextStatus status =
+ shutdown_cq.AsyncNext(&tag, &ok, deadline);
+
+  // If this timed out, it means we are done with the grace period for a clean
+  // shutdown. We should force a shutdown now by cancelling all in-flight calls.
+ if (status == grpc::CompletionQueue::NextStatus::TIMEOUT) {
+ grpc_server_cancel_all_calls(server_);
+ }
+  // Else, in the case of SHUTDOWN or GOT_EVENT, it means that the server has
+  // successfully shut down.
+
+ // Shutdown all ThreadManagers. This will try to gracefully stop all the
+ // threads in the ThreadManagers (once they process any inflight requests)
+ for (const auto& value : sync_req_mgrs_) {
+ value->Shutdown(); // ThreadManager's Shutdown()
+ }
+
+ // Wait for threads in all ThreadManagers to terminate
+ for (const auto& value : sync_req_mgrs_) {
+ value->Wait();
+ }
+
+ // Drop the shutdown ref and wait for all other refs to drop as well.
+ UnrefAndWaitLocked();
+
+ // Shutdown the callback CQ. The CQ is owned by its own shutdown tag, so it
+ // will delete itself at true shutdown.
+ if (callback_cq_ != nullptr) {
+ callback_cq_->Shutdown();
+ callback_cq_ = nullptr;
+ }
+
+ // Drain the shutdown queue (if the previous call to AsyncNext() timed out
+ // and we didn't remove the tag from the queue yet)
+ while (shutdown_cq.Next(&tag, &ok)) {
+ // Nothing to be done here. Just ignore ok and tag values
+ }
+
+ shutdown_notified_ = true;
+ shutdown_cv_.Broadcast();
+
+#ifndef NDEBUG
+ // Unregister this server with the CQs passed into it by the user so that
+ // those can be checked for properly-ordered shutdown.
+ for (auto* cq : cq_list_) {
+ cq->UnregisterServer(this);
+ }
+ cq_list_.clear();
+#endif
+}
+
+void Server::Wait() {
+ grpc::internal::MutexLock lock(&mu_);
+ while (started_ && !shutdown_notified_) {
+ shutdown_cv_.Wait(&mu_);
+ }
+}
+
+void Server::PerformOpsOnCall(grpc::internal::CallOpSetInterface* ops,
+ grpc::internal::Call* call) {
+ ops->FillOps(call);
+}
+
+bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
+ bool* status) {
+ if (GenericAsyncRequest::FinalizeResult(tag, status)) {
+ // We either had no interceptors run or we are done intercepting
+ if (*status) {
+ // Create a new request/response pair using the server and CQ values
+ // stored in this object's base class.
+ new UnimplementedAsyncRequest(server_, notification_cq_);
+ new UnimplementedAsyncResponse(this);
+ } else {
+ delete this;
+ }
+ } else {
+ // The tag was swallowed due to interception. We will see it again.
+ }
+ return false;
+}
+
+Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
+ UnimplementedAsyncRequest* request)
+ : request_(request) {
+ grpc::Status status(grpc::StatusCode::UNIMPLEMENTED, "");
+ grpc::internal::UnknownMethodHandler::FillOps(request_->context(), this);
+ request_->stream()->call_.PerformOps(this);
+}
+
+grpc::ServerInitializer* Server::initializer() {
+ return server_initializer_.get();
+}
+
+grpc::CompletionQueue* Server::CallbackCQ() {
+ // TODO(vjpai): Consider using a single global CQ for the default CQ
+ // if there is no explicit per-server CQ registered
+ grpc::internal::MutexLock l(&mu_);
+ if (callback_cq_ != nullptr) {
+ return callback_cq_;
+ }
+ auto* shutdown_callback = new grpc::ShutdownCallback;
+ callback_cq_ = new grpc::CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
+ shutdown_callback});
+
+ // Transfer ownership of the new cq to its own shutdown callback
+ shutdown_callback->TakeCQ(callback_cq_);
+
+ return callback_cq_;
+}
+
+} // namespace grpc
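A sketch of driving the shutdown path implemented in ShutdownInternal() above
from application code: the deadline passed to Shutdown() bounds the grace
period, after which any still-running calls are cancelled, while Wait()
(typically blocking another thread) returns once shutdown has been notified.

  #include <chrono>
  #include <grpcpp/grpcpp.h>

  void StopServer(grpc::Server* server) {
    // Give in-flight RPCs up to five seconds to complete; past the deadline,
    // ShutdownInternal() cancels the remaining calls.
    server->Shutdown(std::chrono::system_clock::now() +
                     std::chrono::seconds(5));
  }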
diff --git a/contrib/libs/grpc/src/cpp/server/server_context.cc b/contrib/libs/grpc/src/cpp/server/server_context.cc
new file mode 100644
index 0000000000..458ac20d87
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_context.cc
@@ -0,0 +1,361 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/server_context.h>
+
+#include <algorithm>
+#include <utility>
+
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpc/load_reporting.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/impl/call.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/support/server_callback.h>
+#include <grpcpp/support/time.h>
+
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/surface/call.h"
+
+namespace grpc {
+
+// CompletionOp
+
+class ServerContextBase::CompletionOp final
+ : public internal::CallOpSetInterface {
+ public:
+ // initial refs: one in the server context, one in the cq
+ // must ref the call before calling constructor and after deleting this
+ CompletionOp(internal::Call* call,
+ ::grpc::internal::ServerCallbackCall* callback_controller)
+ : call_(*call),
+ callback_controller_(callback_controller),
+ has_tag_(false),
+ tag_(nullptr),
+ core_cq_tag_(this),
+ refs_(2),
+ finalized_(false),
+ cancelled_(0),
+ done_intercepting_(false) {}
+
+ // CompletionOp isn't copyable or movable
+ CompletionOp(const CompletionOp&) = delete;
+ CompletionOp& operator=(const CompletionOp&) = delete;
+ CompletionOp(CompletionOp&&) = delete;
+ CompletionOp& operator=(CompletionOp&&) = delete;
+
+ ~CompletionOp() {
+ if (call_.server_rpc_info()) {
+ call_.server_rpc_info()->Unref();
+ }
+ }
+
+ void FillOps(internal::Call* call) override;
+
+ // This should always be arena allocated in the call, so override delete.
+ // But this class is not trivially destructible, so must actually call delete
+ // before allowing the arena to be freed
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ // Use size to avoid unused-parameter warning since assert seems to be
+ // compiled out and treated as unused in some gcc optimized versions.
+ (void)size;
+ assert(size == sizeof(CompletionOp));
+ }
+
+  // This operator should never be called as the memory should be freed as
+  // part of the arena destruction. It only exists to provide a matching
+  // operator delete to the operator new so that some compilers will not
+  // complain (see https://github.com/grpc/grpc/issues/11301). Note that at
+  // the time of adding this there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { assert(0); }
+
+ bool FinalizeResult(void** tag, bool* status) override;
+
+ bool CheckCancelled(CompletionQueue* cq) {
+ cq->TryPluck(this);
+ return CheckCancelledNoPluck();
+ }
+ bool CheckCancelledAsync() { return CheckCancelledNoPluck(); }
+
+ void set_tag(void* tag) {
+ has_tag_ = true;
+ tag_ = tag;
+ }
+
+ void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }
+
+ void* core_cq_tag() override { return core_cq_tag_; }
+
+ void Unref();
+
+ // This will be called while interceptors are run if the RPC is a hijacked
+ // RPC. This should set hijacking state for each of the ops.
+ void SetHijackingState() override {
+ /* Servers don't allow hijacking */
+ GPR_ASSERT(false);
+ }
+
+ /* Should be called after interceptors are done running */
+ void ContinueFillOpsAfterInterception() override {}
+
+ /* Should be called after interceptors are done running on the finalize result
+ * path */
+ void ContinueFinalizeResultAfterInterception() override {
+ done_intercepting_ = true;
+ if (!has_tag_) {
+ /* We don't have a tag to return. */
+ Unref();
+ return;
+ }
+ /* Start a dummy op so that we can return the tag */
+ GPR_ASSERT(grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag_,
+ nullptr) == GRPC_CALL_OK);
+ }
+
+ private:
+ bool CheckCancelledNoPluck() {
+ grpc_core::MutexLock lock(&mu_);
+ return finalized_ ? (cancelled_ != 0) : false;
+ }
+
+ internal::Call call_;
+ ::grpc::internal::ServerCallbackCall* const callback_controller_;
+ bool has_tag_;
+ void* tag_;
+ void* core_cq_tag_;
+ grpc_core::RefCount refs_;
+ grpc_core::Mutex mu_;
+ bool finalized_;
+ int cancelled_; // This is an int (not bool) because it is passed to core
+ bool done_intercepting_;
+ internal::InterceptorBatchMethodsImpl interceptor_methods_;
+};
+
+void ServerContextBase::CompletionOp::Unref() {
+ if (refs_.Unref()) {
+ grpc_call* call = call_.call();
+ delete this;
+ grpc_call_unref(call);
+ }
+}
+
+void ServerContextBase::CompletionOp::FillOps(internal::Call* call) {
+ grpc_op ops;
+ ops.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+ ops.data.recv_close_on_server.cancelled = &cancelled_;
+ ops.flags = 0;
+ ops.reserved = nullptr;
+ interceptor_methods_.SetCall(&call_);
+ interceptor_methods_.SetReverse();
+ interceptor_methods_.SetCallOpSetInterface(this);
+ // The following call_start_batch is internally-generated so no need for an
+ // explanatory log on failure.
+ GPR_ASSERT(grpc_call_start_batch(call->call(), &ops, 1, core_cq_tag_,
+ nullptr) == GRPC_CALL_OK);
+ /* No interceptors to run here */
+}
+
+bool ServerContextBase::CompletionOp::FinalizeResult(void** tag, bool* status) {
+ // Decide whether to call the cancel callback within the lock
+ bool call_cancel;
+
+ {
+ grpc_core::MutexLock lock(&mu_);
+ if (done_intercepting_) {
+ // We are done intercepting.
+ bool has_tag = has_tag_;
+ if (has_tag) {
+ *tag = tag_;
+ }
+ Unref();
+ return has_tag;
+ }
+ finalized_ = true;
+
+ // If for some reason the incoming status is false, mark that as a
+ // cancellation.
+ // TODO(vjpai): does this ever happen?
+ if (!*status) {
+ cancelled_ = 1;
+ }
+
+ call_cancel = (cancelled_ != 0);
+ // Release the lock since we may call a callback and interceptors.
+ }
+
+ if (call_cancel && callback_controller_ != nullptr) {
+ callback_controller_->MaybeCallOnCancel();
+ }
+ /* Add interception point and run through interceptors */
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_CLOSE);
+ if (interceptor_methods_.RunInterceptors()) {
+ // No interceptors were run
+ bool has_tag = has_tag_;
+ if (has_tag) {
+ *tag = tag_;
+ }
+ Unref();
+ return has_tag;
+ }
+ // There are interceptors to be run. Return false for now.
+ return false;
+}
+
+// ServerContextBase body
+
+ServerContextBase::ServerContextBase()
+ : deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)) {}
+
+ServerContextBase::ServerContextBase(gpr_timespec deadline,
+ grpc_metadata_array* arr)
+ : deadline_(deadline) {
+ std::swap(*client_metadata_.arr(), *arr);
+}
+
+void ServerContextBase::BindDeadlineAndMetadata(gpr_timespec deadline,
+ grpc_metadata_array* arr) {
+ deadline_ = deadline;
+ std::swap(*client_metadata_.arr(), *arr);
+}
+
+ServerContextBase::~ServerContextBase() {
+ if (completion_op_) {
+ completion_op_->Unref();
+ }
+ if (rpc_info_) {
+ rpc_info_->Unref();
+ }
+ if (default_reactor_used_.load(std::memory_order_relaxed)) {
+ reinterpret_cast<Reactor*>(&default_reactor_)->~Reactor();
+ }
+}
+
+ServerContextBase::CallWrapper::~CallWrapper() {
+ if (call) {
+ // If the ServerContext is part of the call's arena, this could free the
+ // object itself.
+ grpc_call_unref(call);
+ }
+}
+
+void ServerContextBase::BeginCompletionOp(
+ internal::Call* call, std::function<void(bool)> callback,
+ ::grpc::internal::ServerCallbackCall* callback_controller) {
+ GPR_ASSERT(!completion_op_);
+ if (rpc_info_) {
+ rpc_info_->Ref();
+ }
+ grpc_call_ref(call->call());
+ completion_op_ =
+ new (grpc_call_arena_alloc(call->call(), sizeof(CompletionOp)))
+ CompletionOp(call, callback_controller);
+ if (callback_controller != nullptr) {
+ completion_tag_.Set(call->call(), std::move(callback), completion_op_,
+ true);
+ completion_op_->set_core_cq_tag(&completion_tag_);
+ completion_op_->set_tag(completion_op_);
+ } else if (has_notify_when_done_tag_) {
+ completion_op_->set_tag(async_notify_when_done_tag_);
+ }
+ call->PerformOps(completion_op_);
+}
+
+internal::CompletionQueueTag* ServerContextBase::GetCompletionOpTag() {
+ return static_cast<internal::CompletionQueueTag*>(completion_op_);
+}
+
+void ServerContextBase::AddInitialMetadata(const TString& key,
+ const TString& value) {
+ initial_metadata_.insert(std::make_pair(key, value));
+}
+
+void ServerContextBase::AddTrailingMetadata(const TString& key,
+ const TString& value) {
+ trailing_metadata_.insert(std::make_pair(key, value));
+}
+
+void ServerContextBase::TryCancel() const {
+ internal::CancelInterceptorBatchMethods cancel_methods;
+ if (rpc_info_) {
+ for (size_t i = 0; i < rpc_info_->interceptors_.size(); i++) {
+ rpc_info_->RunInterceptor(&cancel_methods, i);
+ }
+ }
+ grpc_call_error err =
+ grpc_call_cancel_with_status(call_.call, GRPC_STATUS_CANCELLED,
+ "Cancelled on the server side", nullptr);
+ if (err != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "TryCancel failed with: %d", err);
+ }
+}
+
+bool ServerContextBase::IsCancelled() const {
+ if (completion_tag_) {
+    // When using the callback API, this result is always valid.
+    return completion_op_->CheckCancelledAsync();
+  } else if (has_notify_when_done_tag_) {
+    // When using the async API, the result is only valid if the tag has
+    // already been delivered at the completion queue.
+    return completion_op_ && completion_op_->CheckCancelledAsync();
+  } else {
+    // When using the sync API, the result is always valid.
+ return completion_op_ && completion_op_->CheckCancelled(cq_);
+ }
+}
+
+void ServerContextBase::set_compression_algorithm(
+ grpc_compression_algorithm algorithm) {
+ compression_algorithm_ = algorithm;
+ const char* algorithm_name = nullptr;
+ if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
+ gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
+ algorithm);
+ abort();
+ }
+ GPR_ASSERT(algorithm_name != nullptr);
+ AddInitialMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
+}
+
+TString ServerContextBase::peer() const {
+ TString peer;
+ if (call_.call) {
+ char* c_peer = grpc_call_get_peer(call_.call);
+ peer = c_peer;
+ gpr_free(c_peer);
+ }
+ return peer;
+}
+
+const struct census_context* ServerContextBase::census_context() const {
+ return call_.call == nullptr ? nullptr
+ : grpc_census_call_get_context(call_.call);
+}
+
+void ServerContextBase::SetLoadReportingCosts(
+ const std::vector<TString>& cost_data) {
+ if (call_.call == nullptr) return;
+ for (const auto& cost_datum : cost_data) {
+ AddTrailingMetadata(GRPC_LB_COST_MD_KEY, cost_datum);
+ }
+}
+
+} // namespace grpc
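The ServerContextBase methods above are what back grpc::ServerContext inside application handlers. As a minimal sketch of that surface (assuming a protobuf-generated Greeter service with HelloRequest/HelloReply messages, which are not part of this diff), a synchronous handler might exercise the metadata, peer, compression, and cancellation helpers like this:

#include <grpcpp/grpcpp.h>
// "Greeter", "HelloRequest" and "HelloReply" are assumed generated types.

class GreeterImpl final : public Greeter::Service {
  grpc::Status SayHello(grpc::ServerContext* ctx, const HelloRequest* req,
                        HelloReply* reply) override {
    // Echo the caller's address back as initial metadata (keys are lowercase).
    ctx->AddInitialMetadata("x-peer", ctx->peer());
    // Ask for a gzip-compressed response; maps to set_compression_algorithm().
    ctx->set_compression_algorithm(GRPC_COMPRESS_GZIP);
    // Long-running work should poll IsCancelled() and bail out early.
    if (ctx->IsCancelled()) {
      return grpc::Status(grpc::StatusCode::CANCELLED, "client went away");
    }
    reply->set_message("hello " + req->name());
    ctx->AddTrailingMetadata("x-served-by", "example-backend");
    return grpc::Status::OK;
  }
};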
diff --git a/contrib/libs/grpc/src/cpp/server/server_credentials.cc b/contrib/libs/grpc/src/cpp/server/server_credentials.cc
new file mode 100644
index 0000000000..c3b3a8b379
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_credentials.cc
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/security/server_credentials.h>
+
+namespace grpc {
+
+ServerCredentials::~ServerCredentials() {}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/server/server_posix.cc b/contrib/libs/grpc/src/cpp/server/server_posix.cc
new file mode 100644
index 0000000000..c3d40d4fa2
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/server_posix.cc
@@ -0,0 +1,33 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/server_posix.h>
+
+#include <grpc/grpc_posix.h>
+
+namespace grpc {
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+void AddInsecureChannelFromFd(grpc::Server* server, int fd) {
+ grpc_server_add_insecure_channel_from_fd(server->c_server(), nullptr, fd);
+}
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
+
+} // namespace grpc
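AddInsecureChannelFromFd() above hands an already-connected file descriptor to a running server, bypassing ServerBuilder::AddListeningPort(). A hedged sketch of how that might be used on POSIX; the accept loop is illustrative and only meaningful where GPR_SUPPORT_CHANNELS_FROM_FD is defined:

#include <sys/socket.h>

#include <grpcpp/server.h>
#include <grpcpp/server_posix.h>

// Feed each accepted TCP connection straight into an already-running server.
void ServeOnListeningSocket(int listen_fd, grpc::Server* server) {
  while (true) {
    int conn_fd = accept(listen_fd, nullptr, nullptr);
    if (conn_fd < 0) break;
    // The server takes ownership of conn_fd from here on.
    grpc::AddInsecureChannelFromFd(server, conn_fd);
  }
}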
diff --git a/contrib/libs/grpc/src/cpp/server/thread_pool_interface.h b/contrib/libs/grpc/src/cpp/server/thread_pool_interface.h
new file mode 100644
index 0000000000..028842a776
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/server/thread_pool_interface.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
+#define GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
+
+#include <functional>
+
+namespace grpc {
+
+// A thread pool interface for running callbacks.
+class ThreadPoolInterface {
+ public:
+ virtual ~ThreadPoolInterface() {}
+
+ // Schedule the given callback for execution.
+ virtual void Add(const std::function<void()>& callback) = 0;
+};
+
+// Allows different codebases to use their own thread pool impls
+typedef ThreadPoolInterface* (*CreateThreadPoolFunc)(void);
+void SetCreateThreadPool(CreateThreadPoolFunc func);
+
+ThreadPoolInterface* CreateDefaultThreadPool();
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
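ThreadPoolInterface is the seam through which the sync server schedules callbacks, and SetCreateThreadPool() lets a codebase swap in its own pool. A toy fixed-size implementation, shown only to illustrate the contract (gRPC's own default pool is dynamic and lives elsewhere in src/cpp/server):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

#include "src/cpp/server/thread_pool_interface.h"  // the internal header above

// Illustrative fixed-size pool satisfying ThreadPoolInterface::Add().
class FixedThreadPool final : public grpc::ThreadPoolInterface {
 public:
  explicit FixedThreadPool(size_t num_threads) {
    for (size_t i = 0; i < num_threads; ++i) {
      workers_.emplace_back([this] { Loop(); });
    }
  }
  ~FixedThreadPool() override {
    {
      std::lock_guard<std::mutex> lock(mu_);
      shutdown_ = true;
    }
    cv_.notify_all();
    for (auto& t : workers_) t.join();
  }
  void Add(const std::function<void()>& callback) override {
    {
      std::lock_guard<std::mutex> lock(mu_);
      queue_.push(callback);
    }
    cv_.notify_one();
  }

 private:
  void Loop() {
    while (true) {
      std::function<void()> work;
      {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return shutdown_ || !queue_.empty(); });
        if (shutdown_ && queue_.empty()) return;
        work = std::move(queue_.front());
        queue_.pop();
      }
      work();  // run outside the lock
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> queue_;
  std::vector<std::thread> workers_;
  bool shutdown_ = false;
};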
diff --git a/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.cc b/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.cc
new file mode 100644
index 0000000000..c8560aa81d
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.cc
@@ -0,0 +1,265 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/thread_manager/thread_manager.h"
+
+#include <climits>
+
+#include <grpc/support/log.h>
+#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+namespace grpc {
+
+ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr)
+ : thd_mgr_(thd_mgr) {
+ // Make thread creation exclusive with respect to its join happening in
+ // ~WorkerThread().
+ thd_ = grpc_core::Thread(
+ "grpcpp_sync_server",
+ [](void* th) { static_cast<ThreadManager::WorkerThread*>(th)->Run(); },
+ this, &created_);
+ if (!created_) {
+ gpr_log(GPR_ERROR, "Could not create grpc_sync_server worker-thread");
+ }
+}
+
+void ThreadManager::WorkerThread::Run() {
+ thd_mgr_->MainWorkLoop();
+ thd_mgr_->MarkAsCompleted(this);
+}
+
+ThreadManager::WorkerThread::~WorkerThread() {
+ // Don't join until the thread is fully constructed.
+ thd_.Join();
+}
+
+ThreadManager::ThreadManager(const char* name,
+ grpc_resource_quota* resource_quota,
+ int min_pollers, int max_pollers)
+ : shutdown_(false),
+ num_pollers_(0),
+ min_pollers_(min_pollers),
+ max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers),
+ num_threads_(0),
+ max_active_threads_sofar_(0) {
+ resource_user_ = grpc_resource_user_create(resource_quota, name);
+}
+
+ThreadManager::~ThreadManager() {
+ {
+ grpc_core::MutexLock lock(&mu_);
+ GPR_ASSERT(num_threads_ == 0);
+ }
+
+ grpc_core::ExecCtx exec_ctx; // grpc_resource_user_unref needs an exec_ctx
+ grpc_resource_user_unref(resource_user_);
+ CleanupCompletedThreads();
+}
+
+void ThreadManager::Wait() {
+ grpc_core::MutexLock lock(&mu_);
+ while (num_threads_ != 0) {
+ shutdown_cv_.Wait(&mu_);
+ }
+}
+
+void ThreadManager::Shutdown() {
+ grpc_core::MutexLock lock(&mu_);
+ shutdown_ = true;
+}
+
+bool ThreadManager::IsShutdown() {
+ grpc_core::MutexLock lock(&mu_);
+ return shutdown_;
+}
+
+int ThreadManager::GetMaxActiveThreadsSoFar() {
+ grpc_core::MutexLock list_lock(&list_mu_);
+ return max_active_threads_sofar_;
+}
+
+void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
+ {
+ grpc_core::MutexLock list_lock(&list_mu_);
+ completed_threads_.push_back(thd);
+ }
+
+ {
+ grpc_core::MutexLock lock(&mu_);
+ num_threads_--;
+ if (num_threads_ == 0) {
+ shutdown_cv_.Signal();
+ }
+ }
+
+ // Give a thread back to the resource quota
+ grpc_resource_user_free_threads(resource_user_, 1);
+}
+
+void ThreadManager::CleanupCompletedThreads() {
+ std::list<WorkerThread*> completed_threads;
+ {
+ // swap out the completed threads list: allows other threads to clean up
+ // more quickly
+ grpc_core::MutexLock lock(&list_mu_);
+ completed_threads.swap(completed_threads_);
+ }
+ for (auto thd : completed_threads) delete thd;
+}
+
+void ThreadManager::Initialize() {
+ if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) {
+ gpr_log(GPR_ERROR,
+ "No thread quota available to even create the minimum required "
+ "polling threads (i.e %d). Unable to start the thread manager",
+ min_pollers_);
+ abort();
+ }
+
+ {
+ grpc_core::MutexLock lock(&mu_);
+ num_pollers_ = min_pollers_;
+ num_threads_ = min_pollers_;
+ max_active_threads_sofar_ = min_pollers_;
+ }
+
+ for (int i = 0; i < min_pollers_; i++) {
+ WorkerThread* worker = new WorkerThread(this);
+ GPR_ASSERT(worker->created()); // Must be able to create the minimum
+ worker->Start();
+ }
+}
+
+void ThreadManager::MainWorkLoop() {
+ while (true) {
+ void* tag;
+ bool ok;
+ WorkStatus work_status = PollForWork(&tag, &ok);
+
+ grpc_core::ReleasableMutexLock lock(&mu_);
+ // Reduce the number of pollers by 1 and check what happened with the poll
+ num_pollers_--;
+ bool done = false;
+ switch (work_status) {
+ case TIMEOUT:
+        // If we timed out and we have more pollers than we need (or we are
+        // shut down), finish this thread
+ if (shutdown_ || num_pollers_ > max_pollers_) done = true;
+ break;
+ case SHUTDOWN:
+ // If the thread manager is shutdown, finish this thread
+ done = true;
+ break;
+ case WORK_FOUND:
+ // If we got work and there are now insufficient pollers and there is
+ // quota available to create a new thread, start a new poller thread
+ bool resource_exhausted = false;
+ if (!shutdown_ && num_pollers_ < min_pollers_) {
+ if (grpc_resource_user_allocate_threads(resource_user_, 1)) {
+ // We can allocate a new poller thread
+ num_pollers_++;
+ num_threads_++;
+ if (num_threads_ > max_active_threads_sofar_) {
+ max_active_threads_sofar_ = num_threads_;
+ }
+ // Drop lock before spawning thread to avoid contention
+ lock.Unlock();
+ WorkerThread* worker = new WorkerThread(this);
+ if (worker->created()) {
+ worker->Start();
+ } else {
+ // Get lock again to undo changes to poller/thread counters.
+ grpc_core::MutexLock failure_lock(&mu_);
+ num_pollers_--;
+ num_threads_--;
+ resource_exhausted = true;
+ delete worker;
+ }
+ } else if (num_pollers_ > 0) {
+ // There is still at least some thread polling, so we can go on
+ // even though we are below the number of pollers that we would
+ // like to have (min_pollers_)
+ lock.Unlock();
+ } else {
+ // There are no pollers to spare and we couldn't allocate
+ // a new thread, so resources are exhausted!
+ lock.Unlock();
+ resource_exhausted = true;
+ }
+ } else {
+ // There are a sufficient number of pollers available so we can do
+ // the work and continue polling with our existing poller threads
+ lock.Unlock();
+ }
+ // Lock is always released at this point - do the application work
+ // or return resource exhausted if there is new work but we couldn't
+ // get a thread in which to do it.
+ DoWork(tag, ok, !resource_exhausted);
+ // Take the lock again to check post conditions
+ lock.Lock();
+ // If we're shutdown, we should finish at this point.
+ if (shutdown_) done = true;
+ break;
+ }
+ // If we decided to finish the thread, break out of the while loop
+ if (done) break;
+
+ // Otherwise go back to polling as long as it doesn't exceed max_pollers_
+ //
+ // **WARNING**:
+    // There is a possibility of threads thrashing here (i.e. more thread
+    // shutdowns and creations than in the ideal case). This happens if the
+    // max_pollers_ count is small and the rate of incoming requests is also
+    // small. In such scenarios we can possibly configure max_pollers_ to a
+    // higher value and/or increase the cq timeout.
+    //
+    // However, not doing this check here and unconditionally incrementing
+    // num_pollers_ (and hoping that the system will eventually settle down)
+    // has far worse consequences, i.e. a huge number of threads getting
+    // created to the point of thread exhaustion. For example: if the incoming
+    // request rate is very high, all the polling threads will return very
+    // quickly from PollForWork() with WORK_FOUND. They all briefly decrement
+    // the num_pollers_ counter, thereby possibly - and briefly - making it go
+    // below min_pollers_; this will most likely result in the creation of a
+    // new poller since num_pollers_ dipped below min_pollers_.
+    //
+    // Now, if we didn't do the max_pollers_ check here, all these threads
+    // would go back to doing PollForWork() and the whole cycle would repeat
+    // (with a new thread being added in each cycle). Once the total number of
+    // threads in the system crosses a certain threshold (around ~1500), there
+    // is heavy contention on mutexes (the mu_ here or the mutexes in gRPC
+    // core like the pollset mutex) that makes DoWork() take longer to finish,
+    // thereby causing new poller threads to be created even faster. This
+    // results in a thread avalanche.
+ if (num_pollers_ < max_pollers_) {
+ num_pollers_++;
+ } else {
+ break;
+ }
+ };
+
+  // This thread is exiting. Do some cleanup work, i.e. delete already
+  // completed worker threads.
+ CleanupCompletedThreads();
+
+ // If we are here, either ThreadManager is shutting down or it already has
+ // enough threads.
+}
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.h b/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.h
new file mode 100644
index 0000000000..43f1fd5585
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/thread_manager/thread_manager.h
@@ -0,0 +1,181 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H
+#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
+
+#include <list>
+#include <memory>
+
+#include <grpcpp/support/config.h>
+
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/resource_quota.h"
+
+namespace grpc {
+
+class ThreadManager {
+ public:
+ explicit ThreadManager(const char* name, grpc_resource_quota* resource_quota,
+ int min_pollers, int max_pollers);
+ virtual ~ThreadManager();
+
+  // Initializes and starts the thread manager's threads
+ void Initialize();
+
+  // The return type of the PollForWork() function
+  enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT };
+
+  // "Polls" for new work.
+  // If the return value is WORK_FOUND:
+  //  - The implementation of PollForWork() MAY set some opaque identifier
+  //    (to identify the work item found) via the '*tag' parameter
+  //  - The implementation MUST set the value of 'ok' to 'true' or 'false'. A
+  //    value of 'false' indicates some implementation-specific error (that is
+  //    neither SHUTDOWN nor TIMEOUT)
+  //  - ThreadManager does not interpret the values of 'tag' and 'ok'
+  //  - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to
+  //    DoWork()
+  //
+  // If the return value is SHUTDOWN:
+  //  - ThreadManager WILL NOT call DoWork() and terminates the thread
+  //
+  // If the return value is TIMEOUT:
+  //  - ThreadManager WILL NOT call DoWork()
+  //  - ThreadManager MAY terminate the thread depending on the current number
+  //    of active poller threads and the min_pollers/max_pollers settings
+  //  - Also, the value of the timeout is specific to the derived class
+  //    implementation
+ virtual WorkStatus PollForWork(void** tag, bool* ok) = 0;
+
+ // The implementation of DoWork() is supposed to perform the work found by
+ // PollForWork(). The tag and ok parameters are the same as returned by
+ // PollForWork(). The resources parameter indicates that the call actually
+ // has the resources available for performing the RPC's work. If it doesn't,
+ // the implementation should fail it appropriately.
+ //
+ // The implementation of DoWork() should also do any setup needed to ensure
+ // that the next call to PollForWork() (not necessarily by the current thread)
+ // actually finds some work
+ virtual void DoWork(void* tag, bool ok, bool resources) = 0;
+
+ // Mark the ThreadManager as shutdown and begin draining the work. This is a
+ // non-blocking call and the caller should call Wait(), a blocking call which
+ // returns only once the shutdown is complete
+ virtual void Shutdown();
+
+ // Has Shutdown() been called
+ bool IsShutdown();
+
+  // A blocking call that returns only after the ThreadManager has shut down and
+ // all the threads have drained all the outstanding work
+ virtual void Wait();
+
+ // Max number of concurrent threads that were ever active in this thread
+ // manager so far. This is useful for debugging purposes (and in unit tests)
+ // to check if resource_quota is properly being enforced.
+ int GetMaxActiveThreadsSoFar();
+
+ private:
+  // Helper wrapper class around grpc_core::Thread. Takes a ThreadManager
+  // object and starts a new grpc_core::Thread to call the Run() function.
+  //
+  // The Run() function calls the ThreadManager::MainWorkLoop() function and,
+  // once that completes, marks the WorkerThread as completed by calling
+  // ThreadManager::MarkAsCompleted()
+ //
+ // WHY IS THIS NEEDED?:
+ // When a thread terminates, some other thread *must* call Join() on that
+ // thread so that the resources are released. Having a WorkerThread wrapper
+ // will make this easier. Once Run() completes, each thread calls the
+ // following two functions:
+ // ThreadManager::CleanupCompletedThreads()
+ // ThreadManager::MarkAsCompleted()
+ //
+  // - MarkAsCompleted() puts the WorkerThread object in the ThreadManager's
+ // completed_threads_ list
+ // - CleanupCompletedThreads() calls "Join()" on the threads that are already
+ // in the completed_threads_ list (since a thread cannot call Join() on
+ // itself, it calls CleanupCompletedThreads() *before* calling
+ // MarkAsCompleted())
+ //
+ // TODO(sreek): Consider creating the threads 'detached' so that Join() need
+ // not be called (and the need for this WorkerThread class is eliminated)
+ class WorkerThread {
+ public:
+ WorkerThread(ThreadManager* thd_mgr);
+ ~WorkerThread();
+
+ bool created() const { return created_; }
+ void Start() { thd_.Start(); }
+
+ private:
+    // Calls thd_mgr_->MainWorkLoop() and, once that completes, calls
+    // thd_mgr_->MarkAsCompleted(this) to mark the thread as completed
+ void Run();
+
+ ThreadManager* const thd_mgr_;
+ grpc_core::Thread thd_;
+ bool created_;
+ };
+
+ // The main function in ThreadManager
+ void MainWorkLoop();
+
+ void MarkAsCompleted(WorkerThread* thd);
+ void CleanupCompletedThreads();
+
+ // Protects shutdown_, num_pollers_, num_threads_ and
+ // max_active_threads_sofar_
+ grpc_core::Mutex mu_;
+
+ bool shutdown_;
+ grpc_core::CondVar shutdown_cv_;
+
+ // The resource user object to use when requesting quota to create threads
+ //
+  // Note: The user of this ThreadManager object must create a
+  // grpc_resource_quota object (that contains the actual max thread quota);
+  // the ThreadManager then creates a grpc_resource_user from it, through
+  // which quota is requested whenever new threads need to be created.
+ grpc_resource_user* resource_user_;
+
+ // Number of threads doing polling
+ int num_pollers_;
+
+ // The minimum and maximum number of threads that should be doing polling
+ int min_pollers_;
+ int max_pollers_;
+
+  // The total number of threads currently active (includes the threads that
+  // are currently polling, i.e. num_pollers_)
+ int num_threads_;
+
+ // See GetMaxActiveThreadsSoFar()'s description.
+  // To be more specific, this variable tracks the maximum value that
+  // num_threads_ has reached so far
+ int max_active_threads_sofar_;
+
+ grpc_core::Mutex list_mu_;
+ std::list<WorkerThread*> completed_threads_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_THREAD_MANAGER_H
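The PollForWork()/DoWork() contract documented above is what a concrete manager has to implement; inside this library the sync server wires it to a completion queue. A deliberately trivial subclass, shown only to make the contract and the quota wiring concrete (not the real implementation):

#include <grpc/grpc.h>

#include "src/cpp/thread_manager/thread_manager.h"  // the internal header above

// Illustrative subclass: pretends one unit of work is always available.
class EchoThreadManager final : public grpc::ThreadManager {
 public:
  explicit EchoThreadManager(grpc_resource_quota* quota)
      : grpc::ThreadManager("echo", quota, /*min_pollers=*/1,
                            /*max_pollers=*/4) {}

  WorkStatus PollForWork(void** tag, bool* ok) override {
    // A real implementation blocks on its event source here.
    *tag = nullptr;
    *ok = true;
    return IsShutdown() ? SHUTDOWN : WORK_FOUND;
  }

  void DoWork(void* /*tag*/, bool ok, bool resources) override {
    if (!ok || !resources) return;  // fail the work item appropriately
    // ... perform the application work identified by 'tag' ...
  }
};

// Possible wiring (quota APIs are in <grpc/grpc.h>):
//   grpc_resource_quota* q = grpc_resource_quota_create("echo-quota");
//   grpc_resource_quota_set_max_threads(q, 16);
//   EchoThreadManager mgr(q);
//   mgr.Initialize();  // spawns min_pollers_ worker threads
//   ...
//   mgr.Shutdown();
//   mgr.Wait();
//   grpc_resource_quota_unref(q);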
diff --git a/contrib/libs/grpc/src/cpp/util/byte_buffer_cc.cc b/contrib/libs/grpc/src/cpp/util/byte_buffer_cc.cc
new file mode 100644
index 0000000000..fb70590645
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/util/byte_buffer_cc.cc
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/byte_buffer.h>
+#include <grpc/byte_buffer_reader.h>
+#include <grpcpp/impl/grpc_library.h>
+#include <grpcpp/support/byte_buffer.h>
+
+namespace grpc {
+
+static internal::GrpcLibraryInitializer g_gli_initializer;
+
+Status ByteBuffer::Dump(std::vector<Slice>* slices) const {
+ slices->clear();
+ if (!buffer_) {
+ return Status(StatusCode::FAILED_PRECONDITION, "Buffer not initialized");
+ }
+ grpc_byte_buffer_reader reader;
+ if (!grpc_byte_buffer_reader_init(&reader, buffer_)) {
+ return Status(StatusCode::INTERNAL,
+ "Couldn't initialize byte buffer reader");
+ }
+ grpc_slice s;
+ while (grpc_byte_buffer_reader_next(&reader, &s)) {
+ slices->push_back(Slice(s, Slice::STEAL_REF));
+ }
+ grpc_byte_buffer_reader_destroy(&reader);
+ return Status::OK;
+}
+
+} // namespace grpc
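Dump() above is the inverse of constructing a ByteBuffer from slices, so a payload can be round-tripped through it. A small sketch (not a test from this diff):

#include <string>
#include <vector>

#include <grpcpp/support/byte_buffer.h>
#include <grpcpp/support/slice.h>

// Copy a payload into a ByteBuffer and read it back out via Dump().
std::string RoundTrip(const std::string& payload) {
  grpc::Slice in(payload.data(), payload.size());  // copying constructor
  grpc::ByteBuffer buffer(&in, 1);

  std::vector<grpc::Slice> out;
  grpc::Status s = buffer.Dump(&out);
  if (!s.ok()) return "";

  std::string result;
  for (const grpc::Slice& slice : out) {
    result.append(reinterpret_cast<const char*>(slice.begin()), slice.size());
  }
  return result;  // equals 'payload'
}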
diff --git a/contrib/libs/grpc/src/cpp/util/error_details.cc b/contrib/libs/grpc/src/cpp/util/error_details.cc
new file mode 100644
index 0000000000..dfd3351be1
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/util/error_details.cc
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/support/error_details.h>
+
+#include "src/proto/grpc/status/status.pb.h"
+
+namespace grpc {
+
+grpc::Status ExtractErrorDetails(const grpc::Status& from,
+ ::google::rpc::Status* to) {
+ if (to == nullptr) {
+ return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "");
+ }
+ if (!to->ParseFromString(TProtoStringType(from.error_details()))) {
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "");
+ }
+ return grpc::Status::OK;
+}
+
+grpc::Status SetErrorDetails(const ::google::rpc::Status& from,
+ grpc::Status* to) {
+ if (to == nullptr) {
+ return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "");
+ }
+ grpc::StatusCode code = grpc::StatusCode::UNKNOWN;
+ if (from.code() >= grpc::StatusCode::OK &&
+ from.code() <= grpc::StatusCode::UNAUTHENTICATED) {
+ code = static_cast<grpc::StatusCode>(from.code());
+ }
+ *to = grpc::Status(code, from.message(), from.SerializeAsString());
+ return grpc::Status::OK;
+}
+
+} // namespace grpc
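ExtractErrorDetails() and SetErrorDetails() above move a serialized google.rpc.Status in and out of grpc::Status::error_details(). A hedged usage sketch; the include path for the generated google.rpc.Status header depends on how status.proto is built in a given project:

#include <grpcpp/support/error_details.h>

#include "google/rpc/status.pb.h"  // assumed location of the generated header

// Server side: attach a rich status payload to the grpc::Status returned
// from a handler.
grpc::Status FailWithDetails() {
  ::google::rpc::Status rich;
  rich.set_code(grpc::StatusCode::FAILED_PRECONDITION);
  rich.set_message("quota exhausted for project example");
  grpc::Status status;
  grpc::Status set_result = grpc::SetErrorDetails(rich, &status);
  return set_result.ok() ? status : set_result;
}

// Client side: recover the rich status from a failed RPC.
void InspectFailure(const grpc::Status& from_rpc) {
  ::google::rpc::Status rich;
  if (grpc::ExtractErrorDetails(from_rpc, &rich).ok()) {
    // rich.code(), rich.message() and rich.details() are now usable.
  }
}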
diff --git a/contrib/libs/grpc/src/cpp/util/status.cc b/contrib/libs/grpc/src/cpp/util/status.cc
new file mode 100644
index 0000000000..93696d8126
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/util/status.cc
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/support/status.h>
+
+namespace grpc {
+
+const Status& Status::OK = Status();
+const Status& Status::CANCELLED = Status(StatusCode::CANCELLED, "");
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/util/string_ref.cc b/contrib/libs/grpc/src/cpp/util/string_ref.cc
new file mode 100644
index 0000000000..8b09a82a63
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/util/string_ref.cc
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/support/string_ref.h>
+
+namespace grpc {
+
+const size_t string_ref::npos = size_t(-1);
+
+} // namespace grpc
diff --git a/contrib/libs/grpc/src/cpp/util/time_cc.cc b/contrib/libs/grpc/src/cpp/util/time_cc.cc
new file mode 100644
index 0000000000..6c9c228d7c
--- /dev/null
+++ b/contrib/libs/grpc/src/cpp/util/time_cc.cc
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/time.h>
+#include <grpcpp/support/config.h>
+#include <grpcpp/support/time.h>
+
+using std::chrono::duration_cast;
+using std::chrono::high_resolution_clock;
+using std::chrono::nanoseconds;
+using std::chrono::seconds;
+using std::chrono::system_clock;
+
+namespace grpc {
+
+void Timepoint2Timespec(const system_clock::time_point& from,
+ gpr_timespec* to) {
+ system_clock::duration deadline = from.time_since_epoch();
+ seconds secs = duration_cast<seconds>(deadline);
+ if (from == system_clock::time_point::max() ||
+ secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
+ secs.count() < 0) {
+ *to = gpr_inf_future(GPR_CLOCK_REALTIME);
+ return;
+ }
+ nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
+ to->tv_sec = static_cast<int64_t>(secs.count());
+ to->tv_nsec = static_cast<int32_t>(nsecs.count());
+ to->clock_type = GPR_CLOCK_REALTIME;
+}
+
+void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
+ gpr_timespec* to) {
+ high_resolution_clock::duration deadline = from.time_since_epoch();
+ seconds secs = duration_cast<seconds>(deadline);
+ if (from == high_resolution_clock::time_point::max() ||
+ secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
+ secs.count() < 0) {
+ *to = gpr_inf_future(GPR_CLOCK_REALTIME);
+ return;
+ }
+ nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
+ to->tv_sec = static_cast<int64_t>(secs.count());
+ to->tv_nsec = static_cast<int32_t>(nsecs.count());
+ to->clock_type = GPR_CLOCK_REALTIME;
+}
+
+system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
+ if (gpr_time_cmp(t, gpr_inf_future(t.clock_type)) == 0) {
+ return system_clock::time_point::max();
+ }
+ t = gpr_convert_clock_type(t, GPR_CLOCK_REALTIME);
+ system_clock::time_point tp;
+ tp += duration_cast<system_clock::time_point::duration>(seconds(t.tv_sec));
+ tp +=
+ duration_cast<system_clock::time_point::duration>(nanoseconds(t.tv_nsec));
+ return tp;
+}
+
+} // namespace grpc
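The helpers above are how std::chrono deadlines flow into core's gpr_timespec and back (for example, for ClientContext deadlines). A small round-trip sketch:

#include <chrono>

#include <grpc/support/time.h>
#include <grpcpp/support/time.h>

// Convert a deadline 250ms from now into a gpr_timespec and back again.
void DeadlineRoundTrip() {
  std::chrono::system_clock::time_point deadline =
      std::chrono::system_clock::now() + std::chrono::milliseconds(250);

  gpr_timespec ts;
  grpc::Timepoint2Timespec(deadline, &ts);  // chrono -> gpr, REALTIME clock

  std::chrono::system_clock::time_point back = grpc::Timespec2Timepoint(ts);
  // 'back' matches 'deadline' to within the timespec's nanosecond precision.
  (void)back;
}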