author     heretic <heretic@yandex-team.ru>  2022-02-10 16:45:46 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:46 +0300
commit     81eddc8c0b55990194e112b02d127b87d54164a9 (patch)
tree       9142afc54d335ea52910662635b898e79e192e49 /contrib/libs/grpc/include/grpcpp
parent     397cbe258b9e064f49c4ca575279f02f39fef76e (diff)
download   ydb-81eddc8c0b55990194e112b02d127b87d54164a9.tar.gz
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/grpc/include/grpcpp')
-rw-r--r--  contrib/libs/grpc/include/grpcpp/alarm.h | 202
-rw-r--r--  contrib/libs/grpc/include/grpcpp/channel.h | 184
-rw-r--r--  contrib/libs/grpc/include/grpcpp/create_channel.h | 80
-rw-r--r--  contrib/libs/grpc/include/grpcpp/create_channel_posix.h | 62
-rw-r--r--  contrib/libs/grpc/include/grpcpp/ext/proto_server_reflection_plugin.h | 48
-rw-r--r--  contrib/libs/grpc/include/grpcpp/ext/server_load_reporting.h | 44
-rw-r--r--  contrib/libs/grpc/include/grpcpp/generic/generic_stub.h | 376
-rw-r--r--  contrib/libs/grpc/include/grpcpp/grpcpp.h | 8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/health_check_service_interface.h | 58
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/channel_argument_option.h | 4
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/README.md | 42
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h | 70
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h | 2178
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h | 556
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h | 22
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/call.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h | 138
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h | 44
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h | 2360
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h | 1000
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h | 8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h | 846
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/config.h | 18
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h | 8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h | 10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h | 8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h | 32
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h | 40
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h | 690
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h | 2
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h | 8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h | 6
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h | 1524
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h | 404
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h | 1182
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h | 10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h | 114
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h | 128
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/status.h | 14
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h | 1792
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/time.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/method_handler_impl.h | 2
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/server_builder_option.h | 32
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/server_builder_plugin.h | 12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/server_initializer.h | 58
-rw-r--r--  contrib/libs/grpc/include/grpcpp/opencensus.h | 48
-rw-r--r--  contrib/libs/grpc/include/grpcpp/resource_quota.h | 84
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/alts_context.h | 138
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/alts_util.h | 100
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/auth_metadata_processor.h | 70
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/credentials.h | 580
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/cronet_credentials.h | 10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/server_credentials.h | 110
-rw-r--r--  contrib/libs/grpc/include/grpcpp/security/tls_credentials_options.h | 128
-rw-r--r--  contrib/libs/grpc/include/grpcpp/server.h | 702
-rw-r--r--  contrib/libs/grpc/include/grpcpp/server_builder.h | 774
-rw-r--r--  contrib/libs/grpc/include/grpcpp/server_posix.h | 20
-rw-r--r--  contrib/libs/grpc/include/grpcpp/support/channel_arguments.h | 242
-rw-r--r--  contrib/libs/grpc/include/grpcpp/support/error_details.h | 28
-rw-r--r--  contrib/libs/grpc/include/grpcpp/support/validate_service_config.h | 2
-rw-r--r--  contrib/libs/grpc/include/grpcpp/test/channel_test_peer.h | 88
-rw-r--r--  contrib/libs/grpc/include/grpcpp/test/default_reactor_test_peer.h | 10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/test/mock_stream.h | 10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/test/server_context_test_spouse.h | 10
68 files changed, 8825 insertions, 8825 deletions
diff --git a/contrib/libs/grpc/include/grpcpp/alarm.h b/contrib/libs/grpc/include/grpcpp/alarm.h
index ab7785a329..96add23e32 100644
--- a/contrib/libs/grpc/include/grpcpp/alarm.h
+++ b/contrib/libs/grpc/include/grpcpp/alarm.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,110 +16,110 @@
*
*/
-/// An Alarm posts the user-provided tag to its associated completion queue or
-/// invokes the user-provided function on expiry or cancellation.
+/// An Alarm posts the user-provided tag to its associated completion queue or
+/// invokes the user-provided function on expiry or cancellation.
#ifndef GRPCPP_ALARM_H
#define GRPCPP_ALARM_H
-#include <functional>
+#include <functional>
+
+#include <grpc/grpc.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/impl/codegen/time.h>
+#include <grpcpp/impl/grpc_library.h>
-#include <grpc/grpc.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
-#include <grpcpp/impl/codegen/completion_queue_tag.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/time.h>
-#include <grpcpp/impl/grpc_library.h>
-
namespace grpc {
-class Alarm : private ::grpc::GrpcLibraryCodegen {
- public:
- /// Create an unset completion queue alarm
- Alarm();
-
- /// Destroy the given completion queue alarm, cancelling it in the process.
- ~Alarm();
-
- /// DEPRECATED: Create and set a completion queue alarm instance associated to
- /// \a cq.
- /// This form is deprecated because it is inherently racy.
- /// \internal We rely on the presence of \a cq for grpc initialization. If \a
- /// cq were ever to be removed, a reference to a static
- /// internal::GrpcLibraryInitializer instance would need to be introduced
- /// here. \endinternal.
- template <typename T>
- Alarm(::grpc::CompletionQueue* cq, const T& deadline, void* tag) : Alarm() {
- SetInternal(cq, ::grpc::TimePoint<T>(deadline).raw_time(), tag);
- }
-
- /// Trigger an alarm instance on completion queue \a cq at the specified time.
- /// Once the alarm expires (at \a deadline) or it's cancelled (see \a Cancel),
- /// an event with tag \a tag will be added to \a cq. If the alarm expired, the
- /// event's success bit will be true, false otherwise (ie, upon cancellation).
- template <typename T>
- void Set(::grpc::CompletionQueue* cq, const T& deadline, void* tag) {
- SetInternal(cq, ::grpc::TimePoint<T>(deadline).raw_time(), tag);
- }
-
- /// Alarms aren't copyable.
- Alarm(const Alarm&) = delete;
- Alarm& operator=(const Alarm&) = delete;
-
- /// Alarms are movable.
- Alarm(Alarm&& rhs) : alarm_(rhs.alarm_) { rhs.alarm_ = nullptr; }
- Alarm& operator=(Alarm&& rhs) {
- alarm_ = rhs.alarm_;
- rhs.alarm_ = nullptr;
- return *this;
- }
-
- /// Cancel a completion queue alarm. Calling this function over an alarm that
- /// has already fired has no effect.
- void Cancel();
-
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Set an alarm to invoke callback \a f. The argument to the callback
- /// states whether the alarm expired at \a deadline (true) or was cancelled
- /// (false)
- template <typename T>
- void Set(const T& deadline, std::function<void(bool)> f) {
- SetInternal(::grpc::TimePoint<T>(deadline).raw_time(), std::move(f));
- }
-#endif
-
- /// NOTE: class experimental_type is not part of the public API of this class
- /// TODO(vjpai): Move these contents to the public API of Alarm when
- /// they are no longer experimental
- class experimental_type {
- public:
- explicit experimental_type(Alarm* alarm) : alarm_(alarm) {}
-
- /// Set an alarm to invoke callback \a f. The argument to the callback
- /// states whether the alarm expired at \a deadline (true) or was cancelled
- /// (false)
- template <typename T>
- void Set(const T& deadline, std::function<void(bool)> f) {
- alarm_->SetInternal(::grpc::TimePoint<T>(deadline).raw_time(),
- std::move(f));
- }
-
- private:
- Alarm* alarm_;
- };
-
- /// NOTE: The function experimental() is not stable public API. It is a view
- /// to the experimental components of this class. It may be changed or removed
- /// at any time.
- experimental_type experimental() { return experimental_type(this); }
-
- private:
- void SetInternal(::grpc::CompletionQueue* cq, gpr_timespec deadline,
- void* tag);
- void SetInternal(gpr_timespec deadline, std::function<void(bool)> f);
-
- ::grpc::internal::CompletionQueueTag* alarm_;
-};
-
-} // namespace grpc
-
+class Alarm : private ::grpc::GrpcLibraryCodegen {
+ public:
+ /// Create an unset completion queue alarm
+ Alarm();
+
+ /// Destroy the given completion queue alarm, cancelling it in the process.
+ ~Alarm();
+
+ /// DEPRECATED: Create and set a completion queue alarm instance associated to
+ /// \a cq.
+ /// This form is deprecated because it is inherently racy.
+ /// \internal We rely on the presence of \a cq for grpc initialization. If \a
+ /// cq were ever to be removed, a reference to a static
+ /// internal::GrpcLibraryInitializer instance would need to be introduced
+ /// here. \endinternal.
+ template <typename T>
+ Alarm(::grpc::CompletionQueue* cq, const T& deadline, void* tag) : Alarm() {
+ SetInternal(cq, ::grpc::TimePoint<T>(deadline).raw_time(), tag);
+ }
+
+ /// Trigger an alarm instance on completion queue \a cq at the specified time.
+ /// Once the alarm expires (at \a deadline) or it's cancelled (see \a Cancel),
+ /// an event with tag \a tag will be added to \a cq. If the alarm expired, the
+ /// event's success bit will be true, false otherwise (ie, upon cancellation).
+ template <typename T>
+ void Set(::grpc::CompletionQueue* cq, const T& deadline, void* tag) {
+ SetInternal(cq, ::grpc::TimePoint<T>(deadline).raw_time(), tag);
+ }
+
+ /// Alarms aren't copyable.
+ Alarm(const Alarm&) = delete;
+ Alarm& operator=(const Alarm&) = delete;
+
+ /// Alarms are movable.
+ Alarm(Alarm&& rhs) : alarm_(rhs.alarm_) { rhs.alarm_ = nullptr; }
+ Alarm& operator=(Alarm&& rhs) {
+ alarm_ = rhs.alarm_;
+ rhs.alarm_ = nullptr;
+ return *this;
+ }
+
+ /// Cancel a completion queue alarm. Calling this function over an alarm that
+ /// has already fired has no effect.
+ void Cancel();
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Set an alarm to invoke callback \a f. The argument to the callback
+ /// states whether the alarm expired at \a deadline (true) or was cancelled
+ /// (false)
+ template <typename T>
+ void Set(const T& deadline, std::function<void(bool)> f) {
+ SetInternal(::grpc::TimePoint<T>(deadline).raw_time(), std::move(f));
+ }
+#endif
+
+ /// NOTE: class experimental_type is not part of the public API of this class
+ /// TODO(vjpai): Move these contents to the public API of Alarm when
+ /// they are no longer experimental
+ class experimental_type {
+ public:
+ explicit experimental_type(Alarm* alarm) : alarm_(alarm) {}
+
+ /// Set an alarm to invoke callback \a f. The argument to the callback
+ /// states whether the alarm expired at \a deadline (true) or was cancelled
+ /// (false)
+ template <typename T>
+ void Set(const T& deadline, std::function<void(bool)> f) {
+ alarm_->SetInternal(::grpc::TimePoint<T>(deadline).raw_time(),
+ std::move(f));
+ }
+
+ private:
+ Alarm* alarm_;
+ };
+
+ /// NOTE: The function experimental() is not stable public API. It is a view
+ /// to the experimental components of this class. It may be changed or removed
+ /// at any time.
+ experimental_type experimental() { return experimental_type(this); }
+
+ private:
+ void SetInternal(::grpc::CompletionQueue* cq, gpr_timespec deadline,
+ void* tag);
+ void SetInternal(gpr_timespec deadline, std::function<void(bool)> f);
+
+ ::grpc::internal::CompletionQueueTag* alarm_;
+};
+
+} // namespace grpc
+
#endif // GRPCPP_ALARM_H
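Aside from the diff itself, a minimal usage sketch of the Alarm API shown above, assuming a caller-owned CompletionQueue; the two-second deadline and the tag value are illustrative.

    #include <chrono>
    #include <grpcpp/alarm.h>
    #include <grpcpp/completion_queue.h>

    // Post a tag to a completion queue when the deadline passes.
    void WaitForAlarm() {
      grpc::CompletionQueue cq;
      grpc::Alarm alarm;
      void* tag = reinterpret_cast<void*>(1);
      alarm.Set(&cq, std::chrono::system_clock::now() + std::chrono::seconds(2), tag);
      void* got_tag = nullptr;
      bool ok = false;
      // Next() returns the tag once the alarm expires (ok == true)
      // or is cancelled via alarm.Cancel() (ok == false).
      if (cq.Next(&got_tag, &ok) && got_tag == tag) {
        // handle expiry or cancellation based on ok
      }
      cq.Shutdown();
      while (cq.Next(&got_tag, &ok)) {
      }
    }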
diff --git a/contrib/libs/grpc/include/grpcpp/channel.h b/contrib/libs/grpc/include/grpcpp/channel.h
index edf39615c2..6a31ff7aa9 100644
--- a/contrib/libs/grpc/include/grpcpp/channel.h
+++ b/contrib/libs/grpc/include/grpcpp/channel.h
@@ -19,29 +19,29 @@
#ifndef GRPCPP_CHANNEL_H
#define GRPCPP_CHANNEL_H
-#include <memory>
-
-#include <grpc/grpc.h>
-#include <grpcpp/impl/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/sync.h>
-
-struct grpc_channel;
-
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpcpp/impl/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/impl/codegen/sync.h>
+
+struct grpc_channel;
+
namespace grpc {
-namespace testing {
-class ChannelTestPeer;
-} // namespace testing
+namespace testing {
+class ChannelTestPeer;
+} // namespace testing
-std::shared_ptr<Channel> CreateChannelInternal(
- const TString& host, grpc_channel* c_channel,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
+std::shared_ptr<Channel> CreateChannelInternal(
+ const TString& host, grpc_channel* c_channel,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
namespace experimental {
/// Resets the channel's connection backoff.
@@ -50,77 +50,77 @@ namespace experimental {
void ChannelResetConnectionBackoff(Channel* channel);
} // namespace experimental
-/// Channels represent a connection to an endpoint. Created by \a CreateChannel.
-class Channel final : public ::grpc::ChannelInterface,
- public ::grpc::internal::CallHook,
- public std::enable_shared_from_this<Channel>,
- private ::grpc::GrpcLibraryCodegen {
- public:
- ~Channel();
-
- /// Get the current channel state. If the channel is in IDLE and
- /// \a try_to_connect is set to true, try to connect.
- grpc_connectivity_state GetState(bool try_to_connect) override;
-
- /// Returns the LB policy name, or the empty string if not yet available.
- TString GetLoadBalancingPolicyName() const;
-
- /// Returns the service config in JSON form, or the empty string if
- /// not available.
- TString GetServiceConfigJSON() const;
-
- private:
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::BlockingUnaryCallImpl;
- friend class ::grpc::testing::ChannelTestPeer;
- friend void experimental::ChannelResetConnectionBackoff(Channel* channel);
- friend std::shared_ptr<Channel> grpc::CreateChannelInternal(
- const TString& host, grpc_channel* c_channel,
- std::vector<std::unique_ptr<
- ::grpc::experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
- friend class ::grpc::internal::InterceptedChannel;
- Channel(const TString& host, grpc_channel* c_channel,
- std::vector<std::unique_ptr<
- ::grpc::experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
- ::grpc::internal::Call CreateCall(const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq) override;
- void PerformOpsOnCall(::grpc::internal::CallOpSetInterface* ops,
- ::grpc::internal::Call* call) override;
- void* RegisterMethod(const char* method) override;
-
- void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
- gpr_timespec deadline,
- ::grpc::CompletionQueue* cq, void* tag) override;
- bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
- gpr_timespec deadline) override;
-
- ::grpc::CompletionQueue* CallbackCQ() override;
-
- ::grpc::internal::Call CreateCallInternal(
- const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq, size_t interceptor_pos) override;
-
- const TString host_;
- grpc_channel* const c_channel_; // owned
-
- // mu_ protects callback_cq_ (the per-channel callbackable completion queue)
- grpc::internal::Mutex mu_;
-
- // callback_cq_ references the callbackable completion queue associated
- // with this channel (if any). It is set on the first call to CallbackCQ().
- // It is _not owned_ by the channel; ownership belongs with its internal
- // shutdown callback tag (invoked when the CQ is fully shutdown).
- ::grpc::CompletionQueue* callback_cq_ = nullptr;
-
- std::vector<
- std::unique_ptr<::grpc::experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators_;
-};
-
+/// Channels represent a connection to an endpoint. Created by \a CreateChannel.
+class Channel final : public ::grpc::ChannelInterface,
+ public ::grpc::internal::CallHook,
+ public std::enable_shared_from_this<Channel>,
+ private ::grpc::GrpcLibraryCodegen {
+ public:
+ ~Channel();
+
+ /// Get the current channel state. If the channel is in IDLE and
+ /// \a try_to_connect is set to true, try to connect.
+ grpc_connectivity_state GetState(bool try_to_connect) override;
+
+ /// Returns the LB policy name, or the empty string if not yet available.
+ TString GetLoadBalancingPolicyName() const;
+
+ /// Returns the service config in JSON form, or the empty string if
+ /// not available.
+ TString GetServiceConfigJSON() const;
+
+ private:
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::BlockingUnaryCallImpl;
+ friend class ::grpc::testing::ChannelTestPeer;
+ friend void experimental::ChannelResetConnectionBackoff(Channel* channel);
+ friend std::shared_ptr<Channel> grpc::CreateChannelInternal(
+ const TString& host, grpc_channel* c_channel,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+ friend class ::grpc::internal::InterceptedChannel;
+ Channel(const TString& host, grpc_channel* c_channel,
+ std::vector<std::unique_ptr<
+ ::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+ ::grpc::internal::Call CreateCall(const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq) override;
+ void PerformOpsOnCall(::grpc::internal::CallOpSetInterface* ops,
+ ::grpc::internal::Call* call) override;
+ void* RegisterMethod(const char* method) override;
+
+ void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
+ gpr_timespec deadline,
+ ::grpc::CompletionQueue* cq, void* tag) override;
+ bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
+ gpr_timespec deadline) override;
+
+ ::grpc::CompletionQueue* CallbackCQ() override;
+
+ ::grpc::internal::Call CreateCallInternal(
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq, size_t interceptor_pos) override;
+
+ const TString host_;
+ grpc_channel* const c_channel_; // owned
+
+ // mu_ protects callback_cq_ (the per-channel callbackable completion queue)
+ grpc::internal::Mutex mu_;
+
+ // callback_cq_ references the callbackable completion queue associated
+ // with this channel (if any). It is set on the first call to CallbackCQ().
+ // It is _not owned_ by the channel; ownership belongs with its internal
+ // shutdown callback tag (invoked when the CQ is fully shutdown).
+ ::grpc::CompletionQueue* callback_cq_ = nullptr;
+
+ std::vector<
+ std::unique_ptr<::grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators_;
+};
+
} // namespace grpc
#endif // GRPCPP_CHANNEL_H
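A small sketch of the Channel accessors diffed above (GetState plus a connect wait); the target address, credentials, and five-second deadline are placeholder values.

    #include <chrono>
    #include <grpcpp/create_channel.h>
    #include <grpcpp/security/credentials.h>

    void ProbeChannel() {
      auto channel = grpc::CreateChannel("localhost:50051",
                                         grpc::InsecureChannelCredentials());
      // Ask for the current state and kick off a connection attempt if idle.
      grpc_connectivity_state state = channel->GetState(/*try_to_connect=*/true);
      (void)state;
      // Optionally block until connected or the deadline passes.
      channel->WaitForConnected(std::chrono::system_clock::now() +
                                std::chrono::seconds(5));
    }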
diff --git a/contrib/libs/grpc/include/grpcpp/create_channel.h b/contrib/libs/grpc/include/grpcpp/create_channel.h
index b04b2bffa6..4b94a08e45 100644
--- a/contrib/libs/grpc/include/grpcpp/create_channel.h
+++ b/contrib/libs/grpc/include/grpcpp/create_channel.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,59 +19,59 @@
#ifndef GRPCPP_CREATE_CHANNEL_H
#define GRPCPP_CREATE_CHANNEL_H
-#include <memory>
-
-#include <grpcpp/channel.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/security/credentials.h>
+#include <memory>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
-#include <grpcpp/support/config.h>
+#include <grpcpp/support/config.h>
namespace grpc {
-/// Create a new \a Channel pointing to \a target.
-///
-/// \param target The URI of the endpoint to connect to.
-/// \param creds Credentials to use for the created channel. If it does not
-/// hold an object or is invalid, a lame channel (one on which all operations
-/// fail) is returned.
-std::shared_ptr<Channel> CreateChannel(
+/// Create a new \a Channel pointing to \a target.
+///
+/// \param target The URI of the endpoint to connect to.
+/// \param creds Credentials to use for the created channel. If it does not
+/// hold an object or is invalid, a lame channel (one on which all operations
+/// fail) is returned.
+std::shared_ptr<Channel> CreateChannel(
const grpc::string& target,
- const std::shared_ptr<ChannelCredentials>& creds);
+ const std::shared_ptr<ChannelCredentials>& creds);
-/// Create a new \em custom \a Channel pointing to \a target.
-///
-/// \warning For advanced use and testing ONLY. Override default channel
-/// arguments only if necessary.
-///
-/// \param target The URI of the endpoint to connect to.
-/// \param creds Credentials to use for the created channel. If it does not
-/// hold an object or is invalid, a lame channel (one on which all operations
-/// fail) is returned.
-/// \param args Options for channel creation.
-std::shared_ptr<Channel> CreateCustomChannel(
+/// Create a new \em custom \a Channel pointing to \a target.
+///
+/// \warning For advanced use and testing ONLY. Override default channel
+/// arguments only if necessary.
+///
+/// \param target The URI of the endpoint to connect to.
+/// \param creds Credentials to use for the created channel. If it does not
+/// hold an object or is invalid, a lame channel (one on which all operations
+/// fail) is returned.
+/// \param args Options for channel creation.
+std::shared_ptr<Channel> CreateCustomChannel(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
- const ChannelArguments& args);
+ const ChannelArguments& args);
namespace experimental {
-/// Create a new \em custom \a Channel pointing to \a target with \a
-/// interceptors being invoked per call.
-///
-/// \warning For advanced use and testing ONLY. Override default channel
-/// arguments only if necessary.
-///
-/// \param target The URI of the endpoint to connect to.
-/// \param creds Credentials to use for the created channel. If it does not
-/// hold an object or is invalid, a lame channel (one on which all operations
-/// fail) is returned.
-/// \param args Options for channel creation.
-std::shared_ptr<Channel> CreateCustomChannelWithInterceptors(
+/// Create a new \em custom \a Channel pointing to \a target with \a
+/// interceptors being invoked per call.
+///
+/// \warning For advanced use and testing ONLY. Override default channel
+/// arguments only if necessary.
+///
+/// \param target The URI of the endpoint to connect to.
+/// \param creds Credentials to use for the created channel. If it does not
+/// hold an object or is invalid, a lame channel (one on which all operations
+/// fail) is returned.
+/// \param args Options for channel creation.
+std::shared_ptr<Channel> CreateCustomChannelWithInterceptors(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args,
std::vector<
std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
+ interceptor_creators);
} // namespace experimental
} // namespace grpc
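A minimal sketch of the factory functions above; the target URI and keepalive setting are illustrative, and CreateCustomChannel is only needed when default channel arguments must be overridden.

    #include <memory>
    #include <grpcpp/create_channel.h>
    #include <grpcpp/security/credentials.h>
    #include <grpcpp/support/channel_arguments.h>

    std::shared_ptr<grpc::Channel> MakeCustomChannel() {
      // For default arguments, grpc::CreateChannel(target, creds) is enough.
      grpc::ChannelArguments args;
      args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 30000);  // override one argument
      return grpc::CreateCustomChannel(
          "dns:///my.service:443",
          grpc::SslCredentials(grpc::SslCredentialsOptions()), args);
    }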
diff --git a/contrib/libs/grpc/include/grpcpp/create_channel_posix.h b/contrib/libs/grpc/include/grpcpp/create_channel_posix.h
index 23797dd63e..c6755b0aa6 100644
--- a/contrib/libs/grpc/include/grpcpp/create_channel_posix.h
+++ b/contrib/libs/grpc/include/grpcpp/create_channel_posix.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,47 +19,47 @@
#ifndef GRPCPP_CREATE_CHANNEL_POSIX_H
#define GRPCPP_CREATE_CHANNEL_POSIX_H
-#include <memory>
+#include <memory>
+
+#include <grpc/support/port_platform.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/support/channel_arguments.h>
-#include <grpc/support/port_platform.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/support/channel_arguments.h>
-
namespace grpc {
#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
-/// Create a new \a Channel communicating over the given file descriptor.
-///
-/// \param target The name of the target.
-/// \param fd The file descriptor representing a socket.
-std::shared_ptr<grpc::Channel> CreateInsecureChannelFromFd(
- const TString& target, int fd);
+/// Create a new \a Channel communicating over the given file descriptor.
+///
+/// \param target The name of the target.
+/// \param fd The file descriptor representing a socket.
+std::shared_ptr<grpc::Channel> CreateInsecureChannelFromFd(
+ const TString& target, int fd);
-/// Create a new \a Channel communicating over given file descriptor with custom
-/// channel arguments.
-///
-/// \param target The name of the target.
-/// \param fd The file descriptor representing a socket.
-/// \param args Options for channel creation.
-std::shared_ptr<grpc::Channel> CreateCustomInsecureChannelFromFd(
- const TString& target, int fd, const grpc::ChannelArguments& args);
+/// Create a new \a Channel communicating over given file descriptor with custom
+/// channel arguments.
+///
+/// \param target The name of the target.
+/// \param fd The file descriptor representing a socket.
+/// \param args Options for channel creation.
+std::shared_ptr<grpc::Channel> CreateCustomInsecureChannelFromFd(
+ const TString& target, int fd, const grpc::ChannelArguments& args);
namespace experimental {
-/// Create a new \a Channel communicating over given file descriptor with custom
-/// channel arguments.
-///
-/// \param target The name of the target.
-/// \param fd The file descriptor representing a socket.
-/// \param args Options for channel creation.
-/// \param interceptor_creators Vector of interceptor factory objects.
-std::shared_ptr<grpc::Channel>
+/// Create a new \a Channel communicating over given file descriptor with custom
+/// channel arguments.
+///
+/// \param target The name of the target.
+/// \param fd The file descriptor representing a socket.
+/// \param args Options for channel creation.
+/// \param interceptor_creators Vector of interceptor factory objects.
+std::shared_ptr<grpc::Channel>
CreateCustomInsecureChannelWithInterceptorsFromFd(
- const TString& target, int fd, const grpc::ChannelArguments& args,
+ const TString& target, int fd, const grpc::ChannelArguments& args,
std::unique_ptr<std::vector<
- std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>>
- interceptor_creators);
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>>
+ interceptor_creators);
} // namespace experimental
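A sketch of the fd-based factory above, assuming a POSIX build where GPR_SUPPORT_CHANNELS_FROM_FD is defined and `fd` is an already-connected socket; the target label is illustrative.

    #include <memory>
    #include <grpcpp/create_channel_posix.h>

    #ifdef GPR_SUPPORT_CHANNELS_FROM_FD
    std::shared_ptr<grpc::Channel> ChannelFromSocket(int fd) {
      // Wrap an existing connected socket in an insecure channel.
      return grpc::CreateInsecureChannelFromFd("unix-peer", fd);
    }
    #endif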
diff --git a/contrib/libs/grpc/include/grpcpp/ext/proto_server_reflection_plugin.h b/contrib/libs/grpc/include/grpcpp/ext/proto_server_reflection_plugin.h
index ccbcf04f8e..7df4aa931b 100644
--- a/contrib/libs/grpc/include/grpcpp/ext/proto_server_reflection_plugin.h
+++ b/contrib/libs/grpc/include/grpcpp/ext/proto_server_reflection_plugin.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,33 +19,33 @@
#ifndef GRPCPP_EXT_PROTO_SERVER_REFLECTION_PLUGIN_H
#define GRPCPP_EXT_PROTO_SERVER_REFLECTION_PLUGIN_H
-#include <grpcpp/impl/server_builder_plugin.h>
-#include <grpcpp/support/config.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/support/config.h>
namespace grpc {
-class ProtoServerReflection;
-class ServerInitializer;
-
+class ProtoServerReflection;
+class ServerInitializer;
+
namespace reflection {
-class ProtoServerReflectionPlugin : public ::grpc::ServerBuilderPlugin {
- public:
- ProtoServerReflectionPlugin();
- ::TString name() override;
- void InitServer(ServerInitializer* si) override;
- void Finish(ServerInitializer* si) override;
- void ChangeArguments(const ::TString& name, void* value) override;
- bool has_async_methods() const override;
- bool has_sync_methods() const override;
-
- private:
- std::shared_ptr<grpc::ProtoServerReflection> reflection_service_;
-};
-
-/// Add proto reflection plugin to \a ServerBuilder.
-/// This function should be called at the static initialization time.
-void InitProtoReflectionServerBuilderPlugin();
-
+class ProtoServerReflectionPlugin : public ::grpc::ServerBuilderPlugin {
+ public:
+ ProtoServerReflectionPlugin();
+ ::TString name() override;
+ void InitServer(ServerInitializer* si) override;
+ void Finish(ServerInitializer* si) override;
+ void ChangeArguments(const ::TString& name, void* value) override;
+ bool has_async_methods() const override;
+ bool has_sync_methods() const override;
+
+ private:
+ std::shared_ptr<grpc::ProtoServerReflection> reflection_service_;
+};
+
+/// Add proto reflection plugin to \a ServerBuilder.
+/// This function should be called at the static initialization time.
+void InitProtoReflectionServerBuilderPlugin();
+
} // namespace reflection
} // namespace grpc
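A short sketch of wiring the reflection plugin into a server; the listen address is a placeholder and InsecureServerCredentials is used only to keep the example small.

    #include <memory>
    #include <grpcpp/ext/proto_server_reflection_plugin.h>
    #include <grpcpp/security/server_credentials.h>
    #include <grpcpp/server.h>
    #include <grpcpp/server_builder.h>

    void RunServerWithReflection() {
      // Register the plugin before building; typically done once at startup.
      grpc::reflection::InitProtoReflectionServerBuilderPlugin();
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
      server->Wait();
    }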
diff --git a/contrib/libs/grpc/include/grpcpp/ext/server_load_reporting.h b/contrib/libs/grpc/include/grpcpp/ext/server_load_reporting.h
index 63b2ecfcc2..987a48ee79 100644
--- a/contrib/libs/grpc/include/grpcpp/ext/server_load_reporting.h
+++ b/contrib/libs/grpc/include/grpcpp/ext/server_load_reporting.h
@@ -19,33 +19,33 @@
#ifndef GRPCPP_EXT_SERVER_LOAD_REPORTING_H
#define GRPCPP_EXT_SERVER_LOAD_REPORTING_H
-#include <grpc/support/port_platform.h>
+#include <grpc/support/port_platform.h>
+
+#include <grpc/load_reporting.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/server_builder_option.h>
-#include <grpc/load_reporting.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/server_builder_option.h>
-
namespace grpc {
namespace load_reporter {
namespace experimental {
-// The ServerBuilderOption to enable server-side load reporting feature. To
-// enable the feature, please make sure the binary builds with the
-// grpcpp_server_load_reporting library and set this option in the
-// ServerBuilder.
-class LoadReportingServiceServerBuilderOption
- : public grpc::ServerBuilderOption {
- public:
- void UpdateArguments(::grpc::ChannelArguments* args) override;
- void UpdatePlugins(std::vector<std::unique_ptr<::grpc::ServerBuilderPlugin>>*
- plugins) override;
-};
-
-// Adds the load reporting cost with \a cost_name and \a cost_value in the
-// trailing metadata of the server context.
-void AddLoadReportingCost(grpc::ServerContext* ctx,
- const TString& cost_name, double cost_value);
+// The ServerBuilderOption to enable server-side load reporting feature. To
+// enable the feature, please make sure the binary builds with the
+// grpcpp_server_load_reporting library and set this option in the
+// ServerBuilder.
+class LoadReportingServiceServerBuilderOption
+ : public grpc::ServerBuilderOption {
+ public:
+ void UpdateArguments(::grpc::ChannelArguments* args) override;
+ void UpdatePlugins(std::vector<std::unique_ptr<::grpc::ServerBuilderPlugin>>*
+ plugins) override;
+};
+
+// Adds the load reporting cost with \a cost_name and \a cost_value in the
+// trailing metadata of the server context.
+void AddLoadReportingCost(grpc::ServerContext* ctx,
+ const TString& cost_name, double cost_value);
} // namespace experimental
} // namespace load_reporter
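A minimal sketch of enabling the option above and attaching a per-call cost, assuming the binary links the grpcpp_server_load_reporting library; the cost name and value are placeholders.

    #include <memory>
    #include <grpcpp/ext/server_load_reporting.h>
    #include <grpcpp/server_builder.h>

    void EnableLoadReporting(grpc::ServerBuilder* builder) {
      // Install the ServerBuilderOption that turns on load reporting.
      builder->SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
          new grpc::load_reporter::experimental::
              LoadReportingServiceServerBuilderOption()));
    }

    // Inside a handler, report a per-call cost through the trailing metadata:
    //   grpc::load_reporter::experimental::AddLoadReportingCost(&ctx, "cpu", 0.25);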
diff --git a/contrib/libs/grpc/include/grpcpp/generic/generic_stub.h b/contrib/libs/grpc/include/grpcpp/generic/generic_stub.h
index b44b975569..102d2591c9 100644
--- a/contrib/libs/grpc/include/grpcpp/generic/generic_stub.h
+++ b/contrib/libs/grpc/include/grpcpp/generic/generic_stub.h
@@ -19,196 +19,196 @@
#ifndef GRPCPP_GENERIC_GENERIC_STUB_H
#define GRPCPP_GENERIC_GENERIC_STUB_H
-#include <functional>
-
-#include <grpcpp/client_context.h>
-#include <grpcpp/impl/rpc_method.h>
-#include <grpcpp/support/async_stream.h>
-#include <grpcpp/support/async_unary_call.h>
-#include <grpcpp/support/byte_buffer.h>
-#include <grpcpp/support/client_callback.h>
-#include <grpcpp/support/status.h>
-
+#include <functional>
+
+#include <grpcpp/client_context.h>
+#include <grpcpp/impl/rpc_method.h>
+#include <grpcpp/support/async_stream.h>
+#include <grpcpp/support/async_unary_call.h>
+#include <grpcpp/support/byte_buffer.h>
+#include <grpcpp/support/client_callback.h>
+#include <grpcpp/support/status.h>
+
namespace grpc {
-class CompletionQueue;
-
-typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
- GenericClientAsyncReaderWriter;
-typedef ClientAsyncResponseReader<ByteBuffer> GenericClientAsyncResponseReader;
-
-/// Generic stubs provide a type-unaware interface to call gRPC methods
-/// by name. In practice, the Request and Response types should be basic
-/// types like grpc::ByteBuffer or proto::MessageLite (the base protobuf).
-template <class RequestType, class ResponseType>
-class TemplatedGenericStub final {
- public:
- explicit TemplatedGenericStub(std::shared_ptr<grpc::ChannelInterface> channel)
- : channel_(channel) {}
-
- /// Setup a call to a named method \a method using \a context, but don't
- /// start it. Let it be started explicitly with StartCall and a tag.
- /// The return value only indicates whether or not registration of the call
- /// succeeded (i.e. the call won't proceed if the return value is nullptr).
- std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>
- PrepareCall(ClientContext* context, const TString& method,
- ::grpc::CompletionQueue* cq) {
- return CallInternal(channel_.get(), context, method, cq, false, nullptr);
- }
-
- /// Setup a unary call to a named method \a method using \a context, and don't
- /// start it. Let it be started explicitly with StartCall.
- /// The return value only indicates whether or not registration of the call
- /// succeeded (i.e. the call won't proceed if the return value is nullptr).
- std::unique_ptr<ClientAsyncResponseReader<ResponseType>> PrepareUnaryCall(
- ClientContext* context, const TString& method,
- const RequestType& request, ::grpc::CompletionQueue* cq) {
- return std::unique_ptr<ClientAsyncResponseReader<ResponseType>>(
- internal::ClientAsyncResponseReaderFactory<ResponseType>::Create(
- channel_.get(), cq,
- grpc::internal::RpcMethod(method.c_str(),
- grpc::internal::RpcMethod::NORMAL_RPC),
- context, request, false));
- }
-
- /// DEPRECATED for multi-threaded use
- /// Begin a call to a named method \a method using \a context.
- /// A tag \a tag will be delivered to \a cq when the call has been started
- /// (i.e, initial metadata has been sent).
- /// The return value only indicates whether or not registration of the call
- /// succeeded (i.e. the call won't proceed if the return value is nullptr).
- std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>> Call(
- ClientContext* context, const TString& method,
- ::grpc::CompletionQueue* cq, void* tag) {
- return CallInternal(channel_.get(), context, method, cq, true, tag);
- }
-
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Setup and start a unary call to a named method \a method using
- /// \a context and specifying the \a request and \a response buffers.
- void UnaryCall(ClientContext* context, const TString& method,
- const RequestType* request, ResponseType* response,
- std::function<void(grpc::Status)> on_completion) {
- UnaryCallInternal(context, method, request, response,
- std::move(on_completion));
- }
-
- /// Setup a unary call to a named method \a method using
- /// \a context and specifying the \a request and \a response buffers.
- /// Like any other reactor-based RPC, it will not be activated until
- /// StartCall is invoked on its reactor.
- void PrepareUnaryCall(ClientContext* context, const TString& method,
- const RequestType* request, ResponseType* response,
- ClientUnaryReactor* reactor) {
- PrepareUnaryCallInternal(context, method, request, response, reactor);
- }
-
- /// Setup a call to a named method \a method using \a context and tied to
- /// \a reactor . Like any other bidi streaming RPC, it will not be activated
- /// until StartCall is invoked on its reactor.
- void PrepareBidiStreamingCall(
- ClientContext* context, const TString& method,
- ClientBidiReactor<RequestType, ResponseType>* reactor) {
- PrepareBidiStreamingCallInternal(context, method, reactor);
- }
-#endif
-
- /// NOTE: class experimental_type is not part of the public API of this class
- /// TODO(vjpai): Move these contents to the public API of GenericStub when
- /// they are no longer experimental
- class experimental_type {
- public:
- explicit experimental_type(TemplatedGenericStub* stub) : stub_(stub) {}
-
- /// Setup and start a unary call to a named method \a method using
- /// \a context and specifying the \a request and \a response buffers.
- void UnaryCall(ClientContext* context, const TString& method,
- const RequestType* request, ResponseType* response,
- std::function<void(grpc::Status)> on_completion) {
- stub_->UnaryCallInternal(context, method, request, response,
- std::move(on_completion));
- }
-
- /// Setup a unary call to a named method \a method using
- /// \a context and specifying the \a request and \a response buffers.
- /// Like any other reactor-based RPC, it will not be activated until
- /// StartCall is invoked on its reactor.
- void PrepareUnaryCall(ClientContext* context, const TString& method,
- const RequestType* request, ResponseType* response,
- ClientUnaryReactor* reactor) {
- stub_->PrepareUnaryCallInternal(context, method, request, response,
- reactor);
- }
-
- /// Setup a call to a named method \a method using \a context and tied to
- /// \a reactor . Like any other bidi streaming RPC, it will not be activated
- /// until StartCall is invoked on its reactor.
- void PrepareBidiStreamingCall(
- ClientContext* context, const TString& method,
- ClientBidiReactor<RequestType, ResponseType>* reactor) {
- stub_->PrepareBidiStreamingCallInternal(context, method, reactor);
- }
-
- private:
- TemplatedGenericStub* stub_;
- };
-
- /// NOTE: The function experimental() is not stable public API. It is a view
- /// to the experimental components of this class. It may be changed or removed
- /// at any time.
- experimental_type experimental() { return experimental_type(this); }
-
- private:
- std::shared_ptr<grpc::ChannelInterface> channel_;
-
- void UnaryCallInternal(ClientContext* context, const TString& method,
- const RequestType* request, ResponseType* response,
- std::function<void(grpc::Status)> on_completion) {
- internal::CallbackUnaryCall(
- channel_.get(),
- grpc::internal::RpcMethod(method.c_str(),
- grpc::internal::RpcMethod::NORMAL_RPC),
- context, request, response, std::move(on_completion));
- }
-
- void PrepareUnaryCallInternal(ClientContext* context,
- const TString& method,
- const RequestType* request,
- ResponseType* response,
- ClientUnaryReactor* reactor) {
- internal::ClientCallbackUnaryFactory::Create<RequestType, ResponseType>(
- channel_.get(),
- grpc::internal::RpcMethod(method.c_str(),
- grpc::internal::RpcMethod::NORMAL_RPC),
- context, request, response, reactor);
- }
-
- void PrepareBidiStreamingCallInternal(
- ClientContext* context, const TString& method,
- ClientBidiReactor<RequestType, ResponseType>* reactor) {
- internal::ClientCallbackReaderWriterFactory<RequestType, ResponseType>::
- Create(channel_.get(),
- grpc::internal::RpcMethod(
- method.c_str(), grpc::internal::RpcMethod::BIDI_STREAMING),
- context, reactor);
- }
-
- std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>
- CallInternal(grpc::ChannelInterface* channel, ClientContext* context,
- const TString& method, ::grpc::CompletionQueue* cq,
- bool start, void* tag) {
- return std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>(
- internal::ClientAsyncReaderWriterFactory<RequestType, ResponseType>::
- Create(
- channel, cq,
- grpc::internal::RpcMethod(
- method.c_str(), grpc::internal::RpcMethod::BIDI_STREAMING),
- context, start, tag));
- }
-};
-
-typedef TemplatedGenericStub<grpc::ByteBuffer, grpc::ByteBuffer> GenericStub;
-
+class CompletionQueue;
+
+typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
+ GenericClientAsyncReaderWriter;
+typedef ClientAsyncResponseReader<ByteBuffer> GenericClientAsyncResponseReader;
+
+/// Generic stubs provide a type-unaware interface to call gRPC methods
+/// by name. In practice, the Request and Response types should be basic
+/// types like grpc::ByteBuffer or proto::MessageLite (the base protobuf).
+template <class RequestType, class ResponseType>
+class TemplatedGenericStub final {
+ public:
+ explicit TemplatedGenericStub(std::shared_ptr<grpc::ChannelInterface> channel)
+ : channel_(channel) {}
+
+ /// Setup a call to a named method \a method using \a context, but don't
+ /// start it. Let it be started explicitly with StartCall and a tag.
+ /// The return value only indicates whether or not registration of the call
+ /// succeeded (i.e. the call won't proceed if the return value is nullptr).
+ std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>
+ PrepareCall(ClientContext* context, const TString& method,
+ ::grpc::CompletionQueue* cq) {
+ return CallInternal(channel_.get(), context, method, cq, false, nullptr);
+ }
+
+ /// Setup a unary call to a named method \a method using \a context, and don't
+ /// start it. Let it be started explicitly with StartCall.
+ /// The return value only indicates whether or not registration of the call
+ /// succeeded (i.e. the call won't proceed if the return value is nullptr).
+ std::unique_ptr<ClientAsyncResponseReader<ResponseType>> PrepareUnaryCall(
+ ClientContext* context, const TString& method,
+ const RequestType& request, ::grpc::CompletionQueue* cq) {
+ return std::unique_ptr<ClientAsyncResponseReader<ResponseType>>(
+ internal::ClientAsyncResponseReaderFactory<ResponseType>::Create(
+ channel_.get(), cq,
+ grpc::internal::RpcMethod(method.c_str(),
+ grpc::internal::RpcMethod::NORMAL_RPC),
+ context, request, false));
+ }
+
+ /// DEPRECATED for multi-threaded use
+ /// Begin a call to a named method \a method using \a context.
+ /// A tag \a tag will be delivered to \a cq when the call has been started
+ /// (i.e, initial metadata has been sent).
+ /// The return value only indicates whether or not registration of the call
+ /// succeeded (i.e. the call won't proceed if the return value is nullptr).
+ std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>> Call(
+ ClientContext* context, const TString& method,
+ ::grpc::CompletionQueue* cq, void* tag) {
+ return CallInternal(channel_.get(), context, method, cq, true, tag);
+ }
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Setup and start a unary call to a named method \a method using
+ /// \a context and specifying the \a request and \a response buffers.
+ void UnaryCall(ClientContext* context, const TString& method,
+ const RequestType* request, ResponseType* response,
+ std::function<void(grpc::Status)> on_completion) {
+ UnaryCallInternal(context, method, request, response,
+ std::move(on_completion));
+ }
+
+ /// Setup a unary call to a named method \a method using
+ /// \a context and specifying the \a request and \a response buffers.
+ /// Like any other reactor-based RPC, it will not be activated until
+ /// StartCall is invoked on its reactor.
+ void PrepareUnaryCall(ClientContext* context, const TString& method,
+ const RequestType* request, ResponseType* response,
+ ClientUnaryReactor* reactor) {
+ PrepareUnaryCallInternal(context, method, request, response, reactor);
+ }
+
+ /// Setup a call to a named method \a method using \a context and tied to
+ /// \a reactor . Like any other bidi streaming RPC, it will not be activated
+ /// until StartCall is invoked on its reactor.
+ void PrepareBidiStreamingCall(
+ ClientContext* context, const TString& method,
+ ClientBidiReactor<RequestType, ResponseType>* reactor) {
+ PrepareBidiStreamingCallInternal(context, method, reactor);
+ }
+#endif
+
+ /// NOTE: class experimental_type is not part of the public API of this class
+ /// TODO(vjpai): Move these contents to the public API of GenericStub when
+ /// they are no longer experimental
+ class experimental_type {
+ public:
+ explicit experimental_type(TemplatedGenericStub* stub) : stub_(stub) {}
+
+ /// Setup and start a unary call to a named method \a method using
+ /// \a context and specifying the \a request and \a response buffers.
+ void UnaryCall(ClientContext* context, const TString& method,
+ const RequestType* request, ResponseType* response,
+ std::function<void(grpc::Status)> on_completion) {
+ stub_->UnaryCallInternal(context, method, request, response,
+ std::move(on_completion));
+ }
+
+ /// Setup a unary call to a named method \a method using
+ /// \a context and specifying the \a request and \a response buffers.
+ /// Like any other reactor-based RPC, it will not be activated until
+ /// StartCall is invoked on its reactor.
+ void PrepareUnaryCall(ClientContext* context, const TString& method,
+ const RequestType* request, ResponseType* response,
+ ClientUnaryReactor* reactor) {
+ stub_->PrepareUnaryCallInternal(context, method, request, response,
+ reactor);
+ }
+
+ /// Setup a call to a named method \a method using \a context and tied to
+ /// \a reactor . Like any other bidi streaming RPC, it will not be activated
+ /// until StartCall is invoked on its reactor.
+ void PrepareBidiStreamingCall(
+ ClientContext* context, const TString& method,
+ ClientBidiReactor<RequestType, ResponseType>* reactor) {
+ stub_->PrepareBidiStreamingCallInternal(context, method, reactor);
+ }
+
+ private:
+ TemplatedGenericStub* stub_;
+ };
+
+ /// NOTE: The function experimental() is not stable public API. It is a view
+ /// to the experimental components of this class. It may be changed or removed
+ /// at any time.
+ experimental_type experimental() { return experimental_type(this); }
+
+ private:
+ std::shared_ptr<grpc::ChannelInterface> channel_;
+
+ void UnaryCallInternal(ClientContext* context, const TString& method,
+ const RequestType* request, ResponseType* response,
+ std::function<void(grpc::Status)> on_completion) {
+ internal::CallbackUnaryCall(
+ channel_.get(),
+ grpc::internal::RpcMethod(method.c_str(),
+ grpc::internal::RpcMethod::NORMAL_RPC),
+ context, request, response, std::move(on_completion));
+ }
+
+ void PrepareUnaryCallInternal(ClientContext* context,
+ const TString& method,
+ const RequestType* request,
+ ResponseType* response,
+ ClientUnaryReactor* reactor) {
+ internal::ClientCallbackUnaryFactory::Create<RequestType, ResponseType>(
+ channel_.get(),
+ grpc::internal::RpcMethod(method.c_str(),
+ grpc::internal::RpcMethod::NORMAL_RPC),
+ context, request, response, reactor);
+ }
+
+ void PrepareBidiStreamingCallInternal(
+ ClientContext* context, const TString& method,
+ ClientBidiReactor<RequestType, ResponseType>* reactor) {
+ internal::ClientCallbackReaderWriterFactory<RequestType, ResponseType>::
+ Create(channel_.get(),
+ grpc::internal::RpcMethod(
+ method.c_str(), grpc::internal::RpcMethod::BIDI_STREAMING),
+ context, reactor);
+ }
+
+ std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>
+ CallInternal(grpc::ChannelInterface* channel, ClientContext* context,
+ const TString& method, ::grpc::CompletionQueue* cq,
+ bool start, void* tag) {
+ return std::unique_ptr<ClientAsyncReaderWriter<RequestType, ResponseType>>(
+ internal::ClientAsyncReaderWriterFactory<RequestType, ResponseType>::
+ Create(
+ channel, cq,
+ grpc::internal::RpcMethod(
+ method.c_str(), grpc::internal::RpcMethod::BIDI_STREAMING),
+ context, start, tag));
+ }
+};
+
+typedef TemplatedGenericStub<grpc::ByteBuffer, grpc::ByteBuffer> GenericStub;
+
} // namespace grpc
#endif // GRPCPP_GENERIC_GENERIC_STUB_H
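A sketch of a by-name unary call through the callback interface shown above; the method path is a placeholder and serialization of the request/response into ByteBuffer is omitted.

    #include <future>
    #include <memory>
    #include <grpcpp/generic/generic_stub.h>
    #include <grpcpp/grpcpp.h>

    grpc::Status CallByName(std::shared_ptr<grpc::Channel> channel,
                            const grpc::ByteBuffer& request,
                            grpc::ByteBuffer* response) {
      grpc::GenericStub stub(channel);
      grpc::ClientContext ctx;
      std::promise<grpc::Status> done;
      // The callback runs once the RPC finishes; block here so the locals
      // (context and buffers) outlive the call.
      stub.experimental().UnaryCall(
          &ctx, "/my.package.MyService/MyMethod", &request, response,
          [&done](grpc::Status s) { done.set_value(std::move(s)); });
      return done.get_future().get();
    }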
diff --git a/contrib/libs/grpc/include/grpcpp/grpcpp.h b/contrib/libs/grpc/include/grpcpp/grpcpp.h
index 38cbfde6af..d37258cf3b 100644
--- a/contrib/libs/grpc/include/grpcpp/grpcpp.h
+++ b/contrib/libs/grpc/include/grpcpp/grpcpp.h
@@ -21,9 +21,9 @@
/// The gRPC C++ API mainly consists of the following classes:
/// <br>
/// - grpc::Channel, which represents the connection to an endpoint. See [the
-/// gRPC Concepts page](https://grpc.io/docs/what-is-grpc/core-concepts) for
-/// more details. Channels are created by the factory function
-/// grpc::CreateChannel.
+/// gRPC Concepts page](https://grpc.io/docs/what-is-grpc/core-concepts) for
+/// more details. Channels are created by the factory function
+/// grpc::CreateChannel.
///
/// - grpc::CompletionQueue, the producer-consumer queue used for all
/// asynchronous communication with the gRPC runtime.
@@ -63,7 +63,7 @@
namespace grpc {
/// Return gRPC library version.
-TString Version();
+TString Version();
} // namespace grpc
#endif // GRPCPP_GRPCPP_H
diff --git a/contrib/libs/grpc/include/grpcpp/health_check_service_interface.h b/contrib/libs/grpc/include/grpcpp/health_check_service_interface.h
index 16f342805c..de055a82ea 100644
--- a/contrib/libs/grpc/include/grpcpp/health_check_service_interface.h
+++ b/contrib/libs/grpc/include/grpcpp/health_check_service_interface.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,40 +19,40 @@
#ifndef GRPCPP_HEALTH_CHECK_SERVICE_INTERFACE_H
#define GRPCPP_HEALTH_CHECK_SERVICE_INTERFACE_H
-#include <grpcpp/support/config.h>
+#include <grpcpp/support/config.h>
namespace grpc {
const char kHealthCheckServiceInterfaceArg[] =
"grpc.health_check_service_interface";
-/// The gRPC server uses this interface to expose the health checking service
-/// without depending on protobuf.
-class HealthCheckServiceInterface {
- public:
- virtual ~HealthCheckServiceInterface() {}
-
- /// Set or change the serving status of the given \a service_name.
- virtual void SetServingStatus(const TString& service_name,
- bool serving) = 0;
- /// Apply to all registered service names.
- virtual void SetServingStatus(bool serving) = 0;
-
- /// Set all registered service names to not serving and prevent future
- /// state changes.
- virtual void Shutdown() {}
-};
-
-/// Enable/disable the default health checking service. This applies to all C++
-/// servers created afterwards. For each server, user can override the default
-/// with a HealthCheckServiceServerBuilderOption.
-/// NOT thread safe.
-void EnableDefaultHealthCheckService(bool enable);
-
-/// Returns whether the default health checking service is enabled.
-/// NOT thread safe.
-bool DefaultHealthCheckServiceEnabled();
-
+/// The gRPC server uses this interface to expose the health checking service
+/// without depending on protobuf.
+class HealthCheckServiceInterface {
+ public:
+ virtual ~HealthCheckServiceInterface() {}
+
+ /// Set or change the serving status of the given \a service_name.
+ virtual void SetServingStatus(const TString& service_name,
+ bool serving) = 0;
+ /// Apply to all registered service names.
+ virtual void SetServingStatus(bool serving) = 0;
+
+ /// Set all registered service names to not serving and prevent future
+ /// state changes.
+ virtual void Shutdown() {}
+};
+
+/// Enable/disable the default health checking service. This applies to all C++
+/// servers created afterwards. For each server, user can override the default
+/// with a HealthCheckServiceServerBuilderOption.
+/// NOT thread safe.
+void EnableDefaultHealthCheckService(bool enable);
+
+/// Returns whether the default health checking service is enabled.
+/// NOT thread safe.
+bool DefaultHealthCheckServiceEnabled();
+
} // namespace grpc
#endif // GRPCPP_HEALTH_CHECK_SERVICE_INTERFACE_H
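A small sketch of the interface above: enable the default implementation before the server is built, then toggle a service's status at runtime; the service name is a placeholder.

    #include <grpcpp/health_check_service_interface.h>
    #include <grpcpp/server.h>

    // Call once at startup, before ServerBuilder::BuildAndStart():
    //   grpc::EnableDefaultHealthCheckService(true);

    void MarkServing(grpc::Server* server, bool serving) {
      grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
      if (health != nullptr) {
        health->SetServingStatus("my.package.MyService", serving);
      }
    }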
diff --git a/contrib/libs/grpc/include/grpcpp/impl/channel_argument_option.h b/contrib/libs/grpc/include/grpcpp/impl/channel_argument_option.h
index 922ca95287..6e93199694 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/channel_argument_option.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/channel_argument_option.h
@@ -28,9 +28,9 @@
namespace grpc {
std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
- const TString& name, const TString& value);
+ const TString& name, const TString& value);
std::unique_ptr<ServerBuilderOption> MakeChannelArgumentOption(
- const TString& name, int value);
+ const TString& name, int value);
} // namespace grpc
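A sketch of the factory above used with ServerBuilder::SetOption; the argument key and value are illustrative.

    #include <grpcpp/impl/channel_argument_option.h>
    #include <grpcpp/server_builder.h>

    void LimitStreams(grpc::ServerBuilder* builder) {
      // Wraps the channel argument in a ServerBuilderOption.
      builder->SetOption(
          grpc::MakeChannelArgumentOption("grpc.max_concurrent_streams", 100));
    }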
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
index 155146e99f..ade9d05484 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
@@ -1,21 +1,21 @@
-# Welcome to `include/grpcpp/impl/codegen`
-
-## Why is this directory here?
-
-This directory exists so that generated code can include selected files upon
-which it depends without having to depend on the entire gRPC C++ library. This
-is particularly relevant for users of bazel, particularly if they use the
-multi-lingual `proto_library` target type. Generated code that uses this target
-only depends on the gRPC C++ targets associated with these header files, not the
-entire gRPC C++ codebase since that would make the build time of these types of
-targets excessively large (particularly when they are not even C++ specific).
-
-## What should user code do?
-
-User code should *not* include anything from this directory. Only generated code
-and gRPC library code should include contents from this directory. User code
-should instead include contents from the main `grpcpp` directory or its
-accessible subcomponents like `grpcpp/support`. It is possible that we may
-remove this directory altogether if the motivations for its existence are no
-longer strong enough (e.g., if most users migrate away from the `proto_library`
-target type or if the additional overhead of depending on gRPC C++ is not high).
+# Welcome to `include/grpcpp/impl/codegen`
+
+## Why is this directory here?
+
+This directory exists so that generated code can include selected files upon
+which it depends without having to depend on the entire gRPC C++ library. This
+is particularly relevant for users of bazel, particularly if they use the
+multi-lingual `proto_library` target type. Generated code that uses this target
+only depends on the gRPC C++ targets associated with these header files, not the
+entire gRPC C++ codebase since that would make the build time of these types of
+targets excessively large (particularly when they are not even C++ specific).
+
+## What should user code do?
+
+User code should *not* include anything from this directory. Only generated code
+and gRPC library code should include contents from this directory. User code
+should instead include contents from the main `grpcpp` directory or its
+accessible subcomponents like `grpcpp/support`. It is possible that we may
+remove this directory altogether if the motivations for its existence are no
+longer strong enough (e.g., if most users migrate away from the `proto_library`
+target type or if the additional overhead of depending on gRPC C++ is not high).
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
index 3f2b8fc20a..a812b086a2 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
@@ -19,33 +19,33 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpcpp/impl/codegen/async_stream.h>
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpcpp/impl/codegen/async_stream.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
-#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_callback.h>
#include <grpcpp/impl/codegen/server_callback_handlers.h>
struct grpc_server;
namespace grpc {
-typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
+typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericServerAsyncReaderWriter;
-typedef ServerAsyncResponseWriter<ByteBuffer> GenericServerAsyncResponseWriter;
-typedef ServerAsyncReader<ByteBuffer, ByteBuffer> GenericServerAsyncReader;
-typedef ServerAsyncWriter<ByteBuffer> GenericServerAsyncWriter;
+typedef ServerAsyncResponseWriter<ByteBuffer> GenericServerAsyncResponseWriter;
+typedef ServerAsyncReader<ByteBuffer, ByteBuffer> GenericServerAsyncReader;
+typedef ServerAsyncWriter<ByteBuffer> GenericServerAsyncWriter;
-class GenericServerContext final : public ServerContext {
+class GenericServerContext final : public ServerContext {
public:
- const TString& method() const { return method_; }
- const TString& host() const { return host_; }
+ const TString& method() const { return method_; }
+ const TString& host() const { return host_; }
private:
- friend class ServerInterface;
+ friend class ServerInterface;
- TString method_;
- TString host_;
+ TString method_;
+ TString host_;
};
// A generic service at the server side accepts all RPC methods and hosts. It is
@@ -71,33 +71,33 @@ class AsyncGenericService final {
void RequestCall(GenericServerContext* ctx,
GenericServerAsyncReaderWriter* reader_writer,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag);
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag);
private:
- friend class grpc::Server;
- grpc::Server* server_;
+ friend class grpc::Server;
+ grpc::Server* server_;
};
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
/// \a ServerGenericBidiReactor is the reactor class for bidi streaming RPCs
/// invoked on a CallbackGenericService. It is just a ServerBidi reactor with
/// ByteBuffer arguments.
-using ServerGenericBidiReactor = ServerBidiReactor<ByteBuffer, ByteBuffer>;
+using ServerGenericBidiReactor = ServerBidiReactor<ByteBuffer, ByteBuffer>;
-class GenericCallbackServerContext final : public grpc::CallbackServerContext {
+class GenericCallbackServerContext final : public grpc::CallbackServerContext {
public:
- const TString& method() const { return method_; }
- const TString& host() const { return host_; }
+ const TString& method() const { return method_; }
+ const TString& host() const { return host_; }
private:
- friend class ::grpc::Server;
+ friend class ::grpc::Server;
- TString method_;
- TString host_;
+ TString method_;
+ TString host_;
};
/// \a CallbackGenericService is the base class for generic services implemented
@@ -122,21 +122,21 @@ class CallbackGenericService {
}
private:
- friend class grpc::Server;
+ friend class grpc::Server;
- internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() {
- return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>(
- [this](::grpc::CallbackServerContext* ctx) {
+ internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() {
+ return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>(
+ [this](::grpc::CallbackServerContext* ctx) {
return CreateReactor(static_cast<GenericCallbackServerContext*>(ctx));
});
}
- grpc::Server* server_{nullptr};
+ grpc::Server* server_{nullptr};
};
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-#endif
+#endif
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
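A hedged sketch of the callback-based generic service declared above. The subclass name and the "finish every call with OK" behaviour are assumptions; depending on whether the callback API is still experimental in a given build, registration goes through either builder.RegisterCallbackGenericService() or builder.experimental().RegisterCallbackGenericService().

    #include <grpcpp/grpcpp.h>

    // Accepts every method/host pair and immediately finishes the call with OK.
    class AcceptAllGenericService : public grpc::CallbackGenericService {
      grpc::ServerGenericBidiReactor* CreateReactor(
          grpc::GenericCallbackServerContext* /*ctx*/) override {
        class Reactor : public grpc::ServerGenericBidiReactor {
          void OnDone() override { delete this; }
        };
        auto* reactor = new Reactor;
        reactor->Finish(grpc::Status::OK);  // queued until the reactor is bound
        return reactor;
      }
    };

    void RegisterGenericService(grpc::ServerBuilder& builder) {
      static AcceptAllGenericService generic_service;
      builder.RegisterCallbackGenericService(&generic_service);
    }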
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
index 3a848861ca..aaee93df93 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,1114 +18,1114 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
namespace internal {
-/// Common interface for all client side asynchronous streaming.
-class ClientAsyncStreamingInterface {
- public:
- virtual ~ClientAsyncStreamingInterface() {}
-
- /// Start the call that was set up by the constructor, but only if the
- /// constructor was invoked through the "Prepare" API which doesn't actually
- /// start the call
- virtual void StartCall(void* tag) = 0;
-
- /// Request notification of the reading of the initial metadata. Completion
- /// will be notified by \a tag on the associated completion queue.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a AsyncReaderInterface::Read method.
- ///
- /// \param[in] tag Tag identifying this request.
- virtual void ReadInitialMetadata(void* tag) = 0;
-
- /// Indicate that the stream is to be finished and request notification for
- /// when the call has been ended.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method exactly once when both:
- /// * the client side has no more message to send
- /// (this can be declared implicitly by calling this method, or
- /// explicitly through an earlier call to the <i>WritesDone</i> method
- /// of the class in use, e.g. \a ClientAsyncWriterInterface::WritesDone or
- /// \a ClientAsyncReaderWriterInterface::WritesDone).
- /// * there are no more messages to be received from the server (this can
- /// be known implicitly by the calling code, or explicitly from an
- /// earlier call to \a AsyncReaderInterface::Read that yielded a failed
- /// result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- ///
- /// The tag will be returned when either:
- /// - all incoming messages have been read and the server has returned
- /// a status.
- /// - the server has returned a non-OK status.
- /// - the call failed for some reason and the library generated a
- /// status.
- ///
- /// Note that implementations of this method attempt to receive initial
- /// metadata from the server if initial metadata hasn't yet been received.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[out] status To be updated with the operation status.
- virtual void Finish(::grpc::Status* status, void* tag) = 0;
-};
-
-/// An interface that yields a sequence of messages of type \a R.
+/// Common interface for all client side asynchronous streaming.
+class ClientAsyncStreamingInterface {
+ public:
+ virtual ~ClientAsyncStreamingInterface() {}
+
+ /// Start the call that was set up by the constructor, but only if the
+ /// constructor was invoked through the "Prepare" API which doesn't actually
+ /// start the call
+ virtual void StartCall(void* tag) = 0;
+
+ /// Request notification of the reading of the initial metadata. Completion
+ /// will be notified by \a tag on the associated completion queue.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a AsyncReaderInterface::Read method.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ virtual void ReadInitialMetadata(void* tag) = 0;
+
+ /// Indicate that the stream is to be finished and request notification for
+ /// when the call has been ended.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method exactly once when both:
+ /// * the client side has no more message to send
+ /// (this can be declared implicitly by calling this method, or
+ /// explicitly through an earlier call to the <i>WritesDone</i> method
+ /// of the class in use, e.g. \a ClientAsyncWriterInterface::WritesDone or
+ /// \a ClientAsyncReaderWriterInterface::WritesDone).
+ /// * there are no more messages to be received from the server (this can
+ /// be known implicitly by the calling code, or explicitly from an
+ /// earlier call to \a AsyncReaderInterface::Read that yielded a failed
+ /// result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ ///
+ /// The tag will be returned when either:
+ /// - all incoming messages have been read and the server has returned
+ /// a status.
+ /// - the server has returned a non-OK status.
+ /// - the call failed for some reason and the library generated a
+ /// status.
+ ///
+ /// Note that implementations of this method attempt to receive initial
+ /// metadata from the server if initial metadata hasn't yet been received.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[out] status To be updated with the operation status.
+ virtual void Finish(::grpc::Status* status, void* tag) = 0;
+};
+
+/// An interface that yields a sequence of messages of type \a R.
template <class R>
-class AsyncReaderInterface {
- public:
- virtual ~AsyncReaderInterface() {}
-
- /// Read a message of type \a R into \a msg. Completion will be notified by \a
- /// tag on the associated completion queue.
- /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
- /// should not be called concurrently with other streaming APIs
- /// on the same stream. It is not meaningful to call it concurrently
- /// with another \a AsyncReaderInterface::Read on the same stream since reads
- /// on the same stream are delivered in order.
- ///
- /// \param[out] msg Where to eventually store the read message.
- /// \param[in] tag The tag identifying the operation.
- ///
- /// Side effect: note that this method attempt to receive initial metadata for
- /// a stream if it hasn't yet been received.
- virtual void Read(R* msg, void* tag) = 0;
-};
-
-/// An interface that can be fed a sequence of messages of type \a W.
+class AsyncReaderInterface {
+ public:
+ virtual ~AsyncReaderInterface() {}
+
+ /// Read a message of type \a R into \a msg. Completion will be notified by \a
+ /// tag on the associated completion queue.
+ /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+ /// should not be called concurrently with other streaming APIs
+ /// on the same stream. It is not meaningful to call it concurrently
+ /// with another \a AsyncReaderInterface::Read on the same stream since reads
+ /// on the same stream are delivered in order.
+ ///
+ /// \param[out] msg Where to eventually store the read message.
+ /// \param[in] tag The tag identifying the operation.
+ ///
+ /// Side effect: note that this method attempts to receive initial metadata for
+ /// a stream if it hasn't yet been received.
+ virtual void Read(R* msg, void* tag) = 0;
+};
+
+/// An interface that can be fed a sequence of messages of type \a W.
template <class W>
-class AsyncWriterInterface {
- public:
- virtual ~AsyncWriterInterface() {}
-
- /// Request the writing of \a msg with identifying tag \a tag.
- ///
- /// Only one write may be outstanding at any given time. This means that
- /// after calling Write, one must wait to receive \a tag from the completion
- /// queue BEFORE calling Write again.
- /// This is thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] tag The tag identifying the operation.
- virtual void Write(const W& msg, void* tag) = 0;
-
- /// Request the writing of \a msg using WriteOptions \a options with
- /// identifying tag \a tag.
- ///
- /// Only one write may be outstanding at any given time. This means that
- /// after calling Write, one must wait to receive \a tag from the completion
- /// queue BEFORE calling Write again.
- /// WriteOptions \a options is used to set the write options of this message.
- /// This is thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] tag The tag identifying the operation.
- virtual void Write(const W& msg, ::grpc::WriteOptions options, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with the writing
- /// of trailing metadata, using WriteOptions \a options with
- /// identifying tag \a tag.
- ///
- /// For client, WriteLast is equivalent of performing Write and
- /// WritesDone in a single step.
- /// For server, WriteLast buffers the \a msg. The writing of \a msg is held
- /// until Finish is called, where \a msg and trailing metadata are coalesced
- /// and write is initiated. Note that WriteLast can only buffer \a msg up to
- /// the flow control window size. If \a msg size is larger than the window
- /// size, it will be sent on wire without buffering.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] tag The tag identifying the operation.
- void WriteLast(const W& msg, ::grpc::WriteOptions options, void* tag) {
- Write(msg, options.set_last_message(), tag);
- }
-};
-
+class AsyncWriterInterface {
+ public:
+ virtual ~AsyncWriterInterface() {}
+
+ /// Request the writing of \a msg with identifying tag \a tag.
+ ///
+ /// Only one write may be outstanding at any given time. This means that
+ /// after calling Write, one must wait to receive \a tag from the completion
+ /// queue BEFORE calling Write again.
+ /// This is thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void Write(const W& msg, void* tag) = 0;
+
+ /// Request the writing of \a msg using WriteOptions \a options with
+ /// identifying tag \a tag.
+ ///
+ /// Only one write may be outstanding at any given time. This means that
+ /// after calling Write, one must wait to receive \a tag from the completion
+ /// queue BEFORE calling Write again.
+ /// WriteOptions \a options is used to set the write options of this message.
+ /// This is thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void Write(const W& msg, ::grpc::WriteOptions options, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with the writing
+ /// of trailing metadata, using WriteOptions \a options with
+ /// identifying tag \a tag.
+ ///
+ /// For client, WriteLast is equivalent to performing Write and
+ /// WritesDone in a single step.
+ /// For server, WriteLast buffers the \a msg. The writing of \a msg is held
+ /// until Finish is called, where \a msg and trailing metadata are coalesced
+ /// and write is initiated. Note that WriteLast can only buffer \a msg up to
+ /// the flow control window size. If \a msg size is larger than the window
+ /// size, it will be sent on wire without buffering.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] tag The tag identifying the operation.
+ void WriteLast(const W& msg, ::grpc::WriteOptions options, void* tag) {
+ Write(msg, options.set_last_message(), tag);
+ }
+};
+
} // namespace internal
template <class R>
-class ClientAsyncReaderInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncReaderInterface<R> {};
+class ClientAsyncReaderInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncReaderInterface<R> {};
-namespace internal {
+namespace internal {
template <class R>
-class ClientAsyncReaderFactory {
- public:
- /// Create a stream object.
- /// Write the first request out if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started and
- /// \a request has been written out. If \a start is not set, \a tag must be
- /// nullptr and the actual call must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- template <class W>
- static ClientAsyncReader<R>* Create(::grpc::ChannelInterface* channel,
- ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const W& request, bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncReader<R>)))
- ClientAsyncReader<R>(call, context, request, start, tag);
- }
-};
-} // namespace internal
-
-/// Async client-side API for doing server-streaming RPCs,
-/// where the incoming message stream coming from the server has
-/// messages of type \a R.
-template <class R>
-class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReader));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
- /// method for semantics.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server,
- /// the \a ClientContext associated with this call is updated, and the
- /// calling code can access the received metadata through the
- /// \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- read_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- read_ops_.RecvInitialMetadata(context_);
- }
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata received from the server.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncReaderFactory<R>;
- template <class W>
- ClientAsyncReader(::grpc::internal::Call call, ::grpc::ClientContext* context,
- const W& request, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
- init_ops_.ClientSendClose();
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- init_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- init_ops_.set_output_tag(tag);
- call_.PerformOps(&init_ops_);
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- init_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
-/// Common interface for client side asynchronous writing.
+class ClientAsyncReaderFactory {
+ public:
+ /// Create a stream object.
+ /// Write the first request out if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started and
+ /// \a request has been written out. If \a start is not set, \a tag must be
+ /// nullptr and the actual call must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ template <class W>
+ static ClientAsyncReader<R>* Create(::grpc::ChannelInterface* channel,
+ ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const W& request, bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncReader<R>)))
+ ClientAsyncReader<R>(call, context, request, start, tag);
+ }
+};
+} // namespace internal
+
+/// Async client-side API for doing server-streaming RPCs,
+/// where the incoming message stream coming from the server has
+/// messages of type \a R.
+template <class R>
+class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReader));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
+ /// method for semantics.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server,
+ /// the \a ClientContext associated with this call is updated, and the
+ /// calling code can access the received metadata through the
+ /// \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ read_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ read_ops_.RecvInitialMetadata(context_);
+ }
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata received from the server.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncReaderFactory<R>;
+ template <class W>
+ ClientAsyncReader(::grpc::internal::Call call, ::grpc::ClientContext* context,
+ const W& request, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
+ init_ops_.ClientSendClose();
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ init_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ init_ops_.set_output_tag(tag);
+ call_.PerformOps(&init_ops_);
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ init_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
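+To make the tag-based flow above concrete, a sketch of a server-streaming call driven through ClientAsyncReader. The routeguide types and PrepareAsyncListFeatures() are assumptions borrowed from the shape of gRPC's route_guide example; only the grpc:: classes come from these headers.

    #include <grpcpp/grpcpp.h>

    void ListFeatures(routeguide::RouteGuide::Stub* stub) {
      grpc::ClientContext context;
      grpc::CompletionQueue cq;
      routeguide::Rectangle request;
      routeguide::Feature feature;
      grpc::Status status;

      // Prepare + StartCall: tag 1 completes once the call has been started.
      std::unique_ptr<grpc::ClientAsyncReader<routeguide::Feature>> reader(
          stub->PrepareAsyncListFeatures(&context, request, &cq));
      reader->StartCall(reinterpret_cast<void*>(1));

      void* tag = nullptr;
      bool ok = false;
      cq.Next(&tag, &ok);                      // wait for StartCall's tag
      while (ok) {
        reader->Read(&feature, reinterpret_cast<void*>(2));
        cq.Next(&tag, &ok);                    // ok == false signals end of stream
      }
      reader->Finish(&status, reinterpret_cast<void*>(3));
      cq.Next(&tag, &ok);                      // status now holds the RPC result
    }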
+/// Common interface for client side asynchronous writing.
template <class W>
-class ClientAsyncWriterInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W> {
- public:
- /// Signal the client is done with the writes (half-close the client stream).
- /// Thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// \param[in] tag The tag identifying the operation.
- virtual void WritesDone(void* tag) = 0;
-};
-
-namespace internal {
+class ClientAsyncWriterInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W> {
+ public:
+ /// Signal the client is done with the writes (half-close the client stream).
+ /// Thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WritesDone(void* tag) = 0;
+};
+
+namespace internal {
+template <class W>
+class ClientAsyncWriterFactory {
+ public:
+ /// Create a stream object.
+ /// Start the RPC if \a start is set
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+ /// initial metadata sent) and \a request has been written out.
+ /// If \a start is not set, \a tag must be nullptr and the actual call
+ /// must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ /// \a response will be filled in with the single expected response
+ /// message from the server upon a successful call to the \a Finish
+ /// method of this instance.
+ template <class R>
+ static ClientAsyncWriter<W>* Create(::grpc::ChannelInterface* channel,
+ ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ R* response, bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncWriter<W>)))
+ ClientAsyncWriter<W>(call, context, response, start, tag);
+ }
+};
+} // namespace internal
+
+/// Async API on the client side for doing client-streaming RPCs,
+/// where the outgoing message stream going to the server contains
+/// messages of type \a W.
template <class W>
-class ClientAsyncWriterFactory {
- public:
- /// Create a stream object.
- /// Start the RPC if \a start is set
- /// \a tag will be notified on \a cq when the call has been started (i.e.
- /// intitial metadata sent) and \a request has been written out.
- /// If \a start is not set, \a tag must be nullptr and the actual call
- /// must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- /// \a response will be filled in with the single expected response
- /// message from the server upon a successful call to the \a Finish
- /// method of this instance.
- template <class R>
- static ClientAsyncWriter<W>* Create(::grpc::ChannelInterface* channel,
- ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- R* response, bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncWriter<W>)))
- ClientAsyncWriter<W>(call, context, response, start, tag);
- }
-};
-} // namespace internal
-
-/// Async API on the client side for doing client-streaming RPCs,
-/// where the outgoing message stream going to the server contains
-/// messages of type \a W.
-template <class W>
-class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncWriter));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
- /// semantics.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server, the \a ClientContext
- /// associated with this call is updated, and the calling code can access
- /// the received metadata through the \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- write_ops_.ClientSendClose();
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata received from the server.
- /// - attempts to fill in the \a response parameter passed to this class's
- /// constructor with the server's response message.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncWriterFactory<W>;
- template <class R>
- ClientAsyncWriter(::grpc::internal::Call call, ::grpc::ClientContext* context,
- R* response, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- // if corked bit is set in context, we just keep the initial metadata
- // buffered up to coalesce with later message send. No op is performed.
- if (!context_->initial_metadata_corked_) {
- write_ops_.set_output_tag(tag);
- call_.PerformOps(&write_ops_);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpGenericRecvMessage,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
-/// Async client-side interface for bi-directional streaming,
-/// where the client-to-server message stream has messages of type \a W,
-/// and the server-to-client message stream has messages of type \a R.
+class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncWriter));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server, the \a ClientContext
+ /// associated with this call is updated, and the calling code can access
+ /// the received metadata through the \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ write_ops_.ClientSendClose();
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata received from the server.
+ /// - attempts to fill in the \a response parameter passed to this class's
+ /// constructor with the server's response message.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncWriterFactory<W>;
+ template <class R>
+ ClientAsyncWriter(::grpc::internal::Call call, ::grpc::ClientContext* context,
+ R* response, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ // if corked bit is set in context, we just keep the initial metadata
+ // buffered up to coalesce with later message send. No op is performed.
+ if (!context_->initial_metadata_corked_) {
+ write_ops_.set_output_tag(tag);
+ call_.PerformOps(&write_ops_);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpGenericRecvMessage,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
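+A matching sketch for the client-streaming side: one Write is kept in flight at a time, WritesDone() half-closes the stream, and Finish() later fills in both the response and the status. The routeguide types and PrepareAsyncRecordRoute() are again assumptions in the shape of the route_guide example.

    #include <vector>
    #include <grpcpp/grpcpp.h>

    void RecordRoute(routeguide::RouteGuide::Stub* stub,
                     const std::vector<routeguide::Point>& points) {
      grpc::ClientContext context;
      grpc::CompletionQueue cq;
      routeguide::RouteSummary summary;   // filled in by Finish on success
      grpc::Status status;

      std::unique_ptr<grpc::ClientAsyncWriter<routeguide::Point>> writer(
          stub->PrepareAsyncRecordRoute(&context, &summary, &cq));
      writer->StartCall(reinterpret_cast<void*>(1));

      void* tag = nullptr;
      bool ok = false;
      cq.Next(&tag, &ok);                                   // StartCall completed
      for (const auto& point : points) {
        if (!ok) break;
        writer->Write(point, reinterpret_cast<void*>(2));   // one write in flight
        cq.Next(&tag, &ok);                                 // wait before next Write
      }
      writer->WritesDone(reinterpret_cast<void*>(3));       // half-close the stream
      cq.Next(&tag, &ok);
      writer->Finish(&status, reinterpret_cast<void*>(4));
      cq.Next(&tag, &ok);                                   // summary and status set
    }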
+/// Async client-side interface for bi-directional streaming,
+/// where the client-to-server message stream has messages of type \a W,
+/// and the server-to-client message stream has messages of type \a R.
+template <class W, class R>
+class ClientAsyncReaderWriterInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W>,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Signal the client is done with the writes (half-close the client stream).
+ /// Thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WritesDone(void* tag) = 0;
+};
+
+namespace internal {
template <class W, class R>
-class ClientAsyncReaderWriterInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W>,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Signal the client is done with the writes (half-close the client stream).
- /// Thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// \param[in] tag The tag identifying the operation.
- virtual void WritesDone(void* tag) = 0;
-};
-
-namespace internal {
+class ClientAsyncReaderWriterFactory {
+ public:
+ /// Create a stream object.
+ /// Start the RPC request if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+ /// initial metadata sent). If \a start is not set, \a tag must be
+ /// nullptr and the actual call must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ static ClientAsyncReaderWriter<W, R>* Create(
+ ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
+ ClientAsyncReaderWriter<W, R>(call, context, start, tag);
+ }
+};
+} // namespace internal
+
+/// Async client-side interface for bi-directional streaming,
+/// where the outgoing message stream going to the server
+/// has messages of type \a W, and the incoming message stream coming
+/// from the server has messages of type \a R.
template <class W, class R>
-class ClientAsyncReaderWriterFactory {
- public:
- /// Create a stream object.
- /// Start the RPC request if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started (i.e.
- /// intitial metadata sent). If \a start is not set, \a tag must be
- /// nullptr and the actual call must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- static ClientAsyncReaderWriter<W, R>* Create(
- ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
- bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
-
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
- ClientAsyncReaderWriter<W, R>(call, context, start, tag);
- }
-};
-} // namespace internal
-
-/// Async client-side interface for bi-directional streaming,
-/// where the outgoing message stream going to the server
-/// has messages of type \a W, and the incoming message stream coming
-/// from the server has messages of type \a R.
+class ClientAsyncReaderWriter final
+ : public ClientAsyncReaderWriterInterface<W, R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReaderWriter));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
+ /// for semantics of this method.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server, the \a ClientContext
+ /// is updated with it, and then the received initial metadata can
+ /// be accessed through this \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ read_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ read_ops_.RecvInitialMetadata(context_);
+ }
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ write_ops_.ClientSendClose();
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ /// Side effect
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncReaderWriterFactory<W, R>;
+ ClientAsyncReaderWriter(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ // if corked bit is set in context, we just keep the initial metadata
+ // buffered up to coalesce with later message send. No op is performed.
+ if (!context_->initial_metadata_corked_) {
+ write_ops_.set_output_tag(tag);
+ call_.PerformOps(&write_ops_);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
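+For the bi-directional case the same rules combine: a Read and a Write may be outstanding at the same time (they use separate op sets and tags), but only one of each, and WritesDone/Finish close the call. A compact sketch below assumes a generated PrepareAsyncRouteChat() method and a server that echoes one note back, as in the route_guide example; it sequences the operations one at a time for simplicity.

    #include <grpcpp/grpcpp.h>

    void RouteChatOnce(routeguide::RouteGuide::Stub* stub,
                       const routeguide::RouteNote& note) {
      grpc::ClientContext context;
      grpc::CompletionQueue cq;
      routeguide::RouteNote incoming;
      grpc::Status status;

      std::unique_ptr<grpc::ClientAsyncReaderWriter<routeguide::RouteNote,
                                                    routeguide::RouteNote>>
          stream(stub->PrepareAsyncRouteChat(&context, &cq));
      stream->StartCall(reinterpret_cast<void*>(1));
      void* tag = nullptr;
      bool ok = false;
      cq.Next(&tag, &ok);                               // call started

      stream->Write(note, reinterpret_cast<void*>(2));  // send one note
      cq.Next(&tag, &ok);
      stream->WritesDone(reinterpret_cast<void*>(3));   // half-close
      cq.Next(&tag, &ok);
      stream->Read(&incoming, reinterpret_cast<void*>(4));
      cq.Next(&tag, &ok);                               // ok == false if no reply
      stream->Finish(&status, reinterpret_cast<void*>(5));
      cq.Next(&tag, &ok);                               // trailing status received
    }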
template <class W, class R>
-class ClientAsyncReaderWriter final
- : public ClientAsyncReaderWriterInterface<W, R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReaderWriter));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
- /// for semantics of this method.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server, the \a ClientContext
- /// is updated with it, and then the receiving initial metadata can
- /// be accessed through this \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- read_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- read_ops_.RecvInitialMetadata(context_);
- }
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- write_ops_.ClientSendClose();
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- /// Side effect
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncReaderWriterFactory<W, R>;
- ClientAsyncReaderWriter(::grpc::internal::Call call,
- ::grpc::ClientContext* context, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- // if corked bit is set in context, we just keep the initial metadata
- // buffered up to coalesce with later message send. No op is performed.
- if (!context_->initial_metadata_corked_) {
- write_ops_.set_output_tag(tag);
- call_.PerformOps(&write_ops_);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
+class ServerAsyncReaderInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code
+ /// and also send out \a msg response to the client.
+ /// Request notification for when the server has sent the response and the
+ /// appropriate signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when:
+ /// * all messages from the client have been received (either known
+ /// implicitly, or explicitly because a previous
+ /// \a AsyncReaderInterface::Read operation with a non-ok result,
+ /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if
+ /// some failure occurred when trying to do so.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg or \a status, so it
+ /// is safe to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ /// \param[in] msg To be sent to the client as the response for this call.
+ virtual void Finish(const W& msg, const ::grpc::Status& status,
+ void* tag) = 0;
+
+ /// Indicate that the stream is to be finished with a certain
+ /// non-OK status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// This call is meant to end the call with some error, and can be called at
+ /// any point that the server would like to "fail" the call (though note
+ /// this shouldn't be called concurrently with any other "sending" call, like
+ /// \a AsyncWriterInterface::Write).
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), and status, or if some failure occurred
+ /// when trying to do so.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe to
+ /// deallocate once FinishWithError returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ /// - Note: \a status must have a non-OK code.
+ virtual void FinishWithError(const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing client-streaming RPCs,
+/// where the incoming message stream from the client has messages of type \a R,
+/// and the single response message sent from the server is type \a W.
template <class W, class R>
-class ServerAsyncReaderInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Indicate that the stream is to be finished with a certain status code
- /// and also send out \a msg response to the client.
- /// Request notification for when the server has sent the response and the
- /// appropriate signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when:
- /// * all messages from the client have been received (either known
- /// implictly, or explicitly because a previous
- /// \a AsyncReaderInterface::Read operation with a non-ok result,
- /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if
- /// some failure occurred when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg or \a status, so it
- /// is safe to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- /// \param[in] msg To be sent to the client as the response for this call.
- virtual void Finish(const W& msg, const ::grpc::Status& status,
- void* tag) = 0;
-
- /// Indicate that the stream is to be finished with a certain
- /// non-OK status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// This call is meant to end the call with some error, and can be called at
- /// any point that the server would like to "fail" the call (though note
- /// this shouldn't be called concurrently with any other "sending" call, like
- /// \a AsyncWriterInterface::Write).
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), and status, or if some failure occurred
- /// when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once FinishWithError returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- /// - Note: \a status must have a non-OK code.
- virtual void FinishWithError(const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing client-streaming RPCs,
-/// where the incoming message stream from the client has messages of type \a R,
-/// and the single response message sent from the server is of type \a W.
-template <class W, class R>
-class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
- public:
- explicit ServerAsyncReader(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- read_ops_.set_output_tag(tag);
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- /// See the \a ServerAsyncReaderInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent.
- /// - uses the \a ServerContext associated with this call to send possible
- /// initial and trailing metadata.
- ///
- /// Note: \a msg is not sent if \a status has a non-OK code.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once Finish returns.
- void Finish(const W& msg, const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- // The response is dropped if the status is not OK.
- if (status.ok()) {
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
- finish_ops_.SendMessage(msg));
- } else {
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- }
- call_.PerformOps(&finish_ops_);
- }
-
- /// See the \a ServerAsyncReaderInterface.FinishWithError method for semantics.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent.
- /// - uses the \a ServerContext associated with this call to send possible
- /// initial and trailing metadata.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once FinishWithError returns.
- void FinishWithError(const ::grpc::Status& status, void* tag) override {
- GPR_CODEGEN_ASSERT(!status.ok());
- finish_ops_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
+class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
+ public:
+ explicit ServerAsyncReader(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ read_ops_.set_output_tag(tag);
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent.
+ /// - uses the \a ServerContext associated with this call to send possible
+ /// initial and trailing metadata.
+ ///
+ /// Note: \a msg is not sent if \a status has a non-OK code.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once Finish returns.
+ void Finish(const W& msg, const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ // The response is dropped if the status is not OK.
+ if (status.ok()) {
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
+ finish_ops_.SendMessage(msg));
+ } else {
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ }
+ call_.PerformOps(&finish_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderInterface.FinishWithError method for semantics.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent.
+ /// - uses the \a ServerContext associated with this call to send possible
+ /// initial and trailing metadata.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once FinishWithError returns.
+ void FinishWithError(const ::grpc::Status& status, void* tag) override {
+ GPR_CODEGEN_ASSERT(!status.ok());
+ finish_ops_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
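A rough usage sketch for ServerAsyncReader handling a single client-streaming call. The service and message types (math::Math::AsyncService, math::Num, math::Sum) and their field accessors stand in for protoc-generated code and are hypothetical; error handling and shutdown are omitted.

#include <grpcpp/grpcpp.h>

void HandleOneAccumulate(math::Math::AsyncService* service,
                         grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  grpc::ServerAsyncReader<math::Sum, math::Num> reader(&ctx);
  // Ask the runtime for the next Accumulate call; tag 1 completes once a
  // client has actually started one.
  service->RequestAccumulate(&ctx, &reader, cq, cq, reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq->Next(&tag, &ok);  // call started (ok assumed true in this sketch)

  math::Num num;
  long total = 0;
  reader.Read(&num, reinterpret_cast<void*>(2));
  // A Read that completes with ok == false means the client is done writing.
  while (cq->Next(&tag, &ok) && ok) {
    total += num.value();
    reader.Read(&num, reinterpret_cast<void*>(2));
  }
  // All reads have drained, so the single response and status may be sent.
  math::Sum sum;
  sum.set_value(total);
  reader.Finish(sum, grpc::Status::OK, reinterpret_cast<void*>(3));
  cq->Next(&tag, &ok);  // wait for Finish to complete
}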
template <class W>
-class ServerAsyncWriterInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W> {
- public:
- /// Indicate that the stream is to be finished with a certain status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when either:
- /// * all messages from the client have been received (either known
- /// implicitly, or explicitly because a previous
- /// \a AsyncReaderInterface::Read operation completed with a non-ok result,
- /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- /// * it is desired to end the call early with some non-OK status code.
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if
- /// some failure occurred when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with trailing metadata which
- /// contains \a status, using WriteOptions options with
- /// identifying tag \a tag.
- ///
- /// WriteAndFinish is equivalent to performing WriteLast and Finish
- /// in a single step.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] status The Status that server returns to client.
- /// \param[in] tag The tag identifying the operation.
- virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing server streaming RPCs,
-/// where the outgoing message stream from the server has messages of type \a W.
+class ServerAsyncWriterInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when either:
+ /// * all messages from the client have been received (either known
+ /// implicitly, or explicitly because a previous
+ /// \a AsyncReaderInterface::Read operation completed with a non-ok result,
+ /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ /// * it is desired to end the call early with some non-OK status code.
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if
+ /// some failure occurred when trying to do so.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with trailing metadata which
+ /// contains \a status, using WriteOptions options with
+ /// identifying tag \a tag.
+ ///
+ /// WriteAndFinish is equivalent to performing WriteLast and Finish
+ /// in a single step.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] status The Status that server returns to client.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing server streaming RPCs,
+/// where the outgoing message stream from the server has messages of type \a W.
template <class W>
-class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
- public:
- explicit ServerAsyncWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
-
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncWriterInterface.WriteAndFinish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used
- /// for sending trailing (and initial) metadata to the client.
- ///
- /// Note: \a status must have an OK code.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- options.set_buffer_hint();
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncWriterInterface.Finish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used for sending
- /// trailing (and initial if not already sent) metadata to the client.
- ///
- /// Note: there are no restrictions on the code of \a status;
- /// it may be non-OK.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- void Finish(const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&finish_ops_);
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- template <class T>
- void EnsureInitialMetadataSent(T* ops) {
- if (!ctx_->sent_initial_metadata_) {
- ops->SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops->set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
-/// Server-side interface for asynchronous bi-directional streaming.
+class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
+ public:
+ explicit ServerAsyncWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncWriterInterface.WriteAndFinish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used
+ /// for sending trailing (and initial) metadata to the client.
+ ///
+ /// Note: \a status must have an OK code.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ options.set_buffer_hint();
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncWriterInterface.Finish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used for sending
+ /// trailing (and initial if not already sent) metadata to the client.
+ ///
+ /// Note: there are no restrictions on the code of \a status;
+ /// it may be non-OK.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ void Finish(const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&finish_ops_);
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ template <class T>
+ void EnsureInitialMetadataSent(T* ops) {
+ if (!ctx_->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops->set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
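A rough usage sketch for ServerAsyncWriter answering one server-streaming call, using WriteAndFinish to coalesce the last message with the trailing status. The route::Guide::AsyncService, route::Rect and route::Feature names are hypothetical protoc-generated stand-ins; error handling is omitted.

#include <grpcpp/grpcpp.h>

void HandleOneListFeatures(route::Guide::AsyncService* service,
                           grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  route::Rect rect;
  grpc::ServerAsyncWriter<route::Feature> writer(&ctx);
  service->RequestListFeatures(&ctx, &rect, &writer, cq, cq,
                               reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq->Next(&tag, &ok);  // the request message has been received into 'rect'

  route::Feature feature;
  feature.set_name("first");
  writer.Write(feature, reinterpret_cast<void*>(2));
  cq->Next(&tag, &ok);  // first Write completed

  feature.set_name("last");
  // Sends the final message, the trailing metadata and the status in one batch.
  writer.WriteAndFinish(feature, grpc::WriteOptions(), grpc::Status::OK,
                        reinterpret_cast<void*>(3));
  cq->Next(&tag, &ok);  // stream finished
}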
+/// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R>
-class ServerAsyncReaderWriterInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W>,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Indicate that the stream is to be finished with a certain status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when either:
- /// * all messages from the client have been received (either known
- /// implicitly, or explicitly because a previous
- /// \a AsyncReaderInterface::Read operation completed with a non-ok result,
- /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- /// * it is desired to end the call early with some non-OK status code.
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if some
- /// failure occurred when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with trailing metadata which
- /// contains \a status, using WriteOptions options with
- /// identifying tag \a tag.
- ///
- /// WriteAndFinish is equivalent to performing WriteLast and Finish in a
- /// single step.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] status The Status that server returns to client.
- /// \param[in] tag The tag identifying the operation.
- virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing bidirectional streaming RPCs,
-/// where the incoming message stream coming from the client has messages of
-/// type \a R, and the outgoing message stream coming from the server has
-/// messages of type \a W.
+class ServerAsyncReaderWriterInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W>,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when either:
+ /// * all messages from the client have been received (either known
+ /// implicitly, or explicitly because a previous
+ /// \a AsyncReaderInterface::Read operation completed with a non-ok result,
+ /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ /// * it is desired to end the call early with some non-OK status code.
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if some
+ /// failure occurred when trying to do so.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with trailing metadata which
+ /// contains \a status, using WriteOptions options with
+ /// identifying tag \a tag.
+ ///
+ /// WriteAndFinish is equivalent to performing WriteLast and Finish in a
+ /// single step.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] status The Status that server returns to client.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing bidirectional streaming RPCs,
+/// where the incoming message stream coming from the client has messages of
+/// type \a R, and the outgoing message stream coming from the server has
+/// messages of type \a W.
template <class W, class R>
-class ServerAsyncReaderWriter final
- : public ServerAsyncReaderWriterInterface<W, R> {
- public:
- explicit ServerAsyncReaderWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- read_ops_.set_output_tag(tag);
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
- EnsureInitialMetadataSent(&write_ops_);
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncReaderWriterInterface.WriteAndFinish
- /// method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used
- /// for sending trailing (and initial) metadata to the client.
- ///
- /// Note: \a status must have an OK code.
- //
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- options.set_buffer_hint();
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncReaderWriterInterface.Finish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used for sending
- /// trailing (and initial if not already sent) metadata to the client.
- ///
- /// Note: there are no restrictions on the code of \a status;
- /// it may be non-OK.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- void Finish(const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&finish_ops_);
-
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class ::grpc::Server;
-
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- template <class T>
- void EnsureInitialMetadataSent(T* ops) {
- if (!ctx_->sent_initial_metadata_) {
- ops->SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops->set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
+class ServerAsyncReaderWriter final
+ : public ServerAsyncReaderWriterInterface<W, R> {
+ public:
+ explicit ServerAsyncReaderWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ read_ops_.set_output_tag(tag);
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+ EnsureInitialMetadataSent(&write_ops_);
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderWriterInterface.WriteAndFinish
+ /// method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used
+ /// for sending trailing (and initial) metadata to the client.
+ ///
+ /// Note: \a status must have an OK code.
+ //
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ options.set_buffer_hint();
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderWriterInterface.Finish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used for sending
+ /// trailing (and initial if not already sent) metadata to the client.
+ ///
+ /// Note: there are no restrictions on the code of \a status;
+ /// it may be non-OK.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ void Finish(const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&finish_ops_);
+
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class ::grpc::Server;
+
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ template <class T>
+ void EnsureInitialMetadataSent(T* ops) {
+ if (!ctx_->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops->set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
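A rough usage sketch for ServerAsyncReaderWriter echoing a bidirectional stream. The chat::Chat::AsyncService and chat::Msg names are hypothetical protoc-generated stand-ins; the sketch serializes operations by waiting on the queue after each one, and error handling is omitted.

#include <grpcpp/grpcpp.h>

void HandleOneChat(chat::Chat::AsyncService* service,
                   grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  grpc::ServerAsyncReaderWriter<chat::Msg, chat::Msg> stream(&ctx);
  service->RequestChat(&ctx, &stream, cq, cq, reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq->Next(&tag, &ok);  // call started

  chat::Msg msg;
  stream.Read(&msg, reinterpret_cast<void*>(2));
  while (cq->Next(&tag, &ok) && ok) {               // ok == false: reads drained
    stream.Write(msg, reinterpret_cast<void*>(3));  // echo the message back
    cq->Next(&tag, &ok);                            // wait for the Write
    stream.Read(&msg, reinterpret_cast<void*>(2));  // request the next message
  }
  // Reads have drained (or an operation failed), so the call can be finished.
  stream.Finish(grpc::Status::OK, reinterpret_cast<void*>(4));
  cq->Next(&tag, &ok);
}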
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
index 0fccae0033..3deeda8c7f 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
@@ -19,296 +19,296 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
-/// An interface relevant for async client side unary RPCs (which send
-/// one request message to a server and receive one response message).
+/// An interface relevant for async client side unary RPCs (which send
+/// one request message to a server and receive one response message).
template <class R>
-class ClientAsyncResponseReaderInterface {
- public:
- virtual ~ClientAsyncResponseReaderInterface() {}
+class ClientAsyncResponseReaderInterface {
+ public:
+ virtual ~ClientAsyncResponseReaderInterface() {}
- /// Start the call that was set up by the constructor, but only if the
- /// constructor was invoked through the "Prepare" API, which doesn't actually
- /// start the call.
- virtual void StartCall() = 0;
+ /// Start the call that was set up by the constructor, but only if the
+ /// constructor was invoked through the "Prepare" API, which doesn't actually
+ /// start the call.
+ virtual void StartCall() = 0;
- /// Request notification of the reading of initial metadata. Completion
- /// will be notified by \a tag on the associated completion queue.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// \param[in] tag Tag identifying this request.
- virtual void ReadInitialMetadata(void* tag) = 0;
+ /// Request notification of the reading of initial metadata. Completion
+ /// will be notified by \a tag on the associated completion queue.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ virtual void ReadInitialMetadata(void* tag) = 0;
+
+ /// Request to receive the server's response \a msg and final \a status for
+ /// the call, and to notify \a tag on this call's completion queue when
+ /// finished.
+ ///
+ /// This function will return when one of the following occurs:
+ /// - the server's response message and status have been received;
+ /// - the server has returned a non-OK status (no message expected in
+ /// this case);
+ /// - the call failed for some reason and the library generated a
+ /// non-OK status.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[out] status To be updated with the operation status.
+ /// \param[out] msg To be filled in with the server's response message.
+ virtual void Finish(R* msg, ::grpc::Status* status, void* tag) = 0;
+};
- /// Request to receive the server's response \a msg and final \a status for
- /// the call, and to notify \a tag on this call's completion queue when
- /// finished.
- ///
- /// This function will return when one of the following occurs:
- /// - the server's response message and status have been received;
- /// - the server has returned a non-OK status (no message expected in
- /// this case);
- /// - the call failed for some reason and the library generated a
- /// non-OK status.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[out] status To be updated with the operation status.
- /// \param[out] msg To be filled in with the server's response message.
- virtual void Finish(R* msg, ::grpc::Status* status, void* tag) = 0;
-};
-
namespace internal {
-template <class R>
-class ClientAsyncResponseReaderFactory {
- public:
- /// Start a call and write the request out if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started (i.e.
- /// initial metadata sent) and \a request has been written out.
- /// If \a start is not set, the actual call must be initiated by StartCall.
- /// Note that \a context will be used to fill in custom initial metadata
- /// sent to the server when starting the call.
- template <class W>
- static ClientAsyncResponseReader<R>* Create(
- ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
- const W& request, bool start) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncResponseReader<R>)))
- ClientAsyncResponseReader<R>(call, context, request, start);
- }
-};
-} // namespace internal
+template <class R>
+class ClientAsyncResponseReaderFactory {
+ public:
+ /// Start a call and write the request out if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+ /// initial metadata sent) and \a request has been written out.
+ /// If \a start is not set, the actual call must be initiated by StartCall.
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// sent to the server when starting the call.
+ template <class W>
+ static ClientAsyncResponseReader<R>* Create(
+ ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ const W& request, bool start) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncResponseReader<R>)))
+ ClientAsyncResponseReader<R>(call, context, request, start);
+ }
+};
+} // namespace internal
-/// Async API for client-side unary RPCs, where the message response
-/// received from the server is of type \a R.
+/// Async API for client-side unary RPCs, where the message response
+/// received from the server is of type \a R.
template <class R>
-class ClientAsyncResponseReader final
- : public ClientAsyncResponseReaderInterface<R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncResponseReader));
- }
+class ClientAsyncResponseReader final
+ : public ClientAsyncResponseReaderInterface<R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncResponseReader));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301). Note that at the time of adding
+ // this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal();
+ }
+
+ /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ single_buf.set_output_tag(tag);
+ single_buf.RecvInitialMetadata(context_);
+ call_.PerformOps(&single_buf);
+ initial_metadata_read_ = true;
+ }
+
+ /// See \a ClientAsyncResponseReaderInterface::Finish for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void Finish(R* msg, ::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ if (initial_metadata_read_) {
+ finish_buf.set_output_tag(tag);
+ finish_buf.RecvMessage(msg);
+ finish_buf.AllowNoMessage();
+ finish_buf.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_buf);
+ } else {
+ single_buf.set_output_tag(tag);
+ single_buf.RecvInitialMetadata(context_);
+ single_buf.RecvMessage(msg);
+ single_buf.AllowNoMessage();
+ single_buf.ClientRecvStatus(context_, status);
+ call_.PerformOps(&single_buf);
+ }
+ }
+
+ private:
+ friend class internal::ClientAsyncResponseReaderFactory<R>;
+ ::grpc::ClientContext* const context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ bool initial_metadata_read_ = false;
+
+ template <class W>
+ ClientAsyncResponseReader(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, const W& request,
+ bool start)
+ : context_(context), call_(call), started_(start) {
+ // Bind the metadata at time of StartCallInternal but set up the rest here
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(single_buf.SendMessage(request).ok());
+ single_buf.ClientSendClose();
+ if (start) StartCallInternal();
+ }
+
+ void StartCallInternal() {
+ single_buf.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+
+ // disable operator new
+ static void* operator new(std::size_t size);
+ static void* operator new(std::size_t /*size*/, void* p) { return p; }
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301). Note that at the time of adding
- // this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose,
+ ::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>,
+ ::grpc::internal::CallOpClientRecvStatus>
+ single_buf;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_buf;
+};
+
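A rough client-side sketch for ClientAsyncResponseReader. The hello::Greeter stub, its PrepareAsyncSayHello method and the message types are hypothetical protoc-generated stand-ins; status checking is left to the caller.

#include <grpcpp/grpcpp.h>

hello::Reply SayHello(hello::Greeter::Stub* stub, const hello::Request& request) {
  grpc::ClientContext ctx;
  grpc::CompletionQueue cq;
  // Prepare* only sets the call up; StartCall() below actually starts it.
  std::unique_ptr<grpc::ClientAsyncResponseReader<hello::Reply>> rpc =
      stub->PrepareAsyncSayHello(&ctx, request, &cq);
  rpc->StartCall();

  hello::Reply reply;
  grpc::Status status;
  rpc->Finish(&reply, &status, reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq.Next(&tag, &ok);  // blocks until the reply and status have arrived
  return reply;        // 'status' tells whether 'reply' is meaningful
}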
+/// Async server-side API for handling unary calls, where the single
+/// response message sent to the client is of type \a W.
+template <class W>
+class ServerAsyncResponseWriter final
+ : public ::grpc::internal::ServerAsyncStreamingInterface {
+ public:
+ explicit ServerAsyncResponseWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Side effect:
+ /// The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_buf_.set_output_tag(tag);
+ meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_buf_);
+ }
+
+ /// Indicate that the stream is to be finished and request notification
+ /// when the server has sent the appropriate signals to the client to
+ /// end the call. Should not be used concurrently with other operations.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of the call.
+ /// \param[in] msg Message to be sent to the client.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent (using the
+ /// \a ServerContext associated with this call).
+ ///
+ /// Note: if \a status has a non-OK code, then \a msg will not be sent,
+ /// and the client will receive only the status with possible trailing
+ /// metadata.
+ void Finish(const W& msg, const ::grpc::Status& status, void* tag) {
+ finish_buf_.set_output_tag(tag);
+ finish_buf_.set_core_cq_tag(&finish_buf_);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ // The response is dropped if the status is not OK.
+ if (status.ok()) {
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_,
+ finish_buf_.SendMessage(msg));
+ } else {
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ }
+ call_.PerformOps(&finish_buf_);
+ }
+
+ /// Indicate that the stream is to be finished with a non-OK status,
+ /// and request notification for when the server has finished sending the
+ /// appropriate signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of the call.
+ /// - Note: \a status must have a non-OK code.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent (using the
+ /// \a ServerContext associated with this call).
+ void FinishWithError(const ::grpc::Status& status, void* tag) {
+ GPR_CODEGEN_ASSERT(!status.ok());
+ finish_buf_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_buf_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_buf_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_buf_;
+};
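A rough server-side counterpart for ServerAsyncResponseWriter, handling one async unary call. As above, hello::Greeter::AsyncService and the message accessors are hypothetical protoc-generated stand-ins; error handling and shutdown are omitted.

#include <grpcpp/grpcpp.h>

void HandleOneSayHello(hello::Greeter::AsyncService* service,
                       grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  hello::Request request;
  grpc::ServerAsyncResponseWriter<hello::Reply> responder(&ctx);
  service->RequestSayHello(&ctx, &request, &responder, cq, cq,
                           reinterpret_cast<void*>(1));
  void* tag;
  bool ok;
  cq->Next(&tag, &ok);  // a client request has arrived in 'request'

  hello::Reply reply;
  reply.set_message("hello " + request.name());
  // Sends initial metadata (if not yet sent), the message and the OK status.
  responder.Finish(reply, grpc::Status::OK, reinterpret_cast<void*>(2));
  cq->Next(&tag, &ok);  // response delivered (or the call failed)
}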
- void StartCall() override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal();
- }
-
- /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
- /// semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- single_buf.set_output_tag(tag);
- single_buf.RecvInitialMetadata(context_);
- call_.PerformOps(&single_buf);
- initial_metadata_read_ = true;
- }
-
- /// See \a ClientAsyncResponseReaderInterface::Finish for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void Finish(R* msg, ::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- if (initial_metadata_read_) {
- finish_buf.set_output_tag(tag);
- finish_buf.RecvMessage(msg);
- finish_buf.AllowNoMessage();
- finish_buf.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_buf);
- } else {
- single_buf.set_output_tag(tag);
- single_buf.RecvInitialMetadata(context_);
- single_buf.RecvMessage(msg);
- single_buf.AllowNoMessage();
- single_buf.ClientRecvStatus(context_, status);
- call_.PerformOps(&single_buf);
- }
- }
-
- private:
- friend class internal::ClientAsyncResponseReaderFactory<R>;
- ::grpc::ClientContext* const context_;
- ::grpc::internal::Call call_;
- bool started_;
- bool initial_metadata_read_ = false;
-
- template <class W>
- ClientAsyncResponseReader(::grpc::internal::Call call,
- ::grpc::ClientContext* context, const W& request,
- bool start)
- : context_(context), call_(call), started_(start) {
- // Bind the metadata at time of StartCallInternal but set up the rest here
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(single_buf.SendMessage(request).ok());
- single_buf.ClientSendClose();
- if (start) StartCallInternal();
- }
-
- void StartCallInternal() {
- single_buf.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
-
- // disable operator new
- static void* operator new(std::size_t size);
- static void* operator new(std::size_t /*size*/, void* p) { return p; }
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose,
- ::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>,
- ::grpc::internal::CallOpClientRecvStatus>
- single_buf;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_buf;
-};
-
-/// Async server-side API for handling unary calls, where the single
-/// response message sent to the client is of type \a W.
-template <class W>
-class ServerAsyncResponseWriter final
- : public ::grpc::internal::ServerAsyncStreamingInterface {
- public:
- explicit ServerAsyncResponseWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Side effect:
- /// The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_buf_.set_output_tag(tag);
- meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_buf_);
- }
-
- /// Indicate that the stream is to be finished and request notification
- /// when the server has sent the appropriate signals to the client to
- /// end the call. Should not be used concurrently with other operations.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of the call.
- /// \param[in] msg Message to be sent to the client.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent (using the
- /// \a ServerContext associated with this call).
- ///
- /// Note: if \a status has a non-OK code, then \a msg will not be sent,
- /// and the client will receive only the status with possible trailing
- /// metadata.
- void Finish(const W& msg, const ::grpc::Status& status, void* tag) {
- finish_buf_.set_output_tag(tag);
- finish_buf_.set_core_cq_tag(&finish_buf_);
- if (!ctx_->sent_initial_metadata_) {
- finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- // The response is dropped if the status is not OK.
- if (status.ok()) {
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_,
- finish_buf_.SendMessage(msg));
- } else {
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- }
- call_.PerformOps(&finish_buf_);
- }
-
- /// Indicate that the stream is to be finished with a non-OK status,
- /// and request notification for when the server has finished sending the
- /// appropriate signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of the call.
- /// - Note: \a status must have a non-OK code.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent (using the
- /// \a ServerContext associated with this call).
- void FinishWithError(const ::grpc::Status& status, void* tag) {
- GPR_CODEGEN_ASSERT(!status.ok());
- finish_buf_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_buf_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_buf_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- finish_buf_;
-};
-
} // namespace grpc
-namespace std {
-template <class R>
-class default_delete<::grpc::ClientAsyncResponseReader<R>> {
- public:
- void operator()(void* /*p*/) {}
-};
-template <class R>
-class default_delete<::grpc::ClientAsyncResponseReaderInterface<R>> {
- public:
- void operator()(void* /*p*/) {}
-};
-} // namespace std
-
+namespace std {
+template <class R>
+class default_delete<::grpc::ClientAsyncResponseReader<R>> {
+ public:
+ void operator()(void* /*p*/) {}
+};
+template <class R>
+class default_delete<::grpc::ClientAsyncResponseReaderInterface<R>> {
+ public:
+ void operator()(void* /*p*/) {}
+};
+} // namespace std
+
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
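For reference, a minimal sketch of how the Finish / FinishWithError pair documented above is typically driven from an async unary handler. EchoRequest/EchoReply and the Reply() wrapper are hypothetical stand-ins for generated code, and the completion-queue plumbing that would normally surround this is omitted; only the choice between the two calls is the point.

#include <grpcpp/grpcpp.h>
#include <grpcpp/support/async_unary_call.h>

// Sketch only: `responder` must be bound to the same ServerContext that was
// handed to the generated Request* call; `tag` is returned on the server
// completion queue once the status (and message, if any) has been sent.
void Reply(grpc::ServerAsyncResponseWriter<EchoReply>& responder,
           const EchoRequest& req, void* tag) {
  if (req.message().empty()) {
    // Non-OK status: as noted above, the message is dropped and the client
    // only sees the status plus any trailing metadata.
    responder.FinishWithError(
        grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "empty message"),
        tag);
    return;
  }
  EchoReply reply;
  reply.set_message(req.message());
  // OK status: initial metadata (if not already sent), the message and the
  // trailing status go out as one batch.
  responder.Finish(reply, grpc::Status::OK, tag);
}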
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
index 59f1d71759..6e64ec9981 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
@@ -29,12 +29,12 @@
#include <vector>
-namespace grpc {
-
-class ServerInterface;
-class ByteBuffer;
-class ServerInterface;
-
+namespace grpc {
+
+class ServerInterface;
+class ByteBuffer;
+class ServerInterface;
+
namespace internal {
template <class RequestType, class ResponseType>
class CallbackUnaryHandler;
@@ -163,15 +163,15 @@ class ByteBuffer final {
friend class internal::CallOpRecvMessage;
friend class internal::CallOpGenericRecvMessage;
template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::RpcMethodHandler;
+ friend class internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ServerStreamingHandler;
+ friend class internal::ServerStreamingHandler;
template <class RequestType, class ResponseType>
- friend class internal::CallbackUnaryHandler;
+ friend class internal::CallbackUnaryHandler;
template <class RequestType, class ResponseType>
- friend class internal::CallbackServerStreamingHandler;
+ friend class internal::CallbackServerStreamingHandler;
template <StatusCode code>
- friend class internal::ErrorMethodHandler;
+ friend class internal::ErrorMethodHandler;
template <class R>
friend class internal::DeserializeFuncType;
friend class ProtoBufferReader;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
index bddb179401..b229286215 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
@@ -21,7 +21,7 @@
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/call_hook.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
namespace experimental {
class ClientRpcInfo;
@@ -40,13 +40,13 @@ class Call final {
call_(nullptr),
max_receive_message_size_(-1) {}
/** call is owned by the caller */
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq)
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq)
: call_hook_(call_hook),
cq_(cq),
call_(call),
max_receive_message_size_(-1) {}
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
experimental::ClientRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@@ -54,7 +54,7 @@ class Call final {
max_receive_message_size_(-1),
client_rpc_info_(rpc_info) {}
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
int max_receive_message_size, experimental::ServerRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@@ -67,7 +67,7 @@ class Call final {
}
grpc_call* call() const { return call_; }
- ::grpc::CompletionQueue* cq() const { return cq_; }
+ ::grpc::CompletionQueue* cq() const { return cq_; }
int max_receive_message_size() const { return max_receive_message_size_; }
@@ -81,7 +81,7 @@ class Call final {
private:
CallHook* call_hook_;
- ::grpc::CompletionQueue* cq_;
+ ::grpc::CompletionQueue* cq_;
grpc_call* call_;
int max_receive_message_size_;
experimental::ClientRpcInfo* client_rpc_info_ = nullptr;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
index f47b69c61b..379333164a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
@@ -33,8 +33,8 @@
#include <grpcpp/impl/codegen/call.h>
#include <grpcpp/impl/codegen/call_hook.h>
#include <grpcpp/impl/codegen/call_op_set_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
#include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
@@ -55,8 +55,8 @@ class CallHook;
// TODO(yangg) if the map is changed before we send, the pointers will be a
// mess. Make sure it does not happen.
inline grpc_metadata* FillMetadataArray(
- const std::multimap<TString, TString>& metadata,
- size_t* metadata_count, const TString& optional_error_details) {
+ const std::multimap<TString, TString>& metadata,
+ size_t* metadata_count, const TString& optional_error_details) {
*metadata_count = metadata.size() + (optional_error_details.empty() ? 0 : 1);
if (*metadata_count == 0) {
return nullptr;
@@ -202,10 +202,10 @@ class WriteOptions {
namespace internal {
-/// Default argument for CallOpSet. The Unused parameter is unused by
-/// the class, but can be used for generating multiple names for the
-/// same thing.
-template <int Unused>
+/// Default argument for CallOpSet. The Unused parameter is unused by
+/// the class, but can be used for generating multiple names for the
+/// same thing.
+template <int Unused>
class CallNoOp {
protected:
void AddOp(grpc_op* /*ops*/, size_t* /*nops*/) {}
@@ -224,7 +224,7 @@ class CallOpSendInitialMetadata {
maybe_compression_level_.is_set = false;
}
- void SendInitialMetadata(std::multimap<TString, TString>* metadata,
+ void SendInitialMetadata(std::multimap<TString, TString>* metadata,
uint32_t flags) {
maybe_compression_level_.is_set = false;
send_ = true;
@@ -280,7 +280,7 @@ class CallOpSendInitialMetadata {
bool send_;
uint32_t flags_;
size_t initial_metadata_count_;
- std::multimap<TString, TString>* metadata_map_;
+ std::multimap<TString, TString>* metadata_map_;
grpc_metadata* initial_metadata_;
struct {
bool is_set;
@@ -431,7 +431,7 @@ class CallOpRecvMessage {
// Do not change status if no message is received.
void AllowNoMessage() { allow_not_getting_message_ = true; }
- bool got_message = false;
+ bool got_message = false;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
@@ -444,7 +444,7 @@ class CallOpRecvMessage {
}
void FinishOp(bool* status) {
- if (message_ == nullptr) return;
+ if (message_ == nullptr) return;
if (recv_buf_.Valid()) {
if (*status) {
got_message = *status =
@@ -455,24 +455,24 @@ class CallOpRecvMessage {
got_message = false;
recv_buf_.Clear();
}
- } else if (hijacked_) {
- if (hijacked_recv_message_failed_) {
- FinishOpRecvMessageFailureHandler(status);
- } else {
- // The op was hijacked and it was successful. There is no further action
- // to be performed since the message is already in its non-serialized
- // form.
- }
+ } else if (hijacked_) {
+ if (hijacked_recv_message_failed_) {
+ FinishOpRecvMessageFailureHandler(status);
+ } else {
+ // The op was hijacked and it was successful. There is no further action
+ // to be performed since the message is already in its non-serialized
+ // form.
+ }
} else {
- FinishOpRecvMessageFailureHandler(status);
+ FinishOpRecvMessageFailureHandler(status);
}
}
void SetInterceptionHookPoint(
InterceptorBatchMethodsImpl* interceptor_methods) {
if (message_ == nullptr) return;
- interceptor_methods->SetRecvMessage(message_,
- &hijacked_recv_message_failed_);
+ interceptor_methods->SetRecvMessage(message_,
+ &hijacked_recv_message_failed_);
}
void SetFinishInterceptionHookPoint(
@@ -491,19 +491,19 @@ class CallOpRecvMessage {
}
private:
- // Sets got_message and \a status for a failed recv message op
- void FinishOpRecvMessageFailureHandler(bool* status) {
- got_message = false;
- if (!allow_not_getting_message_) {
- *status = false;
- }
- }
-
- R* message_ = nullptr;
+ // Sets got_message and \a status for a failed recv message op
+ void FinishOpRecvMessageFailureHandler(bool* status) {
+ got_message = false;
+ if (!allow_not_getting_message_) {
+ *status = false;
+ }
+ }
+
+ R* message_ = nullptr;
ByteBuffer recv_buf_;
- bool allow_not_getting_message_ = false;
+ bool allow_not_getting_message_ = false;
bool hijacked_ = false;
- bool hijacked_recv_message_failed_ = false;
+ bool hijacked_recv_message_failed_ = false;
};
class DeserializeFunc {
@@ -540,7 +540,7 @@ class CallOpGenericRecvMessage {
// Do not change status if no message is received.
void AllowNoMessage() { allow_not_getting_message_ = true; }
- bool got_message = false;
+ bool got_message = false;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
@@ -553,7 +553,7 @@ class CallOpGenericRecvMessage {
}
void FinishOp(bool* status) {
- if (!deserialize_) return;
+ if (!deserialize_) return;
if (recv_buf_.Valid()) {
if (*status) {
got_message = true;
@@ -563,14 +563,14 @@ class CallOpGenericRecvMessage {
got_message = false;
recv_buf_.Clear();
}
- } else if (hijacked_) {
- if (hijacked_recv_message_failed_) {
- FinishOpRecvMessageFailureHandler(status);
- } else {
- // The op was hijacked and it was successful. There is no further action
- // to be performed since the message is already in its non-serialized
- // form.
- }
+ } else if (hijacked_) {
+ if (hijacked_recv_message_failed_) {
+ FinishOpRecvMessageFailureHandler(status);
+ } else {
+ // The op was hijacked and it was successful. There is no further action
+ // to be performed since the message is already in its non-serialized
+ // form.
+ }
} else {
got_message = false;
if (!allow_not_getting_message_) {
@@ -582,8 +582,8 @@ class CallOpGenericRecvMessage {
void SetInterceptionHookPoint(
InterceptorBatchMethodsImpl* interceptor_methods) {
if (!deserialize_) return;
- interceptor_methods->SetRecvMessage(message_,
- &hijacked_recv_message_failed_);
+ interceptor_methods->SetRecvMessage(message_,
+ &hijacked_recv_message_failed_);
}
void SetFinishInterceptionHookPoint(
@@ -603,20 +603,20 @@ class CallOpGenericRecvMessage {
}
private:
- // Sets got_message and \a status for a failed recv message op
- void FinishOpRecvMessageFailureHandler(bool* status) {
- got_message = false;
- if (!allow_not_getting_message_) {
- *status = false;
- }
- }
-
- void* message_ = nullptr;
+ // Sets got_message and \a status for a failed recv message op
+ void FinishOpRecvMessageFailureHandler(bool* status) {
+ got_message = false;
+ if (!allow_not_getting_message_) {
+ *status = false;
+ }
+ }
+
+ void* message_ = nullptr;
std::unique_ptr<DeserializeFunc> deserialize_;
ByteBuffer recv_buf_;
- bool allow_not_getting_message_ = false;
- bool hijacked_ = false;
- bool hijacked_recv_message_failed_ = false;
+ bool allow_not_getting_message_ = false;
+ bool hijacked_ = false;
+ bool hijacked_recv_message_failed_ = false;
};
class CallOpClientSendClose {
@@ -659,7 +659,7 @@ class CallOpServerSendStatus {
CallOpServerSendStatus() : send_status_available_(false) {}
void ServerSendStatus(
- std::multimap<TString, TString>* trailing_metadata,
+ std::multimap<TString, TString>* trailing_metadata,
const Status& status) {
send_error_details_ = status.error_details();
metadata_map_ = trailing_metadata;
@@ -713,10 +713,10 @@ class CallOpServerSendStatus {
bool hijacked_ = false;
bool send_status_available_;
grpc_status_code send_status_code_;
- TString send_error_details_;
- TString send_error_message_;
+ TString send_error_details_;
+ TString send_error_message_;
size_t trailing_metadata_count_;
- std::multimap<TString, TString>* metadata_map_;
+ std::multimap<TString, TString>* metadata_map_;
grpc_metadata* trailing_metadata_;
grpc_slice error_message_slice_;
};
@@ -725,7 +725,7 @@ class CallOpRecvInitialMetadata {
public:
CallOpRecvInitialMetadata() : metadata_map_(nullptr) {}
- void RecvInitialMetadata(::grpc::ClientContext* context) {
+ void RecvInitialMetadata(::grpc::ClientContext* context) {
context->initial_metadata_received_ = true;
metadata_map_ = &context->recv_initial_metadata_;
}
@@ -774,7 +774,7 @@ class CallOpClientRecvStatus {
CallOpClientRecvStatus()
: recv_status_(nullptr), debug_error_string_(nullptr) {}
- void ClientRecvStatus(::grpc::ClientContext* context, Status* status) {
+ void ClientRecvStatus(::grpc::ClientContext* context, Status* status) {
client_context_ = context;
metadata_map_ = &client_context_->trailing_metadata_;
recv_status_ = status;
@@ -803,9 +803,9 @@ class CallOpClientRecvStatus {
*recv_status_ =
Status(static_cast<StatusCode>(status_code_),
GRPC_SLICE_IS_EMPTY(error_message_)
- ? TString()
- : TString(reinterpret_cast<const char*>GRPC_SLICE_START_PTR(error_message_),
- reinterpret_cast<const char*>GRPC_SLICE_END_PTR(error_message_)),
+ ? TString()
+ : TString(reinterpret_cast<const char*>GRPC_SLICE_START_PTR(error_message_),
+ reinterpret_cast<const char*>GRPC_SLICE_END_PTR(error_message_)),
metadata_map_->GetBinaryErrorDetails());
if (debug_error_string_ != nullptr) {
client_context_->set_debug_error_string(debug_error_string_);
@@ -840,7 +840,7 @@ class CallOpClientRecvStatus {
private:
bool hijacked_ = false;
- ::grpc::ClientContext* client_context_;
+ ::grpc::ClientContext* client_context_;
MetadataMap* metadata_map_;
Status* recv_status_;
const char* debug_error_string_;
@@ -858,7 +858,7 @@ class CallOpSet;
/// the maximum count of ops we'll need in a set. We leverage the
/// empty base class optimization to slim this class (especially
/// when there are many unused slots used). To avoid duplicate base classes,
-/// the template parameter for CallNoOp is varied by argument position.
+/// the template parameter for CallNoOp is varied by argument position.
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
class CallOpSet : public CallOpSetInterface,
public Op1,
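Read together, the CallNoOp comment above and the empty-base-class note in this hunk describe one mechanism; roughly, the declaration (its defaults are not visible in this hunk) and a typical instantiation look like the fragment below, mirroring the finish_buf_ member shown earlier in this diff.

// Illustrative fragment only; CallOpSet lives in
// <grpcpp/impl/codegen/call_op_set.h> and is normally reached through
// generated code rather than included directly.
//
// template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
//           class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
//           class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
// class CallOpSet;
//
// Naming only the ops that are needed leaves the remaining slots as distinct
// CallNoOp<N> bases, so the empty-base optimization still applies:
::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
                            ::grpc::internal::CallOpSendMessage,
                            ::grpc::internal::CallOpServerSendStatus>
    finish_buf;  // slots 4..6 default to CallNoOp<4>..CallNoOp<6>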
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
index 0666ee056e..ea0752d90e 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
@@ -28,7 +28,7 @@
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/time.h>
-namespace grpc {
+namespace grpc {
template <class R>
class ClientReader;
template <class W>
@@ -56,8 +56,8 @@ class ClientCallbackUnaryFactory;
} // namespace internal
class ChannelInterface;
-class ClientContext;
-class CompletionQueue;
+class ClientContext;
+class CompletionQueue;
namespace experimental {
class DelegatingChannel;
@@ -84,7 +84,7 @@ class ChannelInterface {
/// deadline expires. \a GetState needs to called to get the current state.
template <typename T>
void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline,
- ::grpc::CompletionQueue* cq, void* tag) {
+ ::grpc::CompletionQueue* cq, void* tag) {
TimePoint<T> deadline_tp(deadline);
NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag);
}
@@ -109,42 +109,42 @@ class ChannelInterface {
private:
template <class R>
- friend class ::grpc::ClientReader;
+ friend class ::grpc::ClientReader;
template <class W>
- friend class ::grpc::ClientWriter;
+ friend class ::grpc::ClientWriter;
template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
+ friend class ::grpc::ClientReaderWriter;
template <class R>
- friend class ::grpc::internal::ClientAsyncReaderFactory;
+ friend class ::grpc::internal::ClientAsyncReaderFactory;
template <class W>
- friend class ::grpc::internal::ClientAsyncWriterFactory;
+ friend class ::grpc::internal::ClientAsyncWriterFactory;
template <class W, class R>
- friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
+ friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
template <class R>
- friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
+ friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
template <class W, class R>
- friend class ::grpc::internal::ClientCallbackReaderWriterFactory;
+ friend class ::grpc::internal::ClientCallbackReaderWriterFactory;
template <class R>
- friend class ::grpc::internal::ClientCallbackReaderFactory;
+ friend class ::grpc::internal::ClientCallbackReaderFactory;
template <class W>
- friend class ::grpc::internal::ClientCallbackWriterFactory;
- friend class ::grpc::internal::ClientCallbackUnaryFactory;
+ friend class ::grpc::internal::ClientCallbackWriterFactory;
+ friend class ::grpc::internal::ClientCallbackUnaryFactory;
template <class InputMessage, class OutputMessage>
friend class ::grpc::internal::BlockingUnaryCallImpl;
template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::CallbackUnaryCallImpl;
+ friend class ::grpc::internal::CallbackUnaryCallImpl;
friend class ::grpc::internal::RpcMethod;
friend class ::grpc::experimental::DelegatingChannel;
friend class ::grpc::internal::InterceptedChannel;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq) = 0;
+ ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) = 0;
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0;
@@ -157,8 +157,8 @@ class ChannelInterface {
// method and adding a new pure method to an interface would be a breaking
// change (even though this is private and non-API)
virtual internal::Call CreateCallInternal(
- const internal::RpcMethod& /*method*/, ::grpc::ClientContext* /*context*/,
- ::grpc::CompletionQueue* /*cq*/, size_t /*interceptor_pos*/) {
+ const internal::RpcMethod& /*method*/, ::grpc::ClientContext* /*context*/,
+ ::grpc::CompletionQueue* /*cq*/, size_t /*interceptor_pos*/) {
return internal::Call();
}
@@ -170,7 +170,7 @@ class ChannelInterface {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
- virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
+ virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
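The GetState / NotifyOnStateChange pair above is driven through a CompletionQueue; a minimal sketch follows (channel and queue assumed to exist already, tag value arbitrary).

#include <chrono>
#include <memory>
#include <grpcpp/grpcpp.h>

// Asks to be notified when the channel leaves state `last`, or after 5 seconds.
void WatchState(const std::shared_ptr<grpc::Channel>& channel,
                grpc::CompletionQueue* cq) {
  grpc_connectivity_state last = channel->GetState(/*try_to_connect=*/true);
  channel->NotifyOnStateChange(
      last, std::chrono::system_clock::now() + std::chrono::seconds(5), cq,
      /*tag=*/reinterpret_cast<void*>(1));
  void* tag = nullptr;
  bool ok = false;
  if (cq->Next(&tag, &ok) && ok) {
    // ok: the state changed before the deadline; query it again to see the
    // new value. !ok would mean the deadline expired first.
    grpc_connectivity_state now = channel->GetState(false);
    (void)now;
  }
}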
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
index 6a08838423..90c817ceaa 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,1203 +17,1203 @@
#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
#define GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
-#include <atomic>
-#include <functional>
+#include <atomic>
+#include <functional>
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
-class Channel;
-class ClientContext;
-
-namespace internal {
-class RpcMethod;
-
-/// Perform a callback-based unary call
-/// TODO(vjpai): Combine as much as possible with the blocking unary call code
-template <class InputMessage, class OutputMessage>
-void CallbackUnaryCall(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const InputMessage* request, OutputMessage* result,
- std::function<void(::grpc::Status)> on_completion) {
- CallbackUnaryCallImpl<InputMessage, OutputMessage> x(
- channel, method, context, request, result, on_completion);
-}
-
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl {
- public:
- CallbackUnaryCallImpl(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const InputMessage* request, OutputMessage* result,
- std::function<void(::grpc::Status)> on_completion) {
- ::grpc::CompletionQueue* cq = channel->CallbackCQ();
- GPR_CODEGEN_ASSERT(cq != nullptr);
- grpc::internal::Call call(channel->CreateCall(method, context, cq));
-
- using FullCallOpSet = grpc::internal::CallOpSet<
- ::grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpRecvInitialMetadata,
- grpc::internal::CallOpRecvMessage<OutputMessage>,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpClientRecvStatus>;
-
- struct OpSetAndTag {
- FullCallOpSet opset;
- grpc::internal::CallbackWithStatusTag tag;
- };
- const size_t alloc_sz = sizeof(OpSetAndTag);
- auto* const alloced = static_cast<OpSetAndTag*>(
- ::grpc::g_core_codegen_interface->grpc_call_arena_alloc(call.call(),
- alloc_sz));
- auto* ops = new (&alloced->opset) FullCallOpSet;
- auto* tag = new (&alloced->tag)
- grpc::internal::CallbackWithStatusTag(call.call(), on_completion, ops);
-
- // TODO(vjpai): Unify code with sync API as much as possible
- ::grpc::Status s = ops->SendMessagePtr(request);
- if (!s.ok()) {
- tag->force_run(s);
- return;
- }
- ops->SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- ops->RecvInitialMetadata(context);
- ops->RecvMessage(result);
- ops->AllowNoMessage();
- ops->ClientSendClose();
- ops->ClientRecvStatus(context, tag->status_ptr());
- ops->set_core_cq_tag(tag);
- call.PerformOps(ops);
- }
-};
-
-// Base class for public API classes.
-class ClientReactor {
- public:
- /// Called by the library when all operations associated with this RPC have
- /// completed and all Holds have been removed. OnDone provides the RPC status
- /// outcome for both successful and failed RPCs. If it is never called on an
- /// RPC, it indicates an application-level problem (like failure to remove a
- /// hold).
- ///
- /// \param[in] s The status outcome of this RPC
- virtual void OnDone(const ::grpc::Status& /*s*/) = 0;
-
- /// InternalScheduleOnDone is not part of the API and is not meant to be
- /// overridden. It is virtual to allow successful builds for certain bazel
- /// build users that only want to depend on gRPC codegen headers and not the
- /// full library (although this is not a generally-supported option). Although
- /// the virtual call is slower than a direct call, this function is
- /// heavyweight and the cost of the virtual call is not much in comparison.
- /// This function may be removed or devirtualized in the future.
- virtual void InternalScheduleOnDone(::grpc::Status s);
-};
-
-} // namespace internal
-
-// Forward declarations
-template <class Request, class Response>
-class ClientBidiReactor;
-template <class Response>
-class ClientReadReactor;
-template <class Request>
-class ClientWriteReactor;
-class ClientUnaryReactor;
-
-// NOTE: The streaming objects are not actually implemented in the public API.
-// These interfaces are provided for mocking only. Typical applications
-// will interact exclusively with the reactors that they define.
-template <class Request, class Response>
-class ClientCallbackReaderWriter {
- public:
- virtual ~ClientCallbackReaderWriter() {}
- virtual void StartCall() = 0;
- virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
- virtual void WritesDone() = 0;
- virtual void Read(Response* resp) = 0;
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientBidiReactor<Request, Response>* reactor) {
- reactor->BindStream(this);
- }
-};
-
-template <class Response>
-class ClientCallbackReader {
- public:
- virtual ~ClientCallbackReader() {}
- virtual void StartCall() = 0;
- virtual void Read(Response* resp) = 0;
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientReadReactor<Response>* reactor) {
- reactor->BindReader(this);
- }
-};
-
-template <class Request>
-class ClientCallbackWriter {
- public:
- virtual ~ClientCallbackWriter() {}
- virtual void StartCall() = 0;
- void Write(const Request* req) { Write(req, ::grpc::WriteOptions()); }
- virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
- void WriteLast(const Request* req, ::grpc::WriteOptions options) {
- Write(req, options.set_last_message());
- }
- virtual void WritesDone() = 0;
-
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientWriteReactor<Request>* reactor) {
- reactor->BindWriter(this);
- }
-};
-
-class ClientCallbackUnary {
- public:
- virtual ~ClientCallbackUnary() {}
- virtual void StartCall() = 0;
-
- protected:
- void BindReactor(ClientUnaryReactor* reactor);
-};
-
-// The following classes are the reactor interfaces that are to be implemented
-// by the user. They are passed in to the library as an argument to a call on a
-// stub (either a codegen-ed call or a generic call). The streaming RPC is
-// activated by calling StartCall, possibly after initiating StartRead,
-// StartWrite, or AddHold operations on the streaming object. Note that none of
-// the classes are pure; all reactions have a default empty reaction so that the
-// user class only needs to override those classes that it cares about.
-// The reactor must be passed to the stub invocation before any of the below
-// operations can be called.
-
-/// \a ClientBidiReactor is the interface for a bidirectional streaming RPC.
-template <class Request, class Response>
-class ClientBidiReactor : public internal::ClientReactor {
- public:
- virtual ~ClientBidiReactor() {}
-
- /// Activate the RPC and initiate any reads or writes that have been Start'ed
- /// before this call. All streaming RPCs issued by the client MUST have
- /// StartCall invoked on them (even if they are canceled) as this call is the
- /// activation of their lifecycle.
- void StartCall() { stream_->StartCall(); }
-
- /// Initiate a read operation (or post it for later initiation if StartCall
- /// has not yet been invoked).
- ///
- /// \param[out] resp Where to eventually store the read message. Valid when
- /// the library calls OnReadDone
- void StartRead(Response* resp) { stream_->Read(resp); }
-
- /// Initiate a write operation (or post it for later initiation if StartCall
- /// has not yet been invoked).
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- void StartWrite(const Request* req) {
- StartWrite(req, ::grpc::WriteOptions());
- }
-
- /// Initiate/post a write operation with specified options.
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWrite(const Request* req, ::grpc::WriteOptions options) {
- stream_->Write(req, std::move(options));
- }
-
- /// Initiate/post a write operation with specified options and an indication
- /// that this is the last write (like StartWrite and StartWritesDone, merged).
- /// Note that calling this means that no more calls to StartWrite,
- /// StartWriteLast, or StartWritesDone are allowed.
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
- StartWrite(req, std::move(options.set_last_message()));
- }
-
- /// Indicate that the RPC will have no more write operations. This can only be
- /// issued once for a given RPC. This is not required or allowed if
- /// StartWriteLast is used since that already has the same implication.
- /// Note that calling this means that no more calls to StartWrite,
- /// StartWriteLast, or StartWritesDone are allowed.
- void StartWritesDone() { stream_->WritesDone(); }
-
- /// Holds are needed if (and only if) this stream has operations that take
- /// place on it after StartCall but from outside one of the reactions
- /// (OnReadDone, etc). This is _not_ a common use of the streaming API.
- ///
- /// Holds must be added before calling StartCall. If a stream still has a hold
- /// in place, its resources will not be destroyed even if the status has
- /// already come in from the wire and there are currently no active callbacks
- /// outstanding. Similarly, the stream will not call OnDone if there are still
- /// holds on it.
- ///
- /// For example, if a StartRead or StartWrite operation is going to be
- /// initiated from elsewhere in the application, the application should call
- /// AddHold or AddMultipleHolds before StartCall. If there is going to be,
- /// for example, a read-flow and a write-flow taking place outside the
- /// reactions, then call AddMultipleHolds(2) before StartCall. When the
- /// application knows that it won't issue any more read operations (such as
- /// when a read comes back as not ok), it should issue a RemoveHold(). It
- /// should also call RemoveHold() again after it does StartWriteLast or
- /// StartWritesDone that indicates that there will be no more write ops.
- /// The number of RemoveHold calls must match the total number of AddHold
- /// calls plus the number of holds added by AddMultipleHolds.
- /// The argument to AddMultipleHolds must be positive.
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- stream_->AddHold(holds);
- }
- void RemoveHold() { stream_->RemoveHold(); }
-
- /// Notifies the application that all operations associated with this RPC
- /// have completed and all Holds have been removed. OnDone provides the RPC
- /// status outcome for both successful and failed RPCs and will be called in
- /// all cases. If it is not called, it indicates an application-level problem
- /// (like failure to remove a hold).
- ///
- /// \param[in] s The status outcome of this RPC
- void OnDone(const ::grpc::Status& /*s*/) override {}
-
- /// Notifies the application that a read of initial metadata from the
- /// server is done. If the application chooses not to implement this method,
- /// it can assume that the initial metadata has been read before the first
- /// call of OnReadDone or OnDone.
- ///
- /// \param[in] ok Was the initial metadata read successfully? If false, no
- /// new read/write operation will succeed, and any further
- /// Start* operations should not be called.
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartRead operation completed.
- ///
- /// \param[in] ok Was it successful? If false, no new read/write operation
- /// will succeed, and any further Start* should not be called.
- virtual void OnReadDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWrite or StartWriteLast operation
- /// completed.
- ///
- /// \param[in] ok Was it successful? If false, no new read/write operation
- /// will succeed, and any further Start* should not be called.
- virtual void OnWriteDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWritesDone operation completed. Note
- /// that this is only used on explicit StartWritesDone operations and not for
- /// those that are implicitly invoked as part of a StartWriteLast.
- ///
- /// \param[in] ok Was it successful? If false, the application will later see
- /// the failure reflected as a bad status in OnDone and no
- /// further Start* should be called.
- virtual void OnWritesDoneDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackReaderWriter<Request, Response>;
- void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) {
- stream_ = stream;
- }
- ClientCallbackReaderWriter<Request, Response>* stream_;
-};
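To make the reactor flow described above concrete, here is a minimal, illustrative bidirectional reactor that writes one message and reads one reply; Echo::Stub, async()->BidiEcho(...) (experimental_async() in some releases) and the message types are hypothetical generated names, and the object must stay alive until OnDone runs.

class EchoReactor : public grpc::ClientBidiReactor<EchoRequest, EchoResponse> {
 public:
  explicit EchoReactor(Echo::Stub* stub) {
    stub->async()->BidiEcho(&ctx_, this);  // bind the reactor to the RPC first
    req_.set_message("ping");
    StartWrite(&req_);   // queued; initiated once StartCall activates the RPC
    StartRead(&resp_);
    StartCall();
  }
  void OnWriteDone(bool ok) override {
    if (ok) StartWritesDone();  // no further writes for this RPC
  }
  void OnReadDone(bool /*ok*/) override {
    // A failed read means no more messages will arrive; wait for OnDone.
  }
  void OnDone(const grpc::Status& /*s*/) override {
    // Terminal reaction: safe to notify waiters or delete the reactor here.
  }

 private:
  grpc::ClientContext ctx_;
  EchoRequest req_;
  EchoResponse resp_;
};

If reads or writes were instead going to be issued from outside the reactions, AddMultipleHolds()/RemoveHold() would bracket them before StartCall, as the hold comment above describes.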
-
-/// \a ClientReadReactor is the interface for a server-streaming RPC.
-/// All public methods behave as in ClientBidiReactor.
-template <class Response>
-class ClientReadReactor : public internal::ClientReactor {
- public:
- virtual ~ClientReadReactor() {}
-
- void StartCall() { reader_->StartCall(); }
- void StartRead(Response* resp) { reader_->Read(resp); }
-
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- reader_->AddHold(holds);
- }
- void RemoveHold() { reader_->RemoveHold(); }
-
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
- virtual void OnReadDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackReader<Response>;
- void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; }
- ClientCallbackReader<Response>* reader_;
-};
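A server-streaming counterpart using ClientReadReactor, again with hypothetical generated names (Echo::Stub, async()->List(...), ListRequest/ListItem); the common pattern is to re-issue StartRead from OnReadDone until it fails.

class ListReactor : public grpc::ClientReadReactor<ListItem> {
 public:
  ListReactor(Echo::Stub* stub, const ListRequest& req) : req_(req) {
    stub->async()->List(&ctx_, &req_, this);  // hypothetical generated method
    StartRead(&item_);
    StartCall();
  }
  void OnReadDone(bool ok) override {
    if (ok) {
      HandleItem(item_);   // hypothetical application hook
      StartRead(&item_);   // keep the read pipeline going
    }
    // !ok: the stream is drained or broken; OnDone delivers the final status.
  }
  void OnDone(const grpc::Status& /*s*/) override { /* notify/release here */ }

 private:
  grpc::ClientContext ctx_;
  ListRequest req_;
  ListItem item_;
};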
-
-/// \a ClientWriteReactor is the interface for a client-streaming RPC.
-/// All public methods behave as in ClientBidiReactor.
-template <class Request>
-class ClientWriteReactor : public internal::ClientReactor {
- public:
- virtual ~ClientWriteReactor() {}
-
- void StartCall() { writer_->StartCall(); }
- void StartWrite(const Request* req) {
- StartWrite(req, ::grpc::WriteOptions());
- }
- void StartWrite(const Request* req, ::grpc::WriteOptions options) {
- writer_->Write(req, std::move(options));
- }
- void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
- StartWrite(req, std::move(options.set_last_message()));
- }
- void StartWritesDone() { writer_->WritesDone(); }
-
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- writer_->AddHold(holds);
- }
- void RemoveHold() { writer_->RemoveHold(); }
-
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
- virtual void OnWriteDone(bool /*ok*/) {}
- virtual void OnWritesDoneDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackWriter<Request>;
- void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; }
-
- ClientCallbackWriter<Request>* writer_;
-};
-
-/// \a ClientUnaryReactor is a reactor-style interface for a unary RPC.
-/// This is _not_ a common way of invoking a unary RPC. In practice, this
-/// option should be used only if the unary RPC wants to receive initial
-/// metadata without waiting for the response to complete. Most deployments of
-/// RPC systems do not use this option, but it is needed for generality.
-/// All public methods behave as in ClientBidiReactor.
-/// StartCall is included for consistency with the other reactor flavors: even
-/// though there are no StartRead or StartWrite operations to queue before the
-/// call (that is part of the unary call itself) and there is no reactor object
-/// being created as a result of this call, we keep a consistent 2-phase
-/// initiation API among all the reactor flavors.
-class ClientUnaryReactor : public internal::ClientReactor {
- public:
- virtual ~ClientUnaryReactor() {}
-
- void StartCall() { call_->StartCall(); }
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackUnary;
- void BindCall(ClientCallbackUnary* call) { call_ = call; }
- ClientCallbackUnary* call_;
-};
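Finally, the unary flavor, which per the comment above is mainly useful when initial metadata should be observed before the reply arrives; the stub entry point and message types are again hypothetical.

class UnaryEchoReactor : public grpc::ClientUnaryReactor {
 public:
  UnaryEchoReactor(Echo::Stub* stub, const EchoRequest& req) : req_(req) {
    stub->async()->Echo(&ctx_, &req_, &resp_, this);  // hypothetical
    StartCall();
  }
  void OnReadInitialMetadataDone(bool /*ok*/) override {
    // The server's initial metadata is readable from ctx_ at this point,
    // typically before the reply itself has arrived.
  }
  void OnDone(const grpc::Status& s) override {
    // resp_ is meaningful only if s.ok().
  }

 private:
  grpc::ClientContext ctx_;
  EchoRequest req_;
  EchoResponse resp_;
};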
-
-// Define function out-of-line from class to avoid forward declaration issue
-inline void ClientCallbackUnary::BindReactor(ClientUnaryReactor* reactor) {
- reactor->BindCall(this);
-}
-
-namespace internal {
-
-// Forward declare factory classes for friendship
-template <class Request, class Response>
-class ClientCallbackReaderWriterFactory;
-template <class Response>
-class ClientCallbackReaderFactory;
-template <class Request>
-class ClientCallbackWriterFactory;
-
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl
- : public ClientCallbackReaderWriter<Request, Response> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderWriterImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any read backlog
- // 3. Any write backlog
- // 4. Recv trailing metadata (unless corked)
- if (!start_corked_) {
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
-
- call_.PerformOps(&start_ops_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
-
- if (backlog_.read_ops) {
- call_.PerformOps(&read_ops_);
- }
- if (backlog_.write_ops) {
- call_.PerformOps(&write_ops_);
- }
- if (backlog_.writes_done_ops) {
- call_.PerformOps(&writes_done_ops_);
- }
- call_.PerformOps(&finish_ops_);
- // The last thing in this critical section is to set started_ so that it
- // can be used lock-free as well.
- started_.store(true, std::memory_order_release);
- }
- // MaybeFinish outside the lock to make sure that destruction of this object
- // doesn't take place while holding the lock (which would cause the lock to
- // be released after destruction)
- this->MaybeFinish(/*from_reaction=*/false);
- }
-
- void Read(Response* msg) override {
- read_ops_.RecvMessage(msg);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.read_ops = true;
- return;
- }
- }
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const Request* msg, ::grpc::WriteOptions options) override {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(corked_write_needed_)) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.write_ops = true;
- return;
- }
- }
- call_.PerformOps(&write_ops_);
- }
- void WritesDone() override {
- writes_done_ops_.ClientSendClose();
- writes_done_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWritesDoneDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &writes_done_ops_, /*can_inline=*/false);
- writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(corked_write_needed_)) {
- writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.writes_done_ops = true;
- return;
- }
- }
- call_.PerformOps(&writes_done_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackReaderWriterFactory<Request, Response>;
-
- ClientCallbackReaderWriterImpl(grpc::internal::Call call,
- ::grpc::ClientContext* context,
- ClientBidiReactor<Request, Response>* reactor)
- : context_(context),
- call_(call),
- reactor_(reactor),
- start_corked_(context_->initial_metadata_corked_),
- corked_write_needed_(start_corked_) {
- this->BindReactor(reactor);
-
- // Set up the unchanging parts of the start, read, and write tags and ops.
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
-
- write_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWriteDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &write_ops_, /*can_inline=*/false);
- write_ops_.set_core_cq_tag(&write_tag_);
-
- read_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &read_ops_, /*can_inline=*/false);
- read_ops_.set_core_cq_tag(&read_tag_);
-
- // Also set up the Finish tag and op set.
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- }
-
- // MaybeFinish can be called from reactions or from user-initiated operations
- // like StartCall or RemoveHold. If this is the last operation or hold on this
- // object, it will invoke the OnDone reaction. If MaybeFinish was called from
- // a reaction, it can call OnDone directly. If not, it would need to schedule
- // OnDone onto an executor thread to avoid the possibility of deadlocking with
- // any locks in the user code that invoked it.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackReaderWriterImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientBidiReactor<Request, Response>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
- const bool start_corked_;
- bool corked_write_needed_; // no lock needed since only accessed in
- // Write/WritesDone which cannot be concurrent
-
- grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose>
- write_ops_;
- grpc::internal::CallbackWithSuccessTag write_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpClientSendClose>
- writes_done_ops_;
- grpc::internal::CallbackWithSuccessTag writes_done_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
- read_ops_;
- grpc::internal::CallbackWithSuccessTag read_tag_;
-
- struct StartCallBacklog {
- bool write_ops = false;
- bool writes_done_ops = false;
- bool read_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
- std::atomic<intptr_t> callbacks_outstanding_{3};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Request, class Response>
-class ClientCallbackReaderWriterFactory {
- public:
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- ClientBidiReactor<Request, Response>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackReaderWriterImpl<Request, Response>)))
- ClientCallbackReaderWriterImpl<Request, Response>(call, context,
- reactor);
- }
-};
-
-template <class Response>
-class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any backlog
- // 3. Recv trailing metadata
-
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
- call_.PerformOps(&start_ops_);
-
- // Also set up the read tag so it doesn't have to be set up each time
- read_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &read_ops_, /*can_inline=*/false);
- read_ops_.set_core_cq_tag(&read_tag_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
- if (backlog_.read_ops) {
- call_.PerformOps(&read_ops_);
- }
- started_.store(true, std::memory_order_release);
- }
-
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_, /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- call_.PerformOps(&finish_ops_);
- }
-
- void Read(Response* msg) override {
- read_ops_.RecvMessage(msg);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.read_ops = true;
- return;
- }
- }
- call_.PerformOps(&read_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackReaderFactory<Response>;
-
- template <class Request>
- ClientCallbackReaderImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Request* request,
- ClientReadReactor<Response>* reactor)
- : context_(context), call_(call), reactor_(reactor) {
- this->BindReactor(reactor);
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
- start_ops_.ClientSendClose();
- }
-
- // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackReaderImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientReadReactor<Response>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
- read_ops_;
- grpc::internal::CallbackWithSuccessTag read_tag_;
-
- struct StartCallBacklog {
- bool read_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 2 callbacks to pre-register for start and finish
- std::atomic<intptr_t> callbacks_outstanding_{2};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Response>
-class ClientCallbackReaderFactory {
- public:
- template <class Request>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const Request* request,
- ClientReadReactor<Response>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackReaderImpl<Response>)))
- ClientCallbackReaderImpl<Response>(call, context, request, reactor);
- }
-};
-
-template <class Request>
-class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackWriterImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any backlog
- // 3. Recv trailing metadata
-
- if (!start_corked_) {
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
- call_.PerformOps(&start_ops_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
-
- if (backlog_.write_ops) {
- call_.PerformOps(&write_ops_);
- }
- if (backlog_.writes_done_ops) {
- call_.PerformOps(&writes_done_ops_);
- }
- call_.PerformOps(&finish_ops_);
- // The last thing in this critical section is to set started_ so that it
- // can be used lock-free as well.
- started_.store(true, std::memory_order_release);
- }
- // MaybeFinish outside the lock to make sure that destruction of this object
- // doesn't take place while holding the lock (which would cause the lock to
- // be released after destruction)
- this->MaybeFinish(/*from_reaction=*/false);
- }
-
- void Write(const Request* msg, ::grpc::WriteOptions options) override {
- if (GPR_UNLIKELY(options.is_last_message())) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-
- if (GPR_UNLIKELY(corked_write_needed_)) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.write_ops = true;
- return;
- }
- }
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone() override {
- writes_done_ops_.ClientSendClose();
- writes_done_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWritesDoneDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &writes_done_ops_, /*can_inline=*/false);
- writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-
- if (GPR_UNLIKELY(corked_write_needed_)) {
- writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.writes_done_ops = true;
- return;
- }
- }
- call_.PerformOps(&writes_done_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackWriterFactory<Request>;
-
- template <class Response>
- ClientCallbackWriterImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Response* response,
- ClientWriteReactor<Request>* reactor)
- : context_(context),
- call_(call),
- reactor_(reactor),
- start_corked_(context_->initial_metadata_corked_),
- corked_write_needed_(start_corked_) {
- this->BindReactor(reactor);
-
- // Set up the unchanging parts of the start and write tags and ops.
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
-
- write_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWriteDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &write_ops_, /*can_inline=*/false);
- write_ops_.set_core_cq_tag(&write_tag_);
-
- // Also set up the Finish tag and op set.
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- }
-
- // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackWriterImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientWriteReactor<Request>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
- const bool start_corked_;
- bool corked_write_needed_; // no lock needed since only accessed in
- // Write/WritesDone which cannot be concurrent
-
- grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
- grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose>
- write_ops_;
- grpc::internal::CallbackWithSuccessTag write_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpClientSendClose>
- writes_done_ops_;
- grpc::internal::CallbackWithSuccessTag writes_done_tag_;
-
- struct StartCallBacklog {
- bool write_ops = false;
- bool writes_done_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
- std::atomic<intptr_t> callbacks_outstanding_{3};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Request>
-class ClientCallbackWriterFactory {
- public:
- template <class Response>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, Response* response,
- ClientWriteReactor<Request>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackWriterImpl<Request>)))
- ClientCallbackWriterImpl<Request>(call, context, response, reactor);
- }
-};
-
-class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackUnaryImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, each with a callback
- // 1. Send initial metadata + write + writes done + recv initial metadata
- // 2. Read message, recv trailing metadata
-
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish();
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
- call_.PerformOps(&start_ops_);
-
- finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class ClientCallbackUnaryFactory;
-
- template <class Request, class Response>
- ClientCallbackUnaryImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Request* request,
- Response* response, ClientUnaryReactor* reactor)
- : context_(context), call_(call), reactor_(reactor) {
- this->BindReactor(reactor);
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
- start_ops_.ClientSendClose();
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- }
-
- // In the unary case, MaybeFinish is only ever invoked from a
- // library-initiated reaction, so it will just directly call OnDone if this is
- // the last reaction for this RPC.
- void MaybeFinish() {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackUnaryImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- reactor->OnDone(s);
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientUnaryReactor* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
- grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- // This call will have 2 callbacks: start and finish
- std::atomic<intptr_t> callbacks_outstanding_{2};
-};
-
-class ClientCallbackUnaryFactory {
- public:
- template <class Request, class Response>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const Request* request,
- Response* response, ClientUnaryReactor* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
-
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackUnaryImpl)))
- ClientCallbackUnaryImpl(call, context, request, response, reactor);
- }
-};
-
-} // namespace internal
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+class Channel;
+class ClientContext;
+
+namespace internal {
+class RpcMethod;
+
+/// Perform a callback-based unary call
+/// TODO(vjpai): Combine as much as possible with the blocking unary call code
+template <class InputMessage, class OutputMessage>
+void CallbackUnaryCall(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const InputMessage* request, OutputMessage* result,
+ std::function<void(::grpc::Status)> on_completion) {
+ CallbackUnaryCallImpl<InputMessage, OutputMessage> x(
+ channel, method, context, request, result, on_completion);
+}
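+
+// Editorial note: the following sketch is illustrative only and is not part of
+// the original header. It shows, assuming hypothetical EchoRequest/EchoResponse
+// message types and a pre-built RpcMethod, how a generated stub's callback
+// unary method typically just forwards its arguments to CallbackUnaryCall.
+#if 0  // illustrative sketch, intentionally not compiled
+void AsyncEcho(::grpc::ChannelInterface* channel,
+               const ::grpc::internal::RpcMethod& method,
+               ::grpc::ClientContext* context, const EchoRequest* request,
+               EchoResponse* response,
+               std::function<void(::grpc::Status)> on_done) {
+  CallbackUnaryCall(channel, method, context, request, response,
+                    std::move(on_done));
+}
+#endif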
+
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl {
+ public:
+ CallbackUnaryCallImpl(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const InputMessage* request, OutputMessage* result,
+ std::function<void(::grpc::Status)> on_completion) {
+ ::grpc::CompletionQueue* cq = channel->CallbackCQ();
+ GPR_CODEGEN_ASSERT(cq != nullptr);
+ grpc::internal::Call call(channel->CreateCall(method, context, cq));
+
+ using FullCallOpSet = grpc::internal::CallOpSet<
+ ::grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpRecvInitialMetadata,
+ grpc::internal::CallOpRecvMessage<OutputMessage>,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpClientRecvStatus>;
+
+ struct OpSetAndTag {
+ FullCallOpSet opset;
+ grpc::internal::CallbackWithStatusTag tag;
+ };
+ const size_t alloc_sz = sizeof(OpSetAndTag);
+ auto* const alloced = static_cast<OpSetAndTag*>(
+ ::grpc::g_core_codegen_interface->grpc_call_arena_alloc(call.call(),
+ alloc_sz));
+ auto* ops = new (&alloced->opset) FullCallOpSet;
+ auto* tag = new (&alloced->tag)
+ grpc::internal::CallbackWithStatusTag(call.call(), on_completion, ops);
+
+ // TODO(vjpai): Unify code with sync API as much as possible
+ ::grpc::Status s = ops->SendMessagePtr(request);
+ if (!s.ok()) {
+ tag->force_run(s);
+ return;
+ }
+ ops->SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ ops->RecvInitialMetadata(context);
+ ops->RecvMessage(result);
+ ops->AllowNoMessage();
+ ops->ClientSendClose();
+ ops->ClientRecvStatus(context, tag->status_ptr());
+ ops->set_core_cq_tag(tag);
+ call.PerformOps(ops);
+ }
+};
+
+// Base class for public API classes.
+class ClientReactor {
+ public:
+ /// Called by the library when all operations associated with this RPC have
+ /// completed and all Holds have been removed. OnDone provides the RPC status
+ /// outcome for both successful and failed RPCs. If it is never called on an
+ /// RPC, it indicates an application-level problem (like failure to remove a
+ /// hold).
+ ///
+ /// \param[in] s The status outcome of this RPC
+ virtual void OnDone(const ::grpc::Status& /*s*/) = 0;
+
+ /// InternalScheduleOnDone is not part of the API and is not meant to be
+ /// overridden. It is virtual to allow successful builds for certain bazel
+ /// build users that only want to depend on gRPC codegen headers and not the
+ /// full library (although this is not a generally-supported option). Although
+ /// the virtual call is slower than a direct call, this function is
+ /// heavyweight and the cost of the virtual call is not much in comparison.
+ /// This function may be removed or devirtualized in the future.
+ virtual void InternalScheduleOnDone(::grpc::Status s);
+};
+
+} // namespace internal
+
+// Forward declarations
+template <class Request, class Response>
+class ClientBidiReactor;
+template <class Response>
+class ClientReadReactor;
+template <class Request>
+class ClientWriteReactor;
+class ClientUnaryReactor;
+
+// NOTE: The streaming objects are not actually implemented in the public API.
+// These interfaces are provided for mocking only. Typical applications
+// will interact exclusively with the reactors that they define.
+template <class Request, class Response>
+class ClientCallbackReaderWriter {
+ public:
+ virtual ~ClientCallbackReaderWriter() {}
+ virtual void StartCall() = 0;
+ virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
+ virtual void WritesDone() = 0;
+ virtual void Read(Response* resp) = 0;
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientBidiReactor<Request, Response>* reactor) {
+ reactor->BindStream(this);
+ }
+};
+
+template <class Response>
+class ClientCallbackReader {
+ public:
+ virtual ~ClientCallbackReader() {}
+ virtual void StartCall() = 0;
+ virtual void Read(Response* resp) = 0;
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientReadReactor<Response>* reactor) {
+ reactor->BindReader(this);
+ }
+};
+
+template <class Request>
+class ClientCallbackWriter {
+ public:
+ virtual ~ClientCallbackWriter() {}
+ virtual void StartCall() = 0;
+ void Write(const Request* req) { Write(req, ::grpc::WriteOptions()); }
+ virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
+ void WriteLast(const Request* req, ::grpc::WriteOptions options) {
+ Write(req, options.set_last_message());
+ }
+ virtual void WritesDone() = 0;
+
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientWriteReactor<Request>* reactor) {
+ reactor->BindWriter(this);
+ }
+};
+
+class ClientCallbackUnary {
+ public:
+ virtual ~ClientCallbackUnary() {}
+ virtual void StartCall() = 0;
+
+ protected:
+ void BindReactor(ClientUnaryReactor* reactor);
+};
+
+// The following classes are the reactor interfaces that are to be implemented
+// by the user. They are passed in to the library as an argument to a call on a
+// stub (either a codegen-ed call or a generic call). The streaming RPC is
+// activated by calling StartCall, possibly after initiating StartRead,
+// StartWrite, or AddHold operations on the streaming object. Note that none of
+// the classes are pure; all reactions have a default empty implementation so
+// that the user class only needs to override the reactions that it cares
+// about (illustrative reactor sketches follow the class definitions below).
+// The reactor must be passed to the stub invocation before any of the below
+// operations can be called.
+
+/// \a ClientBidiReactor is the interface for a bidirectional streaming RPC.
+template <class Request, class Response>
+class ClientBidiReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientBidiReactor() {}
+
+ /// Activate the RPC and initiate any reads or writes that have been Start'ed
+ /// before this call. All streaming RPCs issued by the client MUST have
+ /// StartCall invoked on them (even if they are canceled) as this call is the
+ /// activation of their lifecycle.
+ void StartCall() { stream_->StartCall(); }
+
+ /// Initiate a read operation (or post it for later initiation if StartCall
+ /// has not yet been invoked).
+ ///
+ /// \param[out] resp Where to eventually store the read message. Valid when
+ /// the library calls OnReadDone
+ void StartRead(Response* resp) { stream_->Read(resp); }
+
+ /// Initiate a write operation (or post it for later initiation if StartCall
+ /// has not yet been invoked).
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ void StartWrite(const Request* req) {
+ StartWrite(req, ::grpc::WriteOptions());
+ }
+
+ /// Initiate/post a write operation with specified options.
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWrite(const Request* req, ::grpc::WriteOptions options) {
+ stream_->Write(req, std::move(options));
+ }
+
+ /// Initiate/post a write operation with specified options and an indication
+ /// that this is the last write (like StartWrite and StartWritesDone, merged).
+ /// Note that calling this means that no more calls to StartWrite,
+ /// StartWriteLast, or StartWritesDone are allowed.
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
+ StartWrite(req, std::move(options.set_last_message()));
+ }
+
+ /// Indicate that the RPC will have no more write operations. This can only be
+ /// issued once for a given RPC. This is not required or allowed if
+ /// StartWriteLast is used since that already has the same implication.
+ /// Note that calling this means that no more calls to StartWrite,
+ /// StartWriteLast, or StartWritesDone are allowed.
+ void StartWritesDone() { stream_->WritesDone(); }
+
+ /// Holds are needed if (and only if) this stream has operations that take
+ /// place on it after StartCall but from outside one of the reactions
+ /// (OnReadDone, etc). This is _not_ a common use of the streaming API.
+ ///
+ /// Holds must be added before calling StartCall. If a stream still has a hold
+ /// in place, its resources will not be destroyed even if the status has
+ /// already come in from the wire and there are currently no active callbacks
+ /// outstanding. Similarly, the stream will not call OnDone if there are still
+ /// holds on it.
+ ///
+ /// For example, if a StartRead or StartWrite operation is going to be
+ /// initiated from elsewhere in the application, the application should call
+ /// AddHold or AddMultipleHolds before StartCall. If there is going to be,
+ /// for example, a read-flow and a write-flow taking place outside the
+ /// reactions, then call AddMultipleHolds(2) before StartCall. When the
+ /// application knows that it won't issue any more read operations (such as
+ /// when a read comes back as not ok), it should issue a RemoveHold(). It
+ /// should also call RemoveHold() again after it does StartWriteLast or
+ /// StartWritesDone that indicates that there will be no more write ops.
+ /// The number of RemoveHold calls must match the total number of AddHold
+ /// calls plus the number of holds added by AddMultipleHolds.
+ /// The argument to AddMultipleHolds must be positive.
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ stream_->AddHold(holds);
+ }
+ void RemoveHold() { stream_->RemoveHold(); }
+
+ /// Notifies the application that all operations associated with this RPC
+ /// have completed and all Holds have been removed. OnDone provides the RPC
+ /// status outcome for both successful and failed RPCs and will be called in
+ /// all cases. If it is not called, it indicates an application-level problem
+ /// (like failure to remove a hold).
+ ///
+ /// \param[in] s The status outcome of this RPC
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+
+ /// Notifies the application that a read of initial metadata from the
+ /// server is done. If the application chooses not to implement this method,
+ /// it can assume that the initial metadata has been read before the first
+ /// call of OnReadDone or OnDone.
+ ///
+ /// \param[in] ok Was the initial metadata read successfully? If false, no
+ /// new read/write operation will succeed, and any further
+ /// Start* operations should not be called.
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartRead operation completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no new read/write operation
+ /// will succeed, and any further Start* should not be called.
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWrite or StartWriteLast operation
+ /// completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no new read/write operation
+ /// will succeed, and any further Start* should not be called.
+ virtual void OnWriteDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWritesDone operation completed. Note
+ /// that this is only used on explicit StartWritesDone operations and not for
+ /// those that are implicitly invoked as part of a StartWriteLast.
+ ///
+ /// \param[in] ok Was it successful? If false, the application will later see
+ /// the failure reflected as a bad status in OnDone and no
+ /// further Start* should be called.
+ virtual void OnWritesDoneDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackReaderWriter<Request, Response>;
+ void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) {
+ stream_ = stream;
+ }
+ ClientCallbackReaderWriter<Request, Response>* stream_;
+};
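+
+// Editorial note: an illustrative sketch (not part of the original header) of
+// a user-defined bidirectional reactor. EchoRequest/EchoResponse and the
+// stub->async()->BidiEcho(...) invocation are hypothetical names; the general
+// pattern is: construct the reactor, pass it to the stub call (which binds the
+// underlying stream), issue any initial Start* operations, then StartCall.
+#if 0  // illustrative sketch, intentionally not compiled
+class EchoBidiReactor : public ClientBidiReactor<EchoRequest, EchoResponse> {
+ public:
+  // Call only after the stub invocation has bound this reactor to a stream.
+  void Begin() {
+    request_.set_message("hello");  // hypothetical setter
+    StartWrite(&request_);
+    StartRead(&response_);
+    StartCall();
+  }
+  void OnWriteDone(bool ok) override {
+    if (ok) StartWritesDone();  // no further writes in this example
+  }
+  void OnReadDone(bool ok) override {
+    if (ok) StartRead(&response_);  // keep reading until the stream ends
+  }
+  void OnDone(const ::grpc::Status& /*s*/) override {
+    // All operations and holds have completed; the reactor may be destroyed.
+  }
+ private:
+  EchoRequest request_;
+  EchoResponse response_;
+};
+// Typical use (hypothetical stub API); the application must keep the reactor
+// alive until OnDone fires:
+//   EchoBidiReactor reactor;
+//   stub->async()->BidiEcho(&context, &reactor);  // binds the reactor
+//   reactor.Begin();
+#endif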
+
+/// \a ClientReadReactor is the interface for a server-streaming RPC.
+/// All public methods behave as in ClientBidiReactor.
+template <class Response>
+class ClientReadReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientReadReactor() {}
+
+ void StartCall() { reader_->StartCall(); }
+ void StartRead(Response* resp) { reader_->Read(resp); }
+
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ reader_->AddHold(holds);
+ }
+ void RemoveHold() { reader_->RemoveHold(); }
+
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackReader<Response>;
+ void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; }
+ ClientCallbackReader<Response>* reader_;
+};
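+
+// Editorial note: an illustrative sketch (not part of the original header) of
+// a server-streaming (read) reactor; the Feature message type is hypothetical.
+#if 0  // illustrative sketch, intentionally not compiled
+class ListFeaturesReactor : public ClientReadReactor<Feature> {
+ public:
+  void Begin() {
+    StartRead(&feature_);
+    StartCall();
+  }
+  void OnReadDone(bool ok) override {
+    if (ok) StartRead(&feature_);  // request the next message
+  }
+  void OnDone(const ::grpc::Status& /*s*/) override {}
+ private:
+  Feature feature_;
+};
+#endif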
+
+/// \a ClientWriteReactor is the interface for a client-streaming RPC.
+/// All public methods behave as in ClientBidiReactor.
+template <class Request>
+class ClientWriteReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientWriteReactor() {}
+
+ void StartCall() { writer_->StartCall(); }
+ void StartWrite(const Request* req) {
+ StartWrite(req, ::grpc::WriteOptions());
+ }
+ void StartWrite(const Request* req, ::grpc::WriteOptions options) {
+ writer_->Write(req, std::move(options));
+ }
+ void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
+ StartWrite(req, std::move(options.set_last_message()));
+ }
+ void StartWritesDone() { writer_->WritesDone(); }
+
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ writer_->AddHold(holds);
+ }
+ void RemoveHold() { writer_->RemoveHold(); }
+
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnWriteDone(bool /*ok*/) {}
+ virtual void OnWritesDoneDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackWriter<Request>;
+ void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; }
+
+ ClientCallbackWriter<Request>* writer_;
+};
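+
+// Editorial note: an illustrative sketch (not part of the original header) of
+// a client-streaming (write) reactor whose writes are driven from outside the
+// reactions, which is exactly the case that requires a hold. The Chunk message
+// type is hypothetical.
+#if 0  // illustrative sketch, intentionally not compiled
+class UploadReactor : public ClientWriteReactor<Chunk> {
+ public:
+  void Begin() {
+    AddHold();    // writes will be issued from application threads
+    StartCall();
+  }
+  // Called from application code; at most one write may be outstanding.
+  void SendChunk(const Chunk* chunk, bool last) {
+    if (last) {
+      StartWriteLast(chunk, ::grpc::WriteOptions());
+      RemoveHold();  // no further operations will be started externally
+    } else {
+      StartWrite(chunk);
+    }
+  }
+  void OnWriteDone(bool /*ok*/) override {}
+  void OnDone(const ::grpc::Status& /*s*/) override {}
+};
+#endif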
+
+/// \a ClientUnaryReactor is a reactor-style interface for a unary RPC.
+/// This is _not_ a common way of invoking a unary RPC. In practice, this
+/// option should be used only if the unary RPC wants to receive initial
+/// metadata without waiting for the response to complete. Most deployments of
+/// RPC systems do not use this option, but it is needed for generality.
+/// All public methods behave as in ClientBidiReactor.
+/// StartCall is included for consistency with the other reactor flavors: even
+/// though there are no StartRead or StartWrite operations to queue before the
+/// call (that is part of the unary call itself) and there is no reactor object
+/// being created as a result of this call, we keep a consistent 2-phase
+/// initiation API among all the reactor flavors.
+class ClientUnaryReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientUnaryReactor() {}
+
+ void StartCall() { call_->StartCall(); }
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackUnary;
+ void BindCall(ClientCallbackUnary* call) { call_ = call; }
+ ClientCallbackUnary* call_;
+};
+
+// Define function out-of-line from class to avoid forward declaration issue
+inline void ClientCallbackUnary::BindReactor(ClientUnaryReactor* reactor) {
+ reactor->BindCall(this);
+}
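+
+// Editorial note: an illustrative sketch (not part of the original header) of
+// a unary reactor used to observe initial metadata before the call completes;
+// message and stub names are hypothetical.
+#if 0  // illustrative sketch, intentionally not compiled
+class EchoUnaryReactor : public ClientUnaryReactor {
+ public:
+  void Begin() { StartCall(); }
+  void OnReadInitialMetadataDone(bool ok) override {
+    // If ok, initial metadata can now be read from the ClientContext that was
+    // used to start this call.
+  }
+  void OnDone(const ::grpc::Status& /*s*/) override {}
+};
+// Typical use (hypothetical stub API):
+//   stub->async()->Echo(&context, &request, &response, &reactor);
+//   reactor.Begin();
+#endif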
+
+namespace internal {
+
+// Forward declare factory classes for friendship
+template <class Request, class Response>
+class ClientCallbackReaderWriterFactory;
+template <class Response>
+class ClientCallbackReaderFactory;
+template <class Request>
+class ClientCallbackWriterFactory;
+
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl
+ : public ClientCallbackReaderWriter<Request, Response> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderWriterImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any read backlog
+ // 3. Any write backlog
+ // 4. Recv trailing metadata (unless corked)
+ if (!start_corked_) {
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+
+ call_.PerformOps(&start_ops_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+
+ if (backlog_.read_ops) {
+ call_.PerformOps(&read_ops_);
+ }
+ if (backlog_.write_ops) {
+ call_.PerformOps(&write_ops_);
+ }
+ if (backlog_.writes_done_ops) {
+ call_.PerformOps(&writes_done_ops_);
+ }
+ call_.PerformOps(&finish_ops_);
+ // The last thing in this critical section is to set started_ so that it
+ // can be used lock-free as well.
+ started_.store(true, std::memory_order_release);
+ }
+ // MaybeFinish outside the lock to make sure that destruction of this object
+ // doesn't take place while holding the lock (which would cause the lock to
+ // be released after destruction)
+ this->MaybeFinish(/*from_reaction=*/false);
+ }
+
+ void Read(Response* msg) override {
+ read_ops_.RecvMessage(msg);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.read_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const Request* msg, ::grpc::WriteOptions options) override {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.write_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&write_ops_);
+ }
+ void WritesDone() override {
+ writes_done_ops_.ClientSendClose();
+ writes_done_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWritesDoneDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &writes_done_ops_, /*can_inline=*/false);
+ writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.writes_done_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&writes_done_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackReaderWriterFactory<Request, Response>;
+
+ ClientCallbackReaderWriterImpl(grpc::internal::Call call,
+ ::grpc::ClientContext* context,
+ ClientBidiReactor<Request, Response>* reactor)
+ : context_(context),
+ call_(call),
+ reactor_(reactor),
+ start_corked_(context_->initial_metadata_corked_),
+ corked_write_needed_(start_corked_) {
+ this->BindReactor(reactor);
+
+ // Set up the unchanging parts of the start, read, and write tags and ops.
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+
+ write_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWriteDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
+ write_ops_.set_core_cq_tag(&write_tag_);
+
+ read_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &read_ops_, /*can_inline=*/false);
+ read_ops_.set_core_cq_tag(&read_tag_);
+
+ // Also set up the Finish tag and op set.
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ }
+
+ // MaybeFinish can be called from reactions or from user-initiated operations
+ // like StartCall or RemoveHold. If this is the last operation or hold on this
+ // object, it will invoke the OnDone reaction. If MaybeFinish was called from
+ // a reaction, it can call OnDone directly. If not, it would need to schedule
+ // OnDone onto an executor thread to avoid the possibility of deadlocking with
+ // any locks in the user code that invoked it.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackReaderWriterImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientBidiReactor<Request, Response>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+ const bool start_corked_;
+ bool corked_write_needed_; // no lock needed since only accessed in
+ // Write/WritesDone which cannot be concurrent
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ grpc::internal::CallbackWithSuccessTag write_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpClientSendClose>
+ writes_done_ops_;
+ grpc::internal::CallbackWithSuccessTag writes_done_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
+ read_ops_;
+ grpc::internal::CallbackWithSuccessTag read_tag_;
+
+ struct StartCallBacklog {
+ bool write_ops = false;
+ bool writes_done_ops = false;
+ bool read_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
+ std::atomic<intptr_t> callbacks_outstanding_{3};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Request, class Response>
+class ClientCallbackReaderWriterFactory {
+ public:
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ ClientBidiReactor<Request, Response>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackReaderWriterImpl<Request, Response>)))
+ ClientCallbackReaderWriterImpl<Request, Response>(call, context,
+ reactor);
+ }
+};
+
+template <class Response>
+class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any backlog
+ // 3. Recv trailing metadata
+
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+ call_.PerformOps(&start_ops_);
+
+ // Also set up the read tag so it doesn't have to be set up each time
+ read_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &read_ops_, /*can_inline=*/false);
+ read_ops_.set_core_cq_tag(&read_tag_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (backlog_.read_ops) {
+ call_.PerformOps(&read_ops_);
+ }
+ started_.store(true, std::memory_order_release);
+ }
+
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_, /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ void Read(Response* msg) override {
+ read_ops_.RecvMessage(msg);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.read_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&read_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackReaderFactory<Response>;
+
+ template <class Request>
+ ClientCallbackReaderImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Request* request,
+ ClientReadReactor<Response>* reactor)
+ : context_(context), call_(call), reactor_(reactor) {
+ this->BindReactor(reactor);
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
+ start_ops_.ClientSendClose();
+ }
+
+ // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackReaderImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientReadReactor<Response>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
+ read_ops_;
+ grpc::internal::CallbackWithSuccessTag read_tag_;
+
+ struct StartCallBacklog {
+ bool read_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 2 callbacks to pre-register for start and finish
+ std::atomic<intptr_t> callbacks_outstanding_{2};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Response>
+class ClientCallbackReaderFactory {
+ public:
+ template <class Request>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const Request* request,
+ ClientReadReactor<Response>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackReaderImpl<Response>)))
+ ClientCallbackReaderImpl<Response>(call, context, request, reactor);
+ }
+};
+
+template <class Request>
+class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackWriterImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any backlog
+ // 3. Recv trailing metadata
+
+ if (!start_corked_) {
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+ call_.PerformOps(&start_ops_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+
+ if (backlog_.write_ops) {
+ call_.PerformOps(&write_ops_);
+ }
+ if (backlog_.writes_done_ops) {
+ call_.PerformOps(&writes_done_ops_);
+ }
+ call_.PerformOps(&finish_ops_);
+ // The last thing in this critical section is to set started_ so that it
+ // can be used lock-free as well.
+ started_.store(true, std::memory_order_release);
+ }
+ // MaybeFinish outside the lock to make sure that destruction of this object
+ // doesn't take place while holding the lock (which would cause the lock to
+ // be released after destruction)
+ this->MaybeFinish(/*from_reaction=*/false);
+ }
+
+ void Write(const Request* msg, ::grpc::WriteOptions options) override {
+ if (GPR_UNLIKELY(options.is_last_message())) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.write_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone() override {
+ writes_done_ops_.ClientSendClose();
+ writes_done_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWritesDoneDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &writes_done_ops_, /*can_inline=*/false);
+ writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.writes_done_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&writes_done_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackWriterFactory<Request>;
+
+ template <class Response>
+ ClientCallbackWriterImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Response* response,
+ ClientWriteReactor<Request>* reactor)
+ : context_(context),
+ call_(call),
+ reactor_(reactor),
+ start_corked_(context_->initial_metadata_corked_),
+ corked_write_needed_(start_corked_) {
+ this->BindReactor(reactor);
+
+ // Set up the unchanging parts of the start and write tags and ops.
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+
+ write_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWriteDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
+ write_ops_.set_core_cq_tag(&write_tag_);
+
+ // Also set up the Finish tag and op set.
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ }
+
+ // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackWriterImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientWriteReactor<Request>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+ const bool start_corked_;
+ bool corked_write_needed_; // no lock needed since only accessed in
+ // Write/WritesDone which cannot be concurrent
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
+ grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ grpc::internal::CallbackWithSuccessTag write_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpClientSendClose>
+ writes_done_ops_;
+ grpc::internal::CallbackWithSuccessTag writes_done_tag_;
+
+ struct StartCallBacklog {
+ bool write_ops = false;
+ bool writes_done_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
+ std::atomic<intptr_t> callbacks_outstanding_{3};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Request>
+class ClientCallbackWriterFactory {
+ public:
+ template <class Response>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, Response* response,
+ ClientWriteReactor<Request>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackWriterImpl<Request>)))
+ ClientCallbackWriterImpl<Request>(call, context, response, reactor);
+ }
+};
+
+class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackUnaryImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, each with a callback
+ // 1. Send initial metadata + write + writes done + recv initial metadata
+ // 2. Read message, recv trailing metadata
+
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish();
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+ call_.PerformOps(&start_ops_);
+
+ finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class ClientCallbackUnaryFactory;
+
+ template <class Request, class Response>
+ ClientCallbackUnaryImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Request* request,
+ Response* response, ClientUnaryReactor* reactor)
+ : context_(context), call_(call), reactor_(reactor) {
+ this->BindReactor(reactor);
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
+ start_ops_.ClientSendClose();
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ }
+
+ // In the unary case, MaybeFinish is only ever invoked from a
+ // library-initiated reaction, so it will just directly call OnDone if this is
+ // the last reaction for this RPC.
+ void MaybeFinish() {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackUnaryImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ reactor->OnDone(s);
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientUnaryReactor* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
+ grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ // This call will have 2 callbacks: start and finish
+ std::atomic<intptr_t> callbacks_outstanding_{2};
+};
+
+class ClientCallbackUnaryFactory {
+ public:
+ template <class Request, class Response>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const Request* request,
+ Response* response, ClientUnaryReactor* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackUnaryImpl)))
+ ClientCallbackUnaryImpl(call, context, request, response, reactor);
+ }
+};
+
+} // namespace internal
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
template <class Response>
-using ClientCallbackReader = ::grpc::ClientCallbackReader<Response>;
+using ClientCallbackReader = ::grpc::ClientCallbackReader<Response>;
template <class Request>
-using ClientCallbackWriter = ::grpc::ClientCallbackWriter<Request>;
+using ClientCallbackWriter = ::grpc::ClientCallbackWriter<Request>;
template <class Request, class Response>
using ClientCallbackReaderWriter =
- ::grpc::ClientCallbackReaderWriter<Request, Response>;
+ ::grpc::ClientCallbackReaderWriter<Request, Response>;
template <class Response>
-using ClientReadReactor = ::grpc::ClientReadReactor<Response>;
+using ClientReadReactor = ::grpc::ClientReadReactor<Response>;
template <class Request>
-using ClientWriteReactor = ::grpc::ClientWriteReactor<Request>;
+using ClientWriteReactor = ::grpc::ClientWriteReactor<Request>;
template <class Request, class Response>
-using ClientBidiReactor = ::grpc::ClientBidiReactor<Request, Response>;
+using ClientBidiReactor = ::grpc::ClientBidiReactor<Request, Response>;
+
+typedef ::grpc::ClientUnaryReactor ClientUnaryReactor;
-typedef ::grpc::ClientUnaryReactor ClientUnaryReactor;
-
} // namespace experimental
-
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
index 85be2853ef..a4e58f34c5 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,509 +16,509 @@
*
*/
-/// A ClientContext allows the person implementing a service client to:
-///
-/// - Add custom metadata key-value pairs that will propagated to the server
-/// side.
-/// - Control call settings such as compression and authentication.
-/// - Initial and trailing metadata coming from the server.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call they are invoked with, that
-/// is to say, they aren't sticky. Some of these settings, such as the
-/// compression options, can be made persistent at channel construction time
-/// (see \a grpc::CreateCustomChannel).
-///
-/// \warning ClientContext instances should \em not be reused across rpcs.
-
+/// A ClientContext allows the person implementing a service client to:
+///
+/// - Add custom metadata key-value pairs that will propagated to the server
+/// side.
+/// - Control call settings such as compression and authentication.
+/// - Initial and trailing metadata coming from the server.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call they are invoked with, that
+/// is to say, they aren't sticky. Some of these settings, such as the
+/// compression options, can be made persistent at channel construction time
+/// (see \a grpc::CreateCustomChannel).
+///
+/// \warning ClientContext instances should \em not be reused across rpcs.
+
#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
#define GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
-#include <map>
-#include <memory>
-#include <util/generic/string.h>
-
-#include <grpc/impl/codegen/compression_types.h>
-#include <grpc/impl/codegen/propagation_bits.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/create_auth_context.h>
-#include <grpcpp/impl/codegen/metadata_map.h>
-#include <grpcpp/impl/codegen/rpc_method.h>
-#include <grpcpp/impl/codegen/security/auth_context.h>
-#include <grpcpp/impl/codegen/slice.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/string_ref.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct census_context;
-struct grpc_call;
-
+#include <map>
+#include <memory>
+#include <util/generic/string.h>
+
+#include <grpc/impl/codegen/compression_types.h>
+#include <grpc/impl/codegen/propagation_bits.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/create_auth_context.h>
+#include <grpcpp/impl/codegen/metadata_map.h>
+#include <grpcpp/impl/codegen/rpc_method.h>
+#include <grpcpp/impl/codegen/security/auth_context.h>
+#include <grpcpp/impl/codegen/slice.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/string_ref.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct census_context;
+struct grpc_call;
+
namespace grpc {
-class ServerContext;
-class ServerContextBase;
-class CallbackServerContext;
-
-namespace internal {
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl;
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl;
-template <class Response>
-class ClientCallbackReaderImpl;
-template <class Request>
-class ClientCallbackWriterImpl;
-class ClientCallbackUnaryImpl;
-class ClientContextAccessor;
-} // namespace internal
-
-template <class R>
-class ClientReader;
-template <class W>
-class ClientWriter;
-template <class W, class R>
-class ClientReaderWriter;
-template <class R>
-class ClientAsyncReader;
-template <class W>
-class ClientAsyncWriter;
-template <class W, class R>
-class ClientAsyncReaderWriter;
-template <class R>
-class ClientAsyncResponseReader;
-
-namespace testing {
-class InteropClientContextInspector;
-} // namespace testing
-
-namespace internal {
-class RpcMethod;
-template <class InputMessage, class OutputMessage>
-class BlockingUnaryCallImpl;
-class CallOpClientRecvStatus;
-class CallOpRecvInitialMetadata;
-class ServerContextImpl;
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl;
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl;
-template <class Response>
-class ClientCallbackReaderImpl;
-template <class Request>
-class ClientCallbackWriterImpl;
-class ClientCallbackUnaryImpl;
-class ClientContextAccessor;
-} // namespace internal
-
-class CallCredentials;
-class Channel;
-class ChannelInterface;
-class CompletionQueue;
-
-/// Options for \a ClientContext::FromServerContext specifying which traits from
-/// the \a ServerContext to propagate (copy) from it into a new \a
-/// ClientContext.
-///
-/// \see ClientContext::FromServerContext
-class PropagationOptions {
- public:
- PropagationOptions() : propagate_(GRPC_PROPAGATE_DEFAULTS) {}
-
- PropagationOptions& enable_deadline_propagation() {
- propagate_ |= GRPC_PROPAGATE_DEADLINE;
- return *this;
- }
-
- PropagationOptions& disable_deadline_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_DEADLINE;
- return *this;
- }
-
- PropagationOptions& enable_census_stats_propagation() {
- propagate_ |= GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
- return *this;
- }
-
- PropagationOptions& disable_census_stats_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
- return *this;
- }
-
- PropagationOptions& enable_census_tracing_propagation() {
- propagate_ |= GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
- return *this;
- }
-
- PropagationOptions& disable_census_tracing_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
- return *this;
- }
-
- PropagationOptions& enable_cancellation_propagation() {
- propagate_ |= GRPC_PROPAGATE_CANCELLATION;
- return *this;
- }
-
- PropagationOptions& disable_cancellation_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CANCELLATION;
- return *this;
- }
-
- uint32_t c_bitmask() const { return propagate_; }
-
- private:
- uint32_t propagate_;
-};
-
-/// A ClientContext allows the person implementing a service client to:
-///
-/// - Add custom metadata key-value pairs that will be propagated to the server
-/// side.
-/// - Control call settings such as compression and authentication.
-/// - Access initial and trailing metadata coming from the server.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call they are invoked with, that
-/// is to say, they aren't sticky. Some of these settings, such as the
-/// compression options, can be made persistent at channel construction time
-/// (see \a grpc::CreateCustomChannel).
-///
-/// \warning ClientContext instances should \em not be reused across rpcs.
-/// \warning The ClientContext instance used for creating an rpc must remain
-/// alive and valid for the lifetime of the rpc.
-class ClientContext {
- public:
- ClientContext();
- ~ClientContext();
-
- /// Create a new \a ClientContext as a child of an incoming server call,
- /// according to \a options (\see PropagationOptions).
- ///
- /// \param server_context The source server context to use as the basis for
- /// constructing the client context.
- /// \param options The options controlling what to copy from the \a
- /// server_context.
- ///
- /// \return A newly constructed \a ClientContext instance based on \a
- /// server_context, with traits propagated (copied) according to \a options.
- static std::unique_ptr<ClientContext> FromServerContext(
- const grpc::ServerContext& server_context,
- PropagationOptions options = PropagationOptions());
- static std::unique_ptr<ClientContext> FromCallbackServerContext(
- const grpc::CallbackServerContext& server_context,
- PropagationOptions options = PropagationOptions());
-
- /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with
- /// a client call. These are made available at the server side by the \a
- /// grpc::ServerContext::client_metadata() method.
- ///
- /// \warning This method should only be called before invoking the rpc.
- ///
- /// \param meta_key The metadata key. If \a meta_value is binary data, it must
- /// end in "-bin".
- /// \param meta_value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddMetadata(const TString& meta_key, const TString& meta_value);
-
- /// Return a collection of initial metadata key-value pairs. Note that keys
- /// may happen more than once (ie, a \a std::multimap is returned).
- ///
- /// \warning This method should only be called after initial metadata has been
- /// received. For streaming calls, see \a
- /// ClientReaderInterface::WaitForInitialMetadata().
- ///
- /// \return A multimap of initial metadata key-value pairs from the server.
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- GetServerInitialMetadata() const {
- GPR_CODEGEN_ASSERT(initial_metadata_received_);
- return *recv_initial_metadata_.map();
- }
-
- /// Return a collection of trailing metadata key-value pairs. Note that keys
- /// may happen more than once (ie, a \a std::multimap is returned).
- ///
- /// \warning This method is only callable once the stream has finished.
- ///
- /// \return A multimap of metadata trailing key-value pairs from the server.
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- GetServerTrailingMetadata() const {
- // TODO(yangg) check finished
- return *trailing_metadata_.map();
- }
-
- /// Set the deadline for the client call.
- ///
- /// \warning This method should only be called before invoking the rpc.
- ///
- /// \param deadline the deadline for the client call. Units are determined by
- /// the type used. The deadline is an absolute (not relative) time.
- template <typename T>
- void set_deadline(const T& deadline) {
- grpc::TimePoint<T> deadline_tp(deadline);
- deadline_ = deadline_tp.raw_time();
- }
-
- /// EXPERIMENTAL: Indicate that this request is idempotent.
- /// By default, RPCs are assumed to <i>not</i> be idempotent.
- ///
- /// If true, the gRPC library assumes that it's safe to initiate
- /// this RPC multiple times.
- void set_idempotent(bool idempotent) { idempotent_ = idempotent; }
-
- /// EXPERIMENTAL: Set this request to be cacheable.
- /// If set, grpc is free to use the HTTP GET verb for sending the request,
- /// with the possibility of receiving a cached response.
- void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
-
- /// EXPERIMENTAL: Trigger wait-for-ready or not on this request.
- /// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
- /// If set, if an RPC is made when a channel's connectivity state is
- /// TRANSIENT_FAILURE or CONNECTING, the call will not "fail fast",
- /// and the channel will wait until the channel is READY before making the
- /// call.
- void set_wait_for_ready(bool wait_for_ready) {
- wait_for_ready_ = wait_for_ready;
- wait_for_ready_explicitly_set_ = true;
- }
-
- /// DEPRECATED: Use set_wait_for_ready() instead.
- void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
-
- /// Return the deadline for the client call.
- std::chrono::system_clock::time_point deadline() const {
- return grpc::Timespec2Timepoint(deadline_);
- }
-
- /// Return a \a gpr_timespec representation of the client call's deadline.
- gpr_timespec raw_deadline() const { return deadline_; }
-
- /// Set the per call authority header (see
- /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3).
- void set_authority(const TString& authority) { authority_ = authority; }
-
- /// Return the authentication context for the associated client call.
- /// It is only valid to call this during the lifetime of the client call.
- ///
- /// \see grpc::AuthContext.
- std::shared_ptr<const grpc::AuthContext> auth_context() const {
- if (auth_context_.get() == nullptr) {
- auth_context_ = grpc::CreateAuthContext(call_);
- }
- return auth_context_;
- }
-
- /// Set credentials for the client call.
- ///
- /// A credentials object encapsulates all the state needed by a client to
- /// authenticate with a server and make various assertions, e.g., about the
- /// client’s identity, role, or whether it is authorized to make a particular
- /// call.
- ///
- /// It is legal to call this only before initial metadata is sent.
- ///
- /// \see https://grpc.io/docs/guides/auth.html
- void set_credentials(const std::shared_ptr<grpc::CallCredentials>& creds);
-
- /// EXPERIMENTAL debugging API
- ///
- /// Returns the credentials for the client call. This should be used only in
- /// tests and for diagnostic purposes, and should not be used by application
- /// logic.
- std::shared_ptr<grpc::CallCredentials> credentials() { return creds_; }
-
- /// Return the compression algorithm the client call will request be used.
- /// Note that the gRPC runtime may decide to ignore this request, for example,
- /// due to resource constraints.
- grpc_compression_algorithm compression_algorithm() const {
- return compression_algorithm_;
- }
-
- /// Set \a algorithm to be the compression algorithm used for the client call.
- ///
- /// \param algorithm The compression algorithm used for the client call.
- void set_compression_algorithm(grpc_compression_algorithm algorithm);
-
- /// Flag whether the initial metadata should be \a corked
- ///
- /// If \a corked is true, then the initial metadata will be coalesced with the
- /// write of the first message in the stream. As a result, any tag set for the
- /// initial metadata operation (starting a client-streaming or bidi-streaming
- /// RPC) will not actually be sent to the completion queue or delivered
- /// via Next.
- ///
- /// \param corked The flag indicating whether the initial metadata is to be
- /// corked or not.
- void set_initial_metadata_corked(bool corked) {
- initial_metadata_corked_ = corked;
- }
-
- /// Return the peer uri in a string.
- /// It is only valid to call this during the lifetime of the client call.
- ///
- /// \warning This value is never authenticated or subject to any security
- /// related code. It must not be used for any authentication related
- /// functionality. Instead, use auth_context.
- ///
- /// \return The call's peer URI.
- TString peer() const;
-
- /// Sets the census context.
- /// It is only valid to call this before the client call is created. A common
- /// place of setting census context is from within the DefaultConstructor
- /// method of GlobalCallbacks.
- void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
-
- /// Returns the census context that has been set, or nullptr if not set.
- struct census_context* census_context() const {
- return census_context_;
- }
-
- /// Send a best-effort out-of-band cancel on the call associated with
- /// this client context. The call could be in any stage; e.g., if it is
- /// already finished, it may still return success.
- ///
- /// There is no guarantee the call will be cancelled.
- ///
- /// Note that TryCancel() does not change any of the tags that are pending
- /// on the completion queue. All pending tags will still be delivered
- /// (though their ok result may reflect the effect of cancellation).
- void TryCancel();
-
- /// Global Callbacks
- ///
- /// Can be set exactly once per application to install hooks whenever
- /// a client context is constructed and destructed.
- class GlobalCallbacks {
- public:
- virtual ~GlobalCallbacks() {}
- virtual void DefaultConstructor(ClientContext* context) = 0;
- virtual void Destructor(ClientContext* context) = 0;
- };
- static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
-
- /// Should be used for framework-level extensions only.
- /// Applications never need to call this method.
- grpc_call* c_call() { return call_; }
-
- /// EXPERIMENTAL debugging API
- ///
- /// If status is not ok() for an RPC, this will return a detailed string
- /// of the gRPC Core error that led to the failure. It should not be relied
- /// upon for anything other than gaining more debug data in failure cases.
- TString debug_error_string() const { return debug_error_string_; }
-
- private:
- // Disallow copy and assign.
- ClientContext(const ClientContext&);
- ClientContext& operator=(const ClientContext&);
-
- friend class ::grpc::testing::InteropClientContextInspector;
- friend class ::grpc::internal::CallOpClientRecvStatus;
- friend class ::grpc::internal::CallOpRecvInitialMetadata;
- friend class ::grpc::Channel;
- template <class R>
- friend class ::grpc::ClientReader;
- template <class W>
- friend class ::grpc::ClientWriter;
- template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
- template <class R>
- friend class ::grpc::ClientAsyncReader;
- template <class W>
- friend class ::grpc::ClientAsyncWriter;
- template <class W, class R>
- friend class ::grpc::ClientAsyncReaderWriter;
- template <class R>
- friend class ::grpc::ClientAsyncResponseReader;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::BlockingUnaryCallImpl;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::CallbackUnaryCallImpl;
- template <class Request, class Response>
- friend class ::grpc::internal::ClientCallbackReaderWriterImpl;
- template <class Response>
- friend class ::grpc::internal::ClientCallbackReaderImpl;
- template <class Request>
- friend class ::grpc::internal::ClientCallbackWriterImpl;
- friend class ::grpc::internal::ClientCallbackUnaryImpl;
- friend class ::grpc::internal::ClientContextAccessor;
-
- // Used by friend class CallOpClientRecvStatus
- void set_debug_error_string(const TString& debug_error_string) {
- debug_error_string_ = debug_error_string;
- }
-
- grpc_call* call() const { return call_; }
- void set_call(grpc_call* call,
- const std::shared_ptr<::grpc::Channel>& channel);
-
- grpc::experimental::ClientRpcInfo* set_client_rpc_info(
- const char* method, grpc::internal::RpcMethod::RpcType type,
- grpc::ChannelInterface* channel,
- const std::vector<std::unique_ptr<
- grpc::experimental::ClientInterceptorFactoryInterface>>& creators,
- size_t interceptor_pos) {
- rpc_info_ = grpc::experimental::ClientRpcInfo(this, type, method, channel);
- rpc_info_.RegisterInterceptors(creators, interceptor_pos);
- return &rpc_info_;
- }
-
- uint32_t initial_metadata_flags() const {
- return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) |
- (wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) |
- (cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) |
- (wait_for_ready_explicitly_set_
- ? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
- : 0) |
- (initial_metadata_corked_ ? GRPC_INITIAL_METADATA_CORKED : 0);
- }
-
- TString authority() { return authority_; }
-
- void SendCancelToInterceptors();
-
- static std::unique_ptr<ClientContext> FromInternalServerContext(
- const grpc::ServerContextBase& server_context,
- PropagationOptions options);
-
- bool initial_metadata_received_;
- bool wait_for_ready_;
- bool wait_for_ready_explicitly_set_;
- bool idempotent_;
- bool cacheable_;
- std::shared_ptr<::grpc::Channel> channel_;
- grpc::internal::Mutex mu_;
- grpc_call* call_;
- bool call_canceled_;
- gpr_timespec deadline_;
- grpc::string authority_;
- std::shared_ptr<grpc::CallCredentials> creds_;
- mutable std::shared_ptr<const grpc::AuthContext> auth_context_;
- struct census_context* census_context_;
- std::multimap<TString, TString> send_initial_metadata_;
- mutable grpc::internal::MetadataMap recv_initial_metadata_;
- mutable grpc::internal::MetadataMap trailing_metadata_;
-
- grpc_call* propagate_from_call_;
- PropagationOptions propagation_options_;
-
- grpc_compression_algorithm compression_algorithm_;
- bool initial_metadata_corked_;
-
- TString debug_error_string_;
-
- grpc::experimental::ClientRpcInfo rpc_info_;
-};
-
+class ServerContext;
+class ServerContextBase;
+class CallbackServerContext;
+
+namespace internal {
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl;
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl;
+template <class Response>
+class ClientCallbackReaderImpl;
+template <class Request>
+class ClientCallbackWriterImpl;
+class ClientCallbackUnaryImpl;
+class ClientContextAccessor;
+} // namespace internal
+
+template <class R>
+class ClientReader;
+template <class W>
+class ClientWriter;
+template <class W, class R>
+class ClientReaderWriter;
+template <class R>
+class ClientAsyncReader;
+template <class W>
+class ClientAsyncWriter;
+template <class W, class R>
+class ClientAsyncReaderWriter;
+template <class R>
+class ClientAsyncResponseReader;
+
+namespace testing {
+class InteropClientContextInspector;
+} // namespace testing
+
+namespace internal {
+class RpcMethod;
+template <class InputMessage, class OutputMessage>
+class BlockingUnaryCallImpl;
+class CallOpClientRecvStatus;
+class CallOpRecvInitialMetadata;
+class ServerContextImpl;
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl;
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl;
+template <class Response>
+class ClientCallbackReaderImpl;
+template <class Request>
+class ClientCallbackWriterImpl;
+class ClientCallbackUnaryImpl;
+class ClientContextAccessor;
+} // namespace internal
+
+class CallCredentials;
+class Channel;
+class ChannelInterface;
+class CompletionQueue;
+
+/// Options for \a ClientContext::FromServerContext specifying which traits from
+/// the \a ServerContext to propagate (copy) from it into a new \a
+/// ClientContext.
+///
+/// \see ClientContext::FromServerContext
+class PropagationOptions {
+ public:
+ PropagationOptions() : propagate_(GRPC_PROPAGATE_DEFAULTS) {}
+
+ PropagationOptions& enable_deadline_propagation() {
+ propagate_ |= GRPC_PROPAGATE_DEADLINE;
+ return *this;
+ }
+
+ PropagationOptions& disable_deadline_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_DEADLINE;
+ return *this;
+ }
+
+ PropagationOptions& enable_census_stats_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& disable_census_stats_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& enable_census_tracing_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& disable_census_tracing_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& enable_cancellation_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CANCELLATION;
+ return *this;
+ }
+
+ PropagationOptions& disable_cancellation_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CANCELLATION;
+ return *this;
+ }
+
+ uint32_t c_bitmask() const { return propagate_; }
+
+ private:
+ uint32_t propagate_;
+};
+
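Since every setter above returns *this, propagation options are normally built as a chain and handed straight to ClientContext::FromServerContext (declared further down in this header). A minimal sketch; server_ctx stands for an incoming grpc::ServerContext& and is not part of the header itself:

  std::unique_ptr<grpc::ClientContext> child_ctx =
      grpc::ClientContext::FromServerContext(
          server_ctx,
          grpc::PropagationOptions()
              .enable_deadline_propagation()
              .disable_census_stats_propagation()
              .enable_cancellation_propagation());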
+/// A ClientContext allows the person implementing a service client to:
+///
+/// - Add custom metadata key-value pairs that will be propagated to the server
+/// side.
+/// - Control call settings such as compression and authentication.
+/// - Access initial and trailing metadata coming from the server.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call they are invoked with, that
+/// is to say, they aren't sticky. Some of these settings, such as the
+/// compression options, can be made persistent at channel construction time
+/// (see \a grpc::CreateCustomChannel).
+///
+/// \warning ClientContext instances should \em not be reused across rpcs.
+/// \warning The ClientContext instance used for creating an rpc must remain
+/// alive and valid for the lifetime of the rpc.
+class ClientContext {
+ public:
+ ClientContext();
+ ~ClientContext();
+
+ /// Create a new \a ClientContext as a child of an incoming server call,
+ /// according to \a options (\see PropagationOptions).
+ ///
+ /// \param server_context The source server context to use as the basis for
+ /// constructing the client context.
+ /// \param options The options controlling what to copy from the \a
+ /// server_context.
+ ///
+ /// \return A newly constructed \a ClientContext instance based on \a
+ /// server_context, with traits propagated (copied) according to \a options.
+ static std::unique_ptr<ClientContext> FromServerContext(
+ const grpc::ServerContext& server_context,
+ PropagationOptions options = PropagationOptions());
+ static std::unique_ptr<ClientContext> FromCallbackServerContext(
+ const grpc::CallbackServerContext& server_context,
+ PropagationOptions options = PropagationOptions());
+
+ /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with
+ /// a client call. These are made available at the server side by the \a
+ /// grpc::ServerContext::client_metadata() method.
+ ///
+ /// \warning This method should only be called before invoking the rpc.
+ ///
+ /// \param meta_key The metadata key. If \a meta_value is binary data, it must
+ /// end in "-bin".
+ /// \param meta_value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddMetadata(const TString& meta_key, const TString& meta_value);
+
+ /// Return a collection of initial metadata key-value pairs. Note that keys
+ /// may happen more than once (ie, a \a std::multimap is returned).
+ ///
+ /// \warning This method should only be called after initial metadata has been
+ /// received. For streaming calls, see \a
+ /// ClientReaderInterface::WaitForInitialMetadata().
+ ///
+ /// \return A multimap of initial metadata key-value pairs from the server.
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ GetServerInitialMetadata() const {
+ GPR_CODEGEN_ASSERT(initial_metadata_received_);
+ return *recv_initial_metadata_.map();
+ }
+
+ /// Return a collection of trailing metadata key-value pairs. Note that keys
+ /// may happen more than once (ie, a \a std::multimap is returned).
+ ///
+ /// \warning This method is only callable once the stream has finished.
+ ///
+ /// \return A multimap of metadata trailing key-value pairs from the server.
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ GetServerTrailingMetadata() const {
+ // TODO(yangg) check finished
+ return *trailing_metadata_.map();
+ }
+
+ /// Set the deadline for the client call.
+ ///
+ /// \warning This method should only be called before invoking the rpc.
+ ///
+ /// \param deadline the deadline for the client call. Units are determined by
+ /// the type used. The deadline is an absolute (not relative) time.
+ template <typename T>
+ void set_deadline(const T& deadline) {
+ grpc::TimePoint<T> deadline_tp(deadline);
+ deadline_ = deadline_tp.raw_time();
+ }
+
+ /// EXPERIMENTAL: Indicate that this request is idempotent.
+ /// By default, RPCs are assumed to <i>not</i> be idempotent.
+ ///
+ /// If true, the gRPC library assumes that it's safe to initiate
+ /// this RPC multiple times.
+ void set_idempotent(bool idempotent) { idempotent_ = idempotent; }
+
+ /// EXPERIMENTAL: Set this request to be cacheable.
+ /// If set, grpc is free to use the HTTP GET verb for sending the request,
+ /// with the possibility of receiving a cached response.
+ void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
+
+ /// EXPERIMENTAL: Trigger wait-for-ready or not on this request.
+ /// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+ /// If set, if an RPC is made when a channel's connectivity state is
+ /// TRANSIENT_FAILURE or CONNECTING, the call will not "fail fast",
+ /// and the channel will wait until the channel is READY before making the
+ /// call.
+ void set_wait_for_ready(bool wait_for_ready) {
+ wait_for_ready_ = wait_for_ready;
+ wait_for_ready_explicitly_set_ = true;
+ }
+
+ /// DEPRECATED: Use set_wait_for_ready() instead.
+ void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
+
+ /// Return the deadline for the client call.
+ std::chrono::system_clock::time_point deadline() const {
+ return grpc::Timespec2Timepoint(deadline_);
+ }
+
+ /// Return a \a gpr_timespec representation of the client call's deadline.
+ gpr_timespec raw_deadline() const { return deadline_; }
+
+ /// Set the per call authority header (see
+ /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3).
+ void set_authority(const TString& authority) { authority_ = authority; }
+
+ /// Return the authentication context for the associated client call.
+ /// It is only valid to call this during the lifetime of the client call.
+ ///
+ /// \see grpc::AuthContext.
+ std::shared_ptr<const grpc::AuthContext> auth_context() const {
+ if (auth_context_.get() == nullptr) {
+ auth_context_ = grpc::CreateAuthContext(call_);
+ }
+ return auth_context_;
+ }
+
+ /// Set credentials for the client call.
+ ///
+ /// A credentials object encapsulates all the state needed by a client to
+ /// authenticate with a server and make various assertions, e.g., about the
+ /// client’s identity, role, or whether it is authorized to make a particular
+ /// call.
+ ///
+ /// It is legal to call this only before initial metadata is sent.
+ ///
+ /// \see https://grpc.io/docs/guides/auth.html
+ void set_credentials(const std::shared_ptr<grpc::CallCredentials>& creds);
+
+ /// EXPERIMENTAL debugging API
+ ///
+ /// Returns the credentials for the client call. This should be used only in
+ /// tests and for diagnostic purposes, and should not be used by application
+ /// logic.
+ std::shared_ptr<grpc::CallCredentials> credentials() { return creds_; }
+
+ /// Return the compression algorithm the client call will request be used.
+ /// Note that the gRPC runtime may decide to ignore this request, for example,
+ /// due to resource constraints.
+ grpc_compression_algorithm compression_algorithm() const {
+ return compression_algorithm_;
+ }
+
+ /// Set \a algorithm to be the compression algorithm used for the client call.
+ ///
+ /// \param algorithm The compression algorithm used for the client call.
+ void set_compression_algorithm(grpc_compression_algorithm algorithm);
+
+ /// Flag whether the initial metadata should be \a corked
+ ///
+ /// If \a corked is true, then the initial metadata will be coalesced with the
+ /// write of the first message in the stream. As a result, any tag set for the
+ /// initial metadata operation (starting a client-streaming or bidi-streaming
+ /// RPC) will not actually be sent to the completion queue or delivered
+ /// via Next.
+ ///
+ /// \param corked The flag indicating whether the initial metadata is to be
+ /// corked or not.
+ void set_initial_metadata_corked(bool corked) {
+ initial_metadata_corked_ = corked;
+ }
+
+ /// Return the peer uri in a string.
+ /// It is only valid to call this during the lifetime of the client call.
+ ///
+ /// \warning This value is never authenticated or subject to any security
+ /// related code. It must not be used for any authentication related
+ /// functionality. Instead, use auth_context.
+ ///
+ /// \return The call's peer URI.
+ TString peer() const;
+
+ /// Sets the census context.
+ /// It is only valid to call this before the client call is created. A common
+ /// place of setting census context is from within the DefaultConstructor
+ /// method of GlobalCallbacks.
+ void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
+
+ /// Returns the census context that has been set, or nullptr if not set.
+ struct census_context* census_context() const {
+ return census_context_;
+ }
+
+ /// Send a best-effort out-of-band cancel on the call associated with
+ /// this client context. The call could be in any stage; e.g., if it is
+ /// already finished, it may still return success.
+ ///
+ /// There is no guarantee the call will be cancelled.
+ ///
+ /// Note that TryCancel() does not change any of the tags that are pending
+ /// on the completion queue. All pending tags will still be delivered
+ /// (though their ok result may reflect the effect of cancellation).
+ void TryCancel();
+
+ /// Global Callbacks
+ ///
+ /// Can be set exactly once per application to install hooks whenever
+ /// a client context is constructed and destructed.
+ class GlobalCallbacks {
+ public:
+ virtual ~GlobalCallbacks() {}
+ virtual void DefaultConstructor(ClientContext* context) = 0;
+ virtual void Destructor(ClientContext* context) = 0;
+ };
+ static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
+
+ /// Should be used for framework-level extensions only.
+ /// Applications never need to call this method.
+ grpc_call* c_call() { return call_; }
+
+ /// EXPERIMENTAL debugging API
+ ///
+ /// If status is not ok() for an RPC, this will return a detailed string
+ /// of the gRPC Core error that led to the failure. It should not be relied
+ /// upon for anything other than gaining more debug data in failure cases.
+ TString debug_error_string() const { return debug_error_string_; }
+
+ private:
+ // Disallow copy and assign.
+ ClientContext(const ClientContext&);
+ ClientContext& operator=(const ClientContext&);
+
+ friend class ::grpc::testing::InteropClientContextInspector;
+ friend class ::grpc::internal::CallOpClientRecvStatus;
+ friend class ::grpc::internal::CallOpRecvInitialMetadata;
+ friend class ::grpc::Channel;
+ template <class R>
+ friend class ::grpc::ClientReader;
+ template <class W>
+ friend class ::grpc::ClientWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientReaderWriter;
+ template <class R>
+ friend class ::grpc::ClientAsyncReader;
+ template <class W>
+ friend class ::grpc::ClientAsyncWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientAsyncReaderWriter;
+ template <class R>
+ friend class ::grpc::ClientAsyncResponseReader;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::BlockingUnaryCallImpl;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::CallbackUnaryCallImpl;
+ template <class Request, class Response>
+ friend class ::grpc::internal::ClientCallbackReaderWriterImpl;
+ template <class Response>
+ friend class ::grpc::internal::ClientCallbackReaderImpl;
+ template <class Request>
+ friend class ::grpc::internal::ClientCallbackWriterImpl;
+ friend class ::grpc::internal::ClientCallbackUnaryImpl;
+ friend class ::grpc::internal::ClientContextAccessor;
+
+ // Used by friend class CallOpClientRecvStatus
+ void set_debug_error_string(const TString& debug_error_string) {
+ debug_error_string_ = debug_error_string;
+ }
+
+ grpc_call* call() const { return call_; }
+ void set_call(grpc_call* call,
+ const std::shared_ptr<::grpc::Channel>& channel);
+
+ grpc::experimental::ClientRpcInfo* set_client_rpc_info(
+ const char* method, grpc::internal::RpcMethod::RpcType type,
+ grpc::ChannelInterface* channel,
+ const std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>& creators,
+ size_t interceptor_pos) {
+ rpc_info_ = grpc::experimental::ClientRpcInfo(this, type, method, channel);
+ rpc_info_.RegisterInterceptors(creators, interceptor_pos);
+ return &rpc_info_;
+ }
+
+ uint32_t initial_metadata_flags() const {
+ return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) |
+ (wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) |
+ (cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) |
+ (wait_for_ready_explicitly_set_
+ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+ : 0) |
+ (initial_metadata_corked_ ? GRPC_INITIAL_METADATA_CORKED : 0);
+ }
+
+ TString authority() { return authority_; }
+
+ void SendCancelToInterceptors();
+
+ static std::unique_ptr<ClientContext> FromInternalServerContext(
+ const grpc::ServerContextBase& server_context,
+ PropagationOptions options);
+
+ bool initial_metadata_received_;
+ bool wait_for_ready_;
+ bool wait_for_ready_explicitly_set_;
+ bool idempotent_;
+ bool cacheable_;
+ std::shared_ptr<::grpc::Channel> channel_;
+ grpc::internal::Mutex mu_;
+ grpc_call* call_;
+ bool call_canceled_;
+ gpr_timespec deadline_;
+ grpc::string authority_;
+ std::shared_ptr<grpc::CallCredentials> creds_;
+ mutable std::shared_ptr<const grpc::AuthContext> auth_context_;
+ struct census_context* census_context_;
+ std::multimap<TString, TString> send_initial_metadata_;
+ mutable grpc::internal::MetadataMap recv_initial_metadata_;
+ mutable grpc::internal::MetadataMap trailing_metadata_;
+
+ grpc_call* propagate_from_call_;
+ PropagationOptions propagation_options_;
+
+ grpc_compression_algorithm compression_algorithm_;
+ bool initial_metadata_corked_;
+
+ TString debug_error_string_;
+
+ grpc::experimental::ClientRpcInfo rpc_info_;
+};
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
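To tie the API documented above together, here is a hedged usage sketch. Greeter::Stub, HelloRequest, HelloReply and SayHello are hypothetical names standing in for code emitted by the protobuf/gRPC generator, and stub is assumed to be a valid stub pointer:

  grpc::ClientContext ctx;  // one context per RPC, kept alive for the RPC's full lifetime
  ctx.set_deadline(std::chrono::system_clock::now() + std::chrono::seconds(5));
  ctx.AddMetadata("x-request-id", "42");  // ASCII metadata; binary values need a "-bin" key suffix
  ctx.set_wait_for_ready(true);           // queue on CONNECTING/TRANSIENT_FAILURE instead of failing fast

  HelloRequest request;
  HelloReply reply;
  grpc::Status status = stub->SayHello(&ctx, request, &reply);  // blocking unary call
  if (!status.ok()) {
    // ctx.debug_error_string() may hold extra diagnostic detail; it is not for application logic.
  }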
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
index 359f03560f..78be1f7597 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/string_ref.h>
-namespace grpc {
+namespace grpc {
class Channel;
class ClientContext;
@@ -92,7 +92,7 @@ class ClientRpcInfo {
/// Return a pointer to the underlying ClientContext structure associated
/// with the RPC to support features that apply to it
- grpc::ClientContext* client_context() { return ctx_; }
+ grpc::ClientContext* client_context() { return ctx_; }
/// Return the type of the RPC (unary or a streaming flavor)
Type type() const { return type_; }
@@ -115,8 +115,8 @@ class ClientRpcInfo {
ClientRpcInfo() = default;
// Constructor will only be called from ClientContext
- ClientRpcInfo(grpc::ClientContext* ctx, internal::RpcMethod::RpcType type,
- const char* method, grpc::ChannelInterface* channel)
+ ClientRpcInfo(grpc::ClientContext* ctx, internal::RpcMethod::RpcType type,
+ const char* method, grpc::ChannelInterface* channel)
: ctx_(ctx),
type_(static_cast<Type>(type)),
method_(method),
@@ -158,7 +158,7 @@ class ClientRpcInfo {
}
}
- grpc::ClientContext* ctx_ = nullptr;
+ grpc::ClientContext* ctx_ = nullptr;
// TODO(yashykt): make type_ const once move-assignment is deleted
Type type_{Type::UNKNOWN};
const char* method_ = nullptr;
@@ -168,7 +168,7 @@ class ClientRpcInfo {
size_t hijacked_interceptor_ = 0;
friend class internal::InterceptorBatchMethodsImpl;
- friend class grpc::ClientContext;
+ friend class grpc::ClientContext;
};
// PLEASE DO NOT USE THIS. ALWAYS PREFER PER CHANNEL INTERCEPTORS OVER A GLOBAL
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
index 8b4afe52ca..098bb50ee2 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
@@ -25,7 +25,7 @@
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class ClientContext;
namespace internal {
@@ -33,7 +33,7 @@ class RpcMethod;
/// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
- grpc::ClientContext* context,
+ grpc::ClientContext* context,
const InputMessage& request, OutputMessage* result) {
return BlockingUnaryCallImpl<InputMessage, OutputMessage>(
channel, method, context, request, result)
@@ -44,9 +44,9 @@ template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl {
public:
BlockingUnaryCallImpl(ChannelInterface* channel, const RpcMethod& method,
- grpc::ClientContext* context,
+ grpc::ClientContext* context,
const InputMessage& request, OutputMessage* result) {
- ::grpc::CompletionQueue cq(grpc_completion_queue_attributes{
+ ::grpc::CompletionQueue cq(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
nullptr}); // Pluckable completion queue
::grpc::internal::Call call(channel->CreateCall(method, context, &cq));
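For orientation, applications do not call BlockingUnaryCall directly; a generated synchronous stub method typically expands to roughly the following (service, method and message names are hypothetical, and channel_/rpcmethod_SayHello_ are members of the generated stub):

  ::grpc::Status Greeter::Stub::SayHello(::grpc::ClientContext* context,
                                         const HelloRequest& request,
                                         HelloReply* response) {
    return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SayHello_,
                                               context, request, response);
  }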
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
index 44f3a938be..ca0c77276a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
@@ -16,433 +16,433 @@
*
*/
-/// A completion queue implements a concurrent producer-consumer queue, with
-/// two main API-exposed methods: \a Next and \a AsyncNext. These
-/// methods are the essential component of the gRPC C++ asynchronous API.
-/// There is also a \a Shutdown method to indicate that a given completion queue
-/// will no longer have regular events. This must be called before the
-/// completion queue is destroyed.
-/// All completion queue APIs are thread-safe and may be used concurrently with
-/// any other completion queue API invocation; it is acceptable to have
-/// multiple threads calling \a Next or \a AsyncNext on the same or different
-/// completion queues, or to call these methods concurrently with a \a Shutdown
-/// elsewhere.
-/// \remark{All other API calls on completion queue should be completed before
-/// a completion queue destructor is called.}
+/// A completion queue implements a concurrent producer-consumer queue, with
+/// two main API-exposed methods: \a Next and \a AsyncNext. These
+/// methods are the essential component of the gRPC C++ asynchronous API.
+/// There is also a \a Shutdown method to indicate that a given completion queue
+/// will no longer have regular events. This must be called before the
+/// completion queue is destroyed.
+/// All completion queue APIs are thread-safe and may be used concurrently with
+/// any other completion queue API invocation; it is acceptable to have
+/// multiple threads calling \a Next or \a AsyncNext on the same or different
+/// completion queues, or to call these methods concurrently with a \a Shutdown
+/// elsewhere.
+/// \remark{All other API calls on completion queue should be completed before
+/// a completion queue destructor is called.}
#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
-#include <list>
-
-#include <grpc/impl/codegen/atm.h>
-#include <grpcpp/impl/codegen/completion_queue_tag.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct grpc_completion_queue;
-
+#include <list>
+
+#include <grpc/impl/codegen/atm.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct grpc_completion_queue;
+
namespace grpc {
-template <class R>
-class ClientReader;
-template <class W>
-class ClientWriter;
-template <class W, class R>
-class ClientReaderWriter;
-template <class R>
-class ServerReader;
-template <class W>
-class ServerWriter;
-namespace internal {
-template <class W, class R>
-class ServerReaderWriterBody;
-
-template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler;
-template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler;
-template <::grpc::StatusCode code>
-class ErrorMethodHandler;
-} // namespace internal
-
-class Channel;
-class ChannelInterface;
-class Server;
-class ServerBuilder;
-class ServerContextBase;
-class ServerInterface;
-
-namespace internal {
-class CompletionQueueTag;
-class RpcMethod;
-template <class InputMessage, class OutputMessage>
-class BlockingUnaryCallImpl;
-template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
-class CallOpSet;
-} // namespace internal
-
-extern CoreCodegenInterface* g_core_codegen_interface;
-
-/// A thin wrapper around \ref grpc_completion_queue (see \ref
-/// src/core/lib/surface/completion_queue.h).
-/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
-/// performance servers.
-class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
- public:
- /// Default constructor. Implicitly creates a \a grpc_completion_queue
- /// instance.
- CompletionQueue()
- : CompletionQueue(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
- nullptr}) {}
-
- /// Wrap \a take, taking ownership of the instance.
- ///
- /// \param take The completion queue instance to wrap. Ownership is taken.
- explicit CompletionQueue(grpc_completion_queue* take);
-
- /// Destructor. Destroys the owned wrapped completion queue / instance.
- ~CompletionQueue() {
- ::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
- }
-
- /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
- enum NextStatus {
- SHUTDOWN, ///< The completion queue has been shutdown and fully-drained
- GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
- ///< associated value; \a ok indicating its success.
- TIMEOUT ///< deadline was reached.
- };
-
- /// Read from the queue, blocking until an event is available or the queue is
- /// shutting down.
- ///
- /// \param tag [out] Updated to point to the read event's tag.
- /// \param ok [out] true if a successful event was read, false otherwise.
- ///
- /// Note that each tag sent to the completion queue (through RPC operations
- /// or alarms) will be delivered out of the completion queue by a call to
- /// Next (or a related method), regardless of whether the operation succeeded
- /// or not. Success here means that this operation completed in the normal
- /// valid manner.
- ///
- /// Server-side RPC request: \a ok indicates that the RPC has indeed
- /// been started. If it is false, the server has been Shutdown
- /// before this particular call got matched to an incoming RPC.
- ///
- /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
- /// going to go to the wire. If it is false, it is not going to the wire. This
- /// would happen if the channel is either permanently broken or
- /// transiently broken but with the fail-fast option. (Note that async unary
- /// RPCs don't post a CQ tag at this point, nor do client-streaming
- /// or bidi-streaming RPCs that have the initial metadata corked option set.)
- ///
- /// Client-side Write, Client-side WritesDone, Server-side Write,
- /// Server-side Finish, Server-side SendInitialMetadata (which is
- /// typically included in Write or Finish when not done explicitly):
- /// \a ok means that the data/metadata/status/etc is going to go to the
- /// wire. If it is false, it is not going to the wire because the call
- /// is already dead (i.e., canceled, deadline expired, other side
- /// dropped the channel, etc).
- ///
- /// Client-side Read, Server-side Read, Client-side
- /// RecvInitialMetadata (which is typically included in Read if not
- /// done explicitly): \a ok indicates whether there is a valid message
- /// that got read. If not, you know that there are certainly no more
- /// messages that can ever be read from this stream. For the client-side
- /// operations, this only happens because the call is dead. For the
- /// server-side operation, though, this could happen because the client
- /// has done a WritesDone already.
- ///
- /// Client-side Finish: \a ok should always be true
- ///
- /// Server-side AsyncNotifyWhenDone: \a ok should always be true
- ///
- /// Alarm: \a ok is true if it expired, false if it was canceled
- ///
- /// \return true if got an event, false if the queue is fully drained and
- /// shut down.
- bool Next(void** tag, bool* ok) {
- return (AsyncNextInternal(tag, ok,
- ::grpc::g_core_codegen_interface->gpr_inf_future(
- GPR_CLOCK_REALTIME)) != SHUTDOWN);
- }
-
- /// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
- /// Both \a tag and \a ok are updated upon success (if an event is available
- /// within the \a deadline). A \a tag points to an arbitrary location usually
- /// employed to uniquely identify an event.
- ///
- /// \param tag [out] Upon success, updated to point to the event's tag.
- /// \param ok [out] Upon success, true if a successful event, false otherwise
- /// See documentation for CompletionQueue::Next for explanation of ok
- /// \param deadline [in] How long to block in wait for an event.
- ///
- /// \return The type of event read.
- template <typename T>
- NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
- ::grpc::TimePoint<T> deadline_tp(deadline);
- return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
- }
-
- /// EXPERIMENTAL
- /// First executes \a F, then reads from the queue, blocking up to
- /// \a deadline (or the queue's shutdown).
- /// Both \a tag and \a ok are updated upon success (if an event is available
- /// within the \a deadline). A \a tag points to an arbitrary location usually
- /// employed to uniquely identify an event.
- ///
- /// \param f [in] Function to execute before calling AsyncNext on this queue.
- /// \param tag [out] Upon success, updated to point to the event's tag.
- /// \param ok [out] Upon success, true if read a regular event, false
- /// otherwise.
- /// \param deadline [in] How long to block in wait for an event.
- ///
- /// \return The type of event read.
- template <typename T, typename F>
- NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
- CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
- f();
- if (cache.Flush(tag, ok)) {
- return GOT_EVENT;
- } else {
- return AsyncNext(tag, ok, deadline);
- }
- }
-
- /// Request the shutdown of the queue.
- ///
- /// \warning This method must be called at some point if this completion queue
- /// is accessed with Next or AsyncNext. \a Next will not return false
- /// until this method has been called and all pending tags have been drained.
- /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
- /// Only once either one of these methods does that (that is, once the queue
- /// has been \em drained) can an instance of this class be destroyed.
- /// Also note that applications must ensure that no work is enqueued on this
- /// completion queue after this method is called.
- void Shutdown();
-
- /// Returns a \em raw pointer to the underlying \a grpc_completion_queue
- /// instance.
- ///
- /// \warning Remember that the returned instance is owned. No transfer of
- /// ownership is performed.
- grpc_completion_queue* cq() { return cq_; }
-
- protected:
- /// Private constructor of CompletionQueue only visible to friend classes
- CompletionQueue(const grpc_completion_queue_attributes& attributes) {
- cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create(
- ::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup(
- &attributes),
- &attributes, NULL);
- InitialAvalanching(); // reserve this for the future shutdown
- }
-
- private:
- // Friends for access to server registration lists that enable checking and
- // logging on shutdown
- friend class ::grpc::ServerBuilder;
- friend class ::grpc::Server;
-
- // Friend synchronous wrappers so that they can access Pluck(), which is
- // a semi-private API geared towards the synchronous implementation.
- template <class R>
- friend class ::grpc::ClientReader;
- template <class W>
- friend class ::grpc::ClientWriter;
- template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
- template <class R>
- friend class ::grpc::ServerReader;
- template <class W>
- friend class ::grpc::ServerWriter;
- template <class W, class R>
- friend class ::grpc::internal::ServerReaderWriterBody;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::RpcMethodHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ClientStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ServerStreamingHandler;
- template <class Streamer, bool WriteNeeded>
- friend class ::grpc::internal::TemplatedBidiStreamingHandler;
- template <::grpc::StatusCode code>
- friend class ::grpc::internal::ErrorMethodHandler;
- friend class ::grpc::ServerContextBase;
- friend class ::grpc::ServerInterface;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::BlockingUnaryCallImpl;
-
- // Friends that need access to constructor for callback CQ
- friend class ::grpc::Channel;
-
- // For access to Register/CompleteAvalanching
- template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
- friend class ::grpc::internal::CallOpSet;
-
- /// EXPERIMENTAL
- /// Creates a Thread Local cache to store the first event
- /// On this completion queue queued from this thread. Once
- /// initialized, it must be flushed on the same thread.
- class CompletionQueueTLSCache {
- public:
- CompletionQueueTLSCache(CompletionQueue* cq);
- ~CompletionQueueTLSCache();
- bool Flush(void** tag, bool* ok);
-
- private:
- CompletionQueue* cq_;
- bool flushed_;
- };
-
- NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
-
- /// Wraps \a grpc_completion_queue_pluck.
- /// \warning Must not be mixed with calls to \a Next.
- bool Pluck(::grpc::internal::CompletionQueueTag* tag) {
- auto deadline =
- ::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
- while (true) {
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- bool ok = ev.success != 0;
- void* ignored = tag;
- if (tag->FinalizeResult(&ignored, &ok)) {
- GPR_CODEGEN_ASSERT(ignored == tag);
- return ok;
- }
- }
- }
-
- /// Performs a single polling pluck on \a tag.
- /// \warning Must not be mixed with calls to \a Next.
- ///
- /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
- /// shutdown. This is most likely a bug and if it is a bug, then change this
- /// implementation to simply call the other TryPluck function with a zero
- /// timeout. i.e:
- /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
- void TryPluck(::grpc::internal::CompletionQueueTag* tag) {
- auto deadline =
- ::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- if (ev.type == GRPC_QUEUE_TIMEOUT) return;
- bool ok = ev.success != 0;
- void* ignored = tag;
- // the tag must be swallowed if using TryPluck
- GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
- }
-
- /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
- /// the pluck() was successful and returned the tag.
- ///
- /// This expects tag->FinalizeResult (if called) to return 'false', i.e. it
- /// expects that the tag is internal, not something that is returned to the user.
- void TryPluck(::grpc::internal::CompletionQueueTag* tag,
- gpr_timespec deadline) {
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
- return;
- }
-
- bool ok = ev.success != 0;
- void* ignored = tag;
- GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
- }
-
- /// Manage state of avalanching operations : completion queue tags that
- /// trigger other completion queue operations. The underlying core completion
- /// queue should not really shut down until all avalanching operations have
- /// been finalized. Note that we maintain the requirement that an avalanche
- /// registration must take place before CQ shutdown (which must be maintained
- /// elsewhere)
- void InitialAvalanching() {
- gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
- }
- void RegisterAvalanching() {
- gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
- static_cast<gpr_atm>(1));
- }
- void CompleteAvalanching() {
- if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
- static_cast<gpr_atm>(-1)) == 1) {
- ::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
- }
- }
-
- void RegisterServer(const ::grpc::Server* server) {
- (void)server;
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- server_list_.push_back(server);
-#endif
- }
- void UnregisterServer(const ::grpc::Server* server) {
- (void)server;
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- server_list_.remove(server);
-#endif
- }
- bool ServerListEmpty() const {
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- return server_list_.empty();
-#endif
- return true;
- }
-
- grpc_completion_queue* cq_; // owned
-
- gpr_atm avalanches_in_flight_;
-
- // List of servers associated with this CQ. Even though this is only used with
- // NDEBUG, instantiate it in all cases since otherwise the size will be
- // inconsistent.
- mutable grpc::internal::Mutex server_list_mutex_;
- std::list<const ::grpc::Server*>
- server_list_ /* GUARDED_BY(server_list_mutex_) */;
-};
-
-/// A specific type of completion queue used by the processing of notifications
-/// by servers. Instantiated by \a ServerBuilder or Server (for health checker).
-class ServerCompletionQueue : public CompletionQueue {
- public:
- bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
-
- protected:
- /// Default constructor
- ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
-
- private:
- /// \param completion_type indicates whether this is a NEXT or CALLBACK
- /// completion queue.
- /// \param polling_type Informs the GRPC library about the type of polling
- /// allowed on this completion queue. See grpc_cq_polling_type's description
- /// in grpc_types.h for more details.
- /// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
- ServerCompletionQueue(grpc_cq_completion_type completion_type,
- grpc_cq_polling_type polling_type,
- grpc_experimental_completion_queue_functor* shutdown_cb)
- : CompletionQueue(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
- shutdown_cb}),
- polling_type_(polling_type) {}
-
- grpc_cq_polling_type polling_type_;
- friend class ::grpc::ServerBuilder;
- friend class ::grpc::Server;
-};
-
+template <class R>
+class ClientReader;
+template <class W>
+class ClientWriter;
+template <class W, class R>
+class ClientReaderWriter;
+template <class R>
+class ServerReader;
+template <class W>
+class ServerWriter;
+namespace internal {
+template <class W, class R>
+class ServerReaderWriterBody;
+
+template <class ServiceType, class RequestType, class ResponseType>
+class RpcMethodHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ClientStreamingHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ServerStreamingHandler;
+template <class Streamer, bool WriteNeeded>
+class TemplatedBidiStreamingHandler;
+template <::grpc::StatusCode code>
+class ErrorMethodHandler;
+} // namespace internal
+
+class Channel;
+class ChannelInterface;
+class Server;
+class ServerBuilder;
+class ServerContextBase;
+class ServerInterface;
+
+namespace internal {
+class CompletionQueueTag;
+class RpcMethod;
+template <class InputMessage, class OutputMessage>
+class BlockingUnaryCallImpl;
+template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+class CallOpSet;
+} // namespace internal
+
+extern CoreCodegenInterface* g_core_codegen_interface;
+
+/// A thin wrapper around \ref grpc_completion_queue (see \ref
+/// src/core/lib/surface/completion_queue.h).
+/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
+/// performance servers.
+class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
+ public:
+ /// Default constructor. Implicitly creates a \a grpc_completion_queue
+ /// instance.
+ CompletionQueue()
+ : CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}) {}
+
+ /// Wrap \a take, taking ownership of the instance.
+ ///
+ /// \param take The completion queue instance to wrap. Ownership is taken.
+ explicit CompletionQueue(grpc_completion_queue* take);
+
+ /// Destructor. Destroys the owned wrapped completion queue / instance.
+ ~CompletionQueue() {
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
+ }
+
+ /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
+ enum NextStatus {
+ SHUTDOWN, ///< The completion queue has been shutdown and fully-drained
+ GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
+ ///< associated value; \a ok indicating its success.
+ TIMEOUT ///< deadline was reached.
+ };
+
+ /// Read from the queue, blocking until an event is available or the queue is
+ /// shutting down.
+ ///
+ /// \param tag [out] Updated to point to the read event's tag.
+ /// \param ok [out] true if a successful event was read, false otherwise.
+ ///
+ /// Note that each tag sent to the completion queue (through RPC operations
+ /// or alarms) will be delivered out of the completion queue by a call to
+ /// Next (or a related method), regardless of whether the operation succeeded
+ /// or not. Success here means that this operation completed in the normal
+ /// valid manner.
+ ///
+ /// Server-side RPC request: \a ok indicates that the RPC has indeed
+ /// been started. If it is false, the server has been Shutdown
+ /// before this particular call got matched to an incoming RPC.
+ ///
+ /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
+ /// going to go to the wire. If it is false, it is not going to the wire. This
+ /// would happen if the channel is either permanently broken or
+ /// transiently broken but with the fail-fast option. (Note that async unary
+ /// RPCs don't post a CQ tag at this point, nor do client-streaming
+ /// or bidi-streaming RPCs that have the initial metadata corked option set.)
+ ///
+ /// Client-side Write, Client-side WritesDone, Server-side Write,
+ /// Server-side Finish, Server-side SendInitialMetadata (which is
+ /// typically included in Write or Finish when not done explicitly):
+ /// \a ok means that the data/metadata/status/etc is going to go to the
+  /// wire. If it is false, it is not going to the wire because the call
+ /// is already dead (i.e., canceled, deadline expired, other side
+ /// dropped the channel, etc).
+ ///
+ /// Client-side Read, Server-side Read, Client-side
+ /// RecvInitialMetadata (which is typically included in Read if not
+ /// done explicitly): \a ok indicates whether there is a valid message
+ /// that got read. If not, you know that there are certainly no more
+ /// messages that can ever be read from this stream. For the client-side
+ /// operations, this only happens because the call is dead. For the
+  /// server-side operation, though, this could happen because the client
+ /// has done a WritesDone already.
+ ///
+ /// Client-side Finish: \a ok should always be true
+ ///
+ /// Server-side AsyncNotifyWhenDone: \a ok should always be true
+ ///
+ /// Alarm: \a ok is true if it expired, false if it was canceled
+ ///
+ /// \return true if got an event, false if the queue is fully drained and
+ /// shut down.
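+  ///
+  /// Illustrative usage sketch (not part of the API itself): a typical drain
+  /// loop, assuming a CompletionQueue \a cq that already has operations
+  /// enqueued against it and a hypothetical application function ProcessTag:
+  /// \code
+  ///   void* tag;
+  ///   bool ok;
+  ///   while (cq.Next(&tag, &ok)) {
+  ///     ProcessTag(tag, ok);  // dispatch on the tag supplied when starting the op
+  ///   }
+  ///   // Next returned false: the queue was shut down and fully drained.
+  /// \endcode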
+ bool Next(void** tag, bool* ok) {
+ return (AsyncNextInternal(tag, ok,
+ ::grpc::g_core_codegen_interface->gpr_inf_future(
+ GPR_CLOCK_REALTIME)) != SHUTDOWN);
+ }
+
+ /// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
+ /// Both \a tag and \a ok are updated upon success (if an event is available
+ /// within the \a deadline). A \a tag points to an arbitrary location usually
+ /// employed to uniquely identify an event.
+ ///
+ /// \param tag [out] Upon success, updated to point to the event's tag.
+ /// \param ok [out] Upon success, true if a successful event, false otherwise
+ /// See documentation for CompletionQueue::Next for explanation of ok
+ /// \param deadline [in] How long to block in wait for an event.
+ ///
+ /// \return The type of event read.
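+  ///
+  /// Illustrative sketch (the 100ms polling deadline is an arbitrary
+  /// application choice; any supported deadline type, such as
+  /// std::chrono::system_clock::time_point or gpr_timespec, works):
+  /// \code
+  ///   void* tag;
+  ///   bool ok;
+  ///   auto deadline =
+  ///       std::chrono::system_clock::now() + std::chrono::milliseconds(100);
+  ///   switch (cq.AsyncNext(&tag, &ok, deadline)) {
+  ///     case ::grpc::CompletionQueue::GOT_EVENT: /* handle tag and ok */ break;
+  ///     case ::grpc::CompletionQueue::TIMEOUT:   /* no event yet */ break;
+  ///     case ::grpc::CompletionQueue::SHUTDOWN:  /* queue fully drained */ break;
+  ///   }
+  /// \endcode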
+ template <typename T>
+ NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
+ ::grpc::TimePoint<T> deadline_tp(deadline);
+ return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
+ }
+
+ /// EXPERIMENTAL
+ /// First executes \a F, then reads from the queue, blocking up to
+ /// \a deadline (or the queue's shutdown).
+ /// Both \a tag and \a ok are updated upon success (if an event is available
+ /// within the \a deadline). A \a tag points to an arbitrary location usually
+ /// employed to uniquely identify an event.
+ ///
+ /// \param f [in] Function to execute before calling AsyncNext on this queue.
+ /// \param tag [out] Upon success, updated to point to the event's tag.
+  /// \param ok [out] Upon success, true if a regular event was read, false
+ /// otherwise.
+ /// \param deadline [in] How long to block in wait for an event.
+ ///
+ /// \return The type of event read.
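+  ///
+  /// Illustrative sketch (hypothetical: \a f enqueues work, here via a
+  /// ::grpc::Alarm, whose completion may then be reaped by the same call):
+  /// \code
+  ///   void* tag;
+  ///   bool ok;
+  ///   auto ns = cq.DoThenAsyncNext(
+  ///       [&] { alarm.Set(&cq, deadline, alarm_tag); }, &tag, &ok, deadline);
+  /// \endcode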
+ template <typename T, typename F>
+ NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
+ CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
+ f();
+ if (cache.Flush(tag, ok)) {
+ return GOT_EVENT;
+ } else {
+ return AsyncNext(tag, ok, deadline);
+ }
+ }
+
+ /// Request the shutdown of the queue.
+ ///
+ /// \warning This method must be called at some point if this completion queue
+ /// is accessed with Next or AsyncNext. \a Next will not return false
+ /// until this method has been called and all pending tags have been drained.
+ /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
+ /// Only once either one of these methods does that (that is, once the queue
+ /// has been \em drained) can an instance of this class be destroyed.
+ /// Also note that applications must ensure that no work is enqueued on this
+ /// completion queue after this method is called.
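+  ///
+  /// Illustrative shutdown sequence sketch (application-side convention):
+  /// \code
+  ///   cq.Shutdown();                // no new work may be enqueued after this
+  ///   void* tag;
+  ///   bool ok;
+  ///   while (cq.Next(&tag, &ok)) {  // drain every remaining tag
+  ///     /* release any resources owned by tag */
+  ///   }
+  ///   // Only now is it safe to destroy cq.
+  /// \endcode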
+ void Shutdown();
+
+ /// Returns a \em raw pointer to the underlying \a grpc_completion_queue
+ /// instance.
+ ///
+ /// \warning Remember that the returned instance is owned. No transfer of
+  /// ownership is performed.
+ grpc_completion_queue* cq() { return cq_; }
+
+ protected:
+  /// Protected constructor of CompletionQueue, only visible to friend classes
+  /// and subclasses
+ CompletionQueue(const grpc_completion_queue_attributes& attributes) {
+ cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create(
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup(
+ &attributes),
+ &attributes, NULL);
+ InitialAvalanching(); // reserve this for the future shutdown
+ }
+
+ private:
+ // Friends for access to server registration lists that enable checking and
+ // logging on shutdown
+ friend class ::grpc::ServerBuilder;
+ friend class ::grpc::Server;
+
+ // Friend synchronous wrappers so that they can access Pluck(), which is
+ // a semi-private API geared towards the synchronous implementation.
+ template <class R>
+ friend class ::grpc::ClientReader;
+ template <class W>
+ friend class ::grpc::ClientWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientReaderWriter;
+ template <class R>
+ friend class ::grpc::ServerReader;
+ template <class W>
+ friend class ::grpc::ServerWriter;
+ template <class W, class R>
+ friend class ::grpc::internal::ServerReaderWriterBody;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::RpcMethodHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ClientStreamingHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ServerStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class ::grpc::internal::TemplatedBidiStreamingHandler;
+ template <::grpc::StatusCode code>
+ friend class ::grpc::internal::ErrorMethodHandler;
+ friend class ::grpc::ServerContextBase;
+ friend class ::grpc::ServerInterface;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::BlockingUnaryCallImpl;
+
+ // Friends that need access to constructor for callback CQ
+ friend class ::grpc::Channel;
+
+ // For access to Register/CompleteAvalanching
+ template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+ friend class ::grpc::internal::CallOpSet;
+
+ /// EXPERIMENTAL
+  /// Creates a thread-local cache to store the first event queued
+  /// on this completion queue from this thread. Once
+ /// initialized, it must be flushed on the same thread.
+ class CompletionQueueTLSCache {
+ public:
+ CompletionQueueTLSCache(CompletionQueue* cq);
+ ~CompletionQueueTLSCache();
+ bool Flush(void** tag, bool* ok);
+
+ private:
+ CompletionQueue* cq_;
+ bool flushed_;
+ };
+
+ NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
+
+ /// Wraps \a grpc_completion_queue_pluck.
+ /// \warning Must not be mixed with calls to \a Next.
+ bool Pluck(::grpc::internal::CompletionQueueTag* tag) {
+ auto deadline =
+ ::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
+ while (true) {
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ if (tag->FinalizeResult(&ignored, &ok)) {
+ GPR_CODEGEN_ASSERT(ignored == tag);
+ return ok;
+ }
+ }
+ }
+
+ /// Performs a single polling pluck on \a tag.
+ /// \warning Must not be mixed with calls to \a Next.
+ ///
+ /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
+  /// shut down. This is most likely a bug, and if it is a bug, then change this
+  /// implementation to simply call the other TryPluck function with a zero
+  /// timeout, i.e.:
+ /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
+ void TryPluck(::grpc::internal::CompletionQueueTag* tag) {
+ auto deadline =
+ ::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ if (ev.type == GRPC_QUEUE_TIMEOUT) return;
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ // the tag must be swallowed if using TryPluck
+ GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
+ }
+
+ /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
+ /// the pluck() was successful and returned the tag.
+ ///
+  /// This expects tag->FinalizeResult (if called) to return 'false', i.e. it
+  /// expects the tag to be internal, not something that is returned to the user.
+ void TryPluck(::grpc::internal::CompletionQueueTag* tag,
+ gpr_timespec deadline) {
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
+ return;
+ }
+
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
+ }
+
+  /// Manage the state of avalanching operations: completion queue tags that
+  /// trigger other completion queue operations. The underlying core completion
+  /// queue should not really shut down until all avalanching operations have
+  /// been finalized. Note that we maintain the requirement that an avalanche
+  /// registration must take place before CQ shutdown (which must be maintained
+  /// elsewhere).
+ void InitialAvalanching() {
+ gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
+ }
+ void RegisterAvalanching() {
+ gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
+ static_cast<gpr_atm>(1));
+ }
+ void CompleteAvalanching() {
+ if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
+ static_cast<gpr_atm>(-1)) == 1) {
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
+ }
+ }
+
+ void RegisterServer(const ::grpc::Server* server) {
+ (void)server;
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ server_list_.push_back(server);
+#endif
+ }
+ void UnregisterServer(const ::grpc::Server* server) {
+ (void)server;
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ server_list_.remove(server);
+#endif
+ }
+ bool ServerListEmpty() const {
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ return server_list_.empty();
+#endif
+ return true;
+ }
+
+ grpc_completion_queue* cq_; // owned
+
+ gpr_atm avalanches_in_flight_;
+
+  // List of servers associated with this CQ. Even though this is only used in
+  // debug builds (when NDEBUG is not defined), instantiate it in all cases so
+  // that the class size stays consistent.
+ mutable grpc::internal::Mutex server_list_mutex_;
+ std::list<const ::grpc::Server*>
+ server_list_ /* GUARDED_BY(server_list_mutex_) */;
+};
+
+/// A specific type of completion queue used by the processing of notifications
+/// by servers. Instantiated by \a ServerBuilder or Server (for health checker).
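+///
+/// Illustrative sketch of how an application typically obtains one; the
+/// address, credentials, and \a async_service below are placeholders:
+/// \code
+///   ::grpc::ServerBuilder builder;
+///   builder.AddListeningPort("0.0.0.0:50051",
+///                            ::grpc::InsecureServerCredentials());
+///   builder.RegisterService(&async_service);
+///   std::unique_ptr<::grpc::ServerCompletionQueue> cq =
+///       builder.AddCompletionQueue();
+///   std::unique_ptr<::grpc::Server> server = builder.BuildAndStart();
+///   // Issue Request* calls against async_service, then drive cq->Next(...).
+/// \endcode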
+class ServerCompletionQueue : public CompletionQueue {
+ public:
+ bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
+
+ protected:
+ /// Default constructor
+ ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
+
+ private:
+ /// \param completion_type indicates whether this is a NEXT or CALLBACK
+ /// completion queue.
+ /// \param polling_type Informs the GRPC library about the type of polling
+ /// allowed on this completion queue. See grpc_cq_polling_type's description
+ /// in grpc_types.h for more details.
+ /// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
+ ServerCompletionQueue(grpc_cq_completion_type completion_type,
+ grpc_cq_polling_type polling_type,
+ grpc_experimental_completion_queue_functor* shutdown_cb)
+ : CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
+ shutdown_cb}),
+ polling_type_(polling_type) {}
+
+ grpc_cq_polling_type polling_type_;
+ friend class ::grpc::ServerBuilder;
+ friend class ::grpc::Server;
+};
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
index 3b214c9b9b..87f9914273 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
@@ -19,7 +19,7 @@
#ifndef GRPCPP_IMPL_CODEGEN_CONFIG_H
#define GRPCPP_IMPL_CODEGEN_CONFIG_H
-#include <util/generic/string.h>
+#include <util/generic/string.h>
/// The following macros are deprecated and appear only for users
/// with PB files generated using gRPC 1.0.x plugins. They should
@@ -27,16 +27,16 @@
#define GRPC_OVERRIDE override // deprecated
#define GRPC_FINAL final // deprecated
-#ifdef GRPC_CUSTOM_STRING
-#warning GRPC_CUSTOM_STRING is no longer supported. Please use TString.
-#endif
-
+#ifdef GRPC_CUSTOM_STRING
+#warning GRPC_CUSTOM_STRING is no longer supported. Please use TString.
+#endif
+
namespace grpc {
-// Using grpc::string and grpc::to_string is discouraged in favor of
-// TString and ToString. This is only for legacy code using
-// them explictly.
-typedef TString string; // deprecated
+// Using grpc::string and grpc::to_string is discouraged in favor of
+// TString and ToString. This is only for legacy code using
+// them explicitly.
+typedef TString string; // deprecated
} // namespace grpc
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
index cb987e2e8a..1a3bbd3349 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
@@ -40,7 +40,7 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
private:
internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
- ::grpc::CompletionQueue* cq) final {
+ ::grpc::CompletionQueue* cq) final {
return delegate_channel()->CreateCall(method, context, cq);
}
@@ -55,7 +55,7 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) override {
delegate_channel()->NotifyOnStateChangeImpl(last_observed, deadline, cq,
tag);
@@ -68,13 +68,13 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
internal::Call CreateCallInternal(const internal::RpcMethod& method,
ClientContext* context,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
size_t interceptor_pos) final {
return delegate_channel()->CreateCallInternal(method, context, cq,
interceptor_pos);
}
- ::grpc::CompletionQueue* CallbackCQ() final {
+ ::grpc::CompletionQueue* CallbackCQ() final {
return delegate_channel()->CallbackCQ();
}
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
index 46af38512d..c729970ca8 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
@@ -21,7 +21,7 @@
#include <grpcpp/impl/codegen/channel_interface.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
namespace internal {
@@ -46,8 +46,8 @@ class InterceptedChannel : public ChannelInterface {
InterceptedChannel(ChannelInterface* channel, size_t pos)
: channel_(channel), interceptor_pos_(pos) {}
- Call CreateCall(const RpcMethod& method, ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq) override {
+ Call CreateCall(const RpcMethod& method, ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq) override {
return channel_->CreateCallInternal(method, context, cq, interceptor_pos_);
}
@@ -60,7 +60,7 @@ class InterceptedChannel : public ChannelInterface {
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) override {
return channel_->NotifyOnStateChangeImpl(last_observed, deadline, cq, tag);
}
@@ -69,7 +69,7 @@ class InterceptedChannel : public ChannelInterface {
return channel_->WaitForStateChangeImpl(last_observed, deadline);
}
- ::grpc::CompletionQueue* CallbackCQ() override {
+ ::grpc::CompletionQueue* CallbackCQ() override {
return channel_->CallbackCQ();
}
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
index 457d5393f5..d0afa03a17 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
@@ -19,8 +19,8 @@
#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H
#define GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H
-#include <memory>
-
+#include <memory>
+
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/config.h>
@@ -157,7 +157,7 @@ class InterceptorBatchMethods {
/// Returns a modifiable multimap of the initial metadata to be sent. Valid
/// for PRE_SEND_INITIAL_METADATA interceptions. A value of nullptr indicates
/// that this field is not valid.
- virtual std::multimap<TString, TString>* GetSendInitialMetadata() = 0;
+ virtual std::multimap<TString, TString>* GetSendInitialMetadata() = 0;
/// Returns the status to be sent. Valid for PRE_SEND_STATUS interceptions.
virtual Status GetSendStatus() = 0;
@@ -169,7 +169,7 @@ class InterceptorBatchMethods {
/// Returns a modifiable multimap of the trailing metadata to be sent. Valid
/// for PRE_SEND_STATUS interceptions. A value of nullptr indicates
/// that this field is not valid.
- virtual std::multimap<TString, TString>*
+ virtual std::multimap<TString, TString>*
GetSendTrailingMetadata() = 0;
/// Returns a pointer to the modifiable received message. Note that the
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
index 32fbe9b883..714351f543 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
@@ -104,7 +104,7 @@ class InterceptorBatchMethodsImpl
bool GetSendMessageStatus() override { return !*fail_send_message_; }
- std::multimap<TString, TString>* GetSendInitialMetadata() override {
+ std::multimap<TString, TString>* GetSendInitialMetadata() override {
return send_initial_metadata_;
}
@@ -119,7 +119,7 @@ class InterceptorBatchMethodsImpl
*error_message_ = status.error_message();
}
- std::multimap<TString, TString>* GetSendTrailingMetadata() override {
+ std::multimap<TString, TString>* GetSendTrailingMetadata() override {
return send_trailing_metadata_;
}
@@ -153,25 +153,25 @@ class InterceptorBatchMethodsImpl
}
void SetSendInitialMetadata(
- std::multimap<TString, TString>* metadata) {
+ std::multimap<TString, TString>* metadata) {
send_initial_metadata_ = metadata;
}
- void SetSendStatus(grpc_status_code* code, TString* error_details,
- TString* error_message) {
+ void SetSendStatus(grpc_status_code* code, TString* error_details,
+ TString* error_message) {
code_ = code;
error_details_ = error_details;
error_message_ = error_message;
}
void SetSendTrailingMetadata(
- std::multimap<TString, TString>* metadata) {
+ std::multimap<TString, TString>* metadata) {
send_trailing_metadata_ = metadata;
}
- void SetRecvMessage(void* message, bool* hijacked_recv_message_failed) {
+ void SetRecvMessage(void* message, bool* hijacked_recv_message_failed) {
recv_message_ = message;
- hijacked_recv_message_failed_ = hijacked_recv_message_failed;
+ hijacked_recv_message_failed_ = hijacked_recv_message_failed;
}
void SetRecvInitialMetadata(MetadataMap* map) {
@@ -198,7 +198,7 @@ class InterceptorBatchMethodsImpl
void FailHijackedRecvMessage() override {
GPR_CODEGEN_ASSERT(hooks_[static_cast<size_t>(
experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)]);
- *hijacked_recv_message_failed_ = true;
+ *hijacked_recv_message_failed_ = true;
}
// Clears all state
@@ -401,16 +401,16 @@ class InterceptorBatchMethodsImpl
const void** orig_send_message_ = nullptr;
std::function<Status(const void*)> serializer_;
- std::multimap<TString, TString>* send_initial_metadata_;
+ std::multimap<TString, TString>* send_initial_metadata_;
grpc_status_code* code_ = nullptr;
- TString* error_details_ = nullptr;
- TString* error_message_ = nullptr;
+ TString* error_details_ = nullptr;
+ TString* error_message_ = nullptr;
- std::multimap<TString, TString>* send_trailing_metadata_ = nullptr;
+ std::multimap<TString, TString>* send_trailing_metadata_ = nullptr;
void* recv_message_ = nullptr;
- bool* hijacked_recv_message_failed_ = nullptr;
+ bool* hijacked_recv_message_failed_ = nullptr;
MetadataMap* recv_initial_metadata_ = nullptr;
@@ -475,7 +475,7 @@ class CancelInterceptorBatchMethods
"has a Cancel notification");
}
- std::multimap<TString, TString>* GetSendInitialMetadata() override {
+ std::multimap<TString, TString>* GetSendInitialMetadata() override {
GPR_CODEGEN_ASSERT(false &&
"It is illegal to call GetSendInitialMetadata on a "
"method which has a Cancel notification");
@@ -496,7 +496,7 @@ class CancelInterceptorBatchMethods
return;
}
- std::multimap<TString, TString>* GetSendTrailingMetadata() override {
+ std::multimap<TString, TString>* GetSendTrailingMetadata() override {
GPR_CODEGEN_ASSERT(false &&
"It is illegal to call GetSendTrailingMetadata on a "
"method which has a Cancel notification");
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
index 0299bb675d..4048ea1197 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
@@ -20,9 +20,9 @@
#define GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H
namespace grpc {
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
// NOTE: This is an API for advanced users who need custom allocators.
// Per rpc struct for the allocator. This is the interface to return to user.
@@ -69,25 +69,25 @@ class MessageAllocator {
virtual MessageHolder<RequestT, ResponseT>* AllocateMessages() = 0;
};
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-#endif
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
-namespace experimental {
-
-using ::grpc::RpcAllocatorState;
-
-template <typename RequestT, typename ResponseT>
-using MessageHolder = ::grpc::MessageHolder<RequestT, ResponseT>;
-
-template <typename RequestT, typename ResponseT>
-using MessageAllocator = ::grpc::MessageAllocator<RequestT, ResponseT>;
-
-} // namespace experimental
-#endif
-
+#endif
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+
+using ::grpc::RpcAllocatorState;
+
+template <typename RequestT, typename ResponseT>
+using MessageHolder = ::grpc::MessageHolder<RequestT, ResponseT>;
+
+template <typename RequestT, typename ResponseT>
+using MessageAllocator = ::grpc::MessageAllocator<RequestT, ResponseT>;
+
+} // namespace experimental
+#endif
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
index 1471153676..03afc0781a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
@@ -36,12 +36,12 @@ class MetadataMap {
~MetadataMap() { Destroy(); }
- TString GetBinaryErrorDetails() {
+ TString GetBinaryErrorDetails() {
// if filled_, extract from the multimap for O(log(n))
if (filled_) {
auto iter = map_.find(kBinaryErrorDetailsKey);
if (iter != map_.end()) {
- return TString(iter->second.begin(), iter->second.length());
+ return TString(iter->second.begin(), iter->second.length());
}
}
// if not yet filled, take the O(n) lookup to avoid allocating the
@@ -54,13 +54,13 @@ class MetadataMap {
GRPC_SLICE_START_PTR(arr_.metadata[i].key)),
kBinaryErrorDetailsKey,
GRPC_SLICE_LENGTH(arr_.metadata[i].key)) == 0) {
- return TString(reinterpret_cast<const char*>(
- GRPC_SLICE_START_PTR(arr_.metadata[i].value)),
- GRPC_SLICE_LENGTH(arr_.metadata[i].value));
+ return TString(reinterpret_cast<const char*>(
+ GRPC_SLICE_START_PTR(arr_.metadata[i].value)),
+ GRPC_SLICE_LENGTH(arr_.metadata[i].value));
}
}
}
- return TString();
+ return TString();
}
std::multimap<grpc::string_ref, grpc::string_ref>* map() {
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
index c2b1c3a924..0033936b04 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,362 +19,362 @@
#ifndef GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H
#define GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H
-#include <grpcpp/impl/codegen/byte_buffer.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/sync_stream.h>
+#include <grpcpp/impl/codegen/byte_buffer.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/rpc_service_method.h>
+#include <grpcpp/impl/codegen/sync_stream.h>
namespace grpc {
namespace internal {
-// Invoke the method handler, fill in the status, and
-// return whether or not we finished safely (without an exception).
-// Note that exception handling is 0-cost in most compiler/library
-// implementations (except when an exception is actually thrown),
-// so this process doesn't require additional overhead in the common case.
-// Additionally, we don't need to return if we caught an exception or not;
-// the handling is the same in either case.
-template <class Callable>
-::grpc::Status CatchingFunctionHandler(Callable&& handler) {
-#if GRPC_ALLOW_EXCEPTIONS
- try {
- return handler();
- } catch (...) {
- return ::grpc::Status(::grpc::StatusCode::UNKNOWN,
- "Unexpected error in RPC handling");
- }
-#else // GRPC_ALLOW_EXCEPTIONS
- return handler();
-#endif // GRPC_ALLOW_EXCEPTIONS
-}
-
-/// A wrapper class of an application provided rpc method handler.
+// Invoke the method handler, fill in the status, and
+// return whether or not we finished safely (without an exception).
+// Note that exception handling is 0-cost in most compiler/library
+// implementations (except when an exception is actually thrown),
+// so this process doesn't require additional overhead in the common case.
+// Additionally, we don't need to return if we caught an exception or not;
+// the handling is the same in either case.
+template <class Callable>
+::grpc::Status CatchingFunctionHandler(Callable&& handler) {
+#if GRPC_ALLOW_EXCEPTIONS
+ try {
+ return handler();
+ } catch (...) {
+ return ::grpc::Status(::grpc::StatusCode::UNKNOWN,
+ "Unexpected error in RPC handling");
+ }
+#else // GRPC_ALLOW_EXCEPTIONS
+ return handler();
+#endif // GRPC_ALLOW_EXCEPTIONS
+}
+
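+// Illustrative sketch: with GRPC_ALLOW_EXCEPTIONS defined, a handler that
+// throws is converted into a non-OK status instead of propagating the
+// exception (DoWork is a hypothetical application function that may throw):
+//
+//   ::grpc::Status st = CatchingFunctionHandler([&] {
+//     DoWork();  // may throw
+//     return ::grpc::Status::OK;
+//   });
+//   // st.error_code() == ::grpc::StatusCode::UNKNOWN if DoWork threw.
+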
+/// A wrapper class of an application-provided RPC method handler.
template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler : public ::grpc::internal::MethodHandler {
- public:
- RpcMethodHandler(
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ResponseType*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ResponseType rsp;
- ::grpc::Status status = param.status;
- if (status.ok()) {
- status = CatchingFunctionHandler([this, &param, &rsp] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- static_cast<RequestType*>(param.request), &rsp);
- });
- static_cast<RequestType*>(param.request)->~RequestType();
- }
-
- GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- if (status.ok()) {
- status = ops.SendMessagePtr(&rsp);
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
- ::grpc::Status* status, void** /*handler_data*/) final {
- ::grpc::ByteBuffer buf;
- buf.set_buffer(req);
- auto* request =
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call, sizeof(RequestType))) RequestType();
- *status =
- ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
- buf.Release();
- if (status->ok()) {
- return request;
- }
- request->~RequestType();
- return nullptr;
- }
-
- private:
- /// Application provided rpc handler function.
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ResponseType*)>
- func_;
- // The class the above handler function lives in.
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided client streaming handler.
+class RpcMethodHandler : public ::grpc::internal::MethodHandler {
+ public:
+ RpcMethodHandler(
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ResponseType*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ResponseType rsp;
+ ::grpc::Status status = param.status;
+ if (status.ok()) {
+ status = CatchingFunctionHandler([this, &param, &rsp] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ static_cast<RequestType*>(param.request), &rsp);
+ });
+ static_cast<RequestType*>(param.request)->~RequestType();
+ }
+
+ GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ if (status.ok()) {
+ status = ops.SendMessagePtr(&rsp);
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+ ::grpc::Status* status, void** /*handler_data*/) final {
+ ::grpc::ByteBuffer buf;
+ buf.set_buffer(req);
+ auto* request =
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call, sizeof(RequestType))) RequestType();
+ *status =
+ ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+ buf.Release();
+ if (status->ok()) {
+ return request;
+ }
+ request->~RequestType();
+ return nullptr;
+ }
+
+ private:
+ /// Application provided rpc handler function.
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ResponseType*)>
+ func_;
+ // The class the above handler function lives in.
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application-provided client streaming handler.
template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- ClientStreamingHandler(
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- ServerReader<RequestType>*, ResponseType*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ServerReader<RequestType> reader(
- param.call, static_cast<::grpc::ServerContext*>(param.server_context));
- ResponseType rsp;
- ::grpc::Status status = CatchingFunctionHandler([this, &param, &reader,
- &rsp] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- &reader, &rsp);
- });
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- }
- if (status.ok()) {
- status = ops.SendMessagePtr(&rsp);
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- private:
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- ServerReader<RequestType>*, ResponseType*)>
- func_;
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided server streaming handler.
+class ClientStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ ClientStreamingHandler(
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ ServerReader<RequestType>*, ResponseType*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ServerReader<RequestType> reader(
+ param.call, static_cast<::grpc::ServerContext*>(param.server_context));
+ ResponseType rsp;
+ ::grpc::Status status = CatchingFunctionHandler([this, &param, &reader,
+ &rsp] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ &reader, &rsp);
+ });
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ }
+ if (status.ok()) {
+ status = ops.SendMessagePtr(&rsp);
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ private:
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ ServerReader<RequestType>*, ResponseType*)>
+ func_;
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application-provided server streaming handler.
template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- ServerStreamingHandler(std::function<::grpc::Status(
- ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ServerWriter<ResponseType>*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ::grpc::Status status = param.status;
- if (status.ok()) {
- ServerWriter<ResponseType> writer(
- param.call,
- static_cast<::grpc::ServerContext*>(param.server_context));
- status = CatchingFunctionHandler([this, &param, &writer] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- static_cast<RequestType*>(param.request), &writer);
- });
- static_cast<RequestType*>(param.request)->~RequestType();
- }
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- if (param.server_context->has_pending_ops_) {
- param.call->cq()->Pluck(&param.server_context->pending_ops_);
- }
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
- ::grpc::Status* status, void** /*handler_data*/) final {
- ::grpc::ByteBuffer buf;
- buf.set_buffer(req);
- auto* request =
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call, sizeof(RequestType))) RequestType();
- *status =
- ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
- buf.Release();
- if (status->ok()) {
- return request;
- }
- request->~RequestType();
- return nullptr;
- }
-
- private:
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ServerWriter<ResponseType>*)>
- func_;
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided bidi-streaming handler.
-/// This also applies to server-streamed implementation of a unary method
-/// with the additional requirement that such methods must have done a
-/// write for status to be ok
-/// Since this is used by more than 1 class, the service is not passed in.
-/// Instead, it is expected to be an implicitly-captured argument of func
-/// (through bind or something along those lines)
+class ServerStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ ServerStreamingHandler(std::function<::grpc::Status(
+ ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ServerWriter<ResponseType>*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ::grpc::Status status = param.status;
+ if (status.ok()) {
+ ServerWriter<ResponseType> writer(
+ param.call,
+ static_cast<::grpc::ServerContext*>(param.server_context));
+ status = CatchingFunctionHandler([this, &param, &writer] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ static_cast<RequestType*>(param.request), &writer);
+ });
+ static_cast<RequestType*>(param.request)->~RequestType();
+ }
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ if (param.server_context->has_pending_ops_) {
+ param.call->cq()->Pluck(&param.server_context->pending_ops_);
+ }
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+ ::grpc::Status* status, void** /*handler_data*/) final {
+ ::grpc::ByteBuffer buf;
+ buf.set_buffer(req);
+ auto* request =
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call, sizeof(RequestType))) RequestType();
+ *status =
+ ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+ buf.Release();
+ if (status->ok()) {
+ return request;
+ }
+ request->~RequestType();
+ return nullptr;
+ }
+
+ private:
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ServerWriter<ResponseType>*)>
+ func_;
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application-provided bidi-streaming handler.
+/// This also applies to a server-streamed implementation of a unary method,
+/// with the additional requirement that such methods must have performed a
+/// write for the status to be OK.
+/// Since this is used by more than one class, the service is not passed in.
+/// Instead, it is expected to be an implicitly-captured argument of func
+/// (through bind or something along those lines).
template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- TemplatedBidiStreamingHandler(
- std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func)
- : func_(func), write_needed_(WriteNeeded) {}
-
- void RunHandler(const HandlerParameter& param) final {
- Streamer stream(param.call,
- static_cast<::grpc::ServerContext*>(param.server_context));
- ::grpc::Status status = CatchingFunctionHandler([this, &param, &stream] {
- return func_(static_cast<::grpc::ServerContext*>(param.server_context),
- &stream);
- });
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- if (write_needed_ && status.ok()) {
- // If we needed a write but never did one, we need to mark the
- // status as a fail
- status = ::grpc::Status(::grpc::StatusCode::INTERNAL,
- "Service did not provide response message");
- }
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- if (param.server_context->has_pending_ops_) {
- param.call->cq()->Pluck(&param.server_context->pending_ops_);
- }
- param.call->cq()->Pluck(&ops);
- }
-
- private:
- std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func_;
- const bool write_needed_;
-};
-
-template <class ServiceType, class RequestType, class ResponseType>
-class BidiStreamingHandler
- : public TemplatedBidiStreamingHandler<
- ServerReaderWriter<ResponseType, RequestType>, false> {
- public:
- BidiStreamingHandler(std::function<::grpc::Status(
- ServiceType*, ::grpc::ServerContext*,
- ServerReaderWriter<ResponseType, RequestType>*)>
- func,
- ServiceType* service)
- // TODO(vjpai): When gRPC supports C++14, move-capture func in the below
- : TemplatedBidiStreamingHandler<
- ServerReaderWriter<ResponseType, RequestType>, false>(
- [func, service](
- ::grpc::ServerContext* ctx,
- ServerReaderWriter<ResponseType, RequestType>* streamer) {
- return func(service, ctx, streamer);
- }) {}
-};
-
+class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ TemplatedBidiStreamingHandler(
+ std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func)
+ : func_(func), write_needed_(WriteNeeded) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ Streamer stream(param.call,
+ static_cast<::grpc::ServerContext*>(param.server_context));
+ ::grpc::Status status = CatchingFunctionHandler([this, &param, &stream] {
+ return func_(static_cast<::grpc::ServerContext*>(param.server_context),
+ &stream);
+ });
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ if (write_needed_ && status.ok()) {
+ // If we needed a write but never did one, we need to mark the
+ // status as a fail
+ status = ::grpc::Status(::grpc::StatusCode::INTERNAL,
+ "Service did not provide response message");
+ }
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ if (param.server_context->has_pending_ops_) {
+ param.call->cq()->Pluck(&param.server_context->pending_ops_);
+ }
+ param.call->cq()->Pluck(&ops);
+ }
+
+ private:
+ std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func_;
+ const bool write_needed_;
+};
+
+template <class ServiceType, class RequestType, class ResponseType>
+class BidiStreamingHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false> {
+ public:
+ BidiStreamingHandler(std::function<::grpc::Status(
+ ServiceType*, ::grpc::ServerContext*,
+ ServerReaderWriter<ResponseType, RequestType>*)>
+ func,
+ ServiceType* service)
+ // TODO(vjpai): When gRPC supports C++14, move-capture func in the below
+ : TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false>(
+ [func, service](
+ ::grpc::ServerContext* ctx,
+ ServerReaderWriter<ResponseType, RequestType>* streamer) {
+ return func(service, ctx, streamer);
+ }) {}
+};
+
template <class RequestType, class ResponseType>
-class StreamedUnaryHandler
- : public TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true> {
- public:
- explicit StreamedUnaryHandler(
- std::function<
- ::grpc::Status(::grpc::ServerContext*,
- ServerUnaryStreamer<RequestType, ResponseType>*)>
- func)
- : TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true>(
- std::move(func)) {}
-};
+class StreamedUnaryHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true> {
+ public:
+ explicit StreamedUnaryHandler(
+ std::function<
+ ::grpc::Status(::grpc::ServerContext*,
+ ServerUnaryStreamer<RequestType, ResponseType>*)>
+ func)
+ : TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true>(
+ std::move(func)) {}
+};
template <class RequestType, class ResponseType>
-class SplitServerStreamingHandler
- : public TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false> {
- public:
- explicit SplitServerStreamingHandler(
- std::function<
- ::grpc::Status(::grpc::ServerContext*,
- ServerSplitStreamer<RequestType, ResponseType>*)>
- func)
- : TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false>(
- std::move(func)) {}
-};
-
-/// General method handler class for errors that prevent real method use
-/// e.g., handle unknown method by returning UNIMPLEMENTED error.
-template <::grpc::StatusCode code>
-class ErrorMethodHandler : public ::grpc::internal::MethodHandler {
- public:
- template <class T>
- static void FillOps(::grpc::ServerContextBase* context, T* ops) {
- ::grpc::Status status(code, "");
- if (!context->sent_initial_metadata_) {
- ops->SendInitialMetadata(&context->initial_metadata_,
- context->initial_metadata_flags());
- if (context->compression_level_set()) {
- ops->set_compression_level(context->compression_level());
- }
- context->sent_initial_metadata_ = true;
- }
- ops->ServerSendStatus(&context->trailing_metadata_, status);
- }
-
- void RunHandler(const HandlerParameter& param) final {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- FillOps(param.server_context, &ops);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req,
- ::grpc::Status* /*status*/, void** /*handler_data*/) final {
- // We have to destroy any request payload
- if (req != nullptr) {
- ::grpc::g_core_codegen_interface->grpc_byte_buffer_destroy(req);
- }
- return nullptr;
- }
-};
-
-typedef ErrorMethodHandler<::grpc::StatusCode::UNIMPLEMENTED>
- UnknownMethodHandler;
-typedef ErrorMethodHandler<::grpc::StatusCode::RESOURCE_EXHAUSTED>
- ResourceExhaustedHandler;
-
+class SplitServerStreamingHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false> {
+ public:
+ explicit SplitServerStreamingHandler(
+ std::function<
+ ::grpc::Status(::grpc::ServerContext*,
+ ServerSplitStreamer<RequestType, ResponseType>*)>
+ func)
+ : TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false>(
+ std::move(func)) {}
+};
+
+/// General method handler class for errors that prevent real method use
+/// e.g., handle unknown method by returning UNIMPLEMENTED error.
+template <::grpc::StatusCode code>
+class ErrorMethodHandler : public ::grpc::internal::MethodHandler {
+ public:
+ template <class T>
+ static void FillOps(::grpc::ServerContextBase* context, T* ops) {
+ ::grpc::Status status(code, "");
+ if (!context->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&context->initial_metadata_,
+ context->initial_metadata_flags());
+ if (context->compression_level_set()) {
+ ops->set_compression_level(context->compression_level());
+ }
+ context->sent_initial_metadata_ = true;
+ }
+ ops->ServerSendStatus(&context->trailing_metadata_, status);
+ }
+
+ void RunHandler(const HandlerParameter& param) final {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ FillOps(param.server_context, &ops);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req,
+ ::grpc::Status* /*status*/, void** /*handler_data*/) final {
+ // We have to destroy any request payload
+ if (req != nullptr) {
+ ::grpc::g_core_codegen_interface->grpc_byte_buffer_destroy(req);
+ }
+ return nullptr;
+ }
+};
+
+typedef ErrorMethodHandler<::grpc::StatusCode::UNIMPLEMENTED>
+ UnknownMethodHandler;
+typedef ErrorMethodHandler<::grpc::StatusCode::RESOURCE_EXHAUSTED>
+ ResourceExhaustedHandler;
+
} // namespace internal
} // namespace grpc
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
index 62c5dd5ea6..2e102135a3 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
@@ -49,7 +49,7 @@ Status GenericSerialize(const grpc::protobuf::MessageLite& msg, ByteBuffer* bb,
"ProtoBufferWriter must be a subclass of "
"::protobuf::io::ZeroCopyOutputStream");
*own_buffer = true;
- int byte_size = msg.ByteSizeLong();
+ int byte_size = msg.ByteSizeLong();
if ((size_t)byte_size <= GRPC_SLICE_INLINED_SIZE) {
Slice slice(byte_size);
// We serialize directly into the allocated slices memory
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
index 8366537360..4fcc211243 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
@@ -31,7 +31,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class ServerContextBase;
namespace internal {
/// Base class for running an RPC handler.
@@ -49,8 +49,8 @@ class MethodHandler {
/// \param requester : used only by the callback API. It is a function
/// called by the RPC Controller to request another RPC (and also
/// to set up the state required to make that request possible)
- HandlerParameter(Call* c, ::grpc::ServerContextBase* context, void* req,
- Status req_status, void* handler_data,
+ HandlerParameter(Call* c, ::grpc::ServerContextBase* context, void* req,
+ Status req_status, void* handler_data,
std::function<void()> requester)
: call(c),
server_context(context),
@@ -60,7 +60,7 @@ class MethodHandler {
call_requester(std::move(requester)) {}
~HandlerParameter() {}
Call* const call;
- ::grpc::ServerContextBase* const server_context;
+ ::grpc::ServerContextBase* const server_context;
void* const request;
const Status status;
void* const internal_data;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
index 94f58b180b..220b78f2eb 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
@@ -74,18 +74,18 @@ class AuthContext {
/// It is, in general, comprised of one or more properties (in which case they
/// have the same name).
virtual std::vector<grpc::string_ref> GetPeerIdentity() const = 0;
- virtual TString GetPeerIdentityPropertyName() const = 0;
+ virtual TString GetPeerIdentityPropertyName() const = 0;
/// Returns all the property values with the given name.
virtual std::vector<grpc::string_ref> FindPropertyValues(
- const TString& name) const = 0;
+ const TString& name) const = 0;
/// Iteration over all the properties.
virtual AuthPropertyIterator begin() const = 0;
virtual AuthPropertyIterator end() const = 0;
/// Mutation functions: should only be used by an AuthMetadataProcessor.
- virtual void AddProperty(const TString& key, const string_ref& value) = 0;
+ virtual void AddProperty(const TString& key, const string_ref& value) = 0;
virtual bool SetPeerIdentityPropertyName(const string& name) = 0;
};
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
index d43f2a4e2c..3794a9ffa7 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,777 +18,777 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
-#include <atomic>
-#include <functional>
-#include <type_traits>
-
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/message_allocator.h>
-#include <grpcpp/impl/codegen/status.h>
-
+#include <atomic>
+#include <functional>
+#include <type_traits>
+
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/message_allocator.h>
+#include <grpcpp/impl/codegen/status.h>
+
namespace grpc {
-
-// Declare base class of all reactors as internal
-namespace internal {
-
-// Forward declarations
-template <class Request, class Response>
-class CallbackUnaryHandler;
-template <class Request, class Response>
-class CallbackClientStreamingHandler;
-template <class Request, class Response>
-class CallbackServerStreamingHandler;
-template <class Request, class Response>
-class CallbackBidiHandler;
-
-class ServerReactor {
- public:
- virtual ~ServerReactor() = default;
- virtual void OnDone() = 0;
- virtual void OnCancel() = 0;
-
- // The following is not API. It is for internal use only and specifies whether
- // all reactions of this Reactor can be run without an extra executor
- // scheduling. This should only be used for internally-defined reactors with
- // trivial reactions.
- virtual bool InternalInlineable() { return false; }
-
- private:
- template <class Request, class Response>
- friend class CallbackUnaryHandler;
- template <class Request, class Response>
- friend class CallbackClientStreamingHandler;
- template <class Request, class Response>
- friend class CallbackServerStreamingHandler;
- template <class Request, class Response>
- friend class CallbackBidiHandler;
-};
-
-/// The base class of ServerCallbackUnary etc.
-class ServerCallbackCall {
- public:
- virtual ~ServerCallbackCall() {}
-
- // This object is responsible for tracking when it is safe to call OnDone and
- // OnCancel. OnDone should not be called until the method handler is complete,
- // Finish has been called, the ServerContext CompletionOp (which tracks
- // cancellation or successful completion) has completed, and all outstanding
- // Read/Write actions have seen their reactions. OnCancel should not be called
- // until after the method handler is done and the RPC has completed with a
- // cancellation. This is tracked by counting how many of these conditions have
- // been met and calling OnCancel when none remain unmet.
-
- // Public versions of MaybeDone: one where we don't know the reactor in
- // advance (used for the ServerContext CompletionOp), and one where we
- // know the inlineability of the OnDone reaction. You should set the inline
- // flag to true if either the Reactor is InternalInlineable() or if this
- // callback is already being dispatched to an executor
- // (typically because it does more work than just the MaybeDone).
-
- void MaybeDone() {
- if (GPR_UNLIKELY(Unref() == 1)) {
- ScheduleOnDone(reactor()->InternalInlineable());
- }
- }
-
- void MaybeDone(bool inline_ondone) {
- if (GPR_UNLIKELY(Unref() == 1)) {
- ScheduleOnDone(inline_ondone);
- }
- }
-
- // Fast version called with known reactor passed in, used from derived
- // classes, typically in non-cancel case
- void MaybeCallOnCancel(ServerReactor* reactor) {
- if (GPR_UNLIKELY(UnblockCancellation())) {
- CallOnCancel(reactor);
- }
- }
-
- // Slower version called from object that doesn't know the reactor a priori
- // (such as the ServerContext CompletionOp which is formed before the
- // reactor). This is used in cancel cases only, so it's ok to be slower and
- // invoke a virtual function.
- void MaybeCallOnCancel() {
- if (GPR_UNLIKELY(UnblockCancellation())) {
- CallOnCancel(reactor());
- }
- }
-
- protected:
- /// Increases the reference count
- void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
-
- private:
- virtual ServerReactor* reactor() = 0;
-
- // CallOnDone performs the work required at completion of the RPC: invoking
- // the OnDone function and doing all necessary cleanup. This function is only
- // ever invoked on a fully-Unref'fed ServerCallbackCall.
- virtual void CallOnDone() = 0;
-
- // If the OnDone reaction is inlineable, execute it inline. Otherwise send it
- // to an executor.
- void ScheduleOnDone(bool inline_ondone);
-
- // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
- // it to an executor.
- void CallOnCancel(ServerReactor* reactor);
-
- // Implement the cancellation constraint counter. Return true if OnCancel
- // should be called, false otherwise.
- bool UnblockCancellation() {
- return on_cancel_conditions_remaining_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1;
- }
-
- /// Decreases the reference count and returns the previous value
- int Unref() {
- return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
- }
-
- std::atomic_int on_cancel_conditions_remaining_{2};
- std::atomic_int callbacks_outstanding_{
- 3}; // reserve for start, Finish, and CompletionOp
-};
-
-template <class Request, class Response>
-class DefaultMessageHolder
- : public ::grpc::experimental::MessageHolder<Request, Response> {
- public:
- DefaultMessageHolder() {
- this->set_request(&request_obj_);
- this->set_response(&response_obj_);
- }
- void Release() override {
- // the object is allocated in the call arena.
- this->~DefaultMessageHolder<Request, Response>();
- }
-
- private:
- Request request_obj_;
- Response response_obj_;
-};
-
-} // namespace internal
-
-// Forward declarations
-class ServerUnaryReactor;
-template <class Request>
-class ServerReadReactor;
-template <class Response>
-class ServerWriteReactor;
-template <class Request, class Response>
-class ServerBidiReactor;
-
-// NOTE: The actual call/stream object classes are provided as API only to
-// support mocking. There are no implementations of these class interfaces in
-// the API.
-class ServerCallbackUnary : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackUnary() {}
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
-
- protected:
- // Use a template rather than explicitly specifying ServerUnaryReactor to
- // delay binding and avoid a circular forward declaration issue
- template <class Reactor>
- void BindReactor(Reactor* reactor) {
- reactor->InternalBindCall(this);
- }
-};
-
-template <class Request>
-class ServerCallbackReader : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackReader() {}
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Read(Request* msg) = 0;
-
- protected:
- void BindReactor(ServerReadReactor<Request>* reactor) {
- reactor->InternalBindReader(this);
- }
-};
-
-template <class Response>
-class ServerCallbackWriter : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackWriter() {}
-
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
- virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
- ::grpc::Status s) = 0;
-
- protected:
- void BindReactor(ServerWriteReactor<Response>* reactor) {
- reactor->InternalBindWriter(this);
- }
-};
-
-template <class Request, class Response>
-class ServerCallbackReaderWriter : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackReaderWriter() {}
-
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Read(Request* msg) = 0;
- virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
- virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
- ::grpc::Status s) = 0;
-
- protected:
- void BindReactor(ServerBidiReactor<Request, Response>* reactor) {
- reactor->InternalBindStream(this);
- }
-};
-
-// The following classes are the reactor interfaces that are to be implemented
-// by the user, returned as the output parameter of the method handler for a
-// callback method. Note that none of the classes are pure; all reactions have a
-// default empty reaction so that the user class only needs to override those
-// reactions that it cares about.
-
-/// \a ServerBidiReactor is the interface for a bidirectional streaming RPC.
-template <class Request, class Response>
-class ServerBidiReactor : public internal::ServerReactor {
- public:
- // NOTE: Initializing stream_ as a constructor initializer rather than a
- // default initializer because gcc-4.x requires a copy constructor for
- // default initializing a templated member, which isn't ok for atomic.
- // TODO(vjpai): Switch to default constructor and default initializer when
- // gcc-4.x is no longer supported
- ServerBidiReactor() : stream_(nullptr) {}
- ~ServerBidiReactor() = default;
-
- /// Send any initial metadata stored in the RPC context. If not invoked,
- /// any initial metadata will be passed along with the first Write or the
- /// Finish (if there are no writes).
- void StartSendInitialMetadata() {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- stream->SendInitialMetadata();
- }
-
- /// Initiate a read operation.
- ///
- /// \param[out] req Where to eventually store the read message. Valid when
- /// the library calls OnReadDone
- void StartRead(Request* req) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.read_wanted = req;
- return;
- }
- }
- stream->Read(req);
- }
-
- /// Initiate a write operation.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- void StartWrite(const Response* resp) {
- StartWrite(resp, ::grpc::WriteOptions());
- }
-
- /// Initiate a write operation with specified options.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- return;
- }
- }
- stream->Write(resp, std::move(options));
- }
-
- /// Initiate a write operation with specified options and final RPC Status,
- /// which also causes any trailing metadata for this RPC to be sent out.
- /// StartWriteAndFinish is like merging StartWriteLast and Finish into a
- /// single step. A key difference, though, is that this operation doesn't have
- /// an OnWriteDone reaction - it is considered complete only when OnDone is
- /// available. An RPC can either have StartWriteAndFinish or Finish, but not
- /// both.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- /// \param[in] s The status outcome of this RPC
- void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
- ::grpc::Status s) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.write_and_finish_wanted = true;
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- stream->WriteAndFinish(resp, std::move(options), std::move(s));
- }
-
- /// Inform system of a planned write operation with specified options, but
- /// allow the library to schedule the actual write coalesced with the writing
- /// of trailing metadata (which takes place on a Finish call).
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
- StartWrite(resp, std::move(options.set_last_message()));
- }
-
- /// Indicate that the stream is to be finished and the trailing metadata and
- /// RPC status are to be sent. Every RPC MUST be finished using either Finish
- /// or StartWriteAndFinish (but not both), even if the RPC is already
- /// cancelled.
- ///
- /// \param[in] s The status outcome of this RPC
- void Finish(::grpc::Status s) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- stream->Finish(std::move(s));
- }
-
- /// Notifies the application that an explicit StartSendInitialMetadata
- /// operation completed. Not used when the sending of initial metadata
- /// piggybacks onto the first write.
- ///
- /// \param[in] ok Was it successful? If false, no further write-side operation
- /// will succeed.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartRead operation completed.
- ///
- /// \param[in] ok Was it successful? If false, no further read-side operation
- /// will succeed.
- virtual void OnReadDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWrite (or StartWriteLast) operation
- /// completed.
- ///
- /// \param[in] ok Was it successful? If false, no further write-side operation
- /// will succeed.
- virtual void OnWriteDone(bool /*ok*/) {}
-
- /// Notifies the application that all operations associated with this RPC
- /// have completed. This is an override (from the internal base class) but
- /// still abstract, so derived classes MUST override it to be instantiated.
- void OnDone() override = 0;
-
- /// Notifies the application that this RPC has been cancelled. This is an
- /// override (from the internal base class) but not final, so derived classes
- /// should override it if they want to take action.
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackReaderWriter<Request, Response>;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindStream(
- ServerCallbackReaderWriter<Request, Response>* stream) {
- grpc::internal::MutexLock l(&stream_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- stream->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
- stream->Read(backlog_.read_wanted);
- }
- if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
- stream->WriteAndFinish(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted),
- std::move(backlog_.status_wanted));
- } else {
- if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
- stream->Write(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted));
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- stream->Finish(std::move(backlog_.status_wanted));
- }
- }
- // Set stream_ last so that other functions can use it lock-free
- stream_.store(stream, std::memory_order_release);
- }
-
- grpc::internal::Mutex stream_mu_;
- // TODO(vjpai): Make stream_or_backlog_ into a std::variant or y_absl::variant
- // once C++17 or ABSL is supported since stream and backlog are
- // mutually exclusive in this class. Do likewise with the
- // remaining reactor classes and their backlogs as well.
- std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool write_and_finish_wanted = false;
- bool finish_wanted = false;
- Request* read_wanted = nullptr;
- const Response* write_wanted = nullptr;
- ::grpc::WriteOptions write_options_wanted;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(stream_mu_) */;
-};
-
-/// \a ServerReadReactor is the interface for a client-streaming RPC.
-template <class Request>
-class ServerReadReactor : public internal::ServerReactor {
- public:
- ServerReadReactor() : reader_(nullptr) {}
- ~ServerReadReactor() = default;
-
- /// The following operation initiations are exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- reader->SendInitialMetadata();
- }
- void StartRead(Request* req) {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.read_wanted = req;
- return;
- }
- }
- reader->Read(req);
- }
- void Finish(::grpc::Status s) {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- reader->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- virtual void OnReadDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackReader<Request>;
-
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindReader(ServerCallbackReader<Request>* reader) {
- grpc::internal::MutexLock l(&reader_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- reader->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
- reader->Read(backlog_.read_wanted);
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- reader->Finish(std::move(backlog_.status_wanted));
- }
- // Set reader_ last so that other functions can use it lock-free
- reader_.store(reader, std::memory_order_release);
- }
-
- grpc::internal::Mutex reader_mu_;
- std::atomic<ServerCallbackReader<Request>*> reader_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool finish_wanted = false;
- Request* read_wanted = nullptr;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(reader_mu_) */;
-};
-
-/// \a ServerWriteReactor is the interface for a server-streaming RPC.
-template <class Response>
-class ServerWriteReactor : public internal::ServerReactor {
- public:
- ServerWriteReactor() : writer_(nullptr) {}
- ~ServerWriteReactor() = default;
-
- /// The following operation initiations are exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- writer->SendInitialMetadata();
- }
- void StartWrite(const Response* resp) {
- StartWrite(resp, ::grpc::WriteOptions());
- }
- void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- return;
- }
- }
- writer->Write(resp, std::move(options));
- }
- void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
- ::grpc::Status s) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.write_and_finish_wanted = true;
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- writer->WriteAndFinish(resp, std::move(options), std::move(s));
- }
- void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
- StartWrite(resp, std::move(options.set_last_message()));
- }
- void Finish(::grpc::Status s) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- writer->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- virtual void OnWriteDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackWriter<Response>;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) {
- grpc::internal::MutexLock l(&writer_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- writer->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
- writer->WriteAndFinish(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted),
- std::move(backlog_.status_wanted));
- } else {
- if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
- writer->Write(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted));
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- writer->Finish(std::move(backlog_.status_wanted));
- }
- }
- // Set writer_ last so that other functions can use it lock-free
- writer_.store(writer, std::memory_order_release);
- }
-
- grpc::internal::Mutex writer_mu_;
- std::atomic<ServerCallbackWriter<Response>*> writer_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool write_and_finish_wanted = false;
- bool finish_wanted = false;
- const Response* write_wanted = nullptr;
- ::grpc::WriteOptions write_options_wanted;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(writer_mu_) */;
-};
-
-class ServerUnaryReactor : public internal::ServerReactor {
- public:
- ServerUnaryReactor() : call_(nullptr) {}
- ~ServerUnaryReactor() = default;
-
- /// StartSendInitialMetadata is exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
- if (call == nullptr) {
- grpc::internal::MutexLock l(&call_mu_);
- call = call_.load(std::memory_order_relaxed);
- if (call == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- call->SendInitialMetadata();
- }
- /// Finish is similar to ServerBidiReactor except for one detail.
- /// If the status is non-OK, any message will not be sent. Instead,
- /// the client will only receive the status and any trailing metadata.
- void Finish(::grpc::Status s) {
- ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
- if (call == nullptr) {
- grpc::internal::MutexLock l(&call_mu_);
- call = call_.load(std::memory_order_relaxed);
- if (call == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- call->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackUnary;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindCall(ServerCallbackUnary* call) {
- grpc::internal::MutexLock l(&call_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- call->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- call->Finish(std::move(backlog_.status_wanted));
- }
- // Set call_ last so that other functions can use it lock-free
- call_.store(call, std::memory_order_release);
- }
-
- grpc::internal::Mutex call_mu_;
- std::atomic<ServerCallbackUnary*> call_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool finish_wanted = false;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(call_mu_) */;
-};
-
-namespace internal {
-
-template <class Base>
-class FinishOnlyReactor : public Base {
- public:
- explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); }
- void OnDone() override { this->~FinishOnlyReactor(); }
-};
-
-using UnimplementedUnaryReactor = FinishOnlyReactor<ServerUnaryReactor>;
-template <class Request>
-using UnimplementedReadReactor = FinishOnlyReactor<ServerReadReactor<Request>>;
-template <class Response>
-using UnimplementedWriteReactor =
- FinishOnlyReactor<ServerWriteReactor<Response>>;
-template <class Request, class Response>
-using UnimplementedBidiReactor =
- FinishOnlyReactor<ServerBidiReactor<Request, Response>>;
-
-} // namespace internal
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+
+// Declare base class of all reactors as internal
+namespace internal {
+
+// Forward declarations
+template <class Request, class Response>
+class CallbackUnaryHandler;
+template <class Request, class Response>
+class CallbackClientStreamingHandler;
+template <class Request, class Response>
+class CallbackServerStreamingHandler;
+template <class Request, class Response>
+class CallbackBidiHandler;
+
+class ServerReactor {
+ public:
+ virtual ~ServerReactor() = default;
+ virtual void OnDone() = 0;
+ virtual void OnCancel() = 0;
+
+ // The following is not API. It is for internal use only and specifies whether
+ // all reactions of this Reactor can be run without an extra executor
+ // scheduling. This should only be used for internally-defined reactors with
+ // trivial reactions.
+ virtual bool InternalInlineable() { return false; }
+
+ private:
+ template <class Request, class Response>
+ friend class CallbackUnaryHandler;
+ template <class Request, class Response>
+ friend class CallbackClientStreamingHandler;
+ template <class Request, class Response>
+ friend class CallbackServerStreamingHandler;
+ template <class Request, class Response>
+ friend class CallbackBidiHandler;
+};
+
+/// The base class of ServerCallbackUnary etc.
+class ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackCall() {}
+
+ // This object is responsible for tracking when it is safe to call OnDone and
+ // OnCancel. OnDone should not be called until the method handler is complete,
+ // Finish has been called, the ServerContext CompletionOp (which tracks
+ // cancellation or successful completion) has completed, and all outstanding
+ // Read/Write actions have seen their reactions. OnCancel should not be called
+ // until after the method handler is done and the RPC has completed with a
+ // cancellation. This is tracked by counting how many of these conditions have
+ // been met and calling OnCancel when none remain unmet.
+
+ // Public versions of MaybeDone: one where we don't know the reactor in
+ // advance (used for the ServerContext CompletionOp), and one where we
+ // know the inlineability of the OnDone reaction. You should set the inline
+ // flag to true if either the Reactor is InternalInlineable() or if this
+ // callback is already being dispatched to an executor
+ // (typically because it does more work than just the MaybeDone).
+
+ void MaybeDone() {
+ if (GPR_UNLIKELY(Unref() == 1)) {
+ ScheduleOnDone(reactor()->InternalInlineable());
+ }
+ }
+
+ void MaybeDone(bool inline_ondone) {
+ if (GPR_UNLIKELY(Unref() == 1)) {
+ ScheduleOnDone(inline_ondone);
+ }
+ }
+
+ // Fast version called with known reactor passed in, used from derived
+ // classes, typically in non-cancel case
+ void MaybeCallOnCancel(ServerReactor* reactor) {
+ if (GPR_UNLIKELY(UnblockCancellation())) {
+ CallOnCancel(reactor);
+ }
+ }
+
+ // Slower version called from object that doesn't know the reactor a priori
+ // (such as the ServerContext CompletionOp which is formed before the
+ // reactor). This is used in cancel cases only, so it's ok to be slower and
+ // invoke a virtual function.
+ void MaybeCallOnCancel() {
+ if (GPR_UNLIKELY(UnblockCancellation())) {
+ CallOnCancel(reactor());
+ }
+ }
+
+ protected:
+ /// Increases the reference count
+ void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+ virtual ServerReactor* reactor() = 0;
+
+ // CallOnDone performs the work required at completion of the RPC: invoking
+ // the OnDone function and doing all necessary cleanup. This function is only
+ // ever invoked on a fully-Unref'fed ServerCallbackCall.
+ virtual void CallOnDone() = 0;
+
+ // If the OnDone reaction is inlineable, execute it inline. Otherwise send it
+ // to an executor.
+ void ScheduleOnDone(bool inline_ondone);
+
+ // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
+ // it to an executor.
+ void CallOnCancel(ServerReactor* reactor);
+
+ // Implement the cancellation constraint counter. Return true if OnCancel
+ // should be called, false otherwise.
+ bool UnblockCancellation() {
+ return on_cancel_conditions_remaining_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1;
+ }
+
+ /// Decreases the reference count and returns the previous value
+ int Unref() {
+ return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
+ }
+
+ std::atomic_int on_cancel_conditions_remaining_{2};
+ std::atomic_int callbacks_outstanding_{
+ 3}; // reserve for start, Finish, and CompletionOp
+};
+
+template <class Request, class Response>
+class DefaultMessageHolder
+ : public ::grpc::experimental::MessageHolder<Request, Response> {
+ public:
+ DefaultMessageHolder() {
+ this->set_request(&request_obj_);
+ this->set_response(&response_obj_);
+ }
+ void Release() override {
+ // the object is allocated in the call arena.
+ this->~DefaultMessageHolder<Request, Response>();
+ }
+
+ private:
+ Request request_obj_;
+ Response response_obj_;
+};
+
+} // namespace internal
+
+// Forward declarations
+class ServerUnaryReactor;
+template <class Request>
+class ServerReadReactor;
+template <class Response>
+class ServerWriteReactor;
+template <class Request, class Response>
+class ServerBidiReactor;
+
+// NOTE: The actual call/stream object classes are provided as API only to
+// support mocking. There are no implementations of these class interfaces in
+// the API.
+class ServerCallbackUnary : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackUnary() {}
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+
+ protected:
+ // Use a template rather than explicitly specifying ServerUnaryReactor to
+ // delay binding and avoid a circular forward declaration issue
+ template <class Reactor>
+ void BindReactor(Reactor* reactor) {
+ reactor->InternalBindCall(this);
+ }
+};
+
+template <class Request>
+class ServerCallbackReader : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackReader() {}
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Read(Request* msg) = 0;
+
+ protected:
+ void BindReactor(ServerReadReactor<Request>* reactor) {
+ reactor->InternalBindReader(this);
+ }
+};
+
+template <class Response>
+class ServerCallbackWriter : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackWriter() {}
+
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
+ virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
+ ::grpc::Status s) = 0;
+
+ protected:
+ void BindReactor(ServerWriteReactor<Response>* reactor) {
+ reactor->InternalBindWriter(this);
+ }
+};
+
+template <class Request, class Response>
+class ServerCallbackReaderWriter : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackReaderWriter() {}
+
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Read(Request* msg) = 0;
+ virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
+ virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
+ ::grpc::Status s) = 0;
+
+ protected:
+ void BindReactor(ServerBidiReactor<Request, Response>* reactor) {
+ reactor->InternalBindStream(this);
+ }
+};
+
+// The following classes are the reactor interfaces that are to be implemented
+// by the user, returned as the output parameter of the method handler for a
+// callback method. Note that none of the classes are pure; all reactions have a
+// default empty reaction so that the user class only needs to override those
+// reactions that it cares about.
+
+/// \a ServerBidiReactor is the interface for a bidirectional streaming RPC.
+template <class Request, class Response>
+class ServerBidiReactor : public internal::ServerReactor {
+ public:
+ // NOTE: Initializing stream_ as a constructor initializer rather than a
+ // default initializer because gcc-4.x requires a copy constructor for
+ // default initializing a templated member, which isn't ok for atomic.
+ // TODO(vjpai): Switch to default constructor and default initializer when
+ // gcc-4.x is no longer supported
+ ServerBidiReactor() : stream_(nullptr) {}
+ ~ServerBidiReactor() = default;
+
+ /// Send any initial metadata stored in the RPC context. If not invoked,
+ /// any initial metadata will be passed along with the first Write or the
+ /// Finish (if there are no writes).
+ void StartSendInitialMetadata() {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ stream->SendInitialMetadata();
+ }
+
+ /// Initiate a read operation.
+ ///
+ /// \param[out] req Where to eventually store the read message. Valid when
+ /// the library calls OnReadDone
+ void StartRead(Request* req) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.read_wanted = req;
+ return;
+ }
+ }
+ stream->Read(req);
+ }
+
+ /// Initiate a write operation.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ void StartWrite(const Response* resp) {
+ StartWrite(resp, ::grpc::WriteOptions());
+ }
+
+ /// Initiate a write operation with specified options.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ return;
+ }
+ }
+ stream->Write(resp, std::move(options));
+ }
+
+ /// Initiate a write operation with specified options and final RPC Status,
+ /// which also causes any trailing metadata for this RPC to be sent out.
+ /// StartWriteAndFinish is like merging StartWriteLast and Finish into a
+ /// single step. A key difference, though, is that this operation doesn't have
+ /// an OnWriteDone reaction - it is considered complete only when OnDone is
+ /// available. An RPC can either have StartWriteAndFinish or Finish, but not
+ /// both.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ /// \param[in] s The status outcome of this RPC
+ void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
+ ::grpc::Status s) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.write_and_finish_wanted = true;
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ stream->WriteAndFinish(resp, std::move(options), std::move(s));
+ }
+
+ /// Inform system of a planned write operation with specified options, but
+ /// allow the library to schedule the actual write coalesced with the writing
+ /// of trailing metadata (which takes place on a Finish call).
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
+ StartWrite(resp, std::move(options.set_last_message()));
+ }
+
+ /// Indicate that the stream is to be finished and the trailing metadata and
+ /// RPC status are to be sent. Every RPC MUST be finished using either Finish
+ /// or StartWriteAndFinish (but not both), even if the RPC is already
+ /// cancelled.
+ ///
+ /// \param[in] s The status outcome of this RPC
+ void Finish(::grpc::Status s) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ stream->Finish(std::move(s));
+ }
+
+ /// Notifies the application that an explicit StartSendInitialMetadata
+ /// operation completed. Not used when the sending of initial metadata
+ /// piggybacks onto the first write.
+ ///
+ /// \param[in] ok Was it successful? If false, no further write-side operation
+ /// will succeed.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartRead operation completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no further read-side operation
+ /// will succeed.
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWrite (or StartWriteLast) operation
+ /// completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no further write-side operation
+ /// will succeed.
+ virtual void OnWriteDone(bool /*ok*/) {}
+
+ /// Notifies the application that all operations associated with this RPC
+ /// have completed. This is an override (from the internal base class) but
+ /// still abstract, so derived classes MUST override it to be instantiated.
+ void OnDone() override = 0;
+
+ /// Notifies the application that this RPC has been cancelled. This is an
+ /// override (from the internal base class) but not final, so derived classes
+ /// should override it if they want to take action.
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackReaderWriter<Request, Response>;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindStream(
+ ServerCallbackReaderWriter<Request, Response>* stream) {
+ grpc::internal::MutexLock l(&stream_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ stream->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
+ stream->Read(backlog_.read_wanted);
+ }
+ if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
+ stream->WriteAndFinish(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted),
+ std::move(backlog_.status_wanted));
+ } else {
+ if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
+ stream->Write(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted));
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ stream->Finish(std::move(backlog_.status_wanted));
+ }
+ }
+ // Set stream_ last so that other functions can use it lock-free
+ stream_.store(stream, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex stream_mu_;
+ // TODO(vjpai): Make stream_or_backlog_ into a std::variant or y_absl::variant
+ // once C++17 or ABSL is supported since stream and backlog are
+ // mutually exclusive in this class. Do likewise with the
+ // remaining reactor classes and their backlogs as well.
+ std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool write_and_finish_wanted = false;
+ bool finish_wanted = false;
+ Request* read_wanted = nullptr;
+ const Response* write_wanted = nullptr;
+ ::grpc::WriteOptions write_options_wanted;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(stream_mu_) */;
+};
+
+/// \a ServerReadReactor is the interface for a client-streaming RPC.
+template <class Request>
+class ServerReadReactor : public internal::ServerReactor {
+ public:
+ ServerReadReactor() : reader_(nullptr) {}
+ ~ServerReadReactor() = default;
+
+ /// The following operation initiations are exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ reader->SendInitialMetadata();
+ }
+ void StartRead(Request* req) {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.read_wanted = req;
+ return;
+ }
+ }
+ reader->Read(req);
+ }
+ void Finish(::grpc::Status s) {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ reader->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnReadDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackReader<Request>;
+
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindReader(ServerCallbackReader<Request>* reader) {
+ grpc::internal::MutexLock l(&reader_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ reader->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
+ reader->Read(backlog_.read_wanted);
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ reader->Finish(std::move(backlog_.status_wanted));
+ }
+ // Set reader_ last so that other functions can use it lock-free
+ reader_.store(reader, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex reader_mu_;
+ std::atomic<ServerCallbackReader<Request>*> reader_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool finish_wanted = false;
+ Request* read_wanted = nullptr;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(reader_mu_) */;
+};
+
+/// \a ServerWriteReactor is the interface for a server-streaming RPC.
+template <class Response>
+class ServerWriteReactor : public internal::ServerReactor {
+ public:
+ ServerWriteReactor() : writer_(nullptr) {}
+ ~ServerWriteReactor() = default;
+
+ /// The following operation initiations are exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ writer->SendInitialMetadata();
+ }
+ void StartWrite(const Response* resp) {
+ StartWrite(resp, ::grpc::WriteOptions());
+ }
+ void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ return;
+ }
+ }
+ writer->Write(resp, std::move(options));
+ }
+ void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
+ ::grpc::Status s) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.write_and_finish_wanted = true;
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ writer->WriteAndFinish(resp, std::move(options), std::move(s));
+ }
+ void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
+ StartWrite(resp, std::move(options.set_last_message()));
+ }
+ void Finish(::grpc::Status s) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ writer->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnWriteDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackWriter<Response>;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) {
+ grpc::internal::MutexLock l(&writer_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ writer->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
+ writer->WriteAndFinish(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted),
+ std::move(backlog_.status_wanted));
+ } else {
+ if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
+ writer->Write(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted));
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ writer->Finish(std::move(backlog_.status_wanted));
+ }
+ }
+ // Set writer_ last so that other functions can use it lock-free
+ writer_.store(writer, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex writer_mu_;
+ std::atomic<ServerCallbackWriter<Response>*> writer_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool write_and_finish_wanted = false;
+ bool finish_wanted = false;
+ const Response* write_wanted = nullptr;
+ ::grpc::WriteOptions write_options_wanted;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(writer_mu_) */;
+};
+
+class ServerUnaryReactor : public internal::ServerReactor {
+ public:
+ ServerUnaryReactor() : call_(nullptr) {}
+ ~ServerUnaryReactor() = default;
+
+ /// StartSendInitialMetadata is exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+ if (call == nullptr) {
+ grpc::internal::MutexLock l(&call_mu_);
+ call = call_.load(std::memory_order_relaxed);
+ if (call == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ call->SendInitialMetadata();
+ }
+ /// Finish is similar to ServerBidiReactor except for one detail.
+ /// If the status is non-OK, any message will not be sent. Instead,
+ /// the client will only receive the status and any trailing metadata.
+ void Finish(::grpc::Status s) {
+ ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+ if (call == nullptr) {
+ grpc::internal::MutexLock l(&call_mu_);
+ call = call_.load(std::memory_order_relaxed);
+ if (call == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ call->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackUnary;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindCall(ServerCallbackUnary* call) {
+ grpc::internal::MutexLock l(&call_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ call->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ call->Finish(std::move(backlog_.status_wanted));
+ }
+ // Set call_ last so that other functions can use it lock-free
+ call_.store(call, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex call_mu_;
+ std::atomic<ServerCallbackUnary*> call_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool finish_wanted = false;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(call_mu_) */;
+};
+
+namespace internal {
+
+template <class Base>
+class FinishOnlyReactor : public Base {
+ public:
+ explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); }
+ void OnDone() override { this->~FinishOnlyReactor(); }
+};
+
+using UnimplementedUnaryReactor = FinishOnlyReactor<ServerUnaryReactor>;
+template <class Request>
+using UnimplementedReadReactor = FinishOnlyReactor<ServerReadReactor<Request>>;
+template <class Response>
+using UnimplementedWriteReactor =
+ FinishOnlyReactor<ServerWriteReactor<Response>>;
+template <class Request, class Response>
+using UnimplementedBidiReactor =
+ FinishOnlyReactor<ServerBidiReactor<Request, Response>>;
+
+} // namespace internal
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
-
+
template <class Request>
-using ServerReadReactor = ::grpc::ServerReadReactor<Request>;
+using ServerReadReactor = ::grpc::ServerReadReactor<Request>;
template <class Response>
-using ServerWriteReactor = ::grpc::ServerWriteReactor<Response>;
+using ServerWriteReactor = ::grpc::ServerWriteReactor<Response>;
template <class Request, class Response>
-using ServerBidiReactor = ::grpc::ServerBidiReactor<Request, Response>;
+using ServerBidiReactor = ::grpc::ServerBidiReactor<Request, Response>;
-using ServerUnaryReactor = ::grpc::ServerUnaryReactor;
+using ServerUnaryReactor = ::grpc::ServerUnaryReactor;
} // namespace experimental
-
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
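As a usage sketch for the reactor interfaces declared in this header (server-side callback bidi streaming): the EchoRequest/EchoResponse message types and the echo logic are placeholders, and in a real service the generated CallbackService override would return a reactor like this one (here heap-allocated and self-deleting in OnDone).

    // Illustrative echo reactor built on ServerBidiReactor: reads a request,
    // writes it back, and repeats until the client half-closes the stream.
    class EchoReactor final
        : public grpc::ServerBidiReactor<EchoRequest, EchoResponse> {
     public:
      EchoReactor() { StartRead(&request_); }  // start the first read immediately

      void OnReadDone(bool ok) override {
        if (!ok) {                      // client is done writing (or the RPC failed)
          Finish(grpc::Status::OK);
          return;
        }
        response_.set_message(request_.message());
        StartWrite(&response_);         // response_ must stay valid until OnWriteDone
      }

      void OnWriteDone(bool ok) override {
        if (ok) StartRead(&request_);   // pipeline the next read after each write
      }

      void OnDone() override { delete this; }  // every reaction has completed
      void OnCancel() override {}              // optional hook; default is empty

     private:
      EchoRequest request_;
      EchoResponse response_;
    };

The unary, read-only and write-only reactors in this header follow the same pattern with the corresponding subset of Start*/On* members.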
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
index 330d62ab37..8120fcaf85 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
@@ -20,18 +20,18 @@
#include <grpcpp/impl/codegen/message_allocator.h>
#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/server_callback.h>
-#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_context.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
namespace internal {
template <class RequestType, class ResponseType>
class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackUnaryHandler(
- std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
+ std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
const RequestType*, ResponseType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
@@ -52,7 +52,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
auto* call = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackUnaryImpl)))
ServerCallbackUnaryImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, allocator_state, std::move(param.call_requester));
param.server_context->BeginCompletionOp(
param.call, [call](bool) { call->MaybeDone(); }, call);
@@ -61,7 +61,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<ServerUnaryReactor>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
call->request(), call->response());
}
@@ -106,7 +106,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
}
private:
- std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
+ std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
const RequestType*, ResponseType*)>
get_reactor_;
::grpc::experimental::MessageAllocator<RequestType, ResponseType>*
@@ -115,19 +115,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
class ServerCallbackUnaryImpl : public ServerCallbackUnary {
public:
void Finish(::grpc::Status s) override {
- // A callback that only contains a call to MaybeDone can be run as an
- // inline callback regardless of whether or not OnDone is inlineable
- // because if the actual OnDone callback needs to be scheduled, MaybeDone
- // is responsible for dispatching to an executor thread if needed. Thus,
- // when setting up the finish_tag_, we can set its own callback to
- // inlineable.
+ // A callback that only contains a call to MaybeDone can be run as an
+ // inline callback regardless of whether or not OnDone is inlineable
+ // because if the actual OnDone callback needs to be scheduled, MaybeDone
+ // is responsible for dispatching to an executor thread if needed. Thus,
+ // when setting up the finish_tag_, we can set its own callback to
+ // inlineable.
finish_tag_.Set(
- call_.call(),
- [this](bool) {
- this->MaybeDone(
- reactor_.load(std::memory_order_relaxed)->InternalInlineable());
- },
- &finish_ops_, /*can_inline=*/true);
+ call_.call(),
+ [this](bool) {
+ this->MaybeDone(
+ reactor_.load(std::memory_order_relaxed)->InternalInlineable());
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -152,19 +152,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be marked inline because it
- // is directly invoking a user-controlled reaction
- // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
- // thread. However, any OnDone needed after that can be inlined because it
- // is already running on an executor thread.
+ // The callback for this function should not be marked inline because it
+ // is directly invoking a user-controlled reaction
+ // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
+ // thread. However, any OnDone needed after that can be inlined because it
+ // is already running on an executor thread.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerUnaryReactor* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerUnaryReactor* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -179,7 +179,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
friend class CallbackUnaryHandler<RequestType, ResponseType>;
ServerCallbackUnaryImpl(
- ::grpc::CallbackServerContext* ctx, ::grpc::internal::Call* call,
+ ::grpc::CallbackServerContext* ctx, ::grpc::internal::Call* call,
::grpc::experimental::MessageHolder<RequestType, ResponseType>*
allocator_state,
std::function<void()> call_requester)
@@ -198,20 +198,20 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
reactor_.store(reactor, std::memory_order_relaxed);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- this->MaybeDone(reactor->InternalInlineable());
+ this->MaybeDone(reactor->InternalInlineable());
}
const RequestType* request() { return allocator_state_->request(); }
ResponseType* response() { return allocator_state_->response(); }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- allocator_state_->Release();
- this->~ServerCallbackUnaryImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ allocator_state_->Release();
+ this->~ServerCallbackUnaryImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -227,7 +227,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
finish_ops_;
::grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
::grpc::experimental::MessageHolder<RequestType, ResponseType>* const
allocator_state_;
@@ -254,7 +254,7 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackClientStreamingHandler(
std::function<ServerReadReactor<RequestType>*(
- ::grpc::CallbackServerContext*, ResponseType*)>
+ ::grpc::CallbackServerContext*, ResponseType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -264,22 +264,22 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
auto* reader = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackReaderImpl)))
ServerCallbackReaderImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no read reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no read reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); },
- reader);
+ param.call,
+ [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); },
+ reader);
ServerReadReactor<RequestType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerReadReactor<RequestType>>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
reader->response());
}
@@ -295,24 +295,24 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
}
private:
- std::function<ServerReadReactor<RequestType>*(::grpc::CallbackServerContext*,
- ResponseType*)>
+ std::function<ServerReadReactor<RequestType>*(::grpc::CallbackServerContext*,
+ ResponseType*)>
get_reactor_;
class ServerCallbackReaderImpl : public ServerCallbackReader<RequestType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
- // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no read reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+ // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no read reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
@@ -335,17 +335,17 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerReadReactor<RequestType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerReadReactor<RequestType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -365,42 +365,42 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
private:
friend class CallbackClientStreamingHandler<RequestType, ResponseType>;
- ServerCallbackReaderImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackReaderImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
std::function<void()> call_requester)
: ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
void SetupReactor(ServerReadReactor<RequestType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
read_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnReadDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ [this, reactor](bool ok) {
+ reactor->OnReadDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &read_ops_, /*can_inline=*/false);
+ &read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no read
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no read
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
~ServerCallbackReaderImpl() {}
ResponseType* response() { return &resp_; }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackReaderImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackReaderImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -420,7 +420,7 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
read_ops_;
::grpc::internal::CallbackWithSuccessTag read_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
ResponseType resp_;
std::function<void()> call_requester_;
@@ -437,7 +437,7 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackServerStreamingHandler(
std::function<ServerWriteReactor<ResponseType>*(
- ::grpc::CallbackServerContext*, const RequestType*)>
+ ::grpc::CallbackServerContext*, const RequestType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -447,23 +447,23 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
auto* writer = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackWriterImpl)))
ServerCallbackWriterImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, static_cast<RequestType*>(param.request),
std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no write reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no write reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); },
- writer);
+ param.call,
+ [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); },
+ writer);
ServerWriteReactor<ResponseType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerWriteReactor<ResponseType>>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
writer->request());
}
if (reactor == nullptr) {
@@ -496,23 +496,23 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
private:
std::function<ServerWriteReactor<ResponseType>*(
- ::grpc::CallbackServerContext*, const RequestType*)>
+ ::grpc::CallbackServerContext*, const RequestType*)>
get_reactor_;
class ServerCallbackWriterImpl : public ServerCallbackWriter<ResponseType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
- // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no write reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+ // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no write reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -530,17 +530,17 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerWriteReactor<ResponseType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerWriteReactor<ResponseType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -573,15 +573,15 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
::grpc::Status s) override {
// This combines the write into the finish callback
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}
private:
friend class CallbackServerStreamingHandler<RequestType, ResponseType>;
- ServerCallbackWriterImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackWriterImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
const RequestType* req,
std::function<void()> call_requester)
@@ -592,34 +592,34 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void SetupReactor(ServerWriteReactor<ResponseType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
- write_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnWriteDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
- },
- &write_ops_, /*can_inline=*/false);
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
+ write_tag_.Set(call_.call(),
+ [this, reactor](bool ok) {
+ reactor->OnWriteDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no write
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no write
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
~ServerCallbackWriterImpl() { req_->~RequestType(); }
const RequestType* request() { return req_; }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackWriterImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackWriterImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -639,7 +639,7 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
write_ops_;
::grpc::internal::CallbackWithSuccessTag write_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
const RequestType* req_;
std::function<void()> call_requester_;
@@ -656,7 +656,7 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackBidiHandler(
std::function<ServerBidiReactor<RequestType, ResponseType>*(
- ::grpc::CallbackServerContext*)>
+ ::grpc::CallbackServerContext*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -665,22 +665,22 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
auto* stream = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackReaderWriterImpl)))
ServerCallbackReaderWriterImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no bidi reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no bidi reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); },
- stream);
+ param.call,
+ [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); },
+ stream);
ServerBidiReactor<RequestType, ResponseType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerBidiReactor<RequestType, ResponseType>>(
- get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context));
+ get_reactor_,
+ static_cast<::grpc::CallbackServerContext*>(param.server_context));
}
if (reactor == nullptr) {
@@ -697,24 +697,24 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
private:
std::function<ServerBidiReactor<RequestType, ResponseType>*(
- ::grpc::CallbackServerContext*)>
+ ::grpc::CallbackServerContext*)>
get_reactor_;
class ServerCallbackReaderWriterImpl
: public ServerCallbackReaderWriter<RequestType, ResponseType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
- // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no bidi reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+ // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no bidi reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -732,17 +732,17 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerBidiReactor<RequestType, ResponseType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerBidiReactor<RequestType, ResponseType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -774,8 +774,8 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
::grpc::Status s) override {
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}
@@ -788,45 +788,45 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
private:
friend class CallbackBidiHandler<RequestType, ResponseType>;
- ServerCallbackReaderWriterImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackReaderWriterImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
std::function<void()> call_requester)
: ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
void SetupReactor(ServerBidiReactor<RequestType, ResponseType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callbacks for these functions should not be inlined because they
- // invoke user-controlled reactions, but any resulting OnDones can be
- // inlined in the executor to which a callback is dispatched.
- write_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnWriteDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
- },
- &write_ops_, /*can_inline=*/false);
+ // The callbacks for these functions should not be inlined because they
+ // invoke user-controlled reactions, but any resulting OnDones can be
+ // inlined in the executor to which a callback is dispatched.
+ write_tag_.Set(call_.call(),
+ [this, reactor](bool ok) {
+ reactor->OnWriteDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
read_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnReadDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ [this, reactor](bool ok) {
+ reactor->OnReadDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &read_ops_, /*can_inline=*/false);
+ &read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no bidi
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no bidi
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackReaderWriterImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackReaderWriterImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -850,7 +850,7 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
read_ops_;
::grpc::internal::CallbackWithSuccessTag read_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
std::function<void()> call_requester_;
// The memory ordering of reactor_ follows ServerCallbackUnaryImpl.
@@ -862,6 +862,6 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
};
} // namespace internal
-} // namespace grpc
+} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H
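The handlers in the file above are the server-side plumbing behind the callback API: each one arena-allocates a ServerCallback*Impl, asks the user-supplied reactor getter for a reactor, and then drives it through SetupReactor, MaybeDone, and OnDone. As a minimal sketch of the user-facing side that these handlers dispatch to, assuming a hypothetical Echo service with generated callback support (EchoRequest, EchoResponse, and the EchoService::CallbackService base are assumptions, not part of this diff):

#include <grpcpp/grpcpp.h>

// User-owned unary reactor: finishes immediately and releases itself in OnDone.
class EchoReactor : public grpc::ServerUnaryReactor {
 public:
  EchoReactor(const EchoRequest* req, EchoResponse* resp) {
    resp->set_message(req->message());
    Finish(grpc::Status::OK);   // immediate finish; delayed finish also works
  }

 private:
  void OnCancel() override {}              // RPC was cancelled before Finish completed
  void OnDone() override { delete this; }  // last reaction for this RPC; safe to free the reactor
};

class EchoServiceImpl final : public EchoService::CallbackService {  // assumed generated base
  grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* /*ctx*/,
                                 const EchoRequest* req,
                                 EchoResponse* resp) override {
    return new EchoReactor(req, resp);     // CallbackUnaryHandler drives this reactor
  }
};

From that point on, CallbackUnaryHandler owns the lifecycle: it calls MaybeDone as the finish tag and the completion op complete, and finally invokes OnDone from the executor (or inline, for reactors whose InternalInlineable() returns true, such as the DefaultReactor).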
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
index 769b1b5b5d..685f006cda 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,601 +19,601 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
#define GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
-#include <atomic>
-#include <cassert>
-#include <map>
-#include <memory>
-#include <type_traits>
-#include <vector>
-
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpc/impl/codegen/compression_types.h>
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/completion_queue_tag.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/create_auth_context.h>
-#include <grpcpp/impl/codegen/message_allocator.h>
-#include <grpcpp/impl/codegen/metadata_map.h>
-#include <grpcpp/impl/codegen/security/auth_context.h>
-#include <grpcpp/impl/codegen/server_callback.h>
-#include <grpcpp/impl/codegen/server_interceptor.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/string_ref.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct grpc_metadata;
-struct grpc_call;
-struct census_context;
-
+#include <atomic>
+#include <cassert>
+#include <map>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpc/impl/codegen/compression_types.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/create_auth_context.h>
+#include <grpcpp/impl/codegen/message_allocator.h>
+#include <grpcpp/impl/codegen/metadata_map.h>
+#include <grpcpp/impl/codegen/security/auth_context.h>
+#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_interceptor.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/string_ref.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct grpc_metadata;
+struct grpc_call;
+struct census_context;
+
namespace grpc {
-template <class W, class R>
-class ServerAsyncReader;
-template <class W>
-class ServerAsyncWriter;
-template <class W>
-class ServerAsyncResponseWriter;
-template <class W, class R>
-class ServerAsyncReaderWriter;
-template <class R>
-class ServerReader;
-template <class W>
-class ServerWriter;
-
-namespace internal {
-template <class ServiceType, class RequestType, class ResponseType>
-class BidiStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackUnaryHandler;
-template <class RequestType, class ResponseType>
-class CallbackClientStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackServerStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackBidiHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler;
-template <class Base>
-class FinishOnlyReactor;
-template <class W, class R>
-class ServerReaderWriterBody;
-template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler;
-class ServerReactor;
-template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler;
-template <::grpc::StatusCode code>
-class ErrorMethodHandler;
-} // namespace internal
-
-class ClientContext;
-class CompletionQueue;
-class GenericServerContext;
-class Server;
-class ServerInterface;
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+template <class W, class R>
+class ServerAsyncReader;
+template <class W>
+class ServerAsyncWriter;
+template <class W>
+class ServerAsyncResponseWriter;
+template <class W, class R>
+class ServerAsyncReaderWriter;
+template <class R>
+class ServerReader;
+template <class W>
+class ServerWriter;
+
+namespace internal {
+template <class ServiceType, class RequestType, class ResponseType>
+class BidiStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackUnaryHandler;
+template <class RequestType, class ResponseType>
+class CallbackClientStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackServerStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackBidiHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ClientStreamingHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class RpcMethodHandler;
+template <class Base>
+class FinishOnlyReactor;
+template <class W, class R>
+class ServerReaderWriterBody;
+template <class ServiceType, class RequestType, class ResponseType>
+class ServerStreamingHandler;
+class ServerReactor;
+template <class Streamer, bool WriteNeeded>
+class TemplatedBidiStreamingHandler;
+template <::grpc::StatusCode code>
+class ErrorMethodHandler;
+} // namespace internal
+
+class ClientContext;
+class CompletionQueue;
+class GenericServerContext;
+class Server;
+class ServerInterface;
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
-typedef ::grpc::ServerContextBase ServerContextBase;
-typedef ::grpc::CallbackServerContext CallbackServerContext;
+typedef ::grpc::ServerContextBase ServerContextBase;
+typedef ::grpc::CallbackServerContext CallbackServerContext;
+
+} // namespace experimental
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+#endif
+class GenericCallbackServerContext;
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-namespace experimental {
-#endif
-class GenericCallbackServerContext;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-} // namespace experimental
-#endif
-namespace internal {
-class Call;
-} // namespace internal
-
-namespace testing {
-class InteropServerContextInspector;
-class ServerContextTestSpouse;
-class DefaultReactorTestPeer;
-} // namespace testing
-
-/// Base class of ServerContext. Experimental until callback API is final.
-class ServerContextBase {
- public:
- virtual ~ServerContextBase();
-
- /// Return the deadline for the server call.
- std::chrono::system_clock::time_point deadline() const {
- return ::grpc::Timespec2Timepoint(deadline_);
- }
-
- /// Return a \a gpr_timespec representation of the server call's deadline.
- gpr_timespec raw_deadline() const { return deadline_; }
-
- /// Add the (\a key, \a value) pair to the initial metadata
- /// associated with a server call. These are made available at the client side
- /// by the \a grpc::ClientContext::GetServerInitialMetadata() method.
- ///
- /// \warning This method should only be called before sending initial metadata
- /// to the client (which can happen explicitly, or implicitly when sending a
- /// response message or status to the client).
- ///
- /// \param key The metadata key. If \a value is binary data, it must
- /// end in "-bin".
- /// \param value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddInitialMetadata(const TString& key, const TString& value);
-
- /// Add the (\a key, \a value) pair to the trailing metadata
- /// associated with a server call. These are made available at the client
- /// side by the \a grpc::ClientContext::GetServerTrailingMetadata() method.
- ///
- /// \warning This method should only be called before sending trailing
- /// metadata to the client (which happens when the call is finished and a
- /// status is sent to the client).
- ///
- /// \param key The metadata key. If \a value is binary data,
- /// it must end in "-bin".
- /// \param value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddTrailingMetadata(const TString& key, const TString& value);
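// A minimal usage sketch (the Echo method and its message types are assumed
// names; only the AddInitialMetadata/AddTrailingMetadata calls come from this
// header):
grpc::Status Echo(grpc::ServerContext* ctx, const EchoRequest* req,
                  EchoResponse* resp) {
  ctx->AddInitialMetadata("x-echo-handler", "v1");                       // ASCII key and value
  ctx->AddTrailingMetadata("x-echo-trace-bin", TString("\x01\x7f", 2));  // binary value, so key ends in "-bin"
  resp->set_message(req->message());
  return grpc::Status::OK;
}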
-
- /// Return whether this RPC failed before the server could provide its status
- /// back to the client. This could be because of explicit API cancellation
- /// from the client-side or server-side, because of deadline exceeded, network
- /// connection reset, HTTP/2 parameter configuration (e.g., max message size,
- /// max connection age), etc. It does NOT include failure due to a non-OK
- /// status return from the server application's request handler, including
- /// Status::CANCELLED.
- ///
- /// IsCancelled is always safe to call when using sync or callback API.
- /// When using async API, it is only safe to call IsCancelled after
- /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
- bool IsCancelled() const;
-
- /// Cancel the Call from the server. This is a best-effort API and
- /// depending on when it is called, the RPC may still appear successful to
- /// the client. For example, if TryCancel() is called on a separate thread, it
- /// might race with the server handler which might return success to the
- /// client before TryCancel() was even started by the thread.
- ///
- /// It is the caller's responsibility to prevent such races and ensure that if
- /// TryCancel() is called, the server handler must return Status::CANCELLED.
- /// The only exception is that if the server handler is already returning an
- /// error status code, it is ok to not return Status::CANCELLED even if
- /// TryCancel() was called.
- ///
- /// For reasons such as the above, it is generally preferred to explicitly
- /// finish an RPC by returning Status::CANCELLED rather than using TryCancel.
- ///
- /// Note that TryCancel() does not change any of the tags that are pending
- /// on the completion queue. All pending tags will still be delivered
- /// (though their ok result may reflect the effect of cancellation).
- void TryCancel() const;
-
- /// Return a collection of initial metadata key-value pairs sent from the
- /// client. Note that keys may occur more than
- /// once (i.e., a \a std::multimap is returned).
- ///
- /// It is safe to use this method after initial metadata has been received.
- /// Calls always begin with the client sending initial metadata, so this is
- /// safe to access as soon as the call has begun on the server side.
- ///
- /// \return A multimap of initial metadata key-value pairs from the client.
- const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
- const {
- return *client_metadata_.map();
- }
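// A minimal sketch of reading a client-supplied header from the multimap
// returned above ("x-request-id" is an assumed header name):
void LogRequestId(const grpc::ServerContext& ctx) {
  const auto& md = ctx.client_metadata();
  auto it = md.find("x-request-id");
  if (it != md.end()) {
    grpc::string_ref value = it->second;  // not NUL-terminated; copy before using as a C string
    (void)value;
  }
}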
-
- /// Return the compression level to be used by the server call.
- grpc_compression_level compression_level() const {
- return compression_level_;
- }
-
- /// Set \a level to be the compression level used for the server call.
- ///
- /// \param level The compression level used for the server call.
- void set_compression_level(grpc_compression_level level) {
- compression_level_set_ = true;
- compression_level_ = level;
- }
-
- /// Return a bool indicating whether the compression level for this call
- /// has been set (either implicitly or through a previous call to
- /// \a set_compression_level).
- bool compression_level_set() const { return compression_level_set_; }
-
- /// Return the compression algorithm the server call will request be used.
- /// Note that the gRPC runtime may decide to ignore this request, for example,
- /// due to resource constraints, or if the server is aware the client doesn't
- /// support the requested algorithm.
- grpc_compression_algorithm compression_algorithm() const {
- return compression_algorithm_;
- }
- /// Set \a algorithm to be the compression algorithm used for the server call.
- ///
- /// \param algorithm The compression algorithm used for the server call.
- void set_compression_algorithm(grpc_compression_algorithm algorithm);
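// A sketch of per-call compression tuning from inside a handler (the List
// method and message types are assumptions; the enum values come from
// grpc/compression_types.h):
grpc::Status List(grpc::ServerContext* ctx, const ListRequest* /*req*/,
                  ListResponse* /*resp*/) {
  if (!ctx->compression_level_set()) {
    ctx->set_compression_level(GRPC_COMPRESS_LEVEL_MED);
  }
  ctx->set_compression_algorithm(GRPC_COMPRESS_GZIP);  // the runtime may still ignore this request
  return grpc::Status::OK;
}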
-
- /// Set the serialized load reporting costs in \a cost_data for the call.
- void SetLoadReportingCosts(const std::vector<TString>& cost_data);
-
- /// Return the authentication context for this server call.
- ///
- /// \see grpc::AuthContext.
- std::shared_ptr<const ::grpc::AuthContext> auth_context() const {
- if (auth_context_.get() == nullptr) {
- auth_context_ = ::grpc::CreateAuthContext(call_.call);
- }
- return auth_context_;
- }
-
- /// Return the peer uri in a string.
- /// WARNING: this value is never authenticated or subject to any security
- /// related code. It must not be used for any authentication related
- /// functionality. Instead, use auth_context.
- TString peer() const;
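// A sketch of the distinction stressed above: auth_context() carries the
// authenticated peer identity, while peer() is informational only (names are
// illustrative):
void LogCaller(const grpc::ServerContext& ctx) {
  std::shared_ptr<const grpc::AuthContext> auth = ctx.auth_context();
  std::vector<grpc::string_ref> ids = auth->GetPeerIdentity();  // authenticated identities (may be empty)
  TString transport_peer = ctx.peer();  // e.g. "ipv4:10.0.0.1:54321"; never use for authorization
  (void)ids;
  (void)transport_peer;
}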
-
- /// Get the census context associated with this server call.
- const struct census_context* census_context() const;
-
- /// Should be used for framework-level extensions only.
- /// Applications never need to call this method.
- grpc_call* c_call() { return call_.call; }
-
- protected:
- /// Async only. Has to be called before the RPC starts.
- /// Returns the tag in the completion queue when the RPC finishes.
- /// IsCancelled() can then be called to check whether the rpc was cancelled.
- /// TODO(vjpai): Fix this so that the tag is returned even if the call never
- /// starts (https://github.com/grpc/grpc/issues/10136).
- void AsyncNotifyWhenDone(void* tag) {
- has_notify_when_done_tag_ = true;
- async_notify_when_done_tag_ = tag;
- }
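// A sketch of the ordering described above for the CQ-based async API.
// RequestEcho, MyService::AsyncService, and the message types come from assumed
// generated code; the tag values are arbitrary, and ownership of the per-call
// state would be handed to the completion-queue loop (not shown):
void StartEchoCall(MyService::AsyncService* service, grpc::ServerCompletionQueue* cq) {
  auto* ctx = new grpc::ServerContext;
  auto* request = new EchoRequest;
  auto* responder = new grpc::ServerAsyncResponseWriter<EchoResponse>(ctx);
  void* done_tag = reinterpret_cast<void*>(1);
  void* new_call_tag = reinterpret_cast<void*>(2);
  ctx->AsyncNotifyWhenDone(done_tag);  // must be registered before the RPC starts
  service->RequestEcho(ctx, request, responder, cq, cq, new_call_tag);
  // Only after cq->Next() later yields done_tag is ctx->IsCancelled() safe to call.
}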
-
- /// NOTE: This is an API for advanced users who need custom allocators.
- /// Get and maybe mutate the allocator state associated with the current RPC.
- /// Currently only applicable for callback unary RPC methods.
- /// WARNING: This is experimental API and could be changed or removed.
- ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() {
- return message_allocator_state_;
- }
-
- /// Get a library-owned default unary reactor for use in minimal reaction
- /// cases. This supports typical unary RPC usage of providing a response and
- /// status. It supports immediate Finish (finish from within the method
- /// handler) or delayed Finish (finish called after the method handler
- /// invocation). It does not support reacting to cancellation or completion,
- /// or early sending of initial metadata. Since this is a library-owned
- /// reactor, it should not be delete'd or freed in any way. This is more
- /// efficient than creating a user-owned reactor both because of avoiding an
- /// allocation and because its minimal reactions are optimized using a core
- /// surface flag that allows their reactions to run inline without any
- /// thread-hop.
- ///
- /// This method should not be called more than once or called after return
- /// from the method handler.
- ///
- /// WARNING: This is experimental API and could be changed or removed.
- ::grpc::ServerUnaryReactor* DefaultReactor() {
- // Short-circuit the case where a default reactor was already set up by
- // the TestPeer.
- if (test_unary_ != nullptr) {
- return reinterpret_cast<Reactor*>(&default_reactor_);
- }
- new (&default_reactor_) Reactor;
-#ifndef NDEBUG
- bool old = false;
- assert(default_reactor_used_.compare_exchange_strong(
- old, true, std::memory_order_relaxed));
-#else
- default_reactor_used_.store(true, std::memory_order_relaxed);
-#endif
- return reinterpret_cast<Reactor*>(&default_reactor_);
- }
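// A sketch of the minimal-reaction path this method enables, in an assumed
// callback unary method override (no allocation, no user-owned reactor):
grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* ctx,
                               const EchoRequest* req, EchoResponse* resp) /* override */ {
  resp->set_message(req->message());
  grpc::ServerUnaryReactor* reactor = ctx->DefaultReactor();
  reactor->Finish(grpc::Status::OK);  // immediate Finish; delayed Finish is also supported
  return reactor;                     // library-owned: never delete
}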
-
- /// Constructors for use by derived classes
- ServerContextBase();
- ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr);
-
- private:
- friend class ::grpc::testing::InteropServerContextInspector;
- friend class ::grpc::testing::ServerContextTestSpouse;
- friend class ::grpc::testing::DefaultReactorTestPeer;
- friend class ::grpc::ServerInterface;
- friend class ::grpc::Server;
- template <class W, class R>
- friend class ::grpc::ServerAsyncReader;
- template <class W>
- friend class ::grpc::ServerAsyncWriter;
- template <class W>
- friend class ::grpc::ServerAsyncResponseWriter;
- template <class W, class R>
- friend class ::grpc::ServerAsyncReaderWriter;
- template <class R>
- friend class ::grpc::ServerReader;
- template <class W>
- friend class ::grpc::ServerWriter;
- template <class W, class R>
- friend class ::grpc::internal::ServerReaderWriterBody;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::RpcMethodHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ClientStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ServerStreamingHandler;
- template <class Streamer, bool WriteNeeded>
- friend class ::grpc::internal::TemplatedBidiStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackUnaryHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackClientStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackServerStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackBidiHandler;
- template <::grpc::StatusCode code>
- friend class ::grpc::internal::ErrorMethodHandler;
- template <class Base>
- friend class ::grpc::internal::FinishOnlyReactor;
- friend class ::grpc::ClientContext;
- friend class ::grpc::GenericServerContext;
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- friend class ::grpc::GenericCallbackServerContext;
-#else
- friend class ::grpc::experimental::GenericCallbackServerContext;
-#endif
-
- /// Prevent copying.
- ServerContextBase(const ServerContextBase&);
- ServerContextBase& operator=(const ServerContextBase&);
-
- class CompletionOp;
-
- void BeginCompletionOp(
- ::grpc::internal::Call* call, std::function<void(bool)> callback,
- ::grpc::internal::ServerCallbackCall* callback_controller);
- /// Return the tag queued by BeginCompletionOp()
- ::grpc::internal::CompletionQueueTag* GetCompletionOpTag();
-
- void set_call(grpc_call* call) { call_.call = call; }
-
- void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr);
-
- uint32_t initial_metadata_flags() const { return 0; }
-
- ::grpc::experimental::ServerRpcInfo* set_server_rpc_info(
- const char* method, ::grpc::internal::RpcMethod::RpcType type,
- const std::vector<std::unique_ptr<
- ::grpc::experimental::ServerInterceptorFactoryInterface>>& creators) {
- if (creators.size() != 0) {
- rpc_info_ = new ::grpc::experimental::ServerRpcInfo(this, method, type);
- rpc_info_->RegisterInterceptors(creators);
- }
- return rpc_info_;
- }
-
- void set_message_allocator_state(
- ::grpc::experimental::RpcAllocatorState* allocator_state) {
- message_allocator_state_ = allocator_state;
- }
-
- struct CallWrapper {
- ~CallWrapper();
-
- grpc_call* call = nullptr;
- };
-
- // NOTE: call_ must be the first data member of this object so that its
- // destructor is the last to be called, since its destructor may unref
- // the underlying core call which holds the arena that may be used to
- // hold this object.
- CallWrapper call_;
-
- CompletionOp* completion_op_ = nullptr;
- bool has_notify_when_done_tag_ = false;
- void* async_notify_when_done_tag_ = nullptr;
- ::grpc::internal::CallbackWithSuccessTag completion_tag_;
-
- gpr_timespec deadline_;
- ::grpc::CompletionQueue* cq_ = nullptr;
- bool sent_initial_metadata_ = false;
- mutable std::shared_ptr<const ::grpc::AuthContext> auth_context_;
- mutable ::grpc::internal::MetadataMap client_metadata_;
- std::multimap<TString, TString> initial_metadata_;
- std::multimap<TString, TString> trailing_metadata_;
-
- bool compression_level_set_ = false;
- grpc_compression_level compression_level_;
- grpc_compression_algorithm compression_algorithm_;
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage>
- pending_ops_;
- bool has_pending_ops_ = false;
-
- ::grpc::experimental::ServerRpcInfo* rpc_info_ = nullptr;
- ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr;
-
- class Reactor : public ::grpc::ServerUnaryReactor {
- public:
- void OnCancel() override {}
- void OnDone() override {}
- // Override InternalInlineable for this class since its reactions are
- // trivial and thus do not need to be run from the executor (triggering a
- // thread hop). This should only be used by internal reactors (thus the
- // name) and not by user application code.
- bool InternalInlineable() override { return true; }
- };
-
- void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
- test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
- }
- bool test_status_set() const {
- return (test_unary_ != nullptr) && test_unary_->status_set();
- }
- ::grpc::Status test_status() const { return test_unary_->status(); }
-
- class TestServerCallbackUnary : public ::grpc::ServerCallbackUnary {
- public:
- TestServerCallbackUnary(ServerContextBase* ctx,
- std::function<void(::grpc::Status)> func)
- : reactor_(ctx->DefaultReactor()), func_(std::move(func)) {
- this->BindReactor(reactor_);
- }
- void Finish(::grpc::Status s) override {
- status_ = s;
- func_(std::move(s));
- status_set_.store(true, std::memory_order_release);
- }
- void SendInitialMetadata() override {}
-
- bool status_set() const {
- return status_set_.load(std::memory_order_acquire);
- }
- ::grpc::Status status() const { return status_; }
-
- private:
- void CallOnDone() override {}
- ::grpc::internal::ServerReactor* reactor() override { return reactor_; }
-
- ::grpc::ServerUnaryReactor* const reactor_;
- std::atomic_bool status_set_{false};
- ::grpc::Status status_;
- const std::function<void(::grpc::Status s)> func_;
- };
-
- typename std::aligned_storage<sizeof(Reactor), alignof(Reactor)>::type
- default_reactor_;
- std::atomic_bool default_reactor_used_{false};
- std::unique_ptr<TestServerCallbackUnary> test_unary_;
-};
-
-/// A ServerContext or CallbackServerContext allows the code implementing a
-/// service handler to:
-///
-/// - Add custom initial and trailing metadata key-value pairs that will
-///   be propagated to the client side.
-/// - Control call settings such as compression and authentication.
-/// - Access metadata coming from the client.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call handler they are supplied to,
-/// that is to say, they aren't sticky across multiple calls. Some of these
-/// settings, such as the compression options, can be made persistent at server
-/// construction time by specifying the appropriate \a ChannelArguments
-/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
-///
-/// \warning ServerContext instances should \em not be reused across rpcs.
-class ServerContext : public ServerContextBase {
- public:
- ServerContext() {} // for async calls
-
- using ServerContextBase::AddInitialMetadata;
- using ServerContextBase::AddTrailingMetadata;
- using ServerContextBase::auth_context;
- using ServerContextBase::c_call;
- using ServerContextBase::census_context;
- using ServerContextBase::client_metadata;
- using ServerContextBase::compression_algorithm;
- using ServerContextBase::compression_level;
- using ServerContextBase::compression_level_set;
- using ServerContextBase::deadline;
- using ServerContextBase::IsCancelled;
- using ServerContextBase::peer;
- using ServerContextBase::raw_deadline;
- using ServerContextBase::set_compression_algorithm;
- using ServerContextBase::set_compression_level;
- using ServerContextBase::SetLoadReportingCosts;
- using ServerContextBase::TryCancel;
-
- // Sync/CQ-based Async ServerContext only
- using ServerContextBase::AsyncNotifyWhenDone;
-
- private:
- // Constructor for internal use by server only
- friend class ::grpc::Server;
- ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
- : ServerContextBase(deadline, arr) {}
-
- // CallbackServerContext only
- using ServerContextBase::DefaultReactor;
- using ServerContextBase::GetRpcAllocatorState;
-
- /// Prevent copying.
- ServerContext(const ServerContext&) = delete;
- ServerContext& operator=(const ServerContext&) = delete;
-};
-
-class CallbackServerContext : public ServerContextBase {
- public:
- /// Public constructors are for direct use only by mocking tests. In practice,
- /// these objects will be owned by the library.
- CallbackServerContext() {}
-
- using ServerContextBase::AddInitialMetadata;
- using ServerContextBase::AddTrailingMetadata;
- using ServerContextBase::auth_context;
- using ServerContextBase::c_call;
- using ServerContextBase::census_context;
- using ServerContextBase::client_metadata;
- using ServerContextBase::compression_algorithm;
- using ServerContextBase::compression_level;
- using ServerContextBase::compression_level_set;
- using ServerContextBase::deadline;
- using ServerContextBase::IsCancelled;
- using ServerContextBase::peer;
- using ServerContextBase::raw_deadline;
- using ServerContextBase::set_compression_algorithm;
- using ServerContextBase::set_compression_level;
- using ServerContextBase::SetLoadReportingCosts;
- using ServerContextBase::TryCancel;
-
- // CallbackServerContext only
- using ServerContextBase::DefaultReactor;
- using ServerContextBase::GetRpcAllocatorState;
-
- private:
- // Sync/CQ-based Async ServerContext only
- using ServerContextBase::AsyncNotifyWhenDone;
-
- /// Prevent copying.
- CallbackServerContext(const CallbackServerContext&) = delete;
- CallbackServerContext& operator=(const CallbackServerContext&) = delete;
-};
-
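// A sketch tying IsCancelled()/TryCancel() to a synchronous server-streaming
// handler (Watch, its message types, and NextEvent are assumed names):
grpc::Status Watch(grpc::ServerContext* ctx, const WatchRequest* req,
                   grpc::ServerWriter<WatchEvent>* writer) {
  while (!ctx->IsCancelled()) {
    WatchEvent event = NextEvent(*req);  // hypothetical event source
    if (!writer->Write(event)) break;    // stream already closed by the client
  }
  return ctx->IsCancelled() ? grpc::Status::CANCELLED : grpc::Status::OK;
}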
+#endif
+namespace internal {
+class Call;
+} // namespace internal
+
+namespace testing {
+class InteropServerContextInspector;
+class ServerContextTestSpouse;
+class DefaultReactorTestPeer;
+} // namespace testing
+
+/// Base class of ServerContext. Experimental until callback API is final.
+class ServerContextBase {
+ public:
+ virtual ~ServerContextBase();
+
+ /// Return the deadline for the server call.
+ std::chrono::system_clock::time_point deadline() const {
+ return ::grpc::Timespec2Timepoint(deadline_);
+ }
+
+ /// Return a \a gpr_timespec representation of the server call's deadline.
+ gpr_timespec raw_deadline() const { return deadline_; }
+
+ /// Add the (\a key, \a value) pair to the initial metadata
+ /// associated with a server call. These are made available at the client side
+ /// by the \a grpc::ClientContext::GetServerInitialMetadata() method.
+ ///
+ /// \warning This method should only be called before sending initial metadata
+ /// to the client (which can happen explicitly, or implicitly when sending a
+ /// response message or status to the client).
+ ///
+ /// \param key The metadata key. If \a value is binary data, it must
+ /// end in "-bin".
+ /// \param value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddInitialMetadata(const TString& key, const TString& value);
+
+ /// Add the (\a key, \a value) pair to the trailing metadata
+ /// associated with a server call. These are made available at the client
+ /// side by the \a grpc::ClientContext::GetServerTrailingMetadata() method.
+ ///
+ /// \warning This method should only be called before sending trailing
+ /// metadata to the client (which happens when the call is finished and a
+ /// status is sent to the client).
+ ///
+ /// \param key The metadata key. If \a value is binary data,
+ /// it must end in "-bin".
+ /// \param value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddTrailingMetadata(const TString& key, const TString& value);
+
+ /// Return whether this RPC failed before the server could provide its status
+ /// back to the client. This could be because of explicit API cancellation
+ /// from the client-side or server-side, because of deadline exceeded, network
+ /// connection reset, HTTP/2 parameter configuration (e.g., max message size,
+ /// max connection age), etc. It does NOT include failure due to a non-OK
+ /// status return from the server application's request handler, including
+ /// Status::CANCELLED.
+ ///
+ /// IsCancelled is always safe to call when using sync or callback API.
+ /// When using async API, it is only safe to call IsCancelled after
+ /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
+ bool IsCancelled() const;
+
+ /// Cancel the Call from the server. This is a best-effort API and
+ /// depending on when it is called, the RPC may still appear successful to
+ /// the client. For example, if TryCancel() is called on a separate thread, it
+ /// might race with the server handler which might return success to the
+ /// client before TryCancel() was even started by the thread.
+ ///
+ /// It is the caller's responsibility to prevent such races and ensure that if
+ /// TryCancel() is called, the server handler must return Status::CANCELLED.
+ /// The only exception is that if the server handler is already returning an
+ /// error status code, it is ok to not return Status::CANCELLED even if
+ /// TryCancel() was called.
+ ///
+ /// For reasons such as the above, it is generally preferred to explicitly
+ /// finish an RPC by returning Status::CANCELLED rather than using TryCancel.
+ ///
+ /// Note that TryCancel() does not change any of the tags that are pending
+ /// on the completion queue. All pending tags will still be delivered
+ /// (though their ok result may reflect the effect of cancellation).
+ void TryCancel() const;
+
+ /// Return a collection of initial metadata key-value pairs sent from the
+ /// client. Note that keys may occur more than
+ /// once (i.e., a \a std::multimap is returned).
+ ///
+ /// It is safe to use this method after initial metadata has been received.
+ /// Calls always begin with the client sending initial metadata, so this is
+ /// safe to access as soon as the call has begun on the server side.
+ ///
+ /// \return A multimap of initial metadata key-value pairs from the client.
+ const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
+ const {
+ return *client_metadata_.map();
+ }
+
+ /// Return the compression level to be used by the server call.
+ grpc_compression_level compression_level() const {
+ return compression_level_;
+ }
+
+ /// Set \a level to be the compression level used for the server call.
+ ///
+ /// \param level The compression level used for the server call.
+ void set_compression_level(grpc_compression_level level) {
+ compression_level_set_ = true;
+ compression_level_ = level;
+ }
+
+ /// Return a bool indicating whether the compression level for this call
+ /// has been set (either implicitly or through a previous call to
+ /// \a set_compression_level).
+ bool compression_level_set() const { return compression_level_set_; }
+
+ /// Return the compression algorithm the server call will request be used.
+ /// Note that the gRPC runtime may decide to ignore this request, for example,
+ /// due to resource constraints, or if the server is aware the client doesn't
+ /// support the requested algorithm.
+ grpc_compression_algorithm compression_algorithm() const {
+ return compression_algorithm_;
+ }
+ /// Set \a algorithm to be the compression algorithm used for the server call.
+ ///
+ /// \param algorithm The compression algorithm used for the server call.
+ void set_compression_algorithm(grpc_compression_algorithm algorithm);
+
+ /// Set the serialized load reporting costs in \a cost_data for the call.
+ void SetLoadReportingCosts(const std::vector<TString>& cost_data);
+
+ /// Return the authentication context for this server call.
+ ///
+ /// \see grpc::AuthContext.
+ std::shared_ptr<const ::grpc::AuthContext> auth_context() const {
+ if (auth_context_.get() == nullptr) {
+ auth_context_ = ::grpc::CreateAuthContext(call_.call);
+ }
+ return auth_context_;
+ }
+
+ /// Return the peer uri in a string.
+ /// WARNING: this value is never authenticated or subject to any
+ /// security-related code. It must not be used for any authentication-related
+ /// functionality. Instead, use auth_context.
+ TString peer() const;
+
+ /// Get the census context associated with this server call.
+ const struct census_context* census_context() const;
+
+ /// Should be used for framework-level extensions only.
+ /// Applications never need to call this method.
+ grpc_call* c_call() { return call_.call; }
+
+ protected:
+ /// Async only. Has to be called before the RPC starts.
+ /// Returns the tag on the completion queue when the RPC finishes.
+ /// IsCancelled() can then be called to check whether the RPC was cancelled.
+ /// TODO(vjpai): Fix this so that the tag is returned even if the call never
+ /// starts (https://github.com/grpc/grpc/issues/10136).
+ void AsyncNotifyWhenDone(void* tag) {
+ has_notify_when_done_tag_ = true;
+ async_notify_when_done_tag_ = tag;
+ }
+
+ /// NOTE: This is an API for advanced users who need custom allocators.
+ /// Get and maybe mutate the allocator state associated with the current RPC.
+ /// Currently only applicable for callback unary RPC methods.
+ /// WARNING: This is experimental API and could be changed or removed.
+ ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() {
+ return message_allocator_state_;
+ }
+
+ /// Get a library-owned default unary reactor for use in minimal reaction
+ /// cases. This supports typical unary RPC usage of providing a response and
+ /// status. It supports immediate Finish (finish from within the method
+ /// handler) or delayed Finish (finish called after the method handler
+ /// invocation). It does not support reacting to cancellation or completion,
+ /// or early sending of initial metadata. Since this is a library-owned
+ /// reactor, it should not be delete'd or freed in any way. This is more
+ /// efficient than creating a user-owned reactor both because of avoiding an
+ /// allocation and because its minimal reactions are optimized using a core
+ /// surface flag that allows their reactions to run inline without any
+ /// thread-hop.
+ ///
+ /// This method should not be called more than once or called after return
+ /// from the method handler.
+ ///
+ /// WARNING: This is experimental API and could be changed or removed.
+ ::grpc::ServerUnaryReactor* DefaultReactor() {
+ // Short-circuit the case where a default reactor was already set up by
+ // the TestPeer.
+ if (test_unary_ != nullptr) {
+ return reinterpret_cast<Reactor*>(&default_reactor_);
+ }
+ new (&default_reactor_) Reactor;
+#ifndef NDEBUG
+ bool old = false;
+ assert(default_reactor_used_.compare_exchange_strong(
+ old, true, std::memory_order_relaxed));
+#else
+ default_reactor_used_.store(true, std::memory_order_relaxed);
+#endif
+ return reinterpret_cast<Reactor*>(&default_reactor_);
+ }
+
+ /// Constructors for use by derived classes
+ ServerContextBase();
+ ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr);
+
+ private:
+ friend class ::grpc::testing::InteropServerContextInspector;
+ friend class ::grpc::testing::ServerContextTestSpouse;
+ friend class ::grpc::testing::DefaultReactorTestPeer;
+ friend class ::grpc::ServerInterface;
+ friend class ::grpc::Server;
+ template <class W, class R>
+ friend class ::grpc::ServerAsyncReader;
+ template <class W>
+ friend class ::grpc::ServerAsyncWriter;
+ template <class W>
+ friend class ::grpc::ServerAsyncResponseWriter;
+ template <class W, class R>
+ friend class ::grpc::ServerAsyncReaderWriter;
+ template <class R>
+ friend class ::grpc::ServerReader;
+ template <class W>
+ friend class ::grpc::ServerWriter;
+ template <class W, class R>
+ friend class ::grpc::internal::ServerReaderWriterBody;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::RpcMethodHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ClientStreamingHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ServerStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class ::grpc::internal::TemplatedBidiStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackUnaryHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackClientStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackServerStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackBidiHandler;
+ template <::grpc::StatusCode code>
+ friend class ::grpc::internal::ErrorMethodHandler;
+ template <class Base>
+ friend class ::grpc::internal::FinishOnlyReactor;
+ friend class ::grpc::ClientContext;
+ friend class ::grpc::GenericServerContext;
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ friend class ::grpc::GenericCallbackServerContext;
+#else
+ friend class ::grpc::experimental::GenericCallbackServerContext;
+#endif
+
+ /// Prevent copying.
+ ServerContextBase(const ServerContextBase&);
+ ServerContextBase& operator=(const ServerContextBase&);
+
+ class CompletionOp;
+
+ void BeginCompletionOp(
+ ::grpc::internal::Call* call, std::function<void(bool)> callback,
+ ::grpc::internal::ServerCallbackCall* callback_controller);
+ /// Return the tag queued by BeginCompletionOp()
+ ::grpc::internal::CompletionQueueTag* GetCompletionOpTag();
+
+ void set_call(grpc_call* call) { call_.call = call; }
+
+ void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr);
+
+ uint32_t initial_metadata_flags() const { return 0; }
+
+ ::grpc::experimental::ServerRpcInfo* set_server_rpc_info(
+ const char* method, ::grpc::internal::RpcMethod::RpcType type,
+ const std::vector<std::unique_ptr<
+ ::grpc::experimental::ServerInterceptorFactoryInterface>>& creators) {
+ if (creators.size() != 0) {
+ rpc_info_ = new ::grpc::experimental::ServerRpcInfo(this, method, type);
+ rpc_info_->RegisterInterceptors(creators);
+ }
+ return rpc_info_;
+ }
+
+ void set_message_allocator_state(
+ ::grpc::experimental::RpcAllocatorState* allocator_state) {
+ message_allocator_state_ = allocator_state;
+ }
+
+ struct CallWrapper {
+ ~CallWrapper();
+
+ grpc_call* call = nullptr;
+ };
+
+ // NOTE: call_ must be the first data member of this object so that its
+ // destructor is the last to be called, since its destructor may unref
+ // the underlying core call which holds the arena that may be used to
+ // hold this object.
+ CallWrapper call_;
+
+ CompletionOp* completion_op_ = nullptr;
+ bool has_notify_when_done_tag_ = false;
+ void* async_notify_when_done_tag_ = nullptr;
+ ::grpc::internal::CallbackWithSuccessTag completion_tag_;
+
+ gpr_timespec deadline_;
+ ::grpc::CompletionQueue* cq_ = nullptr;
+ bool sent_initial_metadata_ = false;
+ mutable std::shared_ptr<const ::grpc::AuthContext> auth_context_;
+ mutable ::grpc::internal::MetadataMap client_metadata_;
+ std::multimap<TString, TString> initial_metadata_;
+ std::multimap<TString, TString> trailing_metadata_;
+
+ bool compression_level_set_ = false;
+ grpc_compression_level compression_level_;
+ grpc_compression_algorithm compression_algorithm_;
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage>
+ pending_ops_;
+ bool has_pending_ops_ = false;
+
+ ::grpc::experimental::ServerRpcInfo* rpc_info_ = nullptr;
+ ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr;
+
+ class Reactor : public ::grpc::ServerUnaryReactor {
+ public:
+ void OnCancel() override {}
+ void OnDone() override {}
+ // Override InternalInlineable for this class since its reactions are
+ // trivial and thus do not need to be run from the executor (triggering a
+ // thread hop). This should only be used by internal reactors (thus the
+ // name) and not by user application code.
+ bool InternalInlineable() override { return true; }
+ };
+
+ void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
+ test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
+ }
+ bool test_status_set() const {
+ return (test_unary_ != nullptr) && test_unary_->status_set();
+ }
+ ::grpc::Status test_status() const { return test_unary_->status(); }
+
+ class TestServerCallbackUnary : public ::grpc::ServerCallbackUnary {
+ public:
+ TestServerCallbackUnary(ServerContextBase* ctx,
+ std::function<void(::grpc::Status)> func)
+ : reactor_(ctx->DefaultReactor()), func_(std::move(func)) {
+ this->BindReactor(reactor_);
+ }
+ void Finish(::grpc::Status s) override {
+ status_ = s;
+ func_(std::move(s));
+ status_set_.store(true, std::memory_order_release);
+ }
+ void SendInitialMetadata() override {}
+
+ bool status_set() const {
+ return status_set_.load(std::memory_order_acquire);
+ }
+ ::grpc::Status status() const { return status_; }
+
+ private:
+ void CallOnDone() override {}
+ ::grpc::internal::ServerReactor* reactor() override { return reactor_; }
+
+ ::grpc::ServerUnaryReactor* const reactor_;
+ std::atomic_bool status_set_{false};
+ ::grpc::Status status_;
+ const std::function<void(::grpc::Status s)> func_;
+ };
+
+ typename std::aligned_storage<sizeof(Reactor), alignof(Reactor)>::type
+ default_reactor_;
+ std::atomic_bool default_reactor_used_{false};
+ std::unique_ptr<TestServerCallbackUnary> test_unary_;
+};
+
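A minimal usage sketch (editorial illustration, not part of this header), assuming a generated synchronous service method Echo with hypothetical EchoRequest/EchoResponse messages; it exercises the metadata, compression, and cancellation members declared above:

    #include <grpcpp/grpcpp.h>

    ::grpc::Status Echo(::grpc::ServerContext* ctx, const EchoRequest* req,
                        EchoResponse* resp) {
      // Initial metadata must be added before any response bytes are sent.
      ctx->AddInitialMetadata("x-echo-version", "1");
      ctx->set_compression_level(GRPC_COMPRESS_LEVEL_MED);  // per-call setting
      if (ctx->IsCancelled()) {  // always safe to call in the sync API
        return ::grpc::Status::CANCELLED;
      }
      resp->set_message(req->message());
      // Trailing metadata travels together with the returned status.
      ctx->AddTrailingMetadata("x-echo-note", "done");
      return ::grpc::Status::OK;
    }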
+/// A ServerContext or CallbackServerContext allows the code implementing a
+/// service handler to:
+///
+/// - Add custom initial and trailing metadata key-value pairs that will be
+///   propagated to the client side.
+/// - Control call settings such as compression and authentication.
+/// - Access metadata coming from the client.
+/// - Get performance metrics (i.e., census).
+///
+/// Context settings are only relevant to the call handler they are supplied to,
+/// that is to say, they aren't sticky across multiple calls. Some of these
+/// settings, such as the compression options, can be made persistent at server
+/// construction time by specifying the appropriate \a ChannelArguments
+/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
+///
+/// \warning ServerContext instances should \em not be reused across RPCs.
+class ServerContext : public ServerContextBase {
+ public:
+ ServerContext() {} // for async calls
+
+ using ServerContextBase::AddInitialMetadata;
+ using ServerContextBase::AddTrailingMetadata;
+ using ServerContextBase::auth_context;
+ using ServerContextBase::c_call;
+ using ServerContextBase::census_context;
+ using ServerContextBase::client_metadata;
+ using ServerContextBase::compression_algorithm;
+ using ServerContextBase::compression_level;
+ using ServerContextBase::compression_level_set;
+ using ServerContextBase::deadline;
+ using ServerContextBase::IsCancelled;
+ using ServerContextBase::peer;
+ using ServerContextBase::raw_deadline;
+ using ServerContextBase::set_compression_algorithm;
+ using ServerContextBase::set_compression_level;
+ using ServerContextBase::SetLoadReportingCosts;
+ using ServerContextBase::TryCancel;
+
+ // Sync/CQ-based Async ServerContext only
+ using ServerContextBase::AsyncNotifyWhenDone;
+
+ private:
+ // Constructor for internal use by server only
+ friend class ::grpc::Server;
+ ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
+ : ServerContextBase(deadline, arr) {}
+
+ // CallbackServerContext only
+ using ServerContextBase::DefaultReactor;
+ using ServerContextBase::GetRpcAllocatorState;
+
+ /// Prevent copying.
+ ServerContext(const ServerContext&) = delete;
+ ServerContext& operator=(const ServerContext&) = delete;
+};
+
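For the CQ-based async API, a hedged sketch (the RequestEcho method name, tags, and completion-queue plumbing are assumptions) of how AsyncNotifyWhenDone() pairs with IsCancelled():

    ::grpc::ServerContext ctx;
    void* done_tag = &ctx;              // any value unique to this RPC
    ctx.AsyncNotifyWhenDone(done_tag);  // must be called before the RPC starts
    // service->RequestEcho(&ctx, &request, &responder, cq, cq, call_tag);
    // ...
    // Only after cq->Next() has delivered done_tag is it valid to ask:
    // bool was_cancelled = ctx.IsCancelled();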
+class CallbackServerContext : public ServerContextBase {
+ public:
+ /// Public constructors are for direct use only by mocking tests. In practice,
+ /// these objects will be owned by the library.
+ CallbackServerContext() {}
+
+ using ServerContextBase::AddInitialMetadata;
+ using ServerContextBase::AddTrailingMetadata;
+ using ServerContextBase::auth_context;
+ using ServerContextBase::c_call;
+ using ServerContextBase::census_context;
+ using ServerContextBase::client_metadata;
+ using ServerContextBase::compression_algorithm;
+ using ServerContextBase::compression_level;
+ using ServerContextBase::compression_level_set;
+ using ServerContextBase::deadline;
+ using ServerContextBase::IsCancelled;
+ using ServerContextBase::peer;
+ using ServerContextBase::raw_deadline;
+ using ServerContextBase::set_compression_algorithm;
+ using ServerContextBase::set_compression_level;
+ using ServerContextBase::SetLoadReportingCosts;
+ using ServerContextBase::TryCancel;
+
+ // CallbackServerContext only
+ using ServerContextBase::DefaultReactor;
+ using ServerContextBase::GetRpcAllocatorState;
+
+ private:
+ // Sync/CQ-based Async ServerContext only
+ using ServerContextBase::AsyncNotifyWhenDone;
+
+ /// Prevent copying.
+ CallbackServerContext(const CallbackServerContext&) = delete;
+ CallbackServerContext& operator=(const CallbackServerContext&) = delete;
+};
+
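A hedged sketch of the callback-API counterpart, assuming a callback-generated Echo method and the same hypothetical message types; it returns the library-owned reactor obtained from DefaultReactor():

    ::grpc::ServerUnaryReactor* Echo(::grpc::CallbackServerContext* ctx,
                                     const EchoRequest* req,
                                     EchoResponse* resp) {
      resp->set_message(req->message());
      auto* reactor = ctx->DefaultReactor();  // library-owned; do not delete
      reactor->Finish(::grpc::Status::OK);
      return reactor;
    }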
} // namespace grpc
-static_assert(
- std::is_base_of<::grpc::ServerContextBase, ::grpc::ServerContext>::value,
- "improper base class");
-static_assert(std::is_base_of<::grpc::ServerContextBase,
- ::grpc::CallbackServerContext>::value,
- "improper base class");
-static_assert(sizeof(::grpc::ServerContextBase) ==
- sizeof(::grpc::ServerContext),
- "wrong size");
-static_assert(sizeof(::grpc::ServerContextBase) ==
- sizeof(::grpc::CallbackServerContext),
- "wrong size");
-
+static_assert(
+ std::is_base_of<::grpc::ServerContextBase, ::grpc::ServerContext>::value,
+ "improper base class");
+static_assert(std::is_base_of<::grpc::ServerContextBase,
+ ::grpc::CallbackServerContext>::value,
+ "improper base class");
+static_assert(sizeof(::grpc::ServerContextBase) ==
+ sizeof(::grpc::ServerContext),
+ "wrong size");
+static_assert(sizeof(::grpc::ServerContextBase) ==
+ sizeof(::grpc::CallbackServerContext),
+ "wrong size");
+
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
index ac0f958959..7598e72a40 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/string_ref.h>
-namespace grpc {
+namespace grpc {
class ServerContextBase;
namespace internal {
class InterceptorBatchMethodsImpl;
@@ -76,7 +76,7 @@ class ServerRpcInfo {
/// Return a pointer to the underlying ServerContext structure associated
/// with the RPC to support features that apply to it
- ServerContextBase* server_context() { return ctx_; }
+ ServerContextBase* server_context() { return ctx_; }
private:
static_assert(Type::UNARY ==
@@ -92,7 +92,7 @@ class ServerRpcInfo {
static_cast<Type>(internal::RpcMethod::BIDI_STREAMING),
"violated expectation about Type enum");
- ServerRpcInfo(ServerContextBase* ctx, const char* method,
+ ServerRpcInfo(ServerContextBase* ctx, const char* method,
internal::RpcMethod::RpcType type)
: ctx_(ctx), method_(method), type_(static_cast<Type>(type)) {}
@@ -123,14 +123,14 @@ class ServerRpcInfo {
}
}
- ServerContextBase* ctx_ = nullptr;
+ ServerContextBase* ctx_ = nullptr;
const char* method_ = nullptr;
const Type type_;
std::atomic<intptr_t> ref_{1};
std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_;
friend class internal::InterceptorBatchMethodsImpl;
- friend class grpc::ServerContextBase;
+ friend class grpc::ServerContextBase;
};
} // namespace experimental
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
index c04c1b217c..d97b725025 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
@@ -19,8 +19,8 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H
#define GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H
-#include <grpc/impl/codegen/port_platform.h>
-
+#include <grpc/impl/codegen/port_platform.h>
+
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/call.h>
@@ -29,14 +29,14 @@
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/interceptor_common.h>
#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/server_context.h>
-namespace grpc {
+namespace grpc {
-class AsyncGenericService;
+class AsyncGenericService;
class Channel;
class CompletionQueue;
-class GenericServerContext;
+class GenericServerContext;
class ServerCompletionQueue;
class ServerCredentials;
class Service;
@@ -50,15 +50,15 @@ namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
class CallbackGenericService;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-} // namespace experimental
-#endif
-
-namespace experimental {
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+} // namespace experimental
+#endif
+
+namespace experimental {
class ServerInterceptorFactoryInterface;
} // namespace experimental
@@ -124,20 +124,20 @@ class ServerInterface : public internal::CallHook {
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
- virtual bool RegisterService(const TString* host, Service* service) = 0;
+ virtual bool RegisterService(const TString* host, Service* service) = 0;
/// Register a generic service. This call does not take ownership of the
/// service. The service must exist for the lifetime of the Server instance.
virtual void RegisterAsyncGenericService(AsyncGenericService* service) = 0;
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Register a callback generic service. This call does not take ownership of
- /// the service. The service must exist for the lifetime of the Server
- /// instance. May not be abstract since this is a post-1.0 API addition.
-
- virtual void RegisterCallbackGenericService(CallbackGenericService*
- /*service*/) {}
-#else
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Register a callback generic service. This call does not take ownership of
+ /// the service. The service must exist for the lifetime of the Server
+ /// instance. May not be abstract since this is a post-1.0 API addition.
+
+ virtual void RegisterCallbackGenericService(CallbackGenericService*
+ /*service*/) {}
+#else
/// NOTE: class experimental_registration_interface is not part of the public
/// API of this class
/// TODO(vjpai): Move these contents to public API when no longer experimental
@@ -156,7 +156,7 @@ class ServerInterface : public internal::CallHook {
virtual experimental_registration_interface* experimental_registration() {
return nullptr;
}
-#endif
+#endif
/// Tries to bind \a server to the given \a addr.
///
@@ -169,8 +169,8 @@ class ServerInterface : public internal::CallHook {
/// \return bound port number on success, 0 on failure.
///
/// \warning It's an error to call this method on an already started server.
- virtual int AddListeningPort(const TString& addr,
- ServerCredentials* creds) = 0;
+ virtual int AddListeningPort(const TString& addr,
+ ServerCredentials* creds) = 0;
/// Start the server.
///
@@ -178,7 +178,7 @@ class ServerInterface : public internal::CallHook {
/// caller is required to keep all completion queues live until the server is
/// destroyed.
/// \param num_cqs How many completion queues does \a cqs hold.
- virtual void Start(::grpc::ServerCompletionQueue** cqs, size_t num_cqs) = 0;
+ virtual void Start(::grpc::ServerCompletionQueue** cqs, size_t num_cqs) = 0;
virtual void ShutdownInternal(gpr_timespec deadline) = 0;
@@ -191,11 +191,11 @@ class ServerInterface : public internal::CallHook {
class BaseAsyncRequest : public internal::CompletionQueueTag {
public:
- BaseAsyncRequest(ServerInterface* server, ::grpc::ServerContext* context,
+ BaseAsyncRequest(ServerInterface* server, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag,
- bool delete_on_finalize);
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag,
+ bool delete_on_finalize);
virtual ~BaseAsyncRequest();
bool FinalizeResult(void** tag, bool* status) override;
@@ -205,10 +205,10 @@ class ServerInterface : public internal::CallHook {
protected:
ServerInterface* const server_;
- ::grpc::ServerContext* const context_;
+ ::grpc::ServerContext* const context_;
internal::ServerAsyncStreamingInterface* const stream_;
- ::grpc::CompletionQueue* const call_cq_;
- ::grpc::ServerCompletionQueue* const notification_cq_;
+ ::grpc::CompletionQueue* const call_cq_;
+ ::grpc::ServerCompletionQueue* const notification_cq_;
void* const tag_;
const bool delete_on_finalize_;
grpc_call* call_;
@@ -221,10 +221,10 @@ class ServerInterface : public internal::CallHook {
class RegisteredAsyncRequest : public BaseAsyncRequest {
public:
RegisteredAsyncRequest(ServerInterface* server,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, const char* name,
internal::RpcMethod::RpcType type);
@@ -242,7 +242,7 @@ class ServerInterface : public internal::CallHook {
protected:
void IssueRequest(void* registered_method, grpc_byte_buffer** payload,
- ::grpc::ServerCompletionQueue* notification_cq);
+ ::grpc::ServerCompletionQueue* notification_cq);
const char* name_;
const internal::RpcMethod::RpcType type_;
};
@@ -251,10 +251,10 @@ class ServerInterface : public internal::CallHook {
public:
NoPayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
ServerInterface* server,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
@@ -269,10 +269,10 @@ class ServerInterface : public internal::CallHook {
class PayloadAsyncRequest final : public RegisteredAsyncRequest {
public:
PayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
- ServerInterface* server, ::grpc::ServerContext* context,
+ ServerInterface* server, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, Message* request)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
@@ -327,8 +327,8 @@ class ServerInterface : public internal::CallHook {
public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, bool delete_on_finalize);
bool FinalizeResult(void** tag, bool* status) override;
@@ -339,10 +339,10 @@ class ServerInterface : public internal::CallHook {
template <class Message>
void RequestAsyncCall(internal::RpcServiceMethod* method,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, Message* message) {
GPR_CODEGEN_ASSERT(method);
new PayloadAsyncRequest<Message>(method, this, context, stream, call_cq,
@@ -350,21 +350,21 @@ class ServerInterface : public internal::CallHook {
}
void RequestAsyncCall(internal::RpcServiceMethod* method,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag) {
GPR_CODEGEN_ASSERT(method);
new NoPayloadAsyncRequest(method, this, context, stream, call_cq,
notification_cq, tag);
}
- void RequestAsyncGenericCall(GenericServerContext* context,
- internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
- void* tag) {
+ void RequestAsyncGenericCall(GenericServerContext* context,
+ internal::ServerAsyncStreamingInterface* stream,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
+ void* tag) {
new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
tag, true);
}
@@ -389,7 +389,7 @@ class ServerInterface : public internal::CallHook {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
- virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
+ virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
index dab84f5ed3..30be904a3c 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/server_interface.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
class ServerContext;
@@ -105,15 +105,15 @@ class Service {
explicit experimental_type(Service* service) : service_(service) {}
void MarkMethodCallback(int index, internal::MethodHandler* handler) {
- service_->MarkMethodCallbackInternal(index, handler);
+ service_->MarkMethodCallbackInternal(index, handler);
}
void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
- service_->MarkMethodRawCallbackInternal(index, handler);
+ service_->MarkMethodRawCallbackInternal(index, handler);
}
internal::MethodHandler* GetHandler(int index) {
- return service_->GetHandlerInternal(index);
+ return service_->GetHandlerInternal(index);
}
private:
@@ -123,11 +123,11 @@ class Service {
experimental_type experimental() { return experimental_type(this); }
template <class Message>
- void RequestAsyncUnary(int index, ::grpc::ServerContext* context,
+ void RequestAsyncUnary(int index, ::grpc::ServerContext* context,
Message* request,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag) {
// Typecast the index to size_t for indexing into a vector
// while preserving the API that existed before a compiler
@@ -137,29 +137,29 @@ class Service {
notification_cq, tag, request);
}
void RequestAsyncClientStreaming(
- int index, ::grpc::ServerContext* context,
+ int index, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag);
}
template <class Message>
void RequestAsyncServerStreaming(
- int index, ::grpc::ServerContext* context, Message* request,
+ int index, ::grpc::ServerContext* context, Message* request,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncBidiStreaming(
- int index, ::grpc::ServerContext* context,
+ int index, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag);
@@ -216,55 +216,55 @@ class Service {
methods_[idx]->SetMethodType(internal::RpcMethod::BIDI_STREAMING);
}
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- void MarkMethodCallback(int index, internal::MethodHandler* handler) {
- MarkMethodCallbackInternal(index, handler);
- }
-
- void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
- MarkMethodRawCallbackInternal(index, handler);
- }
-
- internal::MethodHandler* GetHandler(int index) {
- return GetHandlerInternal(index);
- }
-#endif
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ void MarkMethodCallback(int index, internal::MethodHandler* handler) {
+ MarkMethodCallbackInternal(index, handler);
+ }
+
+ void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
+ MarkMethodRawCallbackInternal(index, handler);
+ }
+
+ internal::MethodHandler* GetHandler(int index) {
+ return GetHandlerInternal(index);
+ }
+#endif
private:
- // TODO(vjpai): migrate the Internal functions to mainline functions once
- // callback API is fully de-experimental
- void MarkMethodCallbackInternal(int index, internal::MethodHandler* handler) {
- // This does not have to be a hard error, however no one has approached us
- // with a use case yet. Please file an issue if you believe you have one.
- size_t idx = static_cast<size_t>(index);
- GPR_CODEGEN_ASSERT(
- methods_[idx].get() != nullptr &&
- "Cannot mark the method as 'callback' because it has already been "
- "marked as 'generic'.");
- methods_[idx]->SetHandler(handler);
- methods_[idx]->SetServerApiType(
- internal::RpcServiceMethod::ApiType::CALL_BACK);
- }
-
- void MarkMethodRawCallbackInternal(int index,
- internal::MethodHandler* handler) {
- // This does not have to be a hard error, however no one has approached us
- // with a use case yet. Please file an issue if you believe you have one.
- size_t idx = static_cast<size_t>(index);
- GPR_CODEGEN_ASSERT(
- methods_[idx].get() != nullptr &&
- "Cannot mark the method as 'raw callback' because it has already "
- "been marked as 'generic'.");
- methods_[idx]->SetHandler(handler);
- methods_[idx]->SetServerApiType(
- internal::RpcServiceMethod::ApiType::RAW_CALL_BACK);
- }
-
- internal::MethodHandler* GetHandlerInternal(int index) {
- size_t idx = static_cast<size_t>(index);
- return methods_[idx]->handler();
- }
-
- friend class Server;
+ // TODO(vjpai): migrate the Internal functions to mainline functions once
+ // callback API is fully de-experimental
+ void MarkMethodCallbackInternal(int index, internal::MethodHandler* handler) {
+ // This does not have to be a hard error, however no one has approached us
+ // with a use case yet. Please file an issue if you believe you have one.
+ size_t idx = static_cast<size_t>(index);
+ GPR_CODEGEN_ASSERT(
+ methods_[idx].get() != nullptr &&
+ "Cannot mark the method as 'callback' because it has already been "
+ "marked as 'generic'.");
+ methods_[idx]->SetHandler(handler);
+ methods_[idx]->SetServerApiType(
+ internal::RpcServiceMethod::ApiType::CALL_BACK);
+ }
+
+ void MarkMethodRawCallbackInternal(int index,
+ internal::MethodHandler* handler) {
+ // This does not have to be a hard error, however no one has approached us
+ // with a use case yet. Please file an issue if you believe you have one.
+ size_t idx = static_cast<size_t>(index);
+ GPR_CODEGEN_ASSERT(
+ methods_[idx].get() != nullptr &&
+ "Cannot mark the method as 'raw callback' because it has already "
+ "been marked as 'generic'.");
+ methods_[idx]->SetHandler(handler);
+ methods_[idx]->SetServerApiType(
+ internal::RpcServiceMethod::ApiType::RAW_CALL_BACK);
+ }
+
+ internal::MethodHandler* GetHandlerInternal(int index) {
+ size_t idx = static_cast<size_t>(index);
+ return methods_[idx]->handler();
+ }
+
+ friend class Server;
friend class ServerInterface;
ServerInterface* server_;
std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
index 603a500b58..b1a24dcef8 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
@@ -58,7 +58,7 @@ class Slice final {
reinterpret_cast<const char*>(buf), len)) {}
/// Construct a slice from a copied string
- Slice(const TString& str)
+ Slice(const TString& str)
: slice_(g_core_codegen_interface->grpc_slice_from_copied_buffer(
str.c_str(), str.length())) {}
@@ -123,17 +123,17 @@ inline grpc::string_ref StringRefFromSlice(const grpc_slice* slice) {
GRPC_SLICE_LENGTH(*slice));
}
-inline TString StringFromCopiedSlice(grpc_slice slice) {
- return TString(reinterpret_cast<char*>(GRPC_SLICE_START_PTR(slice)),
- GRPC_SLICE_LENGTH(slice));
+inline TString StringFromCopiedSlice(grpc_slice slice) {
+ return TString(reinterpret_cast<char*>(GRPC_SLICE_START_PTR(slice)),
+ GRPC_SLICE_LENGTH(slice));
}
-inline grpc_slice SliceReferencingString(const TString& str) {
+inline grpc_slice SliceReferencingString(const TString& str) {
return g_core_codegen_interface->grpc_slice_from_static_buffer(str.data(),
str.length());
}
-inline grpc_slice SliceFromCopiedString(const TString& str) {
+inline grpc_slice SliceFromCopiedString(const TString& str) {
return g_core_codegen_interface->grpc_slice_from_copied_buffer(str.data(),
str.length());
}
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
index a9c689f731..a5ad6f32fe 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
@@ -88,14 +88,14 @@ class Status {
/// Construct an instance with associated \a code and \a error_message.
/// It is an error to construct an OK status with non-empty \a error_message.
- Status(StatusCode code, const TString& error_message)
+ Status(StatusCode code, const TString& error_message)
: code_(code), error_message_(error_message) {}
/// Construct an instance with \a code, \a error_message and
/// \a error_details. It is an error to construct an OK status with non-empty
/// \a error_message and/or \a error_details.
- Status(StatusCode code, const TString& error_message,
- const TString& error_details)
+ Status(StatusCode code, const TString& error_message,
+ const TString& error_details)
: code_(code),
error_message_(error_message),
binary_error_details_(error_details) {}
@@ -109,10 +109,10 @@ class Status {
/// Return the instance's error code.
StatusCode error_code() const { return code_; }
/// Return the instance's error message.
- TString error_message() const { return error_message_; }
+ TString error_message() const { return error_message_; }
/// Return the (binary) error details.
// Usually it contains a serialized google.rpc.Status proto.
- TString error_details() const { return binary_error_details_; }
+ TString error_details() const { return binary_error_details_; }
/// Is the status OK?
bool ok() const { return code_ == StatusCode::OK; }
@@ -124,8 +124,8 @@ class Status {
private:
StatusCode code_;
- TString error_message_;
- TString binary_error_details_;
+ TString error_message_;
+ TString binary_error_details_;
};
} // namespace grpc
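A short sketch of constructing and inspecting a Status via the accessors shown in this hunk (values chosen for illustration only):

    ::grpc::Status s(::grpc::StatusCode::INVALID_ARGUMENT,
                     "name must be non-empty");
    bool failed = !s.ok();                     // true
    ::grpc::StatusCode code = s.error_code();  // StatusCode::INVALID_ARGUMENT
    TString message = s.error_message();       // "name must be non-empty"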
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
index a099a9d76a..c5dcd31c1d 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
@@ -28,8 +28,8 @@
#include <grpcpp/impl/codegen/config.h>
-#include <util/stream/output.h>
-
+#include <util/stream/output.h>
+
namespace grpc {
/// This class is a non owning reference to a string.
@@ -61,7 +61,7 @@ class string_ref {
string_ref(const char* s) : data_(s), length_(strlen(s)) {}
string_ref(const char* s, size_t l) : data_(s), length_(l) {}
- string_ref(const TString& s) : data_(s.data()), length_(s.length()) {}
+ string_ref(const TString& s) : data_(s.data()), length_(s.length()) {}
/// iterators
const_iterator begin() const { return data_; }
@@ -139,9 +139,9 @@ inline bool operator<=(string_ref x, string_ref y) { return x.compare(y) <= 0; }
inline bool operator>(string_ref x, string_ref y) { return x.compare(y) > 0; }
inline bool operator>=(string_ref x, string_ref y) { return x.compare(y) >= 0; }
-inline IOutputStream& operator<<(IOutputStream& out, const string_ref& string) {
- TString t(string.begin(), string.end());
- return out << t;
+inline IOutputStream& operator<<(IOutputStream& out, const string_ref& string) {
+ TString t(string.begin(), string.end());
+ return out << t;
}
} // namespace grpc
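As a reminder that string_ref is a non-owning view, a hedged sketch of copying a value out before its backing storage (for example, a metadata map entry; the source variable is hypothetical) is released:

    ::grpc::string_ref ref = some_metadata_value;  // non-owning view
    TString owned(ref.begin(), ref.end());         // copy before storage goes away
    Cout << ref << Endl;                           // uses the operator<< above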
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
index be124aaeb2..408f42f280 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,926 +18,926 @@
#ifndef GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
#define GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
namespace internal {
-/// Common interface for all synchronous client side streaming.
-class ClientStreamingInterface {
- public:
- virtual ~ClientStreamingInterface() {}
-
- /// Block waiting until the stream finishes and a final status of the call is
- /// available.
- ///
- /// It is appropriate to call this method exactly once when both:
- /// * the calling code (client-side) has no more message to send
- /// (this can be declared implicitly by calling this method, or
- /// explicitly through an earlier call to <i>WritesDone</i> method of the
- /// class in use, e.g. \a ClientWriterInterface::WritesDone or
- /// \a ClientReaderWriterInterface::WritesDone).
- /// * there are no more messages to be received from the server (which can
- /// be known implicitly, or explicitly from an earlier call to \a
- /// ReaderInterface::Read that returned "false").
- ///
- /// This function will return either:
- /// - when all incoming messages have been read and the server has
- /// returned status.
- /// - when the server has returned a non-OK status.
- /// - OR when the call failed for some reason and the library generated a
- /// status.
- ///
- /// Return values:
- /// - \a Status contains the status code, message and details for the call
- /// - the \a ClientContext associated with this call is updated with
- /// possible trailing metadata sent from the server.
- virtual ::grpc::Status Finish() = 0;
-};
-
-/// Common interface for all synchronous server side streaming.
-class ServerStreamingInterface {
- public:
- virtual ~ServerStreamingInterface() {}
-
- /// Block to send initial metadata to client.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// The initial metadata that will be sent to the client will be
- /// taken from the \a ServerContext associated with the call.
- virtual void SendInitialMetadata() = 0;
-};
-
-/// An interface that yields a sequence of messages of type \a R.
+/// Common interface for all synchronous client side streaming.
+class ClientStreamingInterface {
+ public:
+ virtual ~ClientStreamingInterface() {}
+
+ /// Block waiting until the stream finishes and a final status of the call is
+ /// available.
+ ///
+ /// It is appropriate to call this method exactly once when both:
+  /// * the calling code (client-side) has no more messages to send
+ /// (this can be declared implicitly by calling this method, or
+ /// explicitly through an earlier call to <i>WritesDone</i> method of the
+ /// class in use, e.g. \a ClientWriterInterface::WritesDone or
+ /// \a ClientReaderWriterInterface::WritesDone).
+ /// * there are no more messages to be received from the server (which can
+ /// be known implicitly, or explicitly from an earlier call to \a
+ /// ReaderInterface::Read that returned "false").
+ ///
+ /// This function will return either:
+ /// - when all incoming messages have been read and the server has
+ /// returned status.
+ /// - when the server has returned a non-OK status.
+ /// - OR when the call failed for some reason and the library generated a
+ /// status.
+ ///
+ /// Return values:
+ /// - \a Status contains the status code, message and details for the call
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible trailing metadata sent from the server.
+ virtual ::grpc::Status Finish() = 0;
+};
+
+/// Common interface for all synchronous server side streaming.
+class ServerStreamingInterface {
+ public:
+ virtual ~ServerStreamingInterface() {}
+
+ /// Block to send initial metadata to client.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// The initial metadata that will be sent to the client will be
+ /// taken from the \a ServerContext associated with the call.
+ virtual void SendInitialMetadata() = 0;
+};
+
+/// An interface that yields a sequence of messages of type \a R.
template <class R>
-class ReaderInterface {
- public:
- virtual ~ReaderInterface() {}
-
- /// Get an upper bound on the next message size available for reading on this
- /// stream.
- virtual bool NextMessageSize(uint32_t* sz) = 0;
-
- /// Block to read a message and parse to \a msg. Returns \a true on success.
- /// This is thread-safe with respect to \a Write or \WritesDone methods on
- /// the same stream. It should not be called concurrently with another \a
- /// Read on the same stream as the order of delivery will not be defined.
- ///
- /// \param[out] msg The read message.
- ///
- /// \return \a false when there will be no more incoming messages, either
- /// because the other side has called \a WritesDone() or the stream has failed
- /// (or been cancelled).
- virtual bool Read(R* msg) = 0;
-};
-
-/// An interface that can be fed a sequence of messages of type \a W.
+class ReaderInterface {
+ public:
+ virtual ~ReaderInterface() {}
+
+ /// Get an upper bound on the next message size available for reading on this
+ /// stream.
+ virtual bool NextMessageSize(uint32_t* sz) = 0;
+
+ /// Block to read a message and parse to \a msg. Returns \a true on success.
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods on
+ /// the same stream. It should not be called concurrently with another \a
+ /// Read on the same stream as the order of delivery will not be defined.
+ ///
+ /// \param[out] msg The read message.
+ ///
+ /// \return \a false when there will be no more incoming messages, either
+ /// because the other side has called \a WritesDone() or the stream has failed
+ /// (or been cancelled).
+ virtual bool Read(R* msg) = 0;
+};
+
+/// An interface that can be fed a sequence of messages of type \a W.
template <class W>
-class WriterInterface {
- public:
- virtual ~WriterInterface() {}
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- virtual bool Write(const W& msg, ::grpc::WriteOptions options) = 0;
-
- /// Block to write \a msg to the stream with default write options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- inline bool Write(const W& msg) { return Write(msg, ::grpc::WriteOptions()); }
-
- /// Write \a msg and coalesce it with the writing of trailing metadata, using
- /// WriteOptions \a options.
- ///
- /// For client, WriteLast is equivalent of performing Write and WritesDone in
- /// a single step. \a msg and trailing metadata are coalesced and sent on wire
- /// by calling this function. For server, WriteLast buffers the \a msg.
- /// The writing of \a msg is held until the service handler returns,
- /// where \a msg and trailing metadata are coalesced and sent on wire.
- /// Note that WriteLast can only buffer \a msg up to the flow control window
- /// size. If \a msg size is larger than the window size, it will be sent on
- /// wire without buffering.
- ///
- /// \param[in] msg The message to be written to the stream.
- /// \param[in] options The WriteOptions to be used to write this message.
- void WriteLast(const W& msg, ::grpc::WriteOptions options) {
- Write(msg, options.set_last_message());
- }
-};
-
+class WriterInterface {
+ public:
+ virtual ~WriterInterface() {}
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ virtual bool Write(const W& msg, ::grpc::WriteOptions options) = 0;
+
+ /// Block to write \a msg to the stream with default write options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ inline bool Write(const W& msg) { return Write(msg, ::grpc::WriteOptions()); }
+
+ /// Write \a msg and coalesce it with the writing of trailing metadata, using
+ /// WriteOptions \a options.
+ ///
+  /// For client, WriteLast is equivalent to performing Write and WritesDone in
+ /// a single step. \a msg and trailing metadata are coalesced and sent on wire
+ /// by calling this function. For server, WriteLast buffers the \a msg.
+ /// The writing of \a msg is held until the service handler returns,
+ /// where \a msg and trailing metadata are coalesced and sent on wire.
+ /// Note that WriteLast can only buffer \a msg up to the flow control window
+ /// size. If \a msg size is larger than the window size, it will be sent on
+ /// wire without buffering.
+ ///
+ /// \param[in] msg The message to be written to the stream.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ void WriteLast(const W& msg, ::grpc::WriteOptions options) {
+ Write(msg, options.set_last_message());
+ }
+};
+
} // namespace internal
-/// Client-side interface for streaming reads of message of type \a R.
+/// Client-side interface for streaming reads of message of type \a R.
template <class R>
-class ClientReaderInterface : public internal::ClientStreamingInterface,
- public internal::ReaderInterface<R> {
- public:
- /// Block to wait for initial metadata from server. The received metadata
- /// can only be accessed after this call returns. Should only be called before
- /// the first read. Calling this method is optional, and if it is not called
- /// the metadata will be available in ClientContext after the first read.
- virtual void WaitForInitialMetadata() = 0;
-};
-
-namespace internal {
+class ClientReaderInterface : public internal::ClientStreamingInterface,
+ public internal::ReaderInterface<R> {
+ public:
+ /// Block to wait for initial metadata from server. The received metadata
+ /// can only be accessed after this call returns. Should only be called before
+ /// the first read. Calling this method is optional, and if it is not called
+ /// the metadata will be available in ClientContext after the first read.
+ virtual void WaitForInitialMetadata() = 0;
+};
+
+namespace internal {
template <class R>
-class ClientReaderFactory {
- public:
- template <class W>
- static ClientReader<R>* Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const W& request) {
- return new ClientReader<R>(channel, method, context, request);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
-/// where the stream of messages coming from the server has messages
-/// of type \a R.
-template <class R>
-class ClientReader final : public ClientReaderInterface<R> {
- public:
- /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
- /// semantics.
- ///
- // Side effect:
- /// Once complete, the initial metadata read from
- /// the server will be accessible through the \a ClientContext used to
- /// construct this object.
- void WaitForInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); /// status ignored
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_.max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- /// See the \a ReaderInterface.Read method for semantics.
- /// Side effect:
- /// This also receives initial metadata from the server, if not
- /// already received (if initial metadata is received, it can be then
- /// accessed through the \a ClientContext associated with this call).
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ops.RecvMessage(msg);
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops) && ops.got_message;
- }
-
- /// See the \a ClientStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// The \a ClientContext associated with this call is updated with
- /// possible metadata received from the server.
- ::grpc::Status Finish() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
- ::grpc::Status status;
- ops.ClientRecvStatus(context_, &status);
- call_.PerformOps(&ops);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
- return status;
- }
-
- private:
- friend class internal::ClientReaderFactory<R>;
- ::grpc::ClientContext* context_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-
- /// Block to create a stream and write the initial metadata and \a request
- /// out. Note that \a context will be used to fill in custom initial
- /// metadata used to send to the server when starting the call.
- template <class W>
- ClientReader(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const W& request)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(ops.SendMessagePtr(&request).ok());
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
-};
-
-/// Client-side interface for streaming writes of message type \a W.
+class ClientReaderFactory {
+ public:
+ template <class W>
+ static ClientReader<R>* Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const W& request) {
+ return new ClientReader<R>(channel, method, context, request);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
+/// where the stream of messages coming from the server has messages
+/// of type \a R.
+template <class R>
+class ClientReader final : public ClientReaderInterface<R> {
+ public:
+ /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
+ /// semantics.
+ ///
+  /// Side effect:
+ /// Once complete, the initial metadata read from
+ /// the server will be accessible through the \a ClientContext used to
+ /// construct this object.
+ void WaitForInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+    cq_.Pluck(&ops); // status ignored
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_.max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ /// See the \a ReaderInterface.Read method for semantics.
+ /// Side effect:
+ /// This also receives initial metadata from the server, if not
+ /// already received (if initial metadata is received, it can be then
+ /// accessed through the \a ClientContext associated with this call).
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ops.RecvMessage(msg);
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops) && ops.got_message;
+ }
+
+ /// See the \a ClientStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// The \a ClientContext associated with this call is updated with
+ /// possible metadata received from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
+ ::grpc::Status status;
+ ops.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&ops);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientReaderFactory<R>;
+ ::grpc::ClientContext* context_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+
+ /// Block to create a stream and write the initial metadata and \a request
+ /// out. Note that \a context will be used to fill in custom initial
+ /// metadata used to send to the server when starting the call.
+ template <class W>
+ ClientReader(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const W& request)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(ops.SendMessagePtr(&request).ok());
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+};
+
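For context, a minimal sketch of how generated client code typically drives a ClientReader for a server-streaming RPC. The Echo service, its ListReplies method, and the EchoRequest/EchoReply messages are hypothetical stand-ins for whatever the .proto defines; only the ClientContext/ClientReader calls correspond to the header above.

    #include <memory>
    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc ListReplies(EchoRequest) returns (stream EchoReply);
    void ReadAllReplies(Echo::Stub* stub, const EchoRequest& request) {
      grpc::ClientContext context;
      // The generated stub returns a ClientReader; its blocking constructor has
      // already sent the initial metadata and the request message.
      std::unique_ptr<grpc::ClientReader<EchoReply>> reader(
          stub->ListReplies(&context, request));
      EchoReply reply;
      while (reader->Read(&reply)) {  // blocks; returns false when the stream ends
        // consume reply
      }
      grpc::Status status = reader->Finish();  // collects the trailing status
      if (!status.ok()) { /* handle the error */ }
    }
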
+/// Client-side interface for streaming writes of message type \a W.
+template <class W>
+class ClientWriterInterface : public internal::ClientStreamingInterface,
+ public internal::WriterInterface<W> {
+ public:
+  /// Half close writing from the client (signal that the stream of messages
+  /// coming from the client is complete).
+ /// Blocks until currently-pending writes are completed.
+ /// Thread safe with respect to \a ReaderInterface::Read operations only
+ ///
+ /// \return Whether the writes were successful.
+ virtual bool WritesDone() = 0;
+};
+
+namespace internal {
template <class W>
-class ClientWriterInterface : public internal::ClientStreamingInterface,
- public internal::WriterInterface<W> {
- public:
-  /// Half close writing from the client (signal that the stream of messages
-  /// coming from the client is complete).
- /// Blocks until currently-pending writes are completed.
- /// Thread safe with respect to \a ReaderInterface::Read operations only
- ///
- /// \return Whether the writes were successful.
- virtual bool WritesDone() = 0;
-};
-
-namespace internal {
+class ClientWriterFactory {
+ public:
+ template <class R>
+ static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, R* response) {
+ return new ClientWriter<W>(channel, method, context, response);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
+/// where the outgoing message stream coming from the client has messages of
+/// type \a W.
template <class W>
-class ClientWriterFactory {
- public:
- template <class R>
- static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, R* response) {
- return new ClientWriter<W>(channel, method, context, response);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
-/// where the outgoing message stream coming from the client has messages of
-/// type \a W.
-template <class W>
-class ClientWriter : public ClientWriterInterface<W> {
- public:
- /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
- /// semantics.
- ///
-  /// Side effect:
- /// Once complete, the initial metadata read from the server will be
- /// accessible through the \a ClientContext used to construct this object.
- void WaitForInitialMetadata() {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); // status ignored
- }
-
- /// See the WriterInterface.Write(const W& msg, WriteOptions options) method
- /// for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
- /// \a ClientContext associated with this call).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
-
- if (options.is_last_message()) {
- options.set_buffer_hint();
- ops.ClientSendClose();
- }
- if (context_->initial_metadata_corked_) {
- ops.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- context_->set_initial_metadata_corked(false);
- }
- if (!ops.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
-
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- bool WritesDone() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- /// See the ClientStreamingInterface.Finish method for semantics.
- /// Side effects:
- /// - Also receives initial metadata if not already received.
- /// - Attempts to fill in the \a response parameter passed
- /// to the constructor of this instance with the response
- /// message from the server.
- ::grpc::Status Finish() override {
- ::grpc::Status status;
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, &status);
- call_.PerformOps(&finish_ops_);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&finish_ops_));
- return status;
- }
-
- private:
- friend class internal::ClientWriterFactory<W>;
-
- /// Block to create a stream (i.e. send request headers and other initial
- /// metadata to the server). Note that \a context will be used to fill
- /// in custom initial metadata. \a response will be filled in with the
- /// single expected response message from the server upon a successful
- /// call to the \a Finish method of this instance.
- template <class R>
- ClientWriter(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, R* response)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
-
- if (!context_->initial_metadata_corked_) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpGenericRecvMessage,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-};
-
-/// Client-side interface for bi-directional streaming with
-/// client-to-server stream messages of type \a W and
-/// server-to-client stream messages of type \a R.
+class ClientWriter : public ClientWriterInterface<W> {
+ public:
+ /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
+ /// semantics.
+ ///
+  /// Side effect:
+ /// Once complete, the initial metadata read from the server will be
+ /// accessible through the \a ClientContext used to construct this object.
+ void WaitForInitialMetadata() {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops); // status ignored
+ }
+
+ /// See the WriterInterface.Write(const W& msg, WriteOptions options) method
+ /// for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+ /// \a ClientContext associated with this call).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ ops.ClientSendClose();
+ }
+ if (context_->initial_metadata_corked_) {
+ ops.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ context_->set_initial_metadata_corked(false);
+ }
+ if (!ops.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ bool WritesDone() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ /// See the ClientStreamingInterface.Finish method for semantics.
+ /// Side effects:
+ /// - Also receives initial metadata if not already received.
+ /// - Attempts to fill in the \a response parameter passed
+ /// to the constructor of this instance with the response
+ /// message from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::Status status;
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&finish_ops_);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&finish_ops_));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientWriterFactory<W>;
+
+ /// Block to create a stream (i.e. send request headers and other initial
+ /// metadata to the server). Note that \a context will be used to fill
+ /// in custom initial metadata. \a response will be filled in with the
+ /// single expected response message from the server upon a successful
+ /// call to the \a Finish method of this instance.
+ template <class R>
+ ClientWriter(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, R* response)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+
+ if (!context_->initial_metadata_corked_) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpGenericRecvMessage,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+};
+
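Correspondingly, a hedged sketch of client-streaming usage through ClientWriter; the Accumulate method and the EchoRequest/EchoSummary types are again hypothetical, while Write, WritesDone, and Finish are the calls defined above.

    #include <memory>
    #include <vector>
    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc Accumulate(stream EchoRequest) returns (EchoSummary);
    grpc::Status SendAll(Echo::Stub* stub, const std::vector<EchoRequest>& batch,
                         EchoSummary* summary) {
      grpc::ClientContext context;
      std::unique_ptr<grpc::ClientWriter<EchoRequest>> writer(
          stub->Accumulate(&context, summary));  // *summary is filled in by Finish()
      for (const EchoRequest& msg : batch) {
        if (!writer->Write(msg)) break;  // false once the stream is broken
      }
      writer->WritesDone();              // half-close: no more client messages
      return writer->Finish();           // receives the response and trailing status
    }
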
+/// Client-side interface for bi-directional streaming with
+/// client-to-server stream messages of type \a W and
+/// server-to-client stream messages of type \a R.
+template <class W, class R>
+class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
+ public internal::WriterInterface<W>,
+ public internal::ReaderInterface<R> {
+ public:
+ /// Block to wait for initial metadata from server. The received metadata
+ /// can only be accessed after this call returns. Should only be called before
+ /// the first read. Calling this method is optional, and if it is not called
+ /// the metadata will be available in ClientContext after the first read.
+ virtual void WaitForInitialMetadata() = 0;
+
+  /// Half close writing from the client (signal that the stream of messages
+  /// coming from the client is complete).
+ /// Blocks until currently-pending writes are completed.
+ /// Thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \return Whether the writes were successful.
+ virtual bool WritesDone() = 0;
+};
+
+namespace internal {
template <class W, class R>
-class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
- public internal::WriterInterface<W>,
- public internal::ReaderInterface<R> {
- public:
- /// Block to wait for initial metadata from server. The received metadata
- /// can only be accessed after this call returns. Should only be called before
- /// the first read. Calling this method is optional, and if it is not called
- /// the metadata will be available in ClientContext after the first read.
- virtual void WaitForInitialMetadata() = 0;
-
-  /// Half close writing from the client (signal that the stream of messages
-  /// coming from the client is complete).
- /// Blocks until currently-pending writes are completed.
- /// Thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \return Whether the writes were successful.
- virtual bool WritesDone() = 0;
-};
-
-namespace internal {
+class ClientReaderWriterFactory {
+ public:
+ static ClientReaderWriter<W, R>* Create(
+ ::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context) {
+ return new ClientReaderWriter<W, R>(channel, method, context);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
+/// where the outgoing message stream coming from the client has messages of
+/// type \a W, and the incoming message stream coming from the server has
+/// messages of type \a R.
template <class W, class R>
-class ClientReaderWriterFactory {
- public:
- static ClientReaderWriter<W, R>* Create(
- ::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context) {
- return new ClientReaderWriter<W, R>(channel, method, context);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
-/// where the outgoing message stream coming from the client has messages of
-/// type \a W, and the incoming message stream coming from the server has
-/// messages of type \a R.
-template <class W, class R>
-class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
- public:
- /// Block waiting to read initial metadata from the server.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// Once complete, the initial metadata read from the server will be
- /// accessible through the \a ClientContext used to construct this object.
- void WaitForInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); // status ignored
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_.max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- /// See the \a ReaderInterface.Read method for semantics.
- /// Side effect:
- /// Also receives initial metadata if not already received (updates the \a
- /// ClientContext associated with this call in that case).
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ops.RecvMessage(msg);
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops) && ops.got_message;
- }
-
- /// See the \a WriterInterface.Write method for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
- /// \a ClientContext associated with this call to fill in values).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
-
- if (options.is_last_message()) {
- options.set_buffer_hint();
- ops.ClientSendClose();
- }
- if (context_->initial_metadata_corked_) {
- ops.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- context_->set_initial_metadata_corked(false);
- }
- if (!ops.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
-
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- bool WritesDone() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- /// See the ClientStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible trailing metadata sent from the server.
- ::grpc::Status Finish() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ::grpc::Status status;
- ops.ClientRecvStatus(context_, &status);
- call_.PerformOps(&ops);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
- return status;
- }
-
- private:
- friend class internal::ClientReaderWriterFactory<W, R>;
-
- ::grpc::ClientContext* context_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-
- /// Block to create a stream and write the initial metadata and \a request
- /// out. Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- ClientReaderWriter(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- if (!context_->initial_metadata_corked_) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
- }
-};
-
-/// Server-side interface for streaming reads of messages of type \a R.
+class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
+ public:
+ /// Block waiting to read initial metadata from the server.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// Once complete, the initial metadata read from the server will be
+ /// accessible through the \a ClientContext used to construct this object.
+ void WaitForInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops); // status ignored
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_.max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ /// See the \a ReaderInterface.Read method for semantics.
+ /// Side effect:
+ /// Also receives initial metadata if not already received (updates the \a
+ /// ClientContext associated with this call in that case).
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ops.RecvMessage(msg);
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops) && ops.got_message;
+ }
+
+ /// See the \a WriterInterface.Write method for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+ /// \a ClientContext associated with this call to fill in values).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ ops.ClientSendClose();
+ }
+ if (context_->initial_metadata_corked_) {
+ ops.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ context_->set_initial_metadata_corked(false);
+ }
+ if (!ops.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ bool WritesDone() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ /// See the ClientStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible trailing metadata sent from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ::grpc::Status status;
+ ops.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&ops);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientReaderWriterFactory<W, R>;
+
+ ::grpc::ClientContext* context_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+
+ /// Block to create a stream and write the initial metadata and \a request
+ /// out. Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ ClientReaderWriter(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ if (!context_->initial_metadata_corked_) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+ }
+};
+
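A short, assumption-laden sketch of bi-directional streaming from the client side; the Chat method and the Note message (with a text field) are hypothetical, while the stream calls are the ones declared above.

    #include <memory>
    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc Chat(stream Note) returns (stream Note);
    void PingPong(Echo::Stub* stub) {
      grpc::ClientContext context;
      std::shared_ptr<grpc::ClientReaderWriter<Note, Note>> stream(
          stub->Chat(&context));
      Note outgoing;
      outgoing.set_text("hello");        // hypothetical field
      stream->Write(outgoing);           // also flushes initial metadata if corked
      stream->WritesDone();              // half-close from the client side
      Note incoming;
      while (stream->Read(&incoming)) {  // drain the server's responses
        // handle incoming
      }
      grpc::Status status = stream->Finish();
    }
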
+/// Server-side interface for streaming reads of messages of type \a R.
template <class R>
-class ServerReaderInterface : public internal::ServerStreamingInterface,
- public internal::ReaderInterface<R> {};
+class ServerReaderInterface : public internal::ServerStreamingInterface,
+ public internal::ReaderInterface<R> {};
-/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
-/// where the incoming message stream coming from the client has messages of
-/// type \a R.
+/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
+/// where the incoming message stream coming from the client has messages of
+/// type \a R.
template <class R>
-class ServerReader final : public ServerReaderInterface<R> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics. Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_->max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
- ops.RecvMessage(msg);
- call_->PerformOps(&ops);
- return call_->cq()->Pluck(&ops) && ops.got_message;
- }
-
- private:
- ::grpc::internal::Call* const call_;
- ServerContext* const ctx_;
-
- template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ClientStreamingHandler;
-
- ServerReader(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-};
-
-/// Server-side interface for streaming writes of messages of type \a W.
+class ServerReader final : public ServerReaderInterface<R> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics. Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_->max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
+ ops.RecvMessage(msg);
+ call_->PerformOps(&ops);
+ return call_->cq()->Pluck(&ops) && ops.got_message;
+ }
+
+ private:
+ ::grpc::internal::Call* const call_;
+ ServerContext* const ctx_;
+
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class internal::ClientStreamingHandler;
+
+ ServerReader(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+};
+
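On the server side, a handler for a client-streaming method receives a ServerReader. A minimal sketch, again with a hypothetical Echo service and message fields:

    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc Accumulate(stream EchoRequest) returns (EchoSummary);
    class EchoServiceImpl final : public Echo::Service {
      grpc::Status Accumulate(grpc::ServerContext* /*ctx*/,
                              grpc::ServerReader<EchoRequest>* reader,
                              EchoSummary* summary) override {
        EchoRequest msg;
        int count = 0;
        while (reader->Read(&msg)) {  // blocks until the client half-closes
          ++count;
        }
        summary->set_count(count);    // hypothetical field
        return grpc::Status::OK;      // the framework sends the trailing status
      }
    };
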
+/// Server-side interface for streaming writes of messages of type \a W.
template <class W>
-class ServerWriterInterface : public internal::ServerStreamingInterface,
- public internal::WriterInterface<W> {};
+class ServerWriterInterface : public internal::ServerStreamingInterface,
+ public internal::WriterInterface<W> {};
-/// Synchronous (blocking) server-side API for doing server-streaming RPCs,
-/// where the outgoing message stream coming from the server has messages of
-/// type \a W.
+/// Synchronous (blocking) server-side API for doing server-streaming RPCs,
+/// where the outgoing message stream coming from the server has messages of
+/// type \a W.
template <class W>
-class ServerWriter final : public ServerWriterInterface<W> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics.
- /// Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- /// See the \a WriterInterface.Write method for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
-  ///   \a ServerContext associated with this call to fill in values).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
-
- if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
- if (!ctx_->sent_initial_metadata_) {
- ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- call_->PerformOps(&ctx_->pending_ops_);
- // if this is the last message we defer the pluck until AFTER we start
- // the trailing md op. This prevents hangs. See
- // https://github.com/grpc/grpc/issues/11546
- if (options.is_last_message()) {
- ctx_->has_pending_ops_ = true;
- return true;
- }
- ctx_->has_pending_ops_ = false;
- return call_->cq()->Pluck(&ctx_->pending_ops_);
- }
-
- private:
- ::grpc::internal::Call* const call_;
- ::grpc::ServerContext* const ctx_;
-
- template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ServerStreamingHandler;
-
- ServerWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-};
-
-/// Server-side interface for bi-directional streaming.
+class ServerWriter final : public ServerWriterInterface<W> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics.
+ /// Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ /// See the \a WriterInterface.Write method for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+  ///   \a ServerContext associated with this call to fill in values).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+
+ if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+ if (!ctx_->sent_initial_metadata_) {
+ ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ call_->PerformOps(&ctx_->pending_ops_);
+ // if this is the last message we defer the pluck until AFTER we start
+ // the trailing md op. This prevents hangs. See
+ // https://github.com/grpc/grpc/issues/11546
+ if (options.is_last_message()) {
+ ctx_->has_pending_ops_ = true;
+ return true;
+ }
+ ctx_->has_pending_ops_ = false;
+ return call_->cq()->Pluck(&ctx_->pending_ops_);
+ }
+
+ private:
+ ::grpc::internal::Call* const call_;
+ ::grpc::ServerContext* const ctx_;
+
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class internal::ServerStreamingHandler;
+
+ ServerWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+};
+
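For server-streaming methods the generated handler signature takes a ServerWriter; a brief sketch under the same hypothetical Echo service:

    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc ListReplies(EchoRequest) returns (stream EchoReply);
    class EchoStreamServiceImpl final : public Echo::Service {
      grpc::Status ListReplies(grpc::ServerContext* /*ctx*/,
                               const EchoRequest* request,
                               grpc::ServerWriter<EchoReply>* writer) override {
        for (int i = 0; i < 3; ++i) {
          EchoReply reply;
          reply.set_text(request->text());   // hypothetical fields
          if (!writer->Write(reply)) break;  // false if the stream is broken
        }
        return grpc::Status::OK;
      }
    };
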
+/// Server-side interface for bi-directional streaming.
+template <class W, class R>
+class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
+ public internal::WriterInterface<W>,
+ public internal::ReaderInterface<R> {};
+
+/// Actual implementation of bi-directional streaming
+namespace internal {
template <class W, class R>
-class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
- public internal::WriterInterface<W>,
- public internal::ReaderInterface<R> {};
+class ServerReaderWriterBody final {
+ public:
+ ServerReaderWriterBody(grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+
+ void SendInitialMetadata() {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-/// Actual implementation of bi-directional streaming
-namespace internal {
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ bool NextMessageSize(uint32_t* sz) {
+ int result = call_->max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ bool Read(R* msg) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
+ ops.RecvMessage(msg);
+ call_->PerformOps(&ops);
+ return call_->cq()->Pluck(&ops) && ops.got_message;
+ }
+
+ bool Write(const W& msg, ::grpc::WriteOptions options) {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+ if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+ if (!ctx_->sent_initial_metadata_) {
+ ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ call_->PerformOps(&ctx_->pending_ops_);
+ // if this is the last message we defer the pluck until AFTER we start
+ // the trailing md op. This prevents hangs. See
+ // https://github.com/grpc/grpc/issues/11546
+ if (options.is_last_message()) {
+ ctx_->has_pending_ops_ = true;
+ return true;
+ }
+ ctx_->has_pending_ops_ = false;
+ return call_->cq()->Pluck(&ctx_->pending_ops_);
+ }
+
+ private:
+ grpc::internal::Call* const call_;
+ ::grpc::ServerContext* const ctx_;
+};
+
+} // namespace internal
+
+/// Synchronous (blocking) server-side API for a bidirectional
+/// streaming call, where the incoming message stream coming from the client has
+/// messages of type \a R, and the outgoing message stream coming from
+/// the server has messages of type \a W.
template <class W, class R>
-class ServerReaderWriterBody final {
- public:
- ServerReaderWriterBody(grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-
- void SendInitialMetadata() {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- bool NextMessageSize(uint32_t* sz) {
- int result = call_->max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- bool Read(R* msg) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
- ops.RecvMessage(msg);
- call_->PerformOps(&ops);
- return call_->cq()->Pluck(&ops) && ops.got_message;
- }
-
- bool Write(const W& msg, ::grpc::WriteOptions options) {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
- if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
- if (!ctx_->sent_initial_metadata_) {
- ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- call_->PerformOps(&ctx_->pending_ops_);
- // if this is the last message we defer the pluck until AFTER we start
- // the trailing md op. This prevents hangs. See
- // https://github.com/grpc/grpc/issues/11546
- if (options.is_last_message()) {
- ctx_->has_pending_ops_ = true;
- return true;
- }
- ctx_->has_pending_ops_ = false;
- return call_->cq()->Pluck(&ctx_->pending_ops_);
- }
-
- private:
- grpc::internal::Call* const call_;
- ::grpc::ServerContext* const ctx_;
-};
-
-} // namespace internal
-
-/// Synchronous (blocking) server-side API for a bidirectional
-/// streaming call, where the incoming message stream coming from the client has
-/// messages of type \a R, and the outgoing message stream coming from
-/// the server has messages of type \a W.
-template <class W, class R>
-class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics. Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
- bool Read(R* msg) override { return body_.Read(msg); }
-
- /// See the \a WriterInterface.Write(const W& msg, WriteOptions options)
- /// method for semantics.
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the \a
- /// ServerContext associated with this call).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- return body_.Write(msg, options);
- }
-
- private:
- internal::ServerReaderWriterBody<W, R> body_;
-
- friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
- false>;
- ServerReaderWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx) {}
-};
-
-/// A class to represent a flow-controlled unary call. This is something
-/// of a hybrid between conventional unary and streaming. This is invoked
-/// through a unary call on the client side, but the server responds to it
-/// as though it were a single-ping-pong streaming call. The server can use
-/// the \a NextMessageSize method to determine an upper-bound on the size of
-/// the message. A key difference relative to streaming: ServerUnaryStreamer
-/// must have exactly 1 Read and exactly 1 Write, in that order, to function
-/// correctly. Otherwise, the RPC is in error.
+class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics. Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+ bool Read(R* msg) override { return body_.Read(msg); }
+
+ /// See the \a WriterInterface.Write(const W& msg, WriteOptions options)
+ /// method for semantics.
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the \a
+ /// ServerContext associated with this call).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ return body_.Write(msg, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<W, R> body_;
+
+ friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
+ false>;
+ ServerReaderWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx) {}
+};
+
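A bidi handler simply reads and writes on the same ServerReaderWriter until the client half-closes; sketched here with the hypothetical Chat/Note names used earlier:

    #include <grpcpp/grpcpp.h>
    // #include "echo.grpc.pb.h"   // hypothetical generated header

    // Assumes a hypothetical method: rpc Chat(stream Note) returns (stream Note);
    class ChatServiceImpl final : public Echo::Service {
      grpc::Status Chat(grpc::ServerContext* /*ctx*/,
                        grpc::ServerReaderWriter<Note, Note>* stream) override {
        Note note;
        while (stream->Read(&note)) {  // read until the client half-closes
          stream->Write(note);         // echo each message back
        }
        return grpc::Status::OK;
      }
    };
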
+/// A class to represent a flow-controlled unary call. This is something
+/// of a hybrid between conventional unary and streaming. This is invoked
+/// through a unary call on the client side, but the server responds to it
+/// as though it were a single-ping-pong streaming call. The server can use
+/// the \a NextMessageSize method to determine an upper-bound on the size of
+/// the message. A key difference relative to streaming: ServerUnaryStreamer
+/// must have exactly 1 Read and exactly 1 Write, in that order, to function
+/// correctly. Otherwise, the RPC is in error.
template <class RequestType, class ResponseType>
-class ServerUnaryStreamer final
- : public ServerReaderWriterInterface<ResponseType, RequestType> {
- public:
- /// Block to send initial metadata to client.
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call will be used for
- /// sending initial metadata.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- /// Get an upper bound on the request message size from the client.
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
-  /// Block to read the single expected request message into \a request.
-  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
-  /// should not be called concurrently with other streaming APIs
-  /// on the same stream. It is not meaningful to call it concurrently
-  /// with another \a ReaderInterface::Read on the same stream since reads on
-  /// the same stream are delivered in order.
-  ///
-  /// \param[out] request Where to store the read message.
- bool Read(RequestType* request) override {
- if (read_done_) {
- return false;
- }
- read_done_ = true;
- return body_.Read(request);
- }
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- using internal::WriterInterface<ResponseType>::Write;
- bool Write(const ResponseType& response,
- ::grpc::WriteOptions options) override {
- if (write_done_ || !read_done_) {
- return false;
- }
- write_done_ = true;
- return body_.Write(response, options);
- }
-
- private:
- internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
- bool read_done_;
- bool write_done_;
-
- friend class internal::TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true>;
- ServerUnaryStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx), read_done_(false), write_done_(false) {}
-};
-
-/// A class to represent a flow-controlled server-side streaming call.
-/// This is something of a hybrid between server-side and bidi streaming.
-/// This is invoked through a server-side streaming call on the client side,
-/// but the server responds to it as though it were a bidi streaming call that
-/// must first have exactly 1 Read and then any number of Writes.
+class ServerUnaryStreamer final
+ : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+ /// Block to send initial metadata to client.
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call will be used for
+ /// sending initial metadata.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ /// Get an upper bound on the request message size from the client.
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+  /// Block to read the single expected request message into \a request.
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+  /// should not be called concurrently with other streaming APIs
+  /// on the same stream. It is not meaningful to call it concurrently
+  /// with another \a ReaderInterface::Read on the same stream since reads on
+  /// the same stream are delivered in order.
+  ///
+  /// \param[out] request Where to store the read message.
+ bool Read(RequestType* request) override {
+ if (read_done_) {
+ return false;
+ }
+ read_done_ = true;
+ return body_.Read(request);
+ }
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ using internal::WriterInterface<ResponseType>::Write;
+ bool Write(const ResponseType& response,
+ ::grpc::WriteOptions options) override {
+ if (write_done_ || !read_done_) {
+ return false;
+ }
+ write_done_ = true;
+ return body_.Write(response, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+ bool read_done_;
+ bool write_done_;
+
+ friend class internal::TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true>;
+ ServerUnaryStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx), read_done_(false), write_done_(false) {}
+};
+
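The exactly-one-Read-then-one-Write contract of ServerUnaryStreamer looks roughly like this in a handler body. The StreamedEcho entry point and the WithStreamedUnaryMethod_* wrapper it would normally be registered through are assumptions based on the usual generated-code pattern, not something this header defines:

    // Hypothetical streamed-unary handler for: rpc Echo(EchoRequest) returns (EchoReply);
    grpc::Status StreamedEcho(
        grpc::ServerContext* /*ctx*/,
        grpc::ServerUnaryStreamer<EchoRequest, EchoReply>* streamer) {
      EchoRequest request;
      if (!streamer->Read(&request)) {  // exactly one Read, and it must come first
        return grpc::Status(grpc::StatusCode::INTERNAL, "missing request");
      }
      EchoReply reply;
      reply.set_text(request.text());   // hypothetical fields
      if (!streamer->Write(reply)) {    // exactly one Write, after the Read
        return grpc::Status(grpc::StatusCode::INTERNAL, "write failed");
      }
      return grpc::Status::OK;
    }
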
+/// A class to represent a flow-controlled server-side streaming call.
+/// This is something of a hybrid between server-side and bidi streaming.
+/// This is invoked through a server-side streaming call on the client side,
+/// but the server responds to it as though it were a bidi streaming call that
+/// must first have exactly 1 Read and then any number of Writes.
template <class RequestType, class ResponseType>
-class ServerSplitStreamer final
- : public ServerReaderWriterInterface<ResponseType, RequestType> {
- public:
- /// Block to send initial metadata to client.
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call will be used for
- /// sending initial metadata.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- /// Get an upper bound on the request message size from the client.
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
-  /// Block to read the single expected request message into \a request.
-  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
-  /// should not be called concurrently with other streaming APIs
-  /// on the same stream. It is not meaningful to call it concurrently
-  /// with another \a ReaderInterface::Read on the same stream since reads on
-  /// the same stream are delivered in order.
-  ///
-  /// \param[out] request Where to store the read message.
- bool Read(RequestType* request) override {
- if (read_done_) {
- return false;
- }
- read_done_ = true;
- return body_.Read(request);
- }
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- using internal::WriterInterface<ResponseType>::Write;
- bool Write(const ResponseType& response,
- ::grpc::WriteOptions options) override {
- return read_done_ && body_.Write(response, options);
- }
-
- private:
- internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
- bool read_done_;
-
- friend class internal::TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false>;
- ServerSplitStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx), read_done_(false) {}
-};
-
+class ServerSplitStreamer final
+ : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+ /// Block to send initial metadata to client.
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call will be used for
+ /// sending initial metadata.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ /// Get an upper bound on the request message size from the client.
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+  /// Block to read the single expected request message into \a request.
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+  /// should not be called concurrently with other streaming APIs
+  /// on the same stream. It is not meaningful to call it concurrently
+  /// with another \a ReaderInterface::Read on the same stream since reads on
+  /// the same stream are delivered in order.
+  ///
+  /// \param[out] request Where to store the read message.
+ bool Read(RequestType* request) override {
+ if (read_done_) {
+ return false;
+ }
+ read_done_ = true;
+ return body_.Read(request);
+ }
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ using internal::WriterInterface<ResponseType>::Write;
+ bool Write(const ResponseType& response,
+ ::grpc::WriteOptions options) override {
+ return read_done_ && body_.Write(response, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+ bool read_done_;
+
+ friend class internal::TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false>;
+ ServerSplitStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx), read_done_(false) {}
+};
+
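For ServerSplitStreamer the pattern is one Read followed by any number of Writes; the handler name and the WithSplitStreamingMethod_* registration wrapper are again assumed from the usual generated-code conventions:

    // Hypothetical split-streaming handler for:
    //   rpc ListReplies(EchoRequest) returns (stream EchoReply);
    grpc::Status StreamedListReplies(
        grpc::ServerContext* /*ctx*/,
        grpc::ServerSplitStreamer<EchoRequest, EchoReply>* streamer) {
      EchoRequest request;
      if (!streamer->Read(&request)) {   // the single allowed Read comes first
        return grpc::Status(grpc::StatusCode::INTERNAL, "missing request");
      }
      for (int i = 0; i < 3; ++i) {      // then any number of Writes
        EchoReply reply;
        reply.set_text(request.text());  // hypothetical fields
        streamer->Write(reply);
      }
      return grpc::Status::OK;
    }
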
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
index aa208d4f22..3a54db45bf 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
@@ -43,12 +43,12 @@ namespace grpc {
template <typename T>
class TimePoint {
public:
-  // If you see an error with the methods below, you may need either
-  // i) to use one of the existing types that has a conversion class, such as
-  // gpr_timespec or std::chrono::system_clock::time_point, or
-  // ii) to write a new TimePoint<YourType> to address your case.
-  TimePoint(const T& /*time*/) = delete;
-  gpr_timespec raw_time() = delete;
+  // If you see an error with the methods below, you may need either
+  // i) to use one of the existing types that has a conversion class, such as
+  // gpr_timespec or std::chrono::system_clock::time_point, or
+  // ii) to write a new TimePoint<YourType> to address your case.
+  TimePoint(const T& /*time*/) = delete;
+  gpr_timespec raw_time() = delete;
};
template <>
diff --git a/contrib/libs/grpc/include/grpcpp/impl/method_handler_impl.h b/contrib/libs/grpc/include/grpcpp/impl/method_handler_impl.h
index 1430758fc6..5eceb5ff91 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/method_handler_impl.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/method_handler_impl.h
@@ -19,6 +19,6 @@
#ifndef GRPCPP_IMPL_METHOD_HANDLER_IMPL_H
#define GRPCPP_IMPL_METHOD_HANDLER_IMPL_H
-#include <grpcpp/impl/codegen/method_handler.h>
+#include <grpcpp/impl/codegen/method_handler.h>
#endif // GRPCPP_IMPL_METHOD_HANDLER_IMPL_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/server_builder_option.h b/contrib/libs/grpc/include/grpcpp/impl/server_builder_option.h
index fefe2a00e0..c8f047b90e 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/server_builder_option.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/server_builder_option.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,24 +19,24 @@
#ifndef GRPCPP_IMPL_SERVER_BUILDER_OPTION_H
#define GRPCPP_IMPL_SERVER_BUILDER_OPTION_H
-#include <map>
-#include <memory>
+#include <map>
+#include <memory>
+
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/support/channel_arguments.h>
-#include <grpcpp/impl/server_builder_plugin.h>
-#include <grpcpp/support/channel_arguments.h>
-
namespace grpc {
-/// Interface to pass an option to a \a ServerBuilder.
-class ServerBuilderOption {
- public:
- virtual ~ServerBuilderOption() {}
- /// Alter the \a ChannelArguments used to create the gRPC server.
- virtual void UpdateArguments(grpc::ChannelArguments* args) = 0;
- /// Alter the ServerBuilderPlugin map that will be added into ServerBuilder.
- virtual void UpdatePlugins(
- std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>>* plugins) = 0;
-};
+/// Interface to pass an option to a \a ServerBuilder.
+class ServerBuilderOption {
+ public:
+ virtual ~ServerBuilderOption() {}
+ /// Alter the \a ChannelArguments used to create the gRPC server.
+ virtual void UpdateArguments(grpc::ChannelArguments* args) = 0;
+ /// Alter the ServerBuilderPlugin map that will be added into ServerBuilder.
+ virtual void UpdatePlugins(
+ std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>>* plugins) = 0;
+};
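A minimal sketch of a concrete option: it sets one standard channel argument and leaves the plugin list untouched. The MaxStreamsOption class is illustrative; ChannelArguments::SetInt and ServerBuilder::SetOption are the existing APIs it relies on.

    #include <memory>
    #include <vector>
    #include <grpcpp/grpcpp.h>

    class MaxStreamsOption : public grpc::ServerBuilderOption {
     public:
      void UpdateArguments(grpc::ChannelArguments* args) override {
        // GRPC_ARG_MAX_CONCURRENT_STREAMS expands to "grpc.max_concurrent_streams".
        args->SetInt(GRPC_ARG_MAX_CONCURRENT_STREAMS, 128);
      }
      void UpdatePlugins(
          std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>>* /*plugins*/)
          override {}
    };

    // Usage with a ServerBuilder instance:
    //   builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
    //       new MaxStreamsOption()));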
} // namespace grpc
diff --git a/contrib/libs/grpc/include/grpcpp/impl/server_builder_plugin.h b/contrib/libs/grpc/include/grpcpp/impl/server_builder_plugin.h
index 172a3ef3ca..8fedca2b14 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/server_builder_plugin.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/server_builder_plugin.h
@@ -28,7 +28,7 @@
#include <grpcpp/support/channel_arguments.h>
#include <grpcpp/support/config.h>
-namespace grpc {
+namespace grpc {
class ServerBuilder;
class ServerInitializer;
@@ -39,23 +39,23 @@ class ServerInitializer;
class ServerBuilderPlugin {
public:
virtual ~ServerBuilderPlugin() {}
- virtual TString name() = 0;
+ virtual TString name() = 0;
/// UpdateServerBuilder will be called at an early stage in
/// ServerBuilder::BuildAndStart(), right after the ServerBuilderOptions have
/// done their updates.
- virtual void UpdateServerBuilder(ServerBuilder* /*builder*/) {}
+ virtual void UpdateServerBuilder(ServerBuilder* /*builder*/) {}
/// InitServer will be called in ServerBuilder::BuildAndStart(), after the
/// Server instance is created.
- virtual void InitServer(ServerInitializer* si) = 0;
+ virtual void InitServer(ServerInitializer* si) = 0;
/// Finish will be called at the end of ServerBuilder::BuildAndStart().
- virtual void Finish(ServerInitializer* si) = 0;
+ virtual void Finish(ServerInitializer* si) = 0;
/// ChangeArguments is an interface that can be used in
/// ServerBuilderOption::UpdatePlugins
- virtual void ChangeArguments(const TString& name, void* value) = 0;
+ virtual void ChangeArguments(const TString& name, void* value) = 0;
/// UpdateChannelArguments will be called in ServerBuilder::BuildAndStart(),
/// before the Server instance is created.
diff --git a/contrib/libs/grpc/include/grpcpp/impl/server_initializer.h b/contrib/libs/grpc/include/grpcpp/impl/server_initializer.h
index 1a0318bb9d..38b17edacd 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/server_initializer.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/server_initializer.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,36 +19,36 @@
#ifndef GRPCPP_IMPL_SERVER_INITIALIZER_H
#define GRPCPP_IMPL_SERVER_INITIALIZER_H
-#include <memory>
-#include <vector>
+#include <memory>
+#include <vector>
+
+#include <grpcpp/server.h>
-#include <grpcpp/server.h>
-
namespace grpc {
-class Server;
-class Service;
-
-class ServerInitializer {
- public:
- ServerInitializer(grpc::Server* server) : server_(server) {}
-
- bool RegisterService(std::shared_ptr<grpc::Service> service) {
- if (!server_->RegisterService(nullptr, service.get())) {
- return false;
- }
- default_services_.push_back(service);
- return true;
- }
-
- const std::vector<TString>* GetServiceList() {
- return &server_->services_;
- }
-
- private:
- grpc::Server* server_;
- std::vector<std::shared_ptr<grpc::Service> > default_services_;
-};
-
+class Server;
+class Service;
+
+class ServerInitializer {
+ public:
+ ServerInitializer(grpc::Server* server) : server_(server) {}
+
+ bool RegisterService(std::shared_ptr<grpc::Service> service) {
+ if (!server_->RegisterService(nullptr, service.get())) {
+ return false;
+ }
+ default_services_.push_back(service);
+ return true;
+ }
+
+ const std::vector<TString>* GetServiceList() {
+ return &server_->services_;
+ }
+
+ private:
+ grpc::Server* server_;
+ std::vector<std::shared_ptr<grpc::Service> > default_services_;
+};
+
} // namespace grpc
#endif // GRPCPP_IMPL_SERVER_INITIALIZER_H
diff --git a/contrib/libs/grpc/include/grpcpp/opencensus.h b/contrib/libs/grpc/include/grpcpp/opencensus.h
index 158fc4bd3d..0da949ab73 100644
--- a/contrib/libs/grpc/include/grpcpp/opencensus.h
+++ b/contrib/libs/grpc/include/grpcpp/opencensus.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,29 +19,29 @@
#ifndef GRPCPP_OPENCENSUS_H
#define GRPCPP_OPENCENSUS_H
-#include "opencensus/trace/span.h"
-
-namespace grpc {
-class ServerContext;
-// These symbols in this file will not be included in the binary unless
-// grpc_opencensus_plugin build target was added as a dependency. At the moment
-// it is only setup to be built with Bazel.
-
-// Registers the OpenCensus plugin with gRPC, so that it will be used for future
-// RPCs. This must be called before any views are created.
-void RegisterOpenCensusPlugin();
-
-// RPC stats definitions, defined by
-// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/gRPC.md
-
-// Registers the cumulative gRPC views so that they will be exported by any
-// registered stats exporter. For on-task stats, construct a View using the
-// ViewDescriptors below.
-void RegisterOpenCensusViewsForExport();
-
-// Returns the tracing Span for the current RPC.
-::opencensus::trace::Span GetSpanFromServerContext(ServerContext* context);
-
+#include "opencensus/trace/span.h"
+
+namespace grpc {
+class ServerContext;
+// These symbols in this file will not be included in the binary unless
+// grpc_opencensus_plugin build target was added as a dependency. At the moment
+// it is only setup to be built with Bazel.
+
+// Registers the OpenCensus plugin with gRPC, so that it will be used for future
+// RPCs. This must be called before any views are created.
+void RegisterOpenCensusPlugin();
+
+// RPC stats definitions, defined by
+// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/gRPC.md
+
+// Registers the cumulative gRPC views so that they will be exported by any
+// registered stats exporter. For on-task stats, construct a View using the
+// ViewDescriptors below.
+void RegisterOpenCensusViewsForExport();
+
+// Returns the tracing Span for the current RPC.
+::opencensus::trace::Span GetSpanFromServerContext(ServerContext* context);
+
} // namespace grpc
#endif // GRPCPP_OPENCENSUS_H
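
A hedged usage sketch for the declarations above, assuming the grpc_opencensus_plugin target is linked in as the comment requires; otherwise these symbols are absent at link time:

#include <grpcpp/opencensus.h>
#include <grpcpp/server_builder.h>

int main() {
  // Must run before any views are created or any RPCs are issued.
  grpc::RegisterOpenCensusPlugin();
  grpc::RegisterOpenCensusViewsForExport();

  grpc::ServerBuilder builder;
  // ... AddListeningPort(), RegisterService(), BuildAndStart() ...
  return 0;
}

// Inside a service handler, the per-RPC span can be fetched with:
//   ::opencensus::trace::Span span = grpc::GetSpanFromServerContext(ctx);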
diff --git a/contrib/libs/grpc/include/grpcpp/resource_quota.h b/contrib/libs/grpc/include/grpcpp/resource_quota.h
index fe2e7df6a1..eb2b7e73af 100644
--- a/contrib/libs/grpc/include/grpcpp/resource_quota.h
+++ b/contrib/libs/grpc/include/grpcpp/resource_quota.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 gRPC authors.
+ * Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,50 +19,50 @@
#ifndef GRPCPP_RESOURCE_QUOTA_H
#define GRPCPP_RESOURCE_QUOTA_H
-struct grpc_resource_quota;
+struct grpc_resource_quota;
+
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-
namespace grpc {
-/// ResourceQuota represents a bound on memory and thread usage by the gRPC
-/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder),
-/// or a client channel (via \a ChannelArguments).
-/// gRPC will attempt to keep memory and threads used by all attached entities
-/// below the ResourceQuota bound.
-class ResourceQuota final : private ::grpc::GrpcLibraryCodegen {
- public:
- /// \param name - a unique name for this ResourceQuota.
- explicit ResourceQuota(const TString& name);
- ResourceQuota();
- ~ResourceQuota();
-
- /// Resize this \a ResourceQuota to a new size. If \a new_size is smaller
- /// than the current size of the pool, memory usage will be monotonically
- /// decreased until it falls under \a new_size.
- /// No time bound is given for this to occur however.
- ResourceQuota& Resize(size_t new_size);
-
- /// Set the max number of threads that can be allocated from this
- /// ResourceQuota object.
- ///
- /// If the new_max_threads value is smaller than the current value, no new
- /// threads are allocated until the number of active threads fall below
- /// new_max_threads. There is no time bound on when this may happen i.e none
- /// of the current threads are forcefully destroyed and all threads run their
- /// normal course.
- ResourceQuota& SetMaxThreads(int new_max_threads);
-
- grpc_resource_quota* c_resource_quota() const { return impl_; }
-
- private:
- ResourceQuota(const ResourceQuota& rhs);
- ResourceQuota& operator=(const ResourceQuota& rhs);
-
- grpc_resource_quota* const impl_;
-};
-
+/// ResourceQuota represents a bound on memory and thread usage by the gRPC
+/// library. A ResourceQuota can be attached to a server (via \a ServerBuilder),
+/// or a client channel (via \a ChannelArguments).
+/// gRPC will attempt to keep memory and threads used by all attached entities
+/// below the ResourceQuota bound.
+class ResourceQuota final : private ::grpc::GrpcLibraryCodegen {
+ public:
+ /// \param name - a unique name for this ResourceQuota.
+ explicit ResourceQuota(const TString& name);
+ ResourceQuota();
+ ~ResourceQuota();
+
+ /// Resize this \a ResourceQuota to a new size. If \a new_size is smaller
+ /// than the current size of the pool, memory usage will be monotonically
+ /// decreased until it falls under \a new_size.
+ /// No time bound is given for this to occur however.
+ ResourceQuota& Resize(size_t new_size);
+
+ /// Set the max number of threads that can be allocated from this
+ /// ResourceQuota object.
+ ///
+ /// If the new_max_threads value is smaller than the current value, no new
+ /// threads are allocated until the number of active threads fall below
+ /// new_max_threads. There is no time bound on when this may happen i.e none
+ /// of the current threads are forcefully destroyed and all threads run their
+ /// normal course.
+ ResourceQuota& SetMaxThreads(int new_max_threads);
+
+ grpc_resource_quota* c_resource_quota() const { return impl_; }
+
+ private:
+ ResourceQuota(const ResourceQuota& rhs);
+ ResourceQuota& operator=(const ResourceQuota& rhs);
+
+ grpc_resource_quota* const impl_;
+};
+
} // namespace grpc
#endif // GRPCPP_RESOURCE_QUOTA_H
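
A short sketch of attaching a quota to a server via ServerBuilder::SetResourceQuota(); the quota name and limits are illustrative:

#include <grpcpp/resource_quota.h>
#include <grpcpp/server_builder.h>

void ConfigureBuilder(grpc::ServerBuilder* builder) {
  grpc::ResourceQuota quota("my_server_quota");  // illustrative name
  quota.Resize(256 * 1024 * 1024);  // bound memory to roughly 256 MiB
  quota.SetMaxThreads(64);          // cap threads drawn from this quota
  builder->SetResourceQuota(quota);
}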
diff --git a/contrib/libs/grpc/include/grpcpp/security/alts_context.h b/contrib/libs/grpc/include/grpcpp/security/alts_context.h
index 5f3f48f786..cd1f640a76 100644
--- a/contrib/libs/grpc/include/grpcpp/security/alts_context.h
+++ b/contrib/libs/grpc/include/grpcpp/security/alts_context.h
@@ -1,69 +1,69 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPCPP_SECURITY_ALTS_CONTEXT_H
-#define GRPCPP_SECURITY_ALTS_CONTEXT_H
-
-#include <grpc/grpc_security_constants.h>
-#include <grpcpp/security/auth_context.h>
-
-#include <map>
-#include <memory>
-
-struct grpc_gcp_AltsContext;
-
-namespace grpc {
-namespace experimental {
-
-// AltsContext is a wrapper class for grpc_gcp_AltsContext.
-class AltsContext {
- public:
- struct RpcProtocolVersions {
- struct Version {
- int major_version;
- int minor_version;
- };
- Version max_rpc_version;
- Version min_rpc_version;
- };
- explicit AltsContext(const grpc_gcp_AltsContext* ctx);
- AltsContext& operator=(const AltsContext&) = default;
- AltsContext(const AltsContext&) = default;
-
- TString application_protocol() const;
- TString record_protocol() const;
- TString peer_service_account() const;
- TString local_service_account() const;
- grpc_security_level security_level() const;
- RpcProtocolVersions peer_rpc_versions() const;
- const std::map<TString, TString>& peer_attributes() const;
-
- private:
- TString application_protocol_;
- TString record_protocol_;
- TString peer_service_account_;
- TString local_service_account_;
- grpc_security_level security_level_ = GRPC_SECURITY_NONE;
- RpcProtocolVersions peer_rpc_versions_ = {{0, 0}, {0, 0}};
- std::map<TString, TString> peer_attributes_map_;
-};
-
-} // namespace experimental
-} // namespace grpc
-
-#endif // GRPCPP_SECURITY_ALTS_CONTEXT_H
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCPP_SECURITY_ALTS_CONTEXT_H
+#define GRPCPP_SECURITY_ALTS_CONTEXT_H
+
+#include <grpc/grpc_security_constants.h>
+#include <grpcpp/security/auth_context.h>
+
+#include <map>
+#include <memory>
+
+struct grpc_gcp_AltsContext;
+
+namespace grpc {
+namespace experimental {
+
+// AltsContext is a wrapper class for grpc_gcp_AltsContext.
+class AltsContext {
+ public:
+ struct RpcProtocolVersions {
+ struct Version {
+ int major_version;
+ int minor_version;
+ };
+ Version max_rpc_version;
+ Version min_rpc_version;
+ };
+ explicit AltsContext(const grpc_gcp_AltsContext* ctx);
+ AltsContext& operator=(const AltsContext&) = default;
+ AltsContext(const AltsContext&) = default;
+
+ TString application_protocol() const;
+ TString record_protocol() const;
+ TString peer_service_account() const;
+ TString local_service_account() const;
+ grpc_security_level security_level() const;
+ RpcProtocolVersions peer_rpc_versions() const;
+ const std::map<TString, TString>& peer_attributes() const;
+
+ private:
+ TString application_protocol_;
+ TString record_protocol_;
+ TString peer_service_account_;
+ TString local_service_account_;
+ grpc_security_level security_level_ = GRPC_SECURITY_NONE;
+ RpcProtocolVersions peer_rpc_versions_ = {{0, 0}, {0, 0}};
+ std::map<TString, TString> peer_attributes_map_;
+};
+
+} // namespace experimental
+} // namespace grpc
+
+#endif // GRPCPP_SECURITY_ALTS_CONTEXT_H
diff --git a/contrib/libs/grpc/include/grpcpp/security/alts_util.h b/contrib/libs/grpc/include/grpcpp/security/alts_util.h
index ff9728cd4b..b8f935ad95 100644
--- a/contrib/libs/grpc/include/grpcpp/security/alts_util.h
+++ b/contrib/libs/grpc/include/grpcpp/security/alts_util.h
@@ -1,50 +1,50 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPCPP_SECURITY_ALTS_UTIL_H
-#define GRPCPP_SECURITY_ALTS_UTIL_H
-
-#include <grpc/grpc_security_constants.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/security/alts_context.h>
-#include <grpcpp/security/auth_context.h>
-
-#include <memory>
-
-struct grpc_gcp_AltsContext;
-
-namespace grpc {
-namespace experimental {
-
-// GetAltsContextFromAuthContext helps to get the AltsContext from AuthContext.
-// If ALTS is not the transport security protocol used to establish the
-// connection, this function will return nullptr.
-std::unique_ptr<AltsContext> GetAltsContextFromAuthContext(
- const std::shared_ptr<const AuthContext>& auth_context);
-
-// This utility function performs ALTS client authorization check on server
-// side, i.e., checks if the client identity matches one of the expected service
-// accounts. It returns OK if client is authorized and an error otherwise.
-grpc::Status AltsClientAuthzCheck(
- const std::shared_ptr<const AuthContext>& auth_context,
- const std::vector<TString>& expected_service_accounts);
-
-} // namespace experimental
-} // namespace grpc
-
-#endif // GRPCPP_SECURITY_ALTS_UTIL_H
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCPP_SECURITY_ALTS_UTIL_H
+#define GRPCPP_SECURITY_ALTS_UTIL_H
+
+#include <grpc/grpc_security_constants.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/security/alts_context.h>
+#include <grpcpp/security/auth_context.h>
+
+#include <memory>
+
+struct grpc_gcp_AltsContext;
+
+namespace grpc {
+namespace experimental {
+
+// GetAltsContextFromAuthContext helps to get the AltsContext from AuthContext.
+// If ALTS is not the transport security protocol used to establish the
+// connection, this function will return nullptr.
+std::unique_ptr<AltsContext> GetAltsContextFromAuthContext(
+ const std::shared_ptr<const AuthContext>& auth_context);
+
+// This utility function performs ALTS client authorization check on server
+// side, i.e., checks if the client identity matches one of the expected service
+// accounts. It returns OK if client is authorized and an error otherwise.
+grpc::Status AltsClientAuthzCheck(
+ const std::shared_ptr<const AuthContext>& auth_context,
+ const std::vector<TString>& expected_service_accounts);
+
+} // namespace experimental
+} // namespace grpc
+
+#endif // GRPCPP_SECURITY_ALTS_UTIL_H
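
A hedged sketch of a server-side check built on these two helpers; the expected service account string is an assumption:

#include <memory>
#include <vector>

#include <grpcpp/security/alts_context.h>
#include <grpcpp/security/alts_util.h>
#include <grpcpp/server_context.h>

grpc::Status CheckAltsPeer(grpc::ServerContext* context) {
  // Illustrative allow-list; real deployments supply their own accounts.
  std::vector<TString> allowed = {
      "expected-client@example.iam.gserviceaccount.com"};
  grpc::Status authz = grpc::experimental::AltsClientAuthzCheck(
      context->auth_context(), allowed);
  if (!authz.ok()) return authz;

  // Returns nullptr when the transport is not ALTS.
  std::unique_ptr<grpc::experimental::AltsContext> alts =
      grpc::experimental::GetAltsContextFromAuthContext(
          context->auth_context());
  if (alts != nullptr) {
    TString peer = alts->peer_service_account();
    (void)peer;  // use for application-level decisions
  }
  return grpc::Status::OK;
}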
diff --git a/contrib/libs/grpc/include/grpcpp/security/auth_metadata_processor.h b/contrib/libs/grpc/include/grpcpp/security/auth_metadata_processor.h
index b44547aa78..f5321e877c 100644
--- a/contrib/libs/grpc/include/grpcpp/security/auth_metadata_processor.h
+++ b/contrib/libs/grpc/include/grpcpp/security/auth_metadata_processor.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,43 +19,43 @@
#ifndef GRPCPP_SECURITY_AUTH_METADATA_PROCESSOR_H
#define GRPCPP_SECURITY_AUTH_METADATA_PROCESSOR_H
-#include <map>
+#include <map>
+
+#include <grpcpp/security/auth_context.h>
+#include <grpcpp/support/status.h>
+#include <grpcpp/support/string_ref.h>
-#include <grpcpp/security/auth_context.h>
-#include <grpcpp/support/status.h>
-#include <grpcpp/support/string_ref.h>
-
namespace grpc {
-/// Interface allowing custom server-side authorization based on credentials
-/// encoded in metadata. Objects of this type can be passed to
-/// \a ServerCredentials::SetAuthMetadataProcessor().
-class AuthMetadataProcessor {
- public:
- typedef std::multimap<grpc::string_ref, grpc::string_ref> InputMetadata;
- typedef std::multimap<TString, TString> OutputMetadata;
-
- virtual ~AuthMetadataProcessor() {}
-
- /// If this method returns true, the \a Process function will be scheduled in
- /// a different thread from the one processing the call.
- virtual bool IsBlocking() const { return true; }
-
- /// context is read/write: it contains the properties of the channel peer and
- /// it is the job of the Process method to augment it with properties derived
- /// from the passed-in auth_metadata.
- /// consumed_auth_metadata needs to be filled with metadata that has been
- /// consumed by the processor and will be removed from the call.
- /// response_metadata is the metadata that will be sent as part of the
- /// response.
- /// If the return value is not Status::OK, the rpc call will be aborted with
- /// the error code and error message sent back to the client.
- virtual grpc::Status Process(const InputMetadata& auth_metadata,
- grpc::AuthContext* context,
- OutputMetadata* consumed_auth_metadata,
- OutputMetadata* response_metadata) = 0;
-};
-
+/// Interface allowing custom server-side authorization based on credentials
+/// encoded in metadata. Objects of this type can be passed to
+/// \a ServerCredentials::SetAuthMetadataProcessor().
+class AuthMetadataProcessor {
+ public:
+ typedef std::multimap<grpc::string_ref, grpc::string_ref> InputMetadata;
+ typedef std::multimap<TString, TString> OutputMetadata;
+
+ virtual ~AuthMetadataProcessor() {}
+
+ /// If this method returns true, the \a Process function will be scheduled in
+ /// a different thread from the one processing the call.
+ virtual bool IsBlocking() const { return true; }
+
+ /// context is read/write: it contains the properties of the channel peer and
+ /// it is the job of the Process method to augment it with properties derived
+ /// from the passed-in auth_metadata.
+ /// consumed_auth_metadata needs to be filled with metadata that has been
+ /// consumed by the processor and will be removed from the call.
+ /// response_metadata is the metadata that will be sent as part of the
+ /// response.
+ /// If the return value is not Status::OK, the rpc call will be aborted with
+ /// the error code and error message sent back to the client.
+ virtual grpc::Status Process(const InputMetadata& auth_metadata,
+ grpc::AuthContext* context,
+ OutputMetadata* consumed_auth_metadata,
+ OutputMetadata* response_metadata) = 0;
+};
+
} // namespace grpc
#endif // GRPCPP_SECURITY_AUTH_METADATA_PROCESSOR_H
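
A hedged sketch of a processor implementing this interface; the x-api-key header and its handling are illustrative, not something the header prescribes:

#include <grpcpp/security/auth_metadata_processor.h>

class ApiKeyProcessor : public grpc::AuthMetadataProcessor {
 public:
  bool IsBlocking() const override { return false; }  // run inline, no extra thread

  grpc::Status Process(const InputMetadata& auth_metadata,
                       grpc::AuthContext* /*context*/,
                       OutputMetadata* consumed_auth_metadata,
                       OutputMetadata* /*response_metadata*/) override {
    auto it = auth_metadata.find("x-api-key");
    if (it == auth_metadata.end()) {
      return grpc::Status(grpc::StatusCode::UNAUTHENTICATED,
                          "missing x-api-key");
    }
    // Mark the header as consumed so it is stripped from the call.
    consumed_auth_metadata->insert(
        {TString("x-api-key"),
         TString(it->second.data(), it->second.size())});
    return grpc::Status::OK;
  }
};

// Usage: server_credentials->SetAuthMetadataProcessor(
//     std::make_shared<ApiKeyProcessor>());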
diff --git a/contrib/libs/grpc/include/grpcpp/security/credentials.h b/contrib/libs/grpc/include/grpcpp/security/credentials.h
index 66d5ed2c58..339e9d4c27 100644
--- a/contrib/libs/grpc/include/grpcpp/security/credentials.h
+++ b/contrib/libs/grpc/include/grpcpp/security/credentials.h
@@ -16,309 +16,309 @@
*
*/
-#ifndef GRPCPP_SECURITY_CREDENTIALS_H
-#define GRPCPP_SECURITY_CREDENTIALS_H
-
+#ifndef GRPCPP_SECURITY_CREDENTIALS_H
+#define GRPCPP_SECURITY_CREDENTIALS_H
+
#if defined(__GNUC__)
#pragma GCC system_header
#endif
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <grpc/grpc_security_constants.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/security/auth_context.h>
-#include <grpcpp/security/tls_credentials_options.h>
-#include <grpcpp/support/channel_arguments.h>
-#include <grpcpp/support/status.h>
-#include <grpcpp/support/string_ref.h>
-
-struct grpc_call;
-
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <grpc/grpc_security_constants.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/security/auth_context.h>
+#include <grpcpp/security/tls_credentials_options.h>
+#include <grpcpp/support/channel_arguments.h>
+#include <grpcpp/support/status.h>
+#include <grpcpp/support/string_ref.h>
+
+struct grpc_call;
+
namespace grpc {
-class CallCredentials;
-class SecureCallCredentials;
-class SecureChannelCredentials;
-class ChannelCredentials;
-
-std::shared_ptr<Channel> CreateCustomChannel(
- const grpc::string& target,
- const std::shared_ptr<grpc::ChannelCredentials>& creds,
- const grpc::ChannelArguments& args);
-
-namespace experimental {
-std::shared_ptr<grpc::Channel> CreateCustomChannelWithInterceptors(
- const grpc::string& target,
- const std::shared_ptr<grpc::ChannelCredentials>& creds,
- const grpc::ChannelArguments& args,
- std::vector<
- std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
+class CallCredentials;
+class SecureCallCredentials;
+class SecureChannelCredentials;
+class ChannelCredentials;
+
+std::shared_ptr<Channel> CreateCustomChannel(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args);
+
+namespace experimental {
+std::shared_ptr<grpc::Channel> CreateCustomChannelWithInterceptors(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
}
-/// A channel credentials object encapsulates all the state needed by a client
-/// to authenticate with a server for a given channel.
-/// It can make various assertions, e.g., about the client’s identity, role
-/// for all the calls on that channel.
-///
-/// \see https://grpc.io/docs/guides/auth.html
-class ChannelCredentials : private grpc::GrpcLibraryCodegen {
- public:
- ChannelCredentials();
- ~ChannelCredentials();
-
- protected:
- friend std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
- const std::shared_ptr<ChannelCredentials>& channel_creds,
- const std::shared_ptr<CallCredentials>& call_creds);
-
- virtual SecureChannelCredentials* AsSecureCredentials() = 0;
-
- private:
- friend std::shared_ptr<grpc::Channel> CreateCustomChannel(
- const grpc::string& target,
- const std::shared_ptr<grpc::ChannelCredentials>& creds,
- const grpc::ChannelArguments& args);
-
- friend std::shared_ptr<grpc::Channel>
- grpc::experimental::CreateCustomChannelWithInterceptors(
- const grpc::string& target,
- const std::shared_ptr<grpc::ChannelCredentials>& creds,
- const grpc::ChannelArguments& args,
- std::vector<std::unique_ptr<
- grpc::experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
- virtual std::shared_ptr<Channel> CreateChannelImpl(
- const grpc::string& target, const ChannelArguments& args) = 0;
-
- // This function should have been a pure virtual function, but it is
- // implemented as a virtual function so that it does not break API.
- virtual std::shared_ptr<Channel> CreateChannelWithInterceptors(
- const grpc::string& /*target*/, const ChannelArguments& /*args*/,
- std::vector<std::unique_ptr<
- grpc::experimental::ClientInterceptorFactoryInterface>>
- /*interceptor_creators*/) {
- return nullptr;
- }
-};
-
-/// A call credentials object encapsulates the state needed by a client to
-/// authenticate with a server for a given call on a channel.
-///
-/// \see https://grpc.io/docs/guides/auth.html
-class CallCredentials : private grpc::GrpcLibraryCodegen {
- public:
- CallCredentials();
- ~CallCredentials();
-
- /// Apply this instance's credentials to \a call.
- virtual bool ApplyToCall(grpc_call* call) = 0;
- virtual grpc::string DebugString() {
- return "CallCredentials did not provide a debug string";
- }
-
- protected:
- friend std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
- const std::shared_ptr<ChannelCredentials>& channel_creds,
- const std::shared_ptr<CallCredentials>& call_creds);
-
- friend std::shared_ptr<CallCredentials> CompositeCallCredentials(
- const std::shared_ptr<CallCredentials>& creds1,
- const std::shared_ptr<CallCredentials>& creds2);
-
- virtual SecureCallCredentials* AsSecureCredentials() = 0;
-};
-
-/// Options used to build SslCredentials.
-struct SslCredentialsOptions {
- /// The buffer containing the PEM encoding of the server root certificates. If
- /// this parameter is empty, the default roots will be used. The default
- /// roots can be overridden using the \a GRPC_DEFAULT_SSL_ROOTS_FILE_PATH
- /// environment variable pointing to a file on the file system containing the
- /// roots.
- grpc::string pem_root_certs;
-
- /// The buffer containing the PEM encoding of the client's private key. This
- /// parameter can be empty if the client does not have a private key.
- grpc::string pem_private_key;
-
- /// The buffer containing the PEM encoding of the client's certificate chain.
- /// This parameter can be empty if the client does not have a certificate
- /// chain.
- grpc::string pem_cert_chain;
-};
-
-// Factories for building different types of Credentials. The functions may
-// return empty shared_ptr when credentials cannot be created. If a
-// Credentials pointer is returned, it can still be invalid when used to create
-// a channel. A lame channel will be created then and all rpcs will fail on it.
-
-/// Builds credentials with reasonable defaults.
-///
-/// \warning Only use these credentials when connecting to a Google endpoint.
-/// Using these credentials to connect to any other service may result in this
-/// service being able to impersonate your client for requests to Google
-/// services.
-std::shared_ptr<ChannelCredentials> GoogleDefaultCredentials();
-
-/// Builds SSL Credentials given SSL specific options
-std::shared_ptr<ChannelCredentials> SslCredentials(
- const SslCredentialsOptions& options);
-
-/// Builds credentials for use when running in GCE
-///
-/// \warning Only use these credentials when connecting to a Google endpoint.
-/// Using these credentials to connect to any other service may result in this
-/// service being able to impersonate your client for requests to Google
-/// services.
-std::shared_ptr<CallCredentials> GoogleComputeEngineCredentials();
-
-constexpr long kMaxAuthTokenLifetimeSecs = 3600;
-
-/// Builds Service Account JWT Access credentials.
-/// json_key is the JSON key string containing the client's private key.
-/// token_lifetime_seconds is the lifetime in seconds of each JSON Web Token
-/// (JWT) created with these credentials. It should not exceed
-/// \a kMaxAuthTokenLifetimeSecs or will be cropped to this value.
-std::shared_ptr<CallCredentials> ServiceAccountJWTAccessCredentials(
+/// A channel credentials object encapsulates all the state needed by a client
+/// to authenticate with a server for a given channel.
+/// It can make various assertions, e.g., about the client’s identity, role
+/// for all the calls on that channel.
+///
+/// \see https://grpc.io/docs/guides/auth.html
+class ChannelCredentials : private grpc::GrpcLibraryCodegen {
+ public:
+ ChannelCredentials();
+ ~ChannelCredentials();
+
+ protected:
+ friend std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
+ const std::shared_ptr<ChannelCredentials>& channel_creds,
+ const std::shared_ptr<CallCredentials>& call_creds);
+
+ virtual SecureChannelCredentials* AsSecureCredentials() = 0;
+
+ private:
+ friend std::shared_ptr<grpc::Channel> CreateCustomChannel(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args);
+
+ friend std::shared_ptr<grpc::Channel>
+ grpc::experimental::CreateCustomChannelWithInterceptors(
+ const grpc::string& target,
+ const std::shared_ptr<grpc::ChannelCredentials>& creds,
+ const grpc::ChannelArguments& args,
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+ virtual std::shared_ptr<Channel> CreateChannelImpl(
+ const grpc::string& target, const ChannelArguments& args) = 0;
+
+ // This function should have been a pure virtual function, but it is
+ // implemented as a virtual function so that it does not break API.
+ virtual std::shared_ptr<Channel> CreateChannelWithInterceptors(
+ const grpc::string& /*target*/, const ChannelArguments& /*args*/,
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>
+ /*interceptor_creators*/) {
+ return nullptr;
+ }
+};
+
+/// A call credentials object encapsulates the state needed by a client to
+/// authenticate with a server for a given call on a channel.
+///
+/// \see https://grpc.io/docs/guides/auth.html
+class CallCredentials : private grpc::GrpcLibraryCodegen {
+ public:
+ CallCredentials();
+ ~CallCredentials();
+
+ /// Apply this instance's credentials to \a call.
+ virtual bool ApplyToCall(grpc_call* call) = 0;
+ virtual grpc::string DebugString() {
+ return "CallCredentials did not provide a debug string";
+ }
+
+ protected:
+ friend std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
+ const std::shared_ptr<ChannelCredentials>& channel_creds,
+ const std::shared_ptr<CallCredentials>& call_creds);
+
+ friend std::shared_ptr<CallCredentials> CompositeCallCredentials(
+ const std::shared_ptr<CallCredentials>& creds1,
+ const std::shared_ptr<CallCredentials>& creds2);
+
+ virtual SecureCallCredentials* AsSecureCredentials() = 0;
+};
+
+/// Options used to build SslCredentials.
+struct SslCredentialsOptions {
+ /// The buffer containing the PEM encoding of the server root certificates. If
+ /// this parameter is empty, the default roots will be used. The default
+ /// roots can be overridden using the \a GRPC_DEFAULT_SSL_ROOTS_FILE_PATH
+ /// environment variable pointing to a file on the file system containing the
+ /// roots.
+ grpc::string pem_root_certs;
+
+ /// The buffer containing the PEM encoding of the client's private key. This
+ /// parameter can be empty if the client does not have a private key.
+ grpc::string pem_private_key;
+
+ /// The buffer containing the PEM encoding of the client's certificate chain.
+ /// This parameter can be empty if the client does not have a certificate
+ /// chain.
+ grpc::string pem_cert_chain;
+};
+
+// Factories for building different types of Credentials. The functions may
+// return empty shared_ptr when credentials cannot be created. If a
+// Credentials pointer is returned, it can still be invalid when used to create
+// a channel. A lame channel will be created then and all rpcs will fail on it.
+
+/// Builds credentials with reasonable defaults.
+///
+/// \warning Only use these credentials when connecting to a Google endpoint.
+/// Using these credentials to connect to any other service may result in this
+/// service being able to impersonate your client for requests to Google
+/// services.
+std::shared_ptr<ChannelCredentials> GoogleDefaultCredentials();
+
+/// Builds SSL Credentials given SSL specific options
+std::shared_ptr<ChannelCredentials> SslCredentials(
+ const SslCredentialsOptions& options);
+
+/// Builds credentials for use when running in GCE
+///
+/// \warning Only use these credentials when connecting to a Google endpoint.
+/// Using these credentials to connect to any other service may result in this
+/// service being able to impersonate your client for requests to Google
+/// services.
+std::shared_ptr<CallCredentials> GoogleComputeEngineCredentials();
+
+constexpr long kMaxAuthTokenLifetimeSecs = 3600;
+
+/// Builds Service Account JWT Access credentials.
+/// json_key is the JSON key string containing the client's private key.
+/// token_lifetime_seconds is the lifetime in seconds of each JSON Web Token
+/// (JWT) created with these credentials. It should not exceed
+/// \a kMaxAuthTokenLifetimeSecs or will be cropped to this value.
+std::shared_ptr<CallCredentials> ServiceAccountJWTAccessCredentials(
const grpc::string& json_key,
- long token_lifetime_seconds = kMaxAuthTokenLifetimeSecs);
-
-/// Builds refresh token credentials.
-/// json_refresh_token is the JSON string containing the refresh token along
-/// with a client_id and client_secret.
-///
-/// \warning Only use these credentials when connecting to a Google endpoint.
-/// Using these credentials to connect to any other service may result in this
-/// service being able to impersonate your client for requests to Google
-/// services.
-std::shared_ptr<CallCredentials> GoogleRefreshTokenCredentials(
- const grpc::string& json_refresh_token);
-
-/// Builds access token credentials.
-/// access_token is an oauth2 access token that was fetched using an out of band
-/// mechanism.
-///
-/// \warning Only use these credentials when connecting to a Google endpoint.
-/// Using these credentials to connect to any other service may result in this
-/// service being able to impersonate your client for requests to Google
-/// services.
-std::shared_ptr<CallCredentials> AccessTokenCredentials(
- const grpc::string& access_token);
-
-/// Builds IAM credentials.
-///
-/// \warning Only use these credentials when connecting to a Google endpoint.
-/// Using these credentials to connect to any other service may result in this
-/// service being able to impersonate your client for requests to Google
-/// services.
-std::shared_ptr<CallCredentials> GoogleIAMCredentials(
+ long token_lifetime_seconds = kMaxAuthTokenLifetimeSecs);
+
+/// Builds refresh token credentials.
+/// json_refresh_token is the JSON string containing the refresh token along
+/// with a client_id and client_secret.
+///
+/// \warning Only use these credentials when connecting to a Google endpoint.
+/// Using these credentials to connect to any other service may result in this
+/// service being able to impersonate your client for requests to Google
+/// services.
+std::shared_ptr<CallCredentials> GoogleRefreshTokenCredentials(
+ const grpc::string& json_refresh_token);
+
+/// Builds access token credentials.
+/// access_token is an oauth2 access token that was fetched using an out of band
+/// mechanism.
+///
+/// \warning Only use these credentials when connecting to a Google endpoint.
+/// Using these credentials to connect to any other service may result in this
+/// service being able to impersonate your client for requests to Google
+/// services.
+std::shared_ptr<CallCredentials> AccessTokenCredentials(
+ const grpc::string& access_token);
+
+/// Builds IAM credentials.
+///
+/// \warning Only use these credentials when connecting to a Google endpoint.
+/// Using these credentials to connect to any other service may result in this
+/// service being able to impersonate your client for requests to Google
+/// services.
+std::shared_ptr<CallCredentials> GoogleIAMCredentials(
const grpc::string& authorization_token,
- const grpc::string& authority_selector);
+ const grpc::string& authority_selector);
-/// Combines a channel credentials and a call credentials into a composite
-/// channel credentials.
-std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
+/// Combines a channel credentials and a call credentials into a composite
+/// channel credentials.
+std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
const std::shared_ptr<ChannelCredentials>& channel_creds,
- const std::shared_ptr<CallCredentials>& call_creds);
-
-/// Combines two call credentials objects into a composite call credentials.
-std::shared_ptr<CallCredentials> CompositeCallCredentials(
- const std::shared_ptr<CallCredentials>& creds1,
- const std::shared_ptr<CallCredentials>& creds2);
-
-/// Credentials for an unencrypted, unauthenticated channel
-std::shared_ptr<ChannelCredentials> InsecureChannelCredentials();
-
-/// User defined metadata credentials.
-class MetadataCredentialsPlugin {
- public:
- virtual ~MetadataCredentialsPlugin() {}
-
- /// If this method returns true, the Process function will be scheduled in
- /// a different thread from the one processing the call.
- virtual bool IsBlocking() const { return true; }
-
- /// Type of credentials this plugin is implementing.
- virtual const char* GetType() const { return ""; }
-
-  /// Gets the auth metadata produced by this plugin.
- /// The fully qualified method name is:
- /// service_url + "/" + method_name.
- /// The channel_auth_context contains (among other things), the identity of
- /// the server.
- virtual grpc::Status GetMetadata(
- grpc::string_ref service_url, grpc::string_ref method_name,
- const grpc::AuthContext& channel_auth_context,
- std::multimap<grpc::string, grpc::string>* metadata) = 0;
-
- virtual grpc::string DebugString() {
- return "MetadataCredentialsPlugin did not provide a debug string";
- }
-};
-
-std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin> plugin);
-
+ const std::shared_ptr<CallCredentials>& call_creds);
+
+/// Combines two call credentials objects into a composite call credentials.
+std::shared_ptr<CallCredentials> CompositeCallCredentials(
+ const std::shared_ptr<CallCredentials>& creds1,
+ const std::shared_ptr<CallCredentials>& creds2);
+
+/// Credentials for an unencrypted, unauthenticated channel
+std::shared_ptr<ChannelCredentials> InsecureChannelCredentials();
+
+/// User defined metadata credentials.
+class MetadataCredentialsPlugin {
+ public:
+ virtual ~MetadataCredentialsPlugin() {}
+
+ /// If this method returns true, the Process function will be scheduled in
+ /// a different thread from the one processing the call.
+ virtual bool IsBlocking() const { return true; }
+
+ /// Type of credentials this plugin is implementing.
+ virtual const char* GetType() const { return ""; }
+
+  /// Gets the auth metadata produced by this plugin.
+ /// The fully qualified method name is:
+ /// service_url + "/" + method_name.
+ /// The channel_auth_context contains (among other things), the identity of
+ /// the server.
+ virtual grpc::Status GetMetadata(
+ grpc::string_ref service_url, grpc::string_ref method_name,
+ const grpc::AuthContext& channel_auth_context,
+ std::multimap<grpc::string, grpc::string>* metadata) = 0;
+
+ virtual grpc::string DebugString() {
+ return "MetadataCredentialsPlugin did not provide a debug string";
+ }
+};
+
+std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin);
+
namespace experimental {
-/// Options for creating STS Oauth Token Exchange credentials following the IETF
-/// draft https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
-/// Optional fields may be set to empty string. It is the responsibility of the
-/// caller to ensure that the subject and actor tokens are refreshed on disk at
-/// the specified paths.
-struct StsCredentialsOptions {
- grpc::string token_exchange_service_uri; // Required.
- grpc::string resource; // Optional.
- grpc::string audience; // Optional.
- grpc::string scope; // Optional.
- grpc::string requested_token_type; // Optional.
- grpc::string subject_token_path; // Required.
- grpc::string subject_token_type; // Required.
- grpc::string actor_token_path; // Optional.
- grpc::string actor_token_type; // Optional.
-};
-
-grpc::Status StsCredentialsOptionsFromJson(const TString& json_string,
- StsCredentialsOptions* options);
-
-/// Creates STS credentials options from the $STS_CREDENTIALS environment
-/// variable. This environment variable points to the path of a JSON file
-/// conforming to the schema described above.
-grpc::Status StsCredentialsOptionsFromEnv(StsCredentialsOptions* options);
-
-std::shared_ptr<CallCredentials> StsCredentials(
- const StsCredentialsOptions& options);
-
-std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin> plugin,
- grpc_security_level min_security_level);
-
-/// Options used to build AltsCredentials.
-struct AltsCredentialsOptions {
- /// service accounts of target endpoint that will be acceptable
- /// by the client. If service accounts are provided and none of them matches
- /// that of the server, authentication will fail.
- std::vector<grpc::string> target_service_accounts;
-};
-
-/// Builds ALTS Credentials given ALTS specific options
-std::shared_ptr<ChannelCredentials> AltsCredentials(
- const AltsCredentialsOptions& options);
-
-/// Builds Local Credentials.
-std::shared_ptr<ChannelCredentials> LocalCredentials(
- grpc_local_connect_type type);
-
-/// Builds TLS Credentials given TLS options.
-std::shared_ptr<ChannelCredentials> TlsCredentials(
- const TlsCredentialsOptions& options);
-
+/// Options for creating STS Oauth Token Exchange credentials following the IETF
+/// draft https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
+/// Optional fields may be set to empty string. It is the responsibility of the
+/// caller to ensure that the subject and actor tokens are refreshed on disk at
+/// the specified paths.
+struct StsCredentialsOptions {
+ grpc::string token_exchange_service_uri; // Required.
+ grpc::string resource; // Optional.
+ grpc::string audience; // Optional.
+ grpc::string scope; // Optional.
+ grpc::string requested_token_type; // Optional.
+ grpc::string subject_token_path; // Required.
+ grpc::string subject_token_type; // Required.
+ grpc::string actor_token_path; // Optional.
+ grpc::string actor_token_type; // Optional.
+};
+
+grpc::Status StsCredentialsOptionsFromJson(const TString& json_string,
+ StsCredentialsOptions* options);
+
+/// Creates STS credentials options from the $STS_CREDENTIALS environment
+/// variable. This environment variable points to the path of a JSON file
+/// conforming to the schema described above.
+grpc::Status StsCredentialsOptionsFromEnv(StsCredentialsOptions* options);
+
+std::shared_ptr<CallCredentials> StsCredentials(
+ const StsCredentialsOptions& options);
+
+std::shared_ptr<CallCredentials> MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin> plugin,
+ grpc_security_level min_security_level);
+
+/// Options used to build AltsCredentials.
+struct AltsCredentialsOptions {
+ /// service accounts of target endpoint that will be acceptable
+ /// by the client. If service accounts are provided and none of them matches
+ /// that of the server, authentication will fail.
+ std::vector<grpc::string> target_service_accounts;
+};
+
+/// Builds ALTS Credentials given ALTS specific options
+std::shared_ptr<ChannelCredentials> AltsCredentials(
+ const AltsCredentialsOptions& options);
+
+/// Builds Local Credentials.
+std::shared_ptr<ChannelCredentials> LocalCredentials(
+ grpc_local_connect_type type);
+
+/// Builds TLS Credentials given TLS options.
+std::shared_ptr<ChannelCredentials> TlsCredentials(
+ const TlsCredentialsOptions& options);
+
} // namespace experimental
} // namespace grpc
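
To tie several of the declarations above together, a hedged end-to-end sketch: SSL channel credentials composed with per-call credentials from a custom MetadataCredentialsPlugin. The ticket header name, ticket value, and target are assumptions:

#include <map>
#include <memory>
#include <utility>

#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>

// Illustrative per-call plugin that attaches a ticket header to every RPC.
class TicketPlugin : public grpc::MetadataCredentialsPlugin {
 public:
  explicit TicketPlugin(grpc::string ticket) : ticket_(std::move(ticket)) {}

  grpc::Status GetMetadata(
      grpc::string_ref /*service_url*/, grpc::string_ref /*method_name*/,
      const grpc::AuthContext& /*channel_auth_context*/,
      std::multimap<grpc::string, grpc::string>* metadata) override {
    metadata->insert({"x-my-ticket", ticket_});  // header name is an assumption
    return grpc::Status::OK;
  }

 private:
  grpc::string ticket_;
};

std::shared_ptr<grpc::Channel> MakeAuthenticatedChannel() {
  grpc::SslCredentialsOptions ssl_opts;  // empty fields: use default roots
  auto channel_creds = grpc::SslCredentials(ssl_opts);
  auto call_creds = grpc::MetadataCredentialsFromPlugin(
      std::unique_ptr<grpc::MetadataCredentialsPlugin>(
          new TicketPlugin("ticket-123")));
  auto composite = grpc::CompositeChannelCredentials(channel_creds, call_creds);
  // Target is illustrative.
  return grpc::CreateChannel("myservice.example.com:443", composite);
}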
diff --git a/contrib/libs/grpc/include/grpcpp/security/cronet_credentials.h b/contrib/libs/grpc/include/grpcpp/security/cronet_credentials.h
index 8e2b6aa4ab..7874debae8 100644
--- a/contrib/libs/grpc/include/grpcpp/security/cronet_credentials.h
+++ b/contrib/libs/grpc/include/grpcpp/security/cronet_credentials.h
@@ -19,15 +19,15 @@
#ifndef GRPCPP_SECURITY_CRONET_CREDENTIALS_H
#define GRPCPP_SECURITY_CRONET_CREDENTIALS_H
-#include <memory>
+#include <memory>
namespace grpc {
-class ChannelCredentials;
+class ChannelCredentials;
+
+/// Credentials for a channel using Cronet.
+std::shared_ptr<ChannelCredentials> CronetChannelCredentials(void* engine);
-/// Credentials for a channel using Cronet.
-std::shared_ptr<ChannelCredentials> CronetChannelCredentials(void* engine);
-
} // namespace grpc
#endif // GRPCPP_SECURITY_CRONET_CREDENTIALS_H
diff --git a/contrib/libs/grpc/include/grpcpp/security/server_credentials.h b/contrib/libs/grpc/include/grpcpp/security/server_credentials.h
index 3e0e4858a4..d0a532360a 100644
--- a/contrib/libs/grpc/include/grpcpp/security/server_credentials.h
+++ b/contrib/libs/grpc/include/grpcpp/security/server_credentials.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,19 +19,19 @@
#ifndef GRPCPP_SECURITY_SERVER_CREDENTIALS_H
#define GRPCPP_SECURITY_SERVER_CREDENTIALS_H
-#include <memory>
-#include <vector>
+#include <memory>
+#include <vector>
-#include <grpc/grpc_security_constants.h>
-#include <grpcpp/security/auth_metadata_processor.h>
-#include <grpcpp/security/tls_credentials_options.h>
-#include <grpcpp/support/config.h>
+#include <grpc/grpc_security_constants.h>
+#include <grpcpp/security/auth_metadata_processor.h>
+#include <grpcpp/security/tls_credentials_options.h>
+#include <grpcpp/support/config.h>
+
+struct grpc_server;
-struct grpc_server;
-
namespace grpc {
-class Server;
+class Server;
/// Options to create ServerCredentials with SSL
struct SslServerCredentialsOptions {
/// \warning Deprecated
@@ -43,10 +43,10 @@ struct SslServerCredentialsOptions {
: force_client_auth(false), client_certificate_request(request_type) {}
struct PemKeyCertPair {
- TString private_key;
- TString cert_chain;
+ TString private_key;
+ TString cert_chain;
};
- TString pem_root_certs;
+ TString pem_root_certs;
std::vector<PemKeyCertPair> pem_key_cert_pairs;
/// \warning Deprecated
bool force_client_auth;
@@ -58,54 +58,54 @@ struct SslServerCredentialsOptions {
grpc_ssl_client_certificate_request_type client_certificate_request;
};
-/// Wrapper around \a grpc_server_credentials, a way to authenticate a server.
-class ServerCredentials {
- public:
- virtual ~ServerCredentials();
-
- /// This method is not thread-safe and has to be called before the server is
- /// started. The last call to this function wins.
- virtual void SetAuthMetadataProcessor(
- const std::shared_ptr<grpc::AuthMetadataProcessor>& processor) = 0;
-
- private:
- friend class Server;
-
- /// Tries to bind \a server to the given \a addr (eg, localhost:1234,
- /// 192.168.1.1:31416, [::1]:27182, etc.)
- ///
- /// \return bound port number on success, 0 on failure.
- // TODO(dgq): the "port" part seems to be a misnomer.
- virtual int AddPortToServer(const TString& addr, grpc_server* server) = 0;
-};
-
-/// Builds SSL ServerCredentials given SSL specific options
-std::shared_ptr<ServerCredentials> SslServerCredentials(
- const grpc::SslServerCredentialsOptions& options);
-
-std::shared_ptr<ServerCredentials> InsecureServerCredentials();
-
+/// Wrapper around \a grpc_server_credentials, a way to authenticate a server.
+class ServerCredentials {
+ public:
+ virtual ~ServerCredentials();
+
+ /// This method is not thread-safe and has to be called before the server is
+ /// started. The last call to this function wins.
+ virtual void SetAuthMetadataProcessor(
+ const std::shared_ptr<grpc::AuthMetadataProcessor>& processor) = 0;
+
+ private:
+ friend class Server;
+
+ /// Tries to bind \a server to the given \a addr (eg, localhost:1234,
+ /// 192.168.1.1:31416, [::1]:27182, etc.)
+ ///
+ /// \return bound port number on success, 0 on failure.
+ // TODO(dgq): the "port" part seems to be a misnomer.
+ virtual int AddPortToServer(const TString& addr, grpc_server* server) = 0;
+};
+
+/// Builds SSL ServerCredentials given SSL specific options
+std::shared_ptr<ServerCredentials> SslServerCredentials(
+ const grpc::SslServerCredentialsOptions& options);
+
+std::shared_ptr<ServerCredentials> InsecureServerCredentials();
+
namespace experimental {
-/// Options to create ServerCredentials with ALTS
-struct AltsServerCredentialsOptions {
- /// Add fields if needed.
-};
+/// Options to create ServerCredentials with ALTS
+struct AltsServerCredentialsOptions {
+ /// Add fields if needed.
+};
+
+/// Builds ALTS ServerCredentials given ALTS specific options
+std::shared_ptr<ServerCredentials> AltsServerCredentials(
+ const AltsServerCredentialsOptions& options);
-/// Builds ALTS ServerCredentials given ALTS specific options
-std::shared_ptr<ServerCredentials> AltsServerCredentials(
- const AltsServerCredentialsOptions& options);
+/// Builds Local ServerCredentials.
+std::shared_ptr<ServerCredentials> AltsServerCredentials(
+ const AltsServerCredentialsOptions& options);
-/// Builds Local ServerCredentials.
-std::shared_ptr<ServerCredentials> AltsServerCredentials(
- const AltsServerCredentialsOptions& options);
+std::shared_ptr<ServerCredentials> LocalServerCredentials(
+ grpc_local_connect_type type);
-std::shared_ptr<ServerCredentials> LocalServerCredentials(
- grpc_local_connect_type type);
-
/// Builds TLS ServerCredentials given TLS options.
-std::shared_ptr<ServerCredentials> TlsServerCredentials(
- const experimental::TlsCredentialsOptions& options);
+std::shared_ptr<ServerCredentials> TlsServerCredentials(
+ const experimental::TlsCredentialsOptions& options);
} // namespace experimental
} // namespace grpc
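
A short sketch of building SSL server credentials from the options above; the PEM strings come from the caller and the listening address in the usage note is illustrative:

#include <memory>

#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server_builder.h>

std::shared_ptr<grpc::ServerCredentials> MakeSslServerCreds(
    const TString& key_pem, const TString& cert_pem) {
  grpc::SslServerCredentialsOptions options(
      GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE);
  grpc::SslServerCredentialsOptions::PemKeyCertPair pair;
  pair.private_key = key_pem;
  pair.cert_chain = cert_pem;
  options.pem_key_cert_pairs.push_back(pair);
  return grpc::SslServerCredentials(options);
}

// Usage:
//   grpc::ServerBuilder builder;
//   builder.AddListeningPort("0.0.0.0:50051", MakeSslServerCreds(key, cert));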
diff --git a/contrib/libs/grpc/include/grpcpp/security/tls_credentials_options.h b/contrib/libs/grpc/include/grpcpp/security/tls_credentials_options.h
index 4ee7b3f4ed..f704cf6129 100644
--- a/contrib/libs/grpc/include/grpcpp/security/tls_credentials_options.h
+++ b/contrib/libs/grpc/include/grpcpp/security/tls_credentials_options.h
@@ -24,9 +24,9 @@
#include <grpc/support/log.h>
#include <grpcpp/support/config.h>
-#include <memory>
-#include <vector>
-
+#include <memory>
+#include <vector>
+
typedef struct grpc_tls_credential_reload_arg grpc_tls_credential_reload_arg;
typedef struct grpc_tls_credential_reload_config
grpc_tls_credential_reload_config;
@@ -36,7 +36,7 @@ typedef struct grpc_tls_server_authorization_check_config
grpc_tls_server_authorization_check_config;
typedef struct grpc_tls_credentials_options grpc_tls_credentials_options;
-namespace grpc {
+namespace grpc {
namespace experimental {
/** TLS key materials config, wrapper for grpc_tls_key_materials_config. It is
@@ -44,67 +44,67 @@ namespace experimental {
class TlsKeyMaterialsConfig {
public:
struct PemKeyCertPair {
- TString private_key;
- TString cert_chain;
+ TString private_key;
+ TString cert_chain;
};
/** Getters for member fields. **/
- const TString pem_root_certs() const { return pem_root_certs_; }
+ const TString pem_root_certs() const { return pem_root_certs_; }
const std::vector<PemKeyCertPair>& pem_key_cert_pair_list() const {
return pem_key_cert_pair_list_;
}
int version() const { return version_; }
- /** Setter for key materials that will be called by the user. Ownership of the
- * arguments will not be transferred. **/
- void set_pem_root_certs(const TString& pem_root_certs);
+ /** Setter for key materials that will be called by the user. Ownership of the
+ * arguments will not be transferred. **/
+ void set_pem_root_certs(const TString& pem_root_certs);
void add_pem_key_cert_pair(const PemKeyCertPair& pem_key_cert_pair);
- void set_key_materials(
- const TString& pem_root_certs,
- const std::vector<PemKeyCertPair>& pem_key_cert_pair_list);
+ void set_key_materials(
+ const TString& pem_root_certs,
+ const std::vector<PemKeyCertPair>& pem_key_cert_pair_list);
void set_version(int version) { version_ = version; };
private:
int version_ = 0;
std::vector<PemKeyCertPair> pem_key_cert_pair_list_;
- TString pem_root_certs_;
+ TString pem_root_certs_;
};
/** TLS credential reload arguments, wraps grpc_tls_credential_reload_arg. It is
- * used for experimental purposes for now and it is subject to change.
+ * used for experimental purposes for now and it is subject to change.
*
- * The credential reload arg contains all the info necessary to schedule/cancel
- * a credential reload request. The callback function must be called after
- * finishing the schedule operation. See the description of the
- * grpc_tls_credential_reload_arg struct in grpc_security.h for more details.
+ * The credential reload arg contains all the info necessary to schedule/cancel
+ * a credential reload request. The callback function must be called after
+ * finishing the schedule operation. See the description of the
+ * grpc_tls_credential_reload_arg struct in grpc_security.h for more details.
* **/
class TlsCredentialReloadArg {
public:
/** TlsCredentialReloadArg does not take ownership of the C arg that is passed
- * to the constructor. One must remember to free any memory allocated to the
- * C arg after using the setter functions below. **/
+ * to the constructor. One must remember to free any memory allocated to the
+ * C arg after using the setter functions below. **/
TlsCredentialReloadArg(grpc_tls_credential_reload_arg* arg);
~TlsCredentialReloadArg();
- /** Getters for member fields. **/
+ /** Getters for member fields. **/
void* cb_user_data() const;
bool is_pem_key_cert_pair_list_empty() const;
grpc_ssl_certificate_config_reload_status status() const;
- TString error_details() const;
+ TString error_details() const;
- /** Setters for member fields. Ownership of the arguments will not be
- * transferred. **/
+ /** Setters for member fields. Ownership of the arguments will not be
+ * transferred. **/
void set_cb_user_data(void* cb_user_data);
- void set_pem_root_certs(const TString& pem_root_certs);
+ void set_pem_root_certs(const TString& pem_root_certs);
void add_pem_key_cert_pair(
- const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair);
- void set_key_materials(const TString& pem_root_certs,
- std::vector<TlsKeyMaterialsConfig::PemKeyCertPair>
- pem_key_cert_pair_list);
+ const TlsKeyMaterialsConfig::PemKeyCertPair& pem_key_cert_pair);
+ void set_key_materials(const TString& pem_root_certs,
+ std::vector<TlsKeyMaterialsConfig::PemKeyCertPair>
+ pem_key_cert_pair_list);
void set_key_materials_config(
const std::shared_ptr<TlsKeyMaterialsConfig>& key_materials_config);
void set_status(grpc_ssl_certificate_config_reload_status status);
- void set_error_details(const TString& error_details);
+ void set_error_details(const TString& error_details);
/** Calls the C arg's callback function. **/
void OnCredentialReloadDoneCallback();
@@ -184,23 +184,23 @@ class TlsServerAuthorizationCheckArg {
TlsServerAuthorizationCheckArg(grpc_tls_server_authorization_check_arg* arg);
~TlsServerAuthorizationCheckArg();
- /** Getters for member fields. **/
+ /** Getters for member fields. **/
void* cb_user_data() const;
int success() const;
- TString target_name() const;
- TString peer_cert() const;
- TString peer_cert_full_chain() const;
+ TString target_name() const;
+ TString peer_cert() const;
+ TString peer_cert_full_chain() const;
grpc_status_code status() const;
- TString error_details() const;
+ TString error_details() const;
- /** Setters for member fields. **/
+ /** Setters for member fields. **/
void set_cb_user_data(void* cb_user_data);
void set_success(int success);
- void set_target_name(const TString& target_name);
- void set_peer_cert(const TString& peer_cert);
- void set_peer_cert_full_chain(const TString& peer_cert_full_chain);
+ void set_target_name(const TString& target_name);
+ void set_peer_cert(const TString& peer_cert);
+ void set_peer_cert_full_chain(const TString& peer_cert_full_chain);
void set_status(grpc_status_code status);
- void set_error_details(const TString& error_details);
+ void set_error_details(const TString& error_details);
/** Calls the C arg's callback function. **/
void OnServerAuthorizationCheckDoneCallback();
@@ -278,24 +278,24 @@ class TlsServerAuthorizationCheckConfig {
* more details. **/
class TlsCredentialsOptions {
public:
- // Constructor for client.
- explicit TlsCredentialsOptions(
- grpc_tls_server_verification_option server_verification_option,
- std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
- std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config,
- std::shared_ptr<TlsServerAuthorizationCheckConfig>
- server_authorization_check_config);
-
- // Constructor for server.
- explicit TlsCredentialsOptions(
- grpc_ssl_client_certificate_request_type cert_request_type,
- std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
- std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config);
-
- // This constructor will be deprecated.
+ // Constructor for client.
+ explicit TlsCredentialsOptions(
+ grpc_tls_server_verification_option server_verification_option,
+ std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
+ std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config,
+ std::shared_ptr<TlsServerAuthorizationCheckConfig>
+ server_authorization_check_config);
+
+ // Constructor for server.
+ explicit TlsCredentialsOptions(
+ grpc_ssl_client_certificate_request_type cert_request_type,
+ std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
+ std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config);
+
+ // This constructor will be deprecated.
TlsCredentialsOptions(
grpc_ssl_client_certificate_request_type cert_request_type,
- grpc_tls_server_verification_option server_verification_option,
+ grpc_tls_server_verification_option server_verification_option,
std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config,
std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config,
std::shared_ptr<TlsServerAuthorizationCheckConfig>
@@ -306,9 +306,9 @@ class TlsCredentialsOptions {
grpc_ssl_client_certificate_request_type cert_request_type() const {
return cert_request_type_;
}
- grpc_tls_server_verification_option server_verification_option() const {
- return server_verification_option_;
- }
+ grpc_tls_server_verification_option server_verification_option() const {
+ return server_verification_option_;
+ }
std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config() const {
return key_materials_config_;
}
@@ -329,9 +329,9 @@ class TlsCredentialsOptions {
* goes unused when creating channel credentials, and the user can set it to
* GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE. **/
grpc_ssl_client_certificate_request_type cert_request_type_;
- /** The server_verification_option_ flag is only relevant when the
- * TlsCredentialsOptions are used to instantiate client credentials. **/
- grpc_tls_server_verification_option server_verification_option_;
+ /** The server_verification_option_ flag is only relevant when the
+ * TlsCredentialsOptions are used to instantiate client credentials. **/
+ grpc_tls_server_verification_option server_verification_option_;
std::shared_ptr<TlsKeyMaterialsConfig> key_materials_config_;
std::shared_ptr<TlsCredentialReloadConfig> credential_reload_config_;
std::shared_ptr<TlsServerAuthorizationCheckConfig>
@@ -340,6 +340,6 @@ class TlsCredentialsOptions {
};
} // namespace experimental
-} // namespace grpc
+} // namespace grpc
#endif // GRPCPP_SECURITY_TLS_CREDENTIALS_OPTIONS_H
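
For orientation, a minimal sketch of how the TlsCredentialReloadArg setters above might be driven from a reload hook. The PEM file paths, the ReadPem helper, and the PemKeyCertPair field names are assumptions for illustration only; only the setter and callback names come from the header in this diff.

#include <fstream>
#include <sstream>

#include <grpcpp/security/tls_credentials_options.h>

// Assumed helper for the sketch: slurp a PEM file into a TString.
static TString ReadPem(const char* path) {
  std::ifstream in(path);
  std::stringstream buf;
  buf << in.rdbuf();
  return TString(buf.str().c_str());
}

// Hypothetical reload hook: fills the arg with fresh key material and then
// signals completion through the C arg's callback, as required by the
// credential reload contract described above.
static void ReloadFromDisk(grpc::experimental::TlsCredentialReloadArg* arg) {
  grpc::experimental::TlsKeyMaterialsConfig::PemKeyCertPair pair;
  pair.private_key = ReadPem("server.key");  // assumed field names
  pair.cert_chain = ReadPem("server.crt");
  arg->set_pem_root_certs(ReadPem("ca.crt"));
  arg->add_pem_key_cert_pair(pair);
  arg->set_status(GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW);
  arg->OnCredentialReloadDoneCallback();
}
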
diff --git a/contrib/libs/grpc/include/grpcpp/server.h b/contrib/libs/grpc/include/grpcpp/server.h
index 962073af83..ff04746347 100644
--- a/contrib/libs/grpc/include/grpcpp/server.h
+++ b/contrib/libs/grpc/include/grpcpp/server.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,357 +23,357 @@
#pragma GCC system_header
#endif
-#include <list>
-#include <memory>
-#include <vector>
-
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpc/compression.h>
-#include <grpc/support/atm.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/completion_queue.h>
-#include <grpcpp/health_check_service_interface.h>
-#include <grpcpp/impl/call.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/server_interface.h>
-#include <grpcpp/impl/rpc_service_method.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/support/channel_arguments.h>
-#include <grpcpp/support/config.h>
-#include <grpcpp/support/status.h>
-
-struct grpc_server;
-
+#include <list>
+#include <memory>
+#include <vector>
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpc/compression.h>
+#include <grpc/support/atm.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/completion_queue.h>
+#include <grpcpp/health_check_service_interface.h>
+#include <grpcpp/impl/call.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/impl/codegen/server_interface.h>
+#include <grpcpp/impl/rpc_service_method.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/support/channel_arguments.h>
+#include <grpcpp/support/config.h>
+#include <grpcpp/support/status.h>
+
+struct grpc_server;
+
namespace grpc {
-class AsyncGenericService;
-class ServerContext;
-class ServerInitializer;
-
-namespace internal {
-class ExternalConnectionAcceptorImpl;
-} // namespace internal
-
-/// Represents a gRPC server.
-///
-/// Use a \a grpc::ServerBuilder to create, configure, and start
-/// \a Server instances.
-class Server : public ServerInterface, private GrpcLibraryCodegen {
- public:
- ~Server();
-
- /// Block until the server shuts down.
- ///
- /// \warning The server must be either shutting down or some other thread must
- /// call \a Shutdown for this function to ever return.
- void Wait() override;
-
- /// Global callbacks are a set of hooks that are called when server
- /// events occur. \a SetGlobalCallbacks method is used to register
- /// the hooks with gRPC. Note that
- /// the \a GlobalCallbacks instance will be shared among all
- /// \a Server instances in an application and can be set exactly
- /// once per application.
- class GlobalCallbacks {
- public:
- virtual ~GlobalCallbacks() {}
- /// Called before server is created.
- virtual void UpdateArguments(ChannelArguments* /*args*/) {}
- /// Called before application callback for each synchronous server request
- virtual void PreSynchronousRequest(ServerContext* context) = 0;
- /// Called after application callback for each synchronous server request
- virtual void PostSynchronousRequest(ServerContext* context) = 0;
- /// Called before server is started.
- virtual void PreServerStart(Server* /*server*/) {}
- /// Called after a server port is added.
- virtual void AddPort(Server* /*server*/, const TString& /*addr*/,
- ServerCredentials* /*creds*/, int /*port*/) {}
- };
- /// Set the global callback object. Can only be called once per application.
- /// Does not take ownership of callbacks, and expects the pointed to object
- /// to be alive until all server objects in the process have been destroyed.
- /// The same \a GlobalCallbacks object will be used throughout the
- /// application and is shared among all \a Server objects.
- static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
-
- /// Returns a \em raw pointer to the underlying \a grpc_server instance.
- /// EXPERIMENTAL: for internal/test use only
- grpc_server* c_server();
-
- /// Returns the health check service.
- HealthCheckServiceInterface* GetHealthCheckService() const {
- return health_check_service_.get();
- }
-
- /// Establish a channel for in-process communication
- std::shared_ptr<Channel> InProcessChannel(const ChannelArguments& args);
-
- /// NOTE: class experimental_type is not part of the public API of this class.
- /// TODO(yashykt): Integrate into public API when this is no longer
- /// experimental.
- class experimental_type {
- public:
- explicit experimental_type(Server* server) : server_(server) {}
-
- /// Establish a channel for in-process communication with client
- /// interceptors
- std::shared_ptr<Channel> InProcessChannelWithInterceptors(
- const ChannelArguments& args,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
- private:
- Server* server_;
- };
-
- /// NOTE: The function experimental() is not stable public API. It is a view
- /// to the experimental components of this class. It may be changed or removed
- /// at any time.
- experimental_type experimental() { return experimental_type(this); }
-
- protected:
- /// Register a service. This call does not take ownership of the service.
- /// The service must exist for the lifetime of the Server instance.
- bool RegisterService(const TString* host, Service* service) override;
-
- /// Try binding the server to the given \a addr endpoint
- /// (port, and optionally including IP address to bind to).
- ///
- /// It can be invoked multiple times. Should be used before
- /// starting the server.
- ///
- /// \param addr The address to try to bind to the server (e.g., localhost:1234,
- /// 192.168.1.1:31416, [::1]:27182, etc.).
- /// \param creds The credentials associated with the server.
- ///
- /// \return bound port number on success, 0 on failure.
- ///
- /// \warning It is an error to call this method on an already started server.
- int AddListeningPort(const TString& addr,
- ServerCredentials* creds) override;
-
- /// NOTE: This is *NOT* a public API. The server constructors are supposed to
- /// be used by \a ServerBuilder class only. The constructor will be made
- /// 'private' very soon.
- ///
- /// Server constructors. To be used by \a ServerBuilder only.
- ///
- /// \param args The channel args
- ///
- /// \param sync_server_cqs The completion queues to use if the server is a
- /// synchronous server (or a hybrid server). The server polls for new RPCs on
- /// these queues
- ///
- /// \param min_pollers The minimum number of polling threads per server
- /// completion queue (in param sync_server_cqs) to use for listening to
- /// incoming requests (used only in case of sync server)
- ///
- /// \param max_pollers The maximum number of polling threads per server
- /// completion queue (in param sync_server_cqs) to use for listening to
- /// incoming requests (used only in case of sync server)
- ///
- /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on
- /// server completion queues passed via sync_server_cqs param.
- Server(ChannelArguments* args,
- std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
- sync_server_cqs,
- int min_pollers, int max_pollers, int sync_cq_timeout_msec,
- std::vector<std::shared_ptr<internal::ExternalConnectionAcceptorImpl>>
- acceptors,
- grpc_resource_quota* server_rq = nullptr,
- std::vector<
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- interceptor_creators = std::vector<std::unique_ptr<
- experimental::ServerInterceptorFactoryInterface>>());
-
- /// Start the server.
- ///
- /// \param cqs Completion queues for handling asynchronous services. The
- /// caller is required to keep all completion queues live until the server is
- /// destroyed.
- /// \param num_cqs How many completion queues does \a cqs hold.
- void Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
-
- grpc_server* server() override { return server_; }
-
- protected:
- /// NOTE: This method is not part of the public API for this class.
- void set_health_check_service(
- std::unique_ptr<HealthCheckServiceInterface> service) {
- health_check_service_ = std::move(service);
- }
-
- /// NOTE: This method is not part of the public API for this class.
- bool health_check_service_disabled() const {
- return health_check_service_disabled_;
- }
-
- private:
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>*
- interceptor_creators() override {
- return &interceptor_creators_;
- }
-
- friend class AsyncGenericService;
- friend class ServerBuilder;
- friend class ServerInitializer;
-
- class SyncRequest;
- class CallbackRequestBase;
- template <class ServerContextType>
- class CallbackRequest;
- class UnimplementedAsyncRequest;
- class UnimplementedAsyncResponse;
-
- /// SyncRequestThreadManager is an implementation of ThreadManager. This class
- /// is responsible for polling for incoming RPCs and calling the RPC handlers.
- /// This is only used in case of a Sync server (i.e. a server exposing a sync
- /// interface)
- class SyncRequestThreadManager;
-
- /// Register a generic service. This call does not take ownership of the
- /// service. The service must exist for the lifetime of the Server instance.
- void RegisterAsyncGenericService(AsyncGenericService* service) override;
-
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Register a callback-based generic service. This call does not take
- /// ownership of the service. The service must exist for the lifetime of the
- /// Server instance.
- void RegisterCallbackGenericService(CallbackGenericService* service) override;
-#else
- /// NOTE: class experimental_registration_type is not part of the public API
- /// of this class
- /// TODO(vjpai): Move these contents to the public API of Server when
- /// they are no longer experimental
- class experimental_registration_type final
- : public experimental_registration_interface {
- public:
- explicit experimental_registration_type(Server* server) : server_(server) {}
- void RegisterCallbackGenericService(
- experimental::CallbackGenericService* service) override {
- server_->RegisterCallbackGenericService(service);
- }
-
- private:
- Server* server_;
- };
-
- /// TODO(vjpai): Mark this override when experimental type above is deleted
- void RegisterCallbackGenericService(
- experimental::CallbackGenericService* service);
-
- /// NOTE: The function experimental_registration() is not stable public API.
- /// It is a view to the experimental components of this class. It may be
- /// changed or removed at any time.
- experimental_registration_interface* experimental_registration() override {
- return &experimental_registration_;
- }
-#endif
-
- void PerformOpsOnCall(internal::CallOpSetInterface* ops,
- internal::Call* call) override;
-
- void ShutdownInternal(gpr_timespec deadline) override;
-
- int max_receive_message_size() const override {
- return max_receive_message_size_;
- }
-
- CompletionQueue* CallbackCQ() override;
-
- ServerInitializer* initializer();
-
- // Functions to manage the server shutdown ref count. Things that increase
- // the ref count are the running state of the server (take a ref at start and
- // drop it at shutdown) and each running callback RPC.
- void Ref();
- void UnrefWithPossibleNotify() /* LOCKS_EXCLUDED(mu_) */;
- void UnrefAndWaitLocked() /* EXCLUSIVE_LOCKS_REQUIRED(mu_) */;
-
- std::vector<std::shared_ptr<internal::ExternalConnectionAcceptorImpl>>
- acceptors_;
-
- // A vector of interceptor factory objects.
- // This should be destroyed after health_check_service_ and this requirement
- // is satisfied by declaring interceptor_creators_ before
- // health_check_service_. (C++ mandates that member objects be destroyed in
- // the reverse order of initialization.)
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- interceptor_creators_;
-
- int max_receive_message_size_;
-
- /// The following completion queues are ONLY used in case of Sync API
- /// i.e. if the server has any services with sync methods. The server uses
- /// these completion queues to poll for new RPCs
- std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
- sync_server_cqs_;
-
- /// List of \a ThreadManager instances (one for each cq in
- /// the \a sync_server_cqs)
- std::vector<std::unique_ptr<SyncRequestThreadManager>> sync_req_mgrs_;
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
- // For registering experimental callback generic service; remove when that
- // method is no longer experimental
- experimental_registration_type experimental_registration_{this};
-#endif
-
- // Server status
- internal::Mutex mu_;
- bool started_;
- bool shutdown_;
- bool shutdown_notified_; // Was notify called on the shutdown_cv_
- internal::CondVar shutdown_done_cv_;
- bool shutdown_done_ = false;
- std::atomic_int shutdown_refs_outstanding_{1};
-
- internal::CondVar shutdown_cv_;
-
- std::shared_ptr<GlobalCallbacks> global_callbacks_;
-
- std::vector<TString> services_;
- bool has_async_generic_service_ = false;
- bool has_callback_generic_service_ = false;
- bool has_callback_methods_ = false;
-
- // Pointer to the wrapped grpc_server.
- grpc_server* server_;
-
- std::unique_ptr<ServerInitializer> server_initializer_;
-
- std::unique_ptr<HealthCheckServiceInterface> health_check_service_;
- bool health_check_service_disabled_;
-
- // When appropriate, use a default callback generic service to handle
- // unimplemented methods
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- std::unique_ptr<CallbackGenericService> unimplemented_service_;
-#else
- std::unique_ptr<experimental::CallbackGenericService> unimplemented_service_;
-#endif
-
- // A special handler for resource exhausted in sync case
- std::unique_ptr<internal::MethodHandler> resource_exhausted_handler_;
-
- // Handler for callback generic service, if any
- std::unique_ptr<internal::MethodHandler> generic_handler_;
-
- // callback_cq_ references the callbackable completion queue associated
- // with this server (if any). It is set on the first call to CallbackCQ().
- // It is _not owned_ by the server; ownership belongs with its internal
- // shutdown callback tag (invoked when the CQ is fully shutdown).
- CompletionQueue* callback_cq_ /* GUARDED_BY(mu_) */ = nullptr;
-
- // List of CQs passed in by user that must be Shutdown only after Server is
- // Shutdown. Even though this is only used with NDEBUG, instantiate it in all
- // cases since otherwise the size will be inconsistent.
- std::vector<CompletionQueue*> cq_list_;
-};
-
+class AsyncGenericService;
+class ServerContext;
+class ServerInitializer;
+
+namespace internal {
+class ExternalConnectionAcceptorImpl;
+} // namespace internal
+
+/// Represents a gRPC server.
+///
+/// Use a \a grpc::ServerBuilder to create, configure, and start
+/// \a Server instances.
+class Server : public ServerInterface, private GrpcLibraryCodegen {
+ public:
+ ~Server();
+
+ /// Block until the server shuts down.
+ ///
+ /// \warning The server must be either shutting down or some other thread must
+ /// call \a Shutdown for this function to ever return.
+ void Wait() override;
+
+ /// Global callbacks are a set of hooks that are called when server
+ /// events occur. \a SetGlobalCallbacks method is used to register
+ /// the hooks with gRPC. Note that
+ /// the \a GlobalCallbacks instance will be shared among all
+ /// \a Server instances in an application and can be set exactly
+ /// once per application.
+ class GlobalCallbacks {
+ public:
+ virtual ~GlobalCallbacks() {}
+ /// Called before server is created.
+ virtual void UpdateArguments(ChannelArguments* /*args*/) {}
+ /// Called before application callback for each synchronous server request
+ virtual void PreSynchronousRequest(ServerContext* context) = 0;
+ /// Called after application callback for each synchronous server request
+ virtual void PostSynchronousRequest(ServerContext* context) = 0;
+ /// Called before server is started.
+ virtual void PreServerStart(Server* /*server*/) {}
+ /// Called after a server port is added.
+ virtual void AddPort(Server* /*server*/, const TString& /*addr*/,
+ ServerCredentials* /*creds*/, int /*port*/) {}
+ };
+ /// Set the global callback object. Can only be called once per application.
+ /// Does not take ownership of callbacks, and expects the pointed to object
+ /// to be alive until all server objects in the process have been destroyed.
+ /// The same \a GlobalCallbacks object will be used throughout the
+ /// application and is shared among all \a Server objects.
+ static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
+
+ /// Returns a \em raw pointer to the underlying \a grpc_server instance.
+ /// EXPERIMENTAL: for internal/test use only
+ grpc_server* c_server();
+
+ /// Returns the health check service.
+ HealthCheckServiceInterface* GetHealthCheckService() const {
+ return health_check_service_.get();
+ }
+
+ /// Establish a channel for in-process communication
+ std::shared_ptr<Channel> InProcessChannel(const ChannelArguments& args);
+
+ /// NOTE: class experimental_type is not part of the public API of this class.
+ /// TODO(yashykt): Integrate into public API when this is no longer
+ /// experimental.
+ class experimental_type {
+ public:
+ explicit experimental_type(Server* server) : server_(server) {}
+
+ /// Establish a channel for in-process communication with client
+ /// interceptors
+ std::shared_ptr<Channel> InProcessChannelWithInterceptors(
+ const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+ private:
+ Server* server_;
+ };
+
+ /// NOTE: The function experimental() is not stable public API. It is a view
+ /// to the experimental components of this class. It may be changed or removed
+ /// at any time.
+ experimental_type experimental() { return experimental_type(this); }
+
+ protected:
+ /// Register a service. This call does not take ownership of the service.
+ /// The service must exist for the lifetime of the Server instance.
+ bool RegisterService(const TString* host, Service* service) override;
+
+ /// Try binding the server to the given \a addr endpoint
+ /// (port, and optionally including IP address to bind to).
+ ///
+ /// It can be invoked multiple times. Should be used before
+ /// starting the server.
+ ///
+ /// \param addr The address to try to bind to the server (e.g., localhost:1234,
+ /// 192.168.1.1:31416, [::1]:27182, etc.).
+ /// \param creds The credentials associated with the server.
+ ///
+ /// \return bound port number on success, 0 on failure.
+ ///
+ /// \warning It is an error to call this method on an already started server.
+ int AddListeningPort(const TString& addr,
+ ServerCredentials* creds) override;
+
+ /// NOTE: This is *NOT* a public API. The server constructors are supposed to
+ /// be used by \a ServerBuilder class only. The constructor will be made
+ /// 'private' very soon.
+ ///
+ /// Server constructors. To be used by \a ServerBuilder only.
+ ///
+ /// \param args The channel args
+ ///
+ /// \param sync_server_cqs The completion queues to use if the server is a
+ /// synchronous server (or a hybrid server). The server polls for new RPCs on
+ /// these queues
+ ///
+ /// \param min_pollers The minimum number of polling threads per server
+ /// completion queue (in param sync_server_cqs) to use for listening to
+ /// incoming requests (used only in case of sync server)
+ ///
+ /// \param max_pollers The maximum number of polling threads per server
+ /// completion queue (in param sync_server_cqs) to use for listening to
+ /// incoming requests (used only in case of sync server)
+ ///
+ /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on
+ /// server completion queues passed via sync_server_cqs param.
+ Server(ChannelArguments* args,
+ std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
+ sync_server_cqs,
+ int min_pollers, int max_pollers, int sync_cq_timeout_msec,
+ std::vector<std::shared_ptr<internal::ExternalConnectionAcceptorImpl>>
+ acceptors,
+ grpc_resource_quota* server_rq = nullptr,
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators = std::vector<std::unique_ptr<
+ experimental::ServerInterceptorFactoryInterface>>());
+
+ /// Start the server.
+ ///
+ /// \param cqs Completion queues for handling asynchronous services. The
+ /// caller is required to keep all completion queues live until the server is
+ /// destroyed.
+ /// \param num_cqs How many completion queues does \a cqs hold.
+ void Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
+
+ grpc_server* server() override { return server_; }
+
+ protected:
+ /// NOTE: This method is not part of the public API for this class.
+ void set_health_check_service(
+ std::unique_ptr<HealthCheckServiceInterface> service) {
+ health_check_service_ = std::move(service);
+ }
+
+ /// NOTE: This method is not part of the public API for this class.
+ bool health_check_service_disabled() const {
+ return health_check_service_disabled_;
+ }
+
+ private:
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>*
+ interceptor_creators() override {
+ return &interceptor_creators_;
+ }
+
+ friend class AsyncGenericService;
+ friend class ServerBuilder;
+ friend class ServerInitializer;
+
+ class SyncRequest;
+ class CallbackRequestBase;
+ template <class ServerContextType>
+ class CallbackRequest;
+ class UnimplementedAsyncRequest;
+ class UnimplementedAsyncResponse;
+
+ /// SyncRequestThreadManager is an implementation of ThreadManager. This class
+ /// is responsible for polling for incoming RPCs and calling the RPC handlers.
+ /// This is only used in case of a Sync server (i.e. a server exposing a sync
+ /// interface)
+ class SyncRequestThreadManager;
+
+ /// Register a generic service. This call does not take ownership of the
+ /// service. The service must exist for the lifetime of the Server instance.
+ void RegisterAsyncGenericService(AsyncGenericService* service) override;
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Register a callback-based generic service. This call does not take
+ /// ownership of the service. The service must exist for the lifetime of the
+ /// Server instance.
+ void RegisterCallbackGenericService(CallbackGenericService* service) override;
+#else
+ /// NOTE: class experimental_registration_type is not part of the public API
+ /// of this class
+ /// TODO(vjpai): Move these contents to the public API of Server when
+ /// they are no longer experimental
+ class experimental_registration_type final
+ : public experimental_registration_interface {
+ public:
+ explicit experimental_registration_type(Server* server) : server_(server) {}
+ void RegisterCallbackGenericService(
+ experimental::CallbackGenericService* service) override {
+ server_->RegisterCallbackGenericService(service);
+ }
+
+ private:
+ Server* server_;
+ };
+
+ /// TODO(vjpai): Mark this override when experimental type above is deleted
+ void RegisterCallbackGenericService(
+ experimental::CallbackGenericService* service);
+
+ /// NOTE: The function experimental_registration() is not stable public API.
+ /// It is a view to the experimental components of this class. It may be
+ /// changed or removed at any time.
+ experimental_registration_interface* experimental_registration() override {
+ return &experimental_registration_;
+ }
+#endif
+
+ void PerformOpsOnCall(internal::CallOpSetInterface* ops,
+ internal::Call* call) override;
+
+ void ShutdownInternal(gpr_timespec deadline) override;
+
+ int max_receive_message_size() const override {
+ return max_receive_message_size_;
+ }
+
+ CompletionQueue* CallbackCQ() override;
+
+ ServerInitializer* initializer();
+
+ // Functions to manage the server shutdown ref count. Things that increase
+ // the ref count are the running state of the server (take a ref at start and
+ // drop it at shutdown) and each running callback RPC.
+ void Ref();
+ void UnrefWithPossibleNotify() /* LOCKS_EXCLUDED(mu_) */;
+ void UnrefAndWaitLocked() /* EXCLUSIVE_LOCKS_REQUIRED(mu_) */;
+
+ std::vector<std::shared_ptr<internal::ExternalConnectionAcceptorImpl>>
+ acceptors_;
+
+ // A vector of interceptor factory objects.
+ // This should be destroyed after health_check_service_ and this requirement
+ // is satisfied by declaring interceptor_creators_ before
+ // health_check_service_. (C++ mandates that member objects be destroyed in
+ // the reverse order of initialization.)
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators_;
+
+ int max_receive_message_size_;
+
+ /// The following completion queues are ONLY used in case of Sync API
+ /// i.e. if the server has any services with sync methods. The server uses
+ /// these completion queues to poll for new RPCs
+ std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
+ sync_server_cqs_;
+
+ /// List of \a ThreadManager instances (one for each cq in
+ /// the \a sync_server_cqs)
+ std::vector<std::unique_ptr<SyncRequestThreadManager>> sync_req_mgrs_;
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ // For registering experimental callback generic service; remove when that
+ // method is no longer experimental
+ experimental_registration_type experimental_registration_{this};
+#endif
+
+ // Server status
+ internal::Mutex mu_;
+ bool started_;
+ bool shutdown_;
+ bool shutdown_notified_; // Was notify called on the shutdown_cv_
+ internal::CondVar shutdown_done_cv_;
+ bool shutdown_done_ = false;
+ std::atomic_int shutdown_refs_outstanding_{1};
+
+ internal::CondVar shutdown_cv_;
+
+ std::shared_ptr<GlobalCallbacks> global_callbacks_;
+
+ std::vector<TString> services_;
+ bool has_async_generic_service_ = false;
+ bool has_callback_generic_service_ = false;
+ bool has_callback_methods_ = false;
+
+ // Pointer to the wrapped grpc_server.
+ grpc_server* server_;
+
+ std::unique_ptr<ServerInitializer> server_initializer_;
+
+ std::unique_ptr<HealthCheckServiceInterface> health_check_service_;
+ bool health_check_service_disabled_;
+
+ // When appropriate, use a default callback generic service to handle
+ // unimplemented methods
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ std::unique_ptr<CallbackGenericService> unimplemented_service_;
+#else
+ std::unique_ptr<experimental::CallbackGenericService> unimplemented_service_;
+#endif
+
+ // A special handler for resource exhausted in sync case
+ std::unique_ptr<internal::MethodHandler> resource_exhausted_handler_;
+
+ // Handler for callback generic service, if any
+ std::unique_ptr<internal::MethodHandler> generic_handler_;
+
+ // callback_cq_ references the callbackable completion queue associated
+ // with this server (if any). It is set on the first call to CallbackCQ().
+ // It is _not owned_ by the server; ownership belongs with its internal
+ // shutdown callback tag (invoked when the CQ is fully shutdown).
+ CompletionQueue* callback_cq_ /* GUARDED_BY(mu_) */ = nullptr;
+
+ // List of CQs passed in by user that must be Shutdown only after Server is
+ // Shutdown. Even though this is only used with NDEBUG, instantiate it in all
+ // cases since otherwise the size will be inconsistent.
+ std::vector<CompletionQueue*> cq_list_;
+};
+
} // namespace grpc
#endif // GRPCPP_SERVER_H
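
A short sketch of the GlobalCallbacks hooks documented above; the class name, the logging bodies, and the registration pattern are placeholders rather than part of this header.

#include <grpcpp/server.h>
#include <grpcpp/server_context.h>

// Illustrative hook object; the bodies below are placeholders.
class LoggingCallbacks : public grpc::Server::GlobalCallbacks {
 public:
  void PreSynchronousRequest(grpc::ServerContext* /*context*/) override {
    // e.g. start a per-request timer here
  }
  void PostSynchronousRequest(grpc::ServerContext* /*context*/) override {
    // e.g. record the elapsed time here
  }
};

// SetGlobalCallbacks does not take ownership, so the object must outlive
// every Server in the process; register it once, early in main():
//   static LoggingCallbacks callbacks;
//   grpc::Server::SetGlobalCallbacks(&callbacks);
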
diff --git a/contrib/libs/grpc/include/grpcpp/server_builder.h b/contrib/libs/grpc/include/grpcpp/server_builder.h
index 0af1fac45b..de48b83807 100644
--- a/contrib/libs/grpc/include/grpcpp/server_builder.h
+++ b/contrib/libs/grpc/include/grpcpp/server_builder.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015-2016 gRPC authors.
+ * Copyright 2015-2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,394 +19,394 @@
#ifndef GRPCPP_SERVER_BUILDER_H
#define GRPCPP_SERVER_BUILDER_H
-#include <climits>
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpc/compression.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/workaround_list.h>
-#include <grpcpp/impl/channel_argument_option.h>
-#include <grpcpp/impl/codegen/server_interceptor.h>
-#include <grpcpp/impl/server_builder_option.h>
-#include <grpcpp/impl/server_builder_plugin.h>
-#include <grpcpp/server.h>
-#include <grpcpp/support/config.h>
-
-struct grpc_resource_quota;
-
+#include <climits>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpc/compression.h>
+#include <grpc/support/cpu.h>
+#include <grpc/support/workaround_list.h>
+#include <grpcpp/impl/channel_argument_option.h>
+#include <grpcpp/impl/codegen/server_interceptor.h>
+#include <grpcpp/impl/server_builder_option.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/server.h>
+#include <grpcpp/support/config.h>
+
+struct grpc_resource_quota;
+
namespace grpc {
-class CompletionQueue;
-class Server;
-class ServerCompletionQueue;
-class AsyncGenericService;
-class ResourceQuota;
-class ServerCredentials;
-class Service;
-namespace testing {
-class ServerBuilderPluginTest;
-} // namespace testing
-
-namespace internal {
-class ExternalConnectionAcceptorImpl;
-} // namespace internal
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-namespace experimental {
-#endif
-class CallbackGenericService;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-} // namespace experimental
-#endif
-
-namespace experimental {
-// EXPERIMENTAL API:
-// Interface for a grpc server to build transports with connections created out
-// of band.
-// See ServerBuilder's AddExternalConnectionAcceptor API.
-class ExternalConnectionAcceptor {
- public:
- struct NewConnectionParameters {
- int listener_fd = -1;
- int fd = -1;
- ByteBuffer read_buffer; // data intended for the grpc server
- };
- virtual ~ExternalConnectionAcceptor() {}
- // If called before grpc::Server is started or after it is shut down, the new
- // connection will be closed.
- virtual void HandleNewConnection(NewConnectionParameters* p) = 0;
-};
-
-} // namespace experimental
+class CompletionQueue;
+class Server;
+class ServerCompletionQueue;
+class AsyncGenericService;
+class ResourceQuota;
+class ServerCredentials;
+class Service;
+namespace testing {
+class ServerBuilderPluginTest;
+} // namespace testing
+
+namespace internal {
+class ExternalConnectionAcceptorImpl;
+} // namespace internal
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+#endif
+class CallbackGenericService;
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+} // namespace experimental
+#endif
+
+namespace experimental {
+// EXPERIMENTAL API:
+// Interface for a grpc server to build transports with connections created out
+// of band.
+// See ServerBuilder's AddExternalConnectionAcceptor API.
+class ExternalConnectionAcceptor {
+ public:
+ struct NewConnectionParameters {
+ int listener_fd = -1;
+ int fd = -1;
+ ByteBuffer read_buffer; // data intended for the grpc server
+ };
+ virtual ~ExternalConnectionAcceptor() {}
+ // If called before grpc::Server is started or after it is shut down, the new
+ // connection will be closed.
+ virtual void HandleNewConnection(NewConnectionParameters* p) = 0;
+};
+
+} // namespace experimental
+} // namespace grpc
+
+namespace grpc {
+
+/// A builder class for the creation and startup of \a grpc::Server instances.
+class ServerBuilder {
+ public:
+ ServerBuilder();
+ virtual ~ServerBuilder();
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Primary APIs
+
+ /// Return a running server which is ready for processing calls.
+ /// Before calling, one typically needs to ensure that:
+ /// 1. a service is registered - so that the server knows what to serve
+ /// (via RegisterService, or RegisterAsyncGenericService)
+ /// 2. a listening port has been added - so the server knows where to receive
+ /// traffic (via AddListeningPort)
+ /// 3. [for async api only] completion queues have been added via
+ /// AddCompletionQueue
+ ///
+ /// Will return a nullptr on errors.
+ virtual std::unique_ptr<grpc::Server> BuildAndStart();
+
+ /// Register a service. This call does not take ownership of the service.
+ /// The service must exist for the lifetime of the \a Server instance returned
+ /// by \a BuildAndStart().
+ /// Matches requests with any :authority
+ ServerBuilder& RegisterService(grpc::Service* service);
+
+ /// Enlists an endpoint \a addr (port with an optional IP address) to
+ /// bind the \a grpc::Server object to be created to.
+ ///
+ /// It can be invoked multiple times.
+ ///
+ /// \param addr_uri The address to try to bind to the server in URI form. If
+ /// the scheme name is omitted, "dns:///" is assumed. To bind to any address,
+ /// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
+ /// connections. Valid values include dns:///localhost:1234,
+ /// 192.168.1.1:31416, dns:///[::1]:27182, etc.
+ /// \param creds The credentials associated with the server.
+ /// \param selected_port[out] If not `nullptr`, gets populated with the port
+ /// number bound to the \a grpc::Server for the corresponding endpoint after
+ /// it is successfully bound by BuildAndStart(), 0 otherwise. AddListeningPort
+ /// does not modify this pointer.
+ ServerBuilder& AddListeningPort(
+ const TString& addr_uri,
+ std::shared_ptr<grpc::ServerCredentials> creds,
+ int* selected_port = nullptr);
+
+ /// Add a completion queue for handling asynchronous services.
+ ///
+ /// Best performance is typically obtained by using one thread per polling
+ /// completion queue.
+ ///
+ /// Caller is required to shutdown the server prior to shutting down the
+ /// returned completion queue. Caller is also required to drain the
+ /// completion queue after shutting it down. A typical usage scenario:
+ ///
+ /// // While building the server:
+ /// ServerBuilder builder;
+ /// ...
+ /// cq_ = builder.AddCompletionQueue();
+ /// server_ = builder.BuildAndStart();
+ ///
+ /// // While shutting down the server;
+ /// server_->Shutdown();
+ /// cq_->Shutdown(); // Always *after* the associated server's Shutdown()!
+ /// // Drain the cq_ that was created
+ /// void* ignored_tag;
+ /// bool ignored_ok;
+ /// while (cq_->Next(&ignored_tag, &ignored_ok)) { }
+ ///
+ /// \param is_frequently_polled This is an optional parameter to inform gRPC
+ /// library about whether this completion queue would be frequently polled
+ /// (i.e. by calling \a Next() or \a AsyncNext()). The default value is
+ /// 'true' and is the recommended setting. Setting this to 'false' (i.e.
+ /// not polling the completion queue frequently) will have a significantly
+ /// negative performance impact and hence should not be used in production
+ /// use cases.
+ std::unique_ptr<grpc::ServerCompletionQueue> AddCompletionQueue(
+ bool is_frequently_polled = true);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Less commonly used RegisterService variants
+
+ /// Register a service. This call does not take ownership of the service.
+ /// The service must exist for the lifetime of the \a Server instance
+ /// returned by \a BuildAndStart(). Only matches requests with :authority \a
+ /// host
+ ServerBuilder& RegisterService(const TString& host,
+ grpc::Service* service);
+
+ /// Register a generic service.
+ /// Matches requests with any :authority
+ /// This is mostly useful for writing generic gRPC Proxies where the exact
+ /// serialization format is unknown
+ ServerBuilder& RegisterAsyncGenericService(
+ grpc::AsyncGenericService* service);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Fine control knobs
+
+ /// Set max receive message size in bytes.
+ /// The default is GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH.
+ ServerBuilder& SetMaxReceiveMessageSize(int max_receive_message_size) {
+ max_receive_message_size_ = max_receive_message_size;
+ return *this;
+ }
+
+ /// Set max send message size in bytes.
+ /// The default is GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH.
+ ServerBuilder& SetMaxSendMessageSize(int max_send_message_size) {
+ max_send_message_size_ = max_send_message_size;
+ return *this;
+ }
+
+ /// \deprecated For backward compatibility.
+ ServerBuilder& SetMaxMessageSize(int max_message_size) {
+ return SetMaxReceiveMessageSize(max_message_size);
+ }
+
+ /// Set the support status for compression algorithms. All algorithms are
+ /// enabled by default.
+ ///
+ /// Incoming calls compressed with an unsupported algorithm will fail with
+ /// \a GRPC_STATUS_UNIMPLEMENTED.
+ ServerBuilder& SetCompressionAlgorithmSupportStatus(
+ grpc_compression_algorithm algorithm, bool enabled);
+
+ /// The default compression level to use for all channel calls in the
+ /// absence of a call-specific level.
+ ServerBuilder& SetDefaultCompressionLevel(grpc_compression_level level);
+
+ /// The default compression algorithm to use for all channel calls in the
+ /// absence of a call-specific level. Note that it overrides any compression
+ /// level set by \a SetDefaultCompressionLevel.
+ ServerBuilder& SetDefaultCompressionAlgorithm(
+ grpc_compression_algorithm algorithm);
+
+ /// Set the attached buffer pool for this server
+ ServerBuilder& SetResourceQuota(const grpc::ResourceQuota& resource_quota);
+
+ ServerBuilder& SetOption(std::unique_ptr<grpc::ServerBuilderOption> option);
+
+ /// Options for synchronous servers.
+ enum SyncServerOption {
+ NUM_CQS, ///< Number of completion queues.
+ MIN_POLLERS, ///< Minimum number of polling threads.
+ MAX_POLLERS, ///< Maximum number of polling threads.
+ CQ_TIMEOUT_MSEC ///< Completion queue timeout in milliseconds.
+ };
+
+ /// Only useful if this is a Synchronous server.
+ ServerBuilder& SetSyncServerOption(SyncServerOption option, int value);
+
+ /// Add a channel argument (an escape hatch to tuning core library parameters
+ /// directly)
+ template <class T>
+ ServerBuilder& AddChannelArgument(const TString& arg, const T& value) {
+ return SetOption(grpc::MakeChannelArgumentOption(arg, value));
+ }
+
+ /// For internal use only: Register a ServerBuilderPlugin factory function.
+ static void InternalAddPluginFactory(
+ std::unique_ptr<grpc::ServerBuilderPlugin> (*CreatePlugin)());
+
+ /// Enable a server workaround. Do not use unless you know what the workaround
+ /// does. For explanation and detailed descriptions of workarounds, see
+ /// doc/workarounds.md.
+ ServerBuilder& EnableWorkaround(grpc_workaround_list id);
+
+ /// NOTE: class experimental_type is not part of the public API of this class.
+ /// TODO(yashykt): Integrate into public API when this is no longer
+ /// experimental.
+ class experimental_type {
+ public:
+ explicit experimental_type(ServerBuilder* builder) : builder_(builder) {}
+
+ void SetInterceptorCreators(
+ std::vector<std::unique_ptr<
+ grpc::experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators) {
+ builder_->interceptor_creators_ = std::move(interceptor_creators);
+ }
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Register a generic service that uses the callback API.
+ /// Matches requests with any :authority
+ /// This is mostly useful for writing generic gRPC Proxies where the exact
+ /// serialization format is unknown
+ ServerBuilder& RegisterCallbackGenericService(
+ grpc::experimental::CallbackGenericService* service);
+#endif
+
+ enum class ExternalConnectionType {
+ FROM_FD = 0 // in the form of a file descriptor
+ };
+
+ /// Register an acceptor to handle the externally accepted connection in
+ /// grpc server. The returned acceptor can be used to pass the connection
+ /// to grpc server, where a channel will be created with the provided
+ /// server credentials.
+ std::unique_ptr<grpc::experimental::ExternalConnectionAcceptor>
+ AddExternalConnectionAcceptor(ExternalConnectionType type,
+ std::shared_ptr<ServerCredentials> creds);
+
+ private:
+ ServerBuilder* builder_;
+ };
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Register a generic service that uses the callback API.
+ /// Matches requests with any :authority
+ /// This is mostly useful for writing generic gRPC Proxies where the exact
+ /// serialization format is unknown
+ ServerBuilder& RegisterCallbackGenericService(
+ grpc::CallbackGenericService* service);
+#endif
+
+ /// NOTE: The function experimental() is not stable public API. It is a view
+ /// to the experimental components of this class. It may be changed or removed
+ /// at any time.
+ experimental_type experimental() { return experimental_type(this); }
+
+ protected:
+ /// Experimental, to be deprecated
+ struct Port {
+ TString addr;
+ std::shared_ptr<ServerCredentials> creds;
+ int* selected_port;
+ };
+
+ /// Experimental, to be deprecated
+ typedef std::unique_ptr<TString> HostString;
+ struct NamedService {
+ explicit NamedService(grpc::Service* s) : service(s) {}
+ NamedService(const TString& h, grpc::Service* s)
+ : host(new TString(h)), service(s) {}
+ HostString host;
+ grpc::Service* service;
+ };
+
+ /// Experimental, to be deprecated
+ std::vector<Port> ports() { return ports_; }
+
+ /// Experimental, to be deprecated
+ std::vector<NamedService*> services() {
+ std::vector<NamedService*> service_refs;
+ for (auto& ptr : services_) {
+ service_refs.push_back(ptr.get());
+ }
+ return service_refs;
+ }
+
+ /// Experimental, to be deprecated
+ std::vector<grpc::ServerBuilderOption*> options() {
+ std::vector<grpc::ServerBuilderOption*> option_refs;
+ for (auto& ptr : options_) {
+ option_refs.push_back(ptr.get());
+ }
+ return option_refs;
+ }
+
+ private:
+ friend class ::grpc::testing::ServerBuilderPluginTest;
+
+ struct SyncServerSettings {
+ SyncServerSettings()
+ : num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
+
+ /// Number of server completion queues to create to listen to incoming RPCs.
+ int num_cqs;
+
+ /// Minimum number of threads per completion queue that should be listening
+ /// to incoming RPCs.
+ int min_pollers;
+
+ /// Maximum number of threads per completion queue that can be listening to
+ /// incoming RPCs.
+ int max_pollers;
+
+ /// The timeout for server completion queue's AsyncNext call.
+ int cq_timeout_msec;
+ };
+
+ int max_receive_message_size_;
+ int max_send_message_size_;
+ std::vector<std::unique_ptr<grpc::ServerBuilderOption>> options_;
+ std::vector<std::unique_ptr<NamedService>> services_;
+ std::vector<Port> ports_;
+
+ SyncServerSettings sync_server_settings_;
+
+ /// List of completion queues added via \a AddCompletionQueue method.
+ std::vector<grpc::ServerCompletionQueue*> cqs_;
+
+ std::shared_ptr<grpc::ServerCredentials> creds_;
+ std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>> plugins_;
+ grpc_resource_quota* resource_quota_;
+ grpc::AsyncGenericService* generic_service_{nullptr};
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ grpc::CallbackGenericService* callback_generic_service_{nullptr};
+#else
+ grpc::experimental::CallbackGenericService* callback_generic_service_{
+ nullptr};
+#endif
+
+ struct {
+ bool is_set;
+ grpc_compression_level level;
+ } maybe_default_compression_level_;
+ struct {
+ bool is_set;
+ grpc_compression_algorithm algorithm;
+ } maybe_default_compression_algorithm_;
+ uint32_t enabled_compression_algorithms_bitset_;
+ std::vector<
+ std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators_;
+ std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
+ acceptors_;
+};
+
} // namespace grpc
-namespace grpc {
-
-/// A builder class for the creation and startup of \a grpc::Server instances.
-class ServerBuilder {
- public:
- ServerBuilder();
- virtual ~ServerBuilder();
-
- //////////////////////////////////////////////////////////////////////////////
- // Primary APIs
-
- /// Return a running server which is ready for processing calls.
- /// Before calling, one typically needs to ensure that:
- /// 1. a service is registered - so that the server knows what to serve
- /// (via RegisterService, or RegisterAsyncGenericService)
- /// 2. a listening port has been added - so the server knows where to receive
- /// traffic (via AddListeningPort)
- /// 3. [for async api only] completion queues have been added via
- /// AddCompletionQueue
- ///
- /// Will return a nullptr on errors.
- virtual std::unique_ptr<grpc::Server> BuildAndStart();
-
- /// Register a service. This call does not take ownership of the service.
- /// The service must exist for the lifetime of the \a Server instance returned
- /// by \a BuildAndStart().
- /// Matches requests with any :authority
- ServerBuilder& RegisterService(grpc::Service* service);
-
- /// Enlists an endpoint \a addr (port with an optional IP address) to
- /// bind the \a grpc::Server object to be created to.
- ///
- /// It can be invoked multiple times.
- ///
- /// \param addr_uri The address to try to bind to the server in URI form. If
- /// the scheme name is omitted, "dns:///" is assumed. To bind to any address,
- /// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
- /// connections. Valid values include dns:///localhost:1234,
- /// 192.168.1.1:31416, dns:///[::1]:27182, etc.
- /// \param creds The credentials associated with the server.
- /// \param selected_port[out] If not `nullptr`, gets populated with the port
- /// number bound to the \a grpc::Server for the corresponding endpoint after
- /// it is successfully bound by BuildAndStart(), 0 otherwise. AddListeningPort
- /// does not modify this pointer.
- ServerBuilder& AddListeningPort(
- const TString& addr_uri,
- std::shared_ptr<grpc::ServerCredentials> creds,
- int* selected_port = nullptr);
-
- /// Add a completion queue for handling asynchronous services.
- ///
- /// Best performance is typically obtained by using one thread per polling
- /// completion queue.
- ///
- /// Caller is required to shutdown the server prior to shutting down the
- /// returned completion queue. Caller is also required to drain the
- /// completion queue after shutting it down. A typical usage scenario:
- ///
- /// // While building the server:
- /// ServerBuilder builder;
- /// ...
- /// cq_ = builder.AddCompletionQueue();
- /// server_ = builder.BuildAndStart();
- ///
- /// // While shutting down the server;
- /// server_->Shutdown();
- /// cq_->Shutdown(); // Always *after* the associated server's Shutdown()!
- /// // Drain the cq_ that was created
- /// void* ignored_tag;
- /// bool ignored_ok;
- /// while (cq_->Next(&ignored_tag, &ignored_ok)) { }
- ///
- /// \param is_frequently_polled This is an optional parameter to inform gRPC
- /// library about whether this completion queue would be frequently polled
- /// (i.e. by calling \a Next() or \a AsyncNext()). The default value is
- /// 'true' and is the recommended setting. Setting this to 'false' (i.e.
- /// not polling the completion queue frequently) will have a significantly
- /// negative performance impact and hence should not be used in production
- /// use cases.
- std::unique_ptr<grpc::ServerCompletionQueue> AddCompletionQueue(
- bool is_frequently_polled = true);
-
- //////////////////////////////////////////////////////////////////////////////
- // Less commonly used RegisterService variants
-
- /// Register a service. This call does not take ownership of the service.
- /// The service must exist for the lifetime of the \a Server instance
- /// returned by \a BuildAndStart(). Only matches requests with :authority \a
- /// host
- ServerBuilder& RegisterService(const TString& host,
- grpc::Service* service);
-
- /// Register a generic service.
- /// Matches requests with any :authority
- /// This is mostly useful for writing generic gRPC Proxies where the exact
- /// serialization format is unknown
- ServerBuilder& RegisterAsyncGenericService(
- grpc::AsyncGenericService* service);
-
- //////////////////////////////////////////////////////////////////////////////
- // Fine control knobs
-
- /// Set max receive message size in bytes.
- /// The default is GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH.
- ServerBuilder& SetMaxReceiveMessageSize(int max_receive_message_size) {
- max_receive_message_size_ = max_receive_message_size;
- return *this;
- }
-
- /// Set max send message size in bytes.
- /// The default is GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH.
- ServerBuilder& SetMaxSendMessageSize(int max_send_message_size) {
- max_send_message_size_ = max_send_message_size;
- return *this;
- }
-
- /// \deprecated For backward compatibility.
- ServerBuilder& SetMaxMessageSize(int max_message_size) {
- return SetMaxReceiveMessageSize(max_message_size);
- }
-
- /// Set the support status for compression algorithms. All algorithms are
- /// enabled by default.
- ///
- /// Incoming calls compressed with an unsupported algorithm will fail with
- /// \a GRPC_STATUS_UNIMPLEMENTED.
- ServerBuilder& SetCompressionAlgorithmSupportStatus(
- grpc_compression_algorithm algorithm, bool enabled);
-
- /// The default compression level to use for all channel calls in the
- /// absence of a call-specific level.
- ServerBuilder& SetDefaultCompressionLevel(grpc_compression_level level);
-
- /// The default compression algorithm to use for all channel calls in the
- /// absence of a call-specific level. Note that it overrides any compression
- /// level set by \a SetDefaultCompressionLevel.
- ServerBuilder& SetDefaultCompressionAlgorithm(
- grpc_compression_algorithm algorithm);
-
- /// Set the attached buffer pool for this server
- ServerBuilder& SetResourceQuota(const grpc::ResourceQuota& resource_quota);
-
- ServerBuilder& SetOption(std::unique_ptr<grpc::ServerBuilderOption> option);
-
- /// Options for synchronous servers.
- enum SyncServerOption {
- NUM_CQS, ///< Number of completion queues.
- MIN_POLLERS, ///< Minimum number of polling threads.
- MAX_POLLERS, ///< Maximum number of polling threads.
- CQ_TIMEOUT_MSEC ///< Completion queue timeout in milliseconds.
- };
-
- /// Only useful if this is a Synchronous server.
- ServerBuilder& SetSyncServerOption(SyncServerOption option, int value);
-
- /// Add a channel argument (an escape hatch to tuning core library parameters
- /// directly)
- template <class T>
- ServerBuilder& AddChannelArgument(const TString& arg, const T& value) {
- return SetOption(grpc::MakeChannelArgumentOption(arg, value));
- }
-
- /// For internal use only: Register a ServerBuilderPlugin factory function.
- static void InternalAddPluginFactory(
- std::unique_ptr<grpc::ServerBuilderPlugin> (*CreatePlugin)());
-
- /// Enable a server workaround. Do not use unless you know what the workaround
- /// does. For explanation and detailed descriptions of workarounds, see
- /// doc/workarounds.md.
- ServerBuilder& EnableWorkaround(grpc_workaround_list id);
-
- /// NOTE: class experimental_type is not part of the public API of this class.
- /// TODO(yashykt): Integrate into public API when this is no longer
- /// experimental.
- class experimental_type {
- public:
- explicit experimental_type(ServerBuilder* builder) : builder_(builder) {}
-
- void SetInterceptorCreators(
- std::vector<std::unique_ptr<
- grpc::experimental::ServerInterceptorFactoryInterface>>
- interceptor_creators) {
- builder_->interceptor_creators_ = std::move(interceptor_creators);
- }
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Register a generic service that uses the callback API.
- /// Matches requests with any :authority
- /// This is mostly useful for writing generic gRPC Proxies where the exact
- /// serialization format is unknown
- ServerBuilder& RegisterCallbackGenericService(
- grpc::experimental::CallbackGenericService* service);
-#endif
-
- enum class ExternalConnectionType {
- FROM_FD = 0 // in the form of a file descriptor
- };
-
- /// Register an acceptor to handle the externally accepted connection in
- /// grpc server. The returned acceptor can be used to pass the connection
- /// to grpc server, where a channel will be created with the provided
- /// server credentials.
- std::unique_ptr<grpc::experimental::ExternalConnectionAcceptor>
- AddExternalConnectionAcceptor(ExternalConnectionType type,
- std::shared_ptr<ServerCredentials> creds);
-
- private:
- ServerBuilder* builder_;
- };
-
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Register a generic service that uses the callback API.
- /// Matches requests with any :authority
- /// This is mostly useful for writing generic gRPC Proxies where the exact
- /// serialization format is unknown
- ServerBuilder& RegisterCallbackGenericService(
- grpc::CallbackGenericService* service);
-#endif
-
- /// NOTE: The function experimental() is not stable public API. It is a view
- /// to the experimental components of this class. It may be changed or removed
- /// at any time.
- experimental_type experimental() { return experimental_type(this); }
-
- protected:
- /// Experimental, to be deprecated
- struct Port {
- TString addr;
- std::shared_ptr<ServerCredentials> creds;
- int* selected_port;
- };
-
- /// Experimental, to be deprecated
- typedef std::unique_ptr<TString> HostString;
- struct NamedService {
- explicit NamedService(grpc::Service* s) : service(s) {}
- NamedService(const TString& h, grpc::Service* s)
- : host(new TString(h)), service(s) {}
- HostString host;
- grpc::Service* service;
- };
-
- /// Experimental, to be deprecated
- std::vector<Port> ports() { return ports_; }
-
- /// Experimental, to be deprecated
- std::vector<NamedService*> services() {
- std::vector<NamedService*> service_refs;
- for (auto& ptr : services_) {
- service_refs.push_back(ptr.get());
- }
- return service_refs;
- }
-
- /// Experimental, to be deprecated
- std::vector<grpc::ServerBuilderOption*> options() {
- std::vector<grpc::ServerBuilderOption*> option_refs;
- for (auto& ptr : options_) {
- option_refs.push_back(ptr.get());
- }
- return option_refs;
- }
-
- private:
- friend class ::grpc::testing::ServerBuilderPluginTest;
-
- struct SyncServerSettings {
- SyncServerSettings()
- : num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
-
- /// Number of server completion queues to create to listen to incoming RPCs.
- int num_cqs;
-
- /// Minimum number of threads per completion queue that should be listening
- /// to incoming RPCs.
- int min_pollers;
-
- /// Maximum number of threads per completion queue that can be listening to
- /// incoming RPCs.
- int max_pollers;
-
- /// The timeout for server completion queue's AsyncNext call.
- int cq_timeout_msec;
- };
-
- int max_receive_message_size_;
- int max_send_message_size_;
- std::vector<std::unique_ptr<grpc::ServerBuilderOption>> options_;
- std::vector<std::unique_ptr<NamedService>> services_;
- std::vector<Port> ports_;
-
- SyncServerSettings sync_server_settings_;
-
- /// List of completion queues added via \a AddCompletionQueue method.
- std::vector<grpc::ServerCompletionQueue*> cqs_;
-
- std::shared_ptr<grpc::ServerCredentials> creds_;
- std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>> plugins_;
- grpc_resource_quota* resource_quota_;
- grpc::AsyncGenericService* generic_service_{nullptr};
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- grpc::CallbackGenericService* callback_generic_service_{nullptr};
-#else
- grpc::experimental::CallbackGenericService* callback_generic_service_{
- nullptr};
-#endif
-
- struct {
- bool is_set;
- grpc_compression_level level;
- } maybe_default_compression_level_;
- struct {
- bool is_set;
- grpc_compression_algorithm algorithm;
- } maybe_default_compression_algorithm_;
- uint32_t enabled_compression_algorithms_bitset_;
- std::vector<
- std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
- interceptor_creators_;
- std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
- acceptors_;
-};
-
-} // namespace grpc
-
#endif // GRPCPP_SERVER_BUILDER_H
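For reference, a minimal sketch of how the ServerBuilder surface shown above (sync-server options, raw channel arguments, listening port) is typically driven. The address, option values, and the commented-out service registration are illustrative placeholders, not part of the header.

#include <memory>

#include <grpcpp/grpcpp.h>

int main() {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",  // illustrative address
                           grpc::InsecureServerCredentials());
  // builder.RegisterService(&my_service);   // a generated service would be registered here

  // Tune the synchronous server thread pool via SyncServerOption.
  builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::NUM_CQS, 2);
  builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::MAX_POLLERS, 8);

  // Escape hatch: pass a raw core channel argument straight through.
  builder.AddChannelArgument(GRPC_ARG_MAX_CONCURRENT_STREAMS, 100);

  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}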
diff --git a/contrib/libs/grpc/include/grpcpp/server_posix.h b/contrib/libs/grpc/include/grpcpp/server_posix.h
index 425f6ad039..ef3ee01a5c 100644
--- a/contrib/libs/grpc/include/grpcpp/server_posix.h
+++ b/contrib/libs/grpc/include/grpcpp/server_posix.h
@@ -19,21 +19,21 @@
#ifndef GRPCPP_SERVER_POSIX_H
#define GRPCPP_SERVER_POSIX_H
-#include <memory>
+#include <memory>
+
+#include <grpc/support/port_platform.h>
+#include <grpcpp/server.h>
-#include <grpc/support/port_platform.h>
-#include <grpcpp/server.h>
-
namespace grpc {
#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
-/// Add a new client to a \a Server communicating over the given
-/// file descriptor.
-///
-/// \param server The server to add the client to.
-/// \param fd The file descriptor representing a socket.
-void AddInsecureChannelFromFd(Server* server, int fd);
+/// Add a new client to a \a Server communicating over the given
+/// file descriptor.
+///
+/// \param server The server to add the client to.
+/// \param fd The file descriptor representing a socket.
+void AddInsecureChannelFromFd(Server* server, int fd);
#endif // GPR_SUPPORT_CHANNELS_FROM_FD
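A hedged sketch of feeding already-accepted POSIX sockets into a running server with AddInsecureChannelFromFd; the listening socket and its setup are assumed to exist elsewhere, and the helper is only available when GPR_SUPPORT_CHANNELS_FROM_FD is defined.

#include <sys/socket.h>

#include <grpcpp/server.h>
#include <grpcpp/server_posix.h>

#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
// Hand every connection accepted on listen_fd to the gRPC server.
void ServeAcceptedSockets(grpc::Server* server, int listen_fd) {
  while (true) {
    int client_fd = accept(listen_fd, /*addr=*/nullptr, /*addrlen=*/nullptr);
    if (client_fd < 0) break;  // listener closed or accept failed
    // The server takes over the connected socket from here on.
    grpc::AddInsecureChannelFromFd(server, client_fd);
  }
}
#endif  // GPR_SUPPORT_CHANNELS_FROM_FD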
diff --git a/contrib/libs/grpc/include/grpcpp/support/channel_arguments.h b/contrib/libs/grpc/include/grpcpp/support/channel_arguments.h
index 8165da2009..85b6ee7dcc 100644
--- a/contrib/libs/grpc/include/grpcpp/support/channel_arguments.h
+++ b/contrib/libs/grpc/include/grpcpp/support/channel_arguments.h
@@ -23,130 +23,130 @@
#pragma GCC system_header
#endif
-#include <list>
-#include <vector>
+#include <list>
+#include <vector>
-#include <grpc/compression.h>
-#include <grpc/grpc.h>
-#include <grpcpp/resource_quota.h>
-#include <grpcpp/support/config.h>
+#include <grpc/compression.h>
+#include <grpc/grpc.h>
+#include <grpcpp/resource_quota.h>
+#include <grpcpp/support/config.h>
-namespace grpc {
+namespace grpc {
class SecureChannelCredentials;
-namespace testing {
-class ChannelArgumentsTest;
-} // namespace testing
-
-/// Options for channel creation. The user can use generic setters to pass
-/// key value pairs down to C channel creation code. For gRPC related options,
-/// concrete setters are provided.
-class ChannelArguments {
- public:
- ChannelArguments();
- ~ChannelArguments();
-
- ChannelArguments(const ChannelArguments& other);
- ChannelArguments& operator=(ChannelArguments other) {
- Swap(other);
- return *this;
- }
-
- void Swap(ChannelArguments& other);
-
- /// Dump arguments in this instance to \a channel_args. Does not take
- /// ownership of \a channel_args.
- ///
- /// Note that the underlying arguments are shared. Changes made to either \a
- /// channel_args or this instance would be reflected on both.
- void SetChannelArgs(grpc_channel_args* channel_args) const;
-
- // gRPC specific channel argument setters
- /// Set target name override for SSL host name checking. This option should
- /// be used with caution in production.
- void SetSslTargetNameOverride(const TString& name);
- // TODO(yangg) add flow control options
- /// Set the compression algorithm for the channel.
- void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
-
- /// Set the grpclb fallback timeout (in ms) for the channel. If this amount
- /// of time has passed but we have not gotten any non-empty \a serverlist from
- /// the balancer, we will fall back to use the backend address(es) returned by
- /// the resolver.
- void SetGrpclbFallbackTimeout(int fallback_timeout);
-
- /// For client channels, the socket mutator operates on
- /// "channel" sockets. For servers, the socket mutator operates
- /// only on "listen" sockets.
- /// TODO(apolcyn): allow socket mutators to also operate
- /// on server "channel" sockets, and adjust the socket mutator
- /// object to be more specific about which type of socket
- /// it should operate on.
- void SetSocketMutator(grpc_socket_mutator* mutator);
-
- /// Set the string to prepend to the user agent.
- void SetUserAgentPrefix(const TString& user_agent_prefix);
-
- /// Set the buffer pool to be attached to the constructed channel.
- void SetResourceQuota(const grpc::ResourceQuota& resource_quota);
-
- /// Set the max receive and send message sizes.
- void SetMaxReceiveMessageSize(int size);
- void SetMaxSendMessageSize(int size);
-
- /// Set LB policy name.
- /// Note that if the name resolver returns only balancer addresses, the
- /// grpclb LB policy will be used, regardless of what is specified here.
- void SetLoadBalancingPolicyName(const TString& lb_policy_name);
-
- /// Set service config in JSON form.
- /// Primarily meant for use in unit tests.
- void SetServiceConfigJSON(const TString& service_config_json);
-
- // Generic channel argument setters. Only for advanced use cases.
- /// Set an integer argument \a value under \a key.
- void SetInt(const TString& key, int value);
-
- // Generic channel argument setter. Only for advanced use cases.
- /// Set a pointer argument \a value under \a key. Ownership is not transferred.
- void SetPointer(const TString& key, void* value);
-
- void SetPointerWithVtable(const TString& key, void* value,
- const grpc_arg_pointer_vtable* vtable);
-
- /// Set a textual argument \a value under \a key.
- void SetString(const TString& key, const TString& value);
-
- /// Return (by value) a C \a grpc_channel_args structure which points to
- /// arguments owned by this \a ChannelArguments instance
- grpc_channel_args c_channel_args() const {
- grpc_channel_args out;
- out.num_args = args_.size();
- out.args = args_.empty() ? NULL : const_cast<grpc_arg*>(&args_[0]);
- return out;
- }
-
- private:
- friend class grpc::SecureChannelCredentials;
- friend class grpc::testing::ChannelArgumentsTest;
-
- /// Default pointer argument operations.
- struct PointerVtableMembers {
- static void* Copy(void* in) { return in; }
- static void Destroy(void* /*in*/) {}
- static int Compare(void* a, void* b) {
- if (a < b) return -1;
- if (a > b) return 1;
- return 0;
- }
- };
-
- // Returns empty string when it is not set.
- TString GetSslTargetNameOverride() const;
-
- std::vector<grpc_arg> args_;
- std::list<TString> strings_;
-};
-
+namespace testing {
+class ChannelArgumentsTest;
+} // namespace testing
+
+/// Options for channel creation. The user can use generic setters to pass
+/// key value pairs down to C channel creation code. For gRPC related options,
+/// concrete setters are provided.
+class ChannelArguments {
+ public:
+ ChannelArguments();
+ ~ChannelArguments();
+
+ ChannelArguments(const ChannelArguments& other);
+ ChannelArguments& operator=(ChannelArguments other) {
+ Swap(other);
+ return *this;
+ }
+
+ void Swap(ChannelArguments& other);
+
+ /// Dump arguments in this instance to \a channel_args. Does not take
+ /// ownership of \a channel_args.
+ ///
+ /// Note that the underlying arguments are shared. Changes made to either \a
+ /// channel_args or this instance would be reflected on both.
+ void SetChannelArgs(grpc_channel_args* channel_args) const;
+
+ // gRPC specific channel argument setters
+ /// Set target name override for SSL host name checking. This option should
+ /// be used with caution in production.
+ void SetSslTargetNameOverride(const TString& name);
+ // TODO(yangg) add flow control options
+ /// Set the compression algorithm for the channel.
+ void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
+
+ /// Set the grpclb fallback timeout (in ms) for the channel. If this amount
+ /// of time has passed but we have not gotten any non-empty \a serverlist from
+ /// the balancer, we will fall back to use the backend address(es) returned by
+ /// the resolver.
+ void SetGrpclbFallbackTimeout(int fallback_timeout);
+
+ /// For client channels, the socket mutator operates on
+ /// "channel" sockets. For servers, the socket mutator operates
+ /// only on "listen" sockets.
+ /// TODO(apolcyn): allow socket mutators to also operate
+ /// on server "channel" sockets, and adjust the socket mutator
+ /// object to be more specific about which type of socket
+ /// it should operate on.
+ void SetSocketMutator(grpc_socket_mutator* mutator);
+
+ /// Set the string to prepend to the user agent.
+ void SetUserAgentPrefix(const TString& user_agent_prefix);
+
+ /// Set the buffer pool to be attached to the constructed channel.
+ void SetResourceQuota(const grpc::ResourceQuota& resource_quota);
+
+ /// Set the max receive and send message sizes.
+ void SetMaxReceiveMessageSize(int size);
+ void SetMaxSendMessageSize(int size);
+
+ /// Set LB policy name.
+ /// Note that if the name resolver returns only balancer addresses, the
+ /// grpclb LB policy will be used, regardless of what is specified here.
+ void SetLoadBalancingPolicyName(const TString& lb_policy_name);
+
+ /// Set service config in JSON form.
+ /// Primarily meant for use in unit tests.
+ void SetServiceConfigJSON(const TString& service_config_json);
+
+ // Generic channel argument setters. Only for advanced use cases.
+ /// Set an integer argument \a value under \a key.
+ void SetInt(const TString& key, int value);
+
+ // Generic channel argument setter. Only for advanced use cases.
+ /// Set a pointer argument \a value under \a key. Ownership is not transferred.
+ void SetPointer(const TString& key, void* value);
+
+ void SetPointerWithVtable(const TString& key, void* value,
+ const grpc_arg_pointer_vtable* vtable);
+
+ /// Set a textual argument \a value under \a key.
+ void SetString(const TString& key, const TString& value);
+
+ /// Return (by value) a C \a grpc_channel_args structure which points to
+ /// arguments owned by this \a ChannelArguments instance
+ grpc_channel_args c_channel_args() const {
+ grpc_channel_args out;
+ out.num_args = args_.size();
+ out.args = args_.empty() ? NULL : const_cast<grpc_arg*>(&args_[0]);
+ return out;
+ }
+
+ private:
+ friend class grpc::SecureChannelCredentials;
+ friend class grpc::testing::ChannelArgumentsTest;
+
+ /// Default pointer argument operations.
+ struct PointerVtableMembers {
+ static void* Copy(void* in) { return in; }
+ static void Destroy(void* /*in*/) {}
+ static int Compare(void* a, void* b) {
+ if (a < b) return -1;
+ if (a > b) return 1;
+ return 0;
+ }
+ };
+
+ // Returns empty string when it is not set.
+ TString GetSslTargetNameOverride() const;
+
+ std::vector<grpc_arg> args_;
+ std::list<TString> strings_;
+};
+
} // namespace grpc
#endif // GRPCPP_SUPPORT_CHANNEL_ARGUMENTS_H
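A minimal sketch of the ChannelArguments setters above feeding into channel creation; the target, user-agent prefix, and keepalive value are placeholders.

#include <memory>

#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeTunedChannel() {
  grpc::ChannelArguments args;
  args.SetMaxReceiveMessageSize(16 * 1024 * 1024);    // 16 MiB
  args.SetUserAgentPrefix("example-client/1.0");      // placeholder prefix
  args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
  args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 30000);     // generic escape hatch
  return grpc::CreateCustomChannel("localhost:50051", // placeholder target
                                   grpc::InsecureChannelCredentials(), args);
}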
diff --git a/contrib/libs/grpc/include/grpcpp/support/error_details.h b/contrib/libs/grpc/include/grpcpp/support/error_details.h
index d76b627a29..15b917f6c5 100644
--- a/contrib/libs/grpc/include/grpcpp/support/error_details.h
+++ b/contrib/libs/grpc/include/grpcpp/support/error_details.h
@@ -19,7 +19,7 @@
#ifndef GRPCPP_SUPPORT_ERROR_DETAILS_H
#define GRPCPP_SUPPORT_ERROR_DETAILS_H
-#include <grpcpp/support/status.h>
+#include <grpcpp/support/status.h>
namespace google {
namespace rpc {
@@ -29,19 +29,19 @@ class Status;
namespace grpc {
-/// Map a \a grpc::Status to a \a google::rpc::Status.
-/// The given \a to object will be cleared.
-/// On success, returns status with OK.
-/// Returns status with \a INVALID_ARGUMENT if deserialization fails.
-/// Returns status with \a FAILED_PRECONDITION if \a to is nullptr.
-grpc::Status ExtractErrorDetails(const grpc::Status& from,
- ::google::rpc::Status* to);
-
-/// Map \a google::rpc::Status to a \a grpc::Status.
-/// Returns OK on success.
-/// Returns status with \a FAILED_PRECONDITION if \a to is nullptr.
-grpc::Status SetErrorDetails(const ::google::rpc::Status& from,
- grpc::Status* to);
+/// Map a \a grpc::Status to a \a google::rpc::Status.
+/// The given \a to object will be cleared.
+/// On success, returns status with OK.
+/// Returns status with \a INVALID_ARGUMENT if deserialization fails.
+/// Returns status with \a FAILED_PRECONDITION if \a to is nullptr.
+grpc::Status ExtractErrorDetails(const grpc::Status& from,
+ ::google::rpc::Status* to);
+
+/// Map \a google::rpc::Status to a \a grpc::Status.
+/// Returns OK on success.
+/// Returns status with \a FAILED_PRECONDITION if \a to is nullptr.
+grpc::Status SetErrorDetails(const ::google::rpc::Status& from,
+ grpc::Status* to);
} // namespace grpc
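A hedged sketch of ExtractErrorDetails in use; it assumes the google/rpc/status.proto message has been generated and is available as google/rpc/status.pb.h in the build.

#include <grpcpp/support/error_details.h>
#include <grpcpp/support/status.h>

#include "google/rpc/status.pb.h"  // assumed to be generated elsewhere

void InspectRichError(const grpc::Status& rpc_status) {
  google::rpc::Status rich;
  // Deserializes the payload carried in rpc_status's error details.
  grpc::Status conv = grpc::ExtractErrorDetails(rpc_status, &rich);
  if (conv.ok()) {
    // rich.code(), rich.message() and rich.details() now hold the payload.
  }
}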
diff --git a/contrib/libs/grpc/include/grpcpp/support/validate_service_config.h b/contrib/libs/grpc/include/grpcpp/support/validate_service_config.h
index cb725def69..f1368623b5 100644
--- a/contrib/libs/grpc/include/grpcpp/support/validate_service_config.h
+++ b/contrib/libs/grpc/include/grpcpp/support/validate_service_config.h
@@ -28,7 +28,7 @@ namespace experimental {
/// Otherwise, returns the validation error.
/// TODO(yashykt): Promote it to out of experimental once it is proved useful
/// and gRFC is accepted.
-TString ValidateServiceConfigJSON(const TString& service_config_json);
+TString ValidateServiceConfigJSON(const TString& service_config_json);
} // namespace experimental
} // namespace grpc
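A small sketch of running the validator before handing a config to ChannelArguments::SetServiceConfigJSON; the helper name is illustrative.

#include <grpcpp/support/validate_service_config.h>

bool ServiceConfigIsValid(const TString& service_config_json) {
  // An empty result means the config parsed cleanly; otherwise it is the error text.
  return grpc::experimental::ValidateServiceConfigJSON(service_config_json)
      .empty();
}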
diff --git a/contrib/libs/grpc/include/grpcpp/test/channel_test_peer.h b/contrib/libs/grpc/include/grpcpp/test/channel_test_peer.h
index 9ef9d39b14..e41bbfa460 100644
--- a/contrib/libs/grpc/include/grpcpp/test/channel_test_peer.h
+++ b/contrib/libs/grpc/include/grpcpp/test/channel_test_peer.h
@@ -1,44 +1,44 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPCPP_TEST_CHANNEL_TEST_PEER_H
-#define GRPCPP_TEST_CHANNEL_TEST_PEER_H
-
-#include <grpcpp/channel.h>
-
-namespace grpc {
-namespace testing {
-
-/// A test-only class to access private members of Channel.
-class ChannelTestPeer {
- public:
- explicit ChannelTestPeer(Channel* channel) : channel_(channel) {}
-
- /// Provide the gRPC Core channel
- grpc_channel* channel() const { return channel_->c_channel_; }
- int registered_calls() const;
- int registration_attempts() const;
-
- private:
- Channel* channel_; // not owned
-};
-
-} // namespace testing
-} // namespace grpc
-
-#endif // GRPCPP_TEST_CHANNEL_TEST_PEER_H
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCPP_TEST_CHANNEL_TEST_PEER_H
+#define GRPCPP_TEST_CHANNEL_TEST_PEER_H
+
+#include <grpcpp/channel.h>
+
+namespace grpc {
+namespace testing {
+
+/// A test-only class to access private members of Channel.
+class ChannelTestPeer {
+ public:
+ explicit ChannelTestPeer(Channel* channel) : channel_(channel) {}
+
+ /// Provide the gRPC Core channel
+ grpc_channel* channel() const { return channel_->c_channel_; }
+ int registered_calls() const;
+ int registration_attempts() const;
+
+ private:
+ Channel* channel_; // not owned
+};
+
+} // namespace testing
+} // namespace grpc
+
+#endif // GRPCPP_TEST_CHANNEL_TEST_PEER_H
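ChannelTestPeer is typically used as in the following test-only sketch; the target and credentials are placeholders.

#include <memory>

#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/test/channel_test_peer.h>

void InspectChannelInternals() {
  std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
      "localhost:50051", grpc::InsecureChannelCredentials());  // placeholder
  grpc::testing::ChannelTestPeer peer(channel.get());
  // Both counters stay at zero until a stub registers and starts calls.
  int calls = peer.registered_calls();
  int attempts = peer.registration_attempts();
  (void)calls;
  (void)attempts;
}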
diff --git a/contrib/libs/grpc/include/grpcpp/test/default_reactor_test_peer.h b/contrib/libs/grpc/include/grpcpp/test/default_reactor_test_peer.h
index 93de30da5a..a792e6f94a 100644
--- a/contrib/libs/grpc/include/grpcpp/test/default_reactor_test_peer.h
+++ b/contrib/libs/grpc/include/grpcpp/test/default_reactor_test_peer.h
@@ -29,9 +29,9 @@ namespace testing {
/// DefaultReactor. It is intended to allow unit-testing of a callback API
/// service via direct invocation of the service methods rather than through
/// RPCs. It is only applicable for unary RPC methods that use the
-/// DefaultReactor rather than any user-defined reactor. If it is used, it must
-/// be created before the RPC is invoked so that it can bind the reactor into a
-/// test mode rather than letting it follow the normal paths.
+/// DefaultReactor rather than any user-defined reactor. If it is used, it must
+/// be created before the RPC is invoked so that it can bind the reactor into a
+/// test mode rather than letting it follow the normal paths.
class DefaultReactorTestPeer {
public:
explicit DefaultReactorTestPeer(experimental::CallbackServerContext* ctx)
@@ -42,8 +42,8 @@ class DefaultReactorTestPeer {
ctx->SetupTestDefaultReactor(std::move(finish_func));
}
::grpc::experimental::ServerUnaryReactor* reactor() const {
- return reinterpret_cast<experimental::ServerUnaryReactor*>(
- &ctx_->default_reactor_);
+ return reinterpret_cast<experimental::ServerUnaryReactor*>(
+ &ctx_->default_reactor_);
}
bool test_status_set() const { return ctx_->test_status_set(); }
Status test_status() const { return ctx_->test_status(); }
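A hedged sketch of the direct-invocation style described above, assuming the context may be constructed directly for testing; the actual service call is left as a commented placeholder since it depends on the generated callback-API service.

#include <grpcpp/server_context.h>
#include <grpcpp/test/default_reactor_test_peer.h>

void UnaryDirectInvocation() {
  grpc::experimental::CallbackServerContext ctx;
  grpc::testing::DefaultReactorTestPeer peer(&ctx);  // must exist before the call
  // Invoke the callback-API handler directly here, e.g. (hypothetical):
  //   service.Echo(&ctx, &request, &response);
  // Afterwards the peer reports what the default reactor recorded.
  bool finished = peer.test_status_set();
  grpc::Status status = peer.test_status();
  (void)finished;
  (void)status;
}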
diff --git a/contrib/libs/grpc/include/grpcpp/test/mock_stream.h b/contrib/libs/grpc/include/grpcpp/test/mock_stream.h
index 056d3b2c35..e33595d709 100644
--- a/contrib/libs/grpc/include/grpcpp/test/mock_stream.h
+++ b/contrib/libs/grpc/include/grpcpp/test/mock_stream.h
@@ -31,7 +31,7 @@ namespace grpc {
namespace testing {
template <class R>
-class MockClientReader : public ::grpc::ClientReaderInterface<R> {
+class MockClientReader : public ::grpc::ClientReaderInterface<R> {
public:
MockClientReader() = default;
@@ -47,7 +47,7 @@ class MockClientReader : public ::grpc::ClientReaderInterface<R> {
};
template <class W>
-class MockClientWriter : public ::grpc::ClientWriterInterface<W> {
+class MockClientWriter : public ::grpc::ClientWriterInterface<W> {
public:
MockClientWriter() = default;
@@ -63,7 +63,7 @@ class MockClientWriter : public ::grpc::ClientWriterInterface<W> {
template <class W, class R>
class MockClientReaderWriter
- : public ::grpc::ClientReaderWriterInterface<W, R> {
+ : public ::grpc::ClientReaderWriterInterface<W, R> {
public:
MockClientReaderWriter() = default;
@@ -86,7 +86,7 @@ class MockClientReaderWriter
template <class R>
class MockClientAsyncResponseReader
- : public ::grpc::ClientAsyncResponseReaderInterface<R> {
+ : public ::grpc::ClientAsyncResponseReaderInterface<R> {
public:
MockClientAsyncResponseReader() = default;
@@ -108,7 +108,7 @@ class MockClientAsyncReader : public ClientAsyncReaderInterface<R> {
};
template <class W>
-class MockClientAsyncWriter : public ::grpc::ClientAsyncWriterInterface<W> {
+class MockClientAsyncWriter : public ::grpc::ClientAsyncWriterInterface<W> {
public:
MockClientAsyncWriter() = default;
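A minimal gMock sketch over the mocks above; FakeResponse stands in for a real generated response message, and the mocked stub that would hand out such a reader is assumed.

#include <gmock/gmock.h>
#include <grpcpp/test/mock_stream.h>

using ::testing::_;
using ::testing::Return;

struct FakeResponse {};  // stand-in for a generated protobuf message

// Expect one message, then end-of-stream, then a successful Finish.
void ExpectOneMessageThenEof(
    grpc::testing::MockClientReader<FakeResponse>* reader) {
  EXPECT_CALL(*reader, Read(_))
      .WillOnce(Return(true))    // one message delivered
      .WillOnce(Return(false));  // then end of stream
  EXPECT_CALL(*reader, Finish()).WillOnce(Return(grpc::Status::OK));
}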
diff --git a/contrib/libs/grpc/include/grpcpp/test/server_context_test_spouse.h b/contrib/libs/grpc/include/grpcpp/test/server_context_test_spouse.h
index 28b1816bee..00c19dc4b8 100644
--- a/contrib/libs/grpc/include/grpcpp/test/server_context_test_spouse.h
+++ b/contrib/libs/grpc/include/grpcpp/test/server_context_test_spouse.h
@@ -33,9 +33,9 @@ class ServerContextTestSpouse {
/// Inject client metadata to the ServerContext for the test. The test spouse
/// must be alive when \a ServerContext::client_metadata is called.
- void AddClientMetadata(const TString& key, const TString& value) {
+ void AddClientMetadata(const TString& key, const TString& value) {
client_metadata_storage_.insert(
- std::pair<TString, TString>(key, value));
+ std::pair<TString, TString>(key, value));
ctx_->client_metadata_.map()->clear();
for (const auto& item : client_metadata_storage_) {
ctx_->client_metadata_.map()->insert(
@@ -45,17 +45,17 @@ class ServerContextTestSpouse {
}
}
- std::multimap<TString, TString> GetInitialMetadata() const {
+ std::multimap<TString, TString> GetInitialMetadata() const {
return ctx_->initial_metadata_;
}
- std::multimap<TString, TString> GetTrailingMetadata() const {
+ std::multimap<TString, TString> GetTrailingMetadata() const {
return ctx_->trailing_metadata_;
}
private:
ServerContext* ctx_; // not owned
- std::multimap<TString, TString> client_metadata_storage_;
+ std::multimap<TString, TString> client_metadata_storage_;
};
} // namespace testing