path: root/contrib/libs/grpc/include/grpcpp/impl/codegen
author     heretic <heretic@yandex-team.ru>  2022-02-10 16:45:43 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:43 +0300
commit     397cbe258b9e064f49c4ca575279f02f39fef76e (patch)
tree       a0b0eb3cca6a14e4e8ea715393637672fa651284 /contrib/libs/grpc/include/grpcpp/impl/codegen
parent     43f5a35593ebc9f6bcea619bb170394ea7ae468e (diff)
download   ydb-397cbe258b9e064f49c4ca575279f02f39fef76e.tar.gz
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/grpc/include/grpcpp/impl/codegen')
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/README.md                       42
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h         70
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h                2178
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h             556
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h                   22
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/call.h                          12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h                  138
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h             44
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h             2360
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h              1000
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h            12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h              8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h             846
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/config.h                        18
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h             8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h           10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h                    8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h            32
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h             40
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h                  12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h               690
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h                    2
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h             8
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h          6
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h             1524
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h     404
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h              1182
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h            10
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h             114
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h                 128
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h                         12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/status.h                        14
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h                    12
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h                 1792
-rw-r--r--  contrib/libs/grpc/include/grpcpp/impl/codegen/time.h                          12
35 files changed, 6663 insertions, 6663 deletions
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
index ade9d05484..155146e99f 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md
@@ -1,21 +1,21 @@
-# Welcome to `include/grpcpp/impl/codegen`
-
-## Why is this directory here?
-
-This directory exists so that generated code can include selected files upon
-which it depends without having to depend on the entire gRPC C++ library. This
-is particularly relevant for users of bazel, particularly if they use the
-multi-lingual `proto_library` target type. Generated code that uses this target
-only depends on the gRPC C++ targets associated with these header files, not the
-entire gRPC C++ codebase since that would make the build time of these types of
-targets excessively large (particularly when they are not even C++ specific).
-
-## What should user code do?
-
-User code should *not* include anything from this directory. Only generated code
-and gRPC library code should include contents from this directory. User code
-should instead include contents from the main `grpcpp` directory or its
-accessible subcomponents like `grpcpp/support`. It is possible that we may
-remove this directory altogether if the motivations for its existence are no
-longer strong enough (e.g., if most users migrate away from the `proto_library`
-target type or if the additional overhead of depending on gRPC C++ is not high).
+# Welcome to `include/grpcpp/impl/codegen`
+
+## Why is this directory here?
+
+This directory exists so that generated code can include selected files upon
+which it depends without having to depend on the entire gRPC C++ library. This
+is particularly relevant for users of bazel, particularly if they use the
+multi-lingual `proto_library` target type. Generated code that uses this target
+only depends on the gRPC C++ targets associated with these header files, not the
+entire gRPC C++ codebase since that would make the build time of these types of
+targets excessively large (particularly when they are not even C++ specific).
+
+## What should user code do?
+
+User code should *not* include anything from this directory. Only generated code
+and gRPC library code should include contents from this directory. User code
+should instead include contents from the main `grpcpp` directory or its
+accessible subcomponents like `grpcpp/support`. It is possible that we may
+remove this directory altogether if the motivations for its existence are no
+longer strong enough (e.g., if most users migrate away from the `proto_library`
+target type or if the additional overhead of depending on gRPC C++ is not high).
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
index a812b086a2..3f2b8fc20a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h
@@ -19,33 +19,33 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpcpp/impl/codegen/async_stream.h>
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpcpp/impl/codegen/async_stream.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
-#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_callback.h>
#include <grpcpp/impl/codegen/server_callback_handlers.h>
struct grpc_server;
namespace grpc {
-typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
+typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericServerAsyncReaderWriter;
-typedef ServerAsyncResponseWriter<ByteBuffer> GenericServerAsyncResponseWriter;
-typedef ServerAsyncReader<ByteBuffer, ByteBuffer> GenericServerAsyncReader;
-typedef ServerAsyncWriter<ByteBuffer> GenericServerAsyncWriter;
+typedef ServerAsyncResponseWriter<ByteBuffer> GenericServerAsyncResponseWriter;
+typedef ServerAsyncReader<ByteBuffer, ByteBuffer> GenericServerAsyncReader;
+typedef ServerAsyncWriter<ByteBuffer> GenericServerAsyncWriter;
-class GenericServerContext final : public ServerContext {
+class GenericServerContext final : public ServerContext {
public:
- const TString& method() const { return method_; }
- const TString& host() const { return host_; }
+ const TString& method() const { return method_; }
+ const TString& host() const { return host_; }
private:
- friend class ServerInterface;
+ friend class ServerInterface;
- TString method_;
- TString host_;
+ TString method_;
+ TString host_;
};
// A generic service at the server side accepts all RPC methods and hosts. It is
@@ -71,33 +71,33 @@ class AsyncGenericService final {
void RequestCall(GenericServerContext* ctx,
GenericServerAsyncReaderWriter* reader_writer,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag);
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag);
private:
- friend class grpc::Server;
- grpc::Server* server_;
+ friend class grpc::Server;
+ grpc::Server* server_;
};
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
/// \a ServerGenericBidiReactor is the reactor class for bidi streaming RPCs
/// invoked on a CallbackGenericService. It is just a ServerBidi reactor with
/// ByteBuffer arguments.
-using ServerGenericBidiReactor = ServerBidiReactor<ByteBuffer, ByteBuffer>;
+using ServerGenericBidiReactor = ServerBidiReactor<ByteBuffer, ByteBuffer>;
-class GenericCallbackServerContext final : public grpc::CallbackServerContext {
+class GenericCallbackServerContext final : public grpc::CallbackServerContext {
public:
- const TString& method() const { return method_; }
- const TString& host() const { return host_; }
+ const TString& method() const { return method_; }
+ const TString& host() const { return host_; }
private:
- friend class ::grpc::Server;
+ friend class ::grpc::Server;
- TString method_;
- TString host_;
+ TString method_;
+ TString host_;
};
/// \a CallbackGenericService is the base class for generic services implemented
@@ -122,21 +122,21 @@ class CallbackGenericService {
}
private:
- friend class grpc::Server;
+ friend class grpc::Server;
- internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() {
- return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>(
- [this](::grpc::CallbackServerContext* ctx) {
+ internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() {
+ return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>(
+ [this](::grpc::CallbackServerContext* ctx) {
return CreateReactor(static_cast<GenericCallbackServerContext*>(ctx));
});
}
- grpc::Server* server_{nullptr};
+ grpc::Server* server_{nullptr};
};
-
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-#endif
+#endif
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H
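
(Illustrative sketch, not part of the diff above: the generic callback API declared in this header can be used roughly as follows. The EchoGenericService/EchoReactor names are invented for the example, and on builds where GRPC_CALLBACK_API_NONEXPERIMENTAL is not defined the same classes live under grpc::experimental instead of grpc.)

#include <grpcpp/generic/async_generic_service.h>
#include <grpcpp/grpcpp.h>

// Hypothetical generic service: echo every ByteBuffer of every RPC back to
// the caller, regardless of the method name.
class EchoGenericService final : public grpc::CallbackGenericService {
  grpc::ServerGenericBidiReactor* CreateReactor(
      grpc::GenericCallbackServerContext* /*ctx*/) override {
    class EchoReactor : public grpc::ServerGenericBidiReactor {
     public:
      EchoReactor() { StartRead(&buf_); }
      void OnReadDone(bool ok) override {
        if (!ok) { Finish(grpc::Status::OK); return; }  // client half-closed
        StartWrite(&buf_);                              // echo the raw frame
      }
      void OnWriteDone(bool ok) override {
        if (!ok) { Finish(grpc::Status::CANCELLED); return; }
        StartRead(&buf_);                               // wait for the next message
      }
      void OnDone() override { delete this; }
     private:
      grpc::ByteBuffer buf_;
    };
    return new EchoReactor();
  }
};

Such a service would be registered on a ServerBuilder through its generic-service registration hook (RegisterCallbackGenericService, behind builder.experimental() on releases of this vintage) rather than through generated code.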
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
index aaee93df93..3a848861ca 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,1114 +18,1114 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
namespace internal {
-/// Common interface for all client side asynchronous streaming.
-class ClientAsyncStreamingInterface {
- public:
- virtual ~ClientAsyncStreamingInterface() {}
-
- /// Start the call that was set up by the constructor, but only if the
- /// constructor was invoked through the "Prepare" API which doesn't actually
- /// start the call
- virtual void StartCall(void* tag) = 0;
-
- /// Request notification of the reading of the initial metadata. Completion
- /// will be notified by \a tag on the associated completion queue.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a AsyncReaderInterface::Read method.
- ///
- /// \param[in] tag Tag identifying this request.
- virtual void ReadInitialMetadata(void* tag) = 0;
-
- /// Indicate that the stream is to be finished and request notification for
- /// when the call has been ended.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method exactly once when both:
- /// * the client side has no more message to send
- /// (this can be declared implicitly by calling this method, or
- /// explicitly through an earlier call to the <i>WritesDone</i> method
- /// of the class in use, e.g. \a ClientAsyncWriterInterface::WritesDone or
- /// \a ClientAsyncReaderWriterInterface::WritesDone).
- /// * there are no more messages to be received from the server (this can
- /// be known implicitly by the calling code, or explicitly from an
- /// earlier call to \a AsyncReaderInterface::Read that yielded a failed
- /// result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- ///
- /// The tag will be returned when either:
- /// - all incoming messages have been read and the server has returned
- /// a status.
- /// - the server has returned a non-OK status.
- /// - the call failed for some reason and the library generated a
- /// status.
- ///
- /// Note that implementations of this method attempt to receive initial
- /// metadata from the server if initial metadata hasn't yet been received.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[out] status To be updated with the operation status.
- virtual void Finish(::grpc::Status* status, void* tag) = 0;
-};
-
-/// An interface that yields a sequence of messages of type \a R.
+/// Common interface for all client side asynchronous streaming.
+class ClientAsyncStreamingInterface {
+ public:
+ virtual ~ClientAsyncStreamingInterface() {}
+
+ /// Start the call that was set up by the constructor, but only if the
+ /// constructor was invoked through the "Prepare" API which doesn't actually
+ /// start the call
+ virtual void StartCall(void* tag) = 0;
+
+ /// Request notification of the reading of the initial metadata. Completion
+ /// will be notified by \a tag on the associated completion queue.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a AsyncReaderInterface::Read method.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ virtual void ReadInitialMetadata(void* tag) = 0;
+
+ /// Indicate that the stream is to be finished and request notification for
+ /// when the call has been ended.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method exactly once when both:
+ /// * the client side has no more message to send
+ /// (this can be declared implicitly by calling this method, or
+ /// explicitly through an earlier call to the <i>WritesDone</i> method
+ /// of the class in use, e.g. \a ClientAsyncWriterInterface::WritesDone or
+ /// \a ClientAsyncReaderWriterInterface::WritesDone).
+ /// * there are no more messages to be received from the server (this can
+ /// be known implicitly by the calling code, or explicitly from an
+ /// earlier call to \a AsyncReaderInterface::Read that yielded a failed
+ /// result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ ///
+ /// The tag will be returned when either:
+ /// - all incoming messages have been read and the server has returned
+ /// a status.
+ /// - the server has returned a non-OK status.
+ /// - the call failed for some reason and the library generated a
+ /// status.
+ ///
+ /// Note that implementations of this method attempt to receive initial
+ /// metadata from the server if initial metadata hasn't yet been received.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[out] status To be updated with the operation status.
+ virtual void Finish(::grpc::Status* status, void* tag) = 0;
+};
+
+/// An interface that yields a sequence of messages of type \a R.
template <class R>
-class AsyncReaderInterface {
- public:
- virtual ~AsyncReaderInterface() {}
-
- /// Read a message of type \a R into \a msg. Completion will be notified by \a
- /// tag on the associated completion queue.
- /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
- /// should not be called concurrently with other streaming APIs
- /// on the same stream. It is not meaningful to call it concurrently
- /// with another \a AsyncReaderInterface::Read on the same stream since reads
- /// on the same stream are delivered in order.
- ///
- /// \param[out] msg Where to eventually store the read message.
- /// \param[in] tag The tag identifying the operation.
- ///
- /// Side effect: note that this method attempt to receive initial metadata for
- /// a stream if it hasn't yet been received.
- virtual void Read(R* msg, void* tag) = 0;
-};
-
-/// An interface that can be fed a sequence of messages of type \a W.
+class AsyncReaderInterface {
+ public:
+ virtual ~AsyncReaderInterface() {}
+
+ /// Read a message of type \a R into \a msg. Completion will be notified by \a
+ /// tag on the associated completion queue.
+ /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+ /// should not be called concurrently with other streaming APIs
+ /// on the same stream. It is not meaningful to call it concurrently
+ /// with another \a AsyncReaderInterface::Read on the same stream since reads
+ /// on the same stream are delivered in order.
+ ///
+ /// \param[out] msg Where to eventually store the read message.
+ /// \param[in] tag The tag identifying the operation.
+ ///
+ /// Side effect: note that this method attempt to receive initial metadata for
+ /// a stream if it hasn't yet been received.
+ virtual void Read(R* msg, void* tag) = 0;
+};
+
+/// An interface that can be fed a sequence of messages of type \a W.
template <class W>
-class AsyncWriterInterface {
- public:
- virtual ~AsyncWriterInterface() {}
-
- /// Request the writing of \a msg with identifying tag \a tag.
- ///
- /// Only one write may be outstanding at any given time. This means that
- /// after calling Write, one must wait to receive \a tag from the completion
- /// queue BEFORE calling Write again.
- /// This is thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] tag The tag identifying the operation.
- virtual void Write(const W& msg, void* tag) = 0;
-
- /// Request the writing of \a msg using WriteOptions \a options with
- /// identifying tag \a tag.
- ///
- /// Only one write may be outstanding at any given time. This means that
- /// after calling Write, one must wait to receive \a tag from the completion
- /// queue BEFORE calling Write again.
- /// WriteOptions \a options is used to set the write options of this message.
- /// This is thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] tag The tag identifying the operation.
- virtual void Write(const W& msg, ::grpc::WriteOptions options, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with the writing
- /// of trailing metadata, using WriteOptions \a options with
- /// identifying tag \a tag.
- ///
- /// For client, WriteLast is equivalent of performing Write and
- /// WritesDone in a single step.
- /// For server, WriteLast buffers the \a msg. The writing of \a msg is held
- /// until Finish is called, where \a msg and trailing metadata are coalesced
- /// and write is initiated. Note that WriteLast can only buffer \a msg up to
- /// the flow control window size. If \a msg size is larger than the window
- /// size, it will be sent on wire without buffering.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
- /// to deallocate once Write returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] tag The tag identifying the operation.
- void WriteLast(const W& msg, ::grpc::WriteOptions options, void* tag) {
- Write(msg, options.set_last_message(), tag);
- }
-};
-
+class AsyncWriterInterface {
+ public:
+ virtual ~AsyncWriterInterface() {}
+
+ /// Request the writing of \a msg with identifying tag \a tag.
+ ///
+ /// Only one write may be outstanding at any given time. This means that
+ /// after calling Write, one must wait to receive \a tag from the completion
+ /// queue BEFORE calling Write again.
+ /// This is thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// to deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void Write(const W& msg, void* tag) = 0;
+
+ /// Request the writing of \a msg using WriteOptions \a options with
+ /// identifying tag \a tag.
+ ///
+ /// Only one write may be outstanding at any given time. This means that
+ /// after calling Write, one must wait to receive \a tag from the completion
+ /// queue BEFORE calling Write again.
+ /// WriteOptions \a options is used to set the write options of this message.
+ /// This is thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// to deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void Write(const W& msg, ::grpc::WriteOptions options, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with the writing
+ /// of trailing metadata, using WriteOptions \a options with
+ /// identifying tag \a tag.
+ ///
+ /// For client, WriteLast is equivalent of performing Write and
+ /// WritesDone in a single step.
+ /// For server, WriteLast buffers the \a msg. The writing of \a msg is held
+ /// until Finish is called, where \a msg and trailing metadata are coalesced
+ /// and write is initiated. Note that WriteLast can only buffer \a msg up to
+ /// the flow control window size. If \a msg size is larger than the window
+ /// size, it will be sent on wire without buffering.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
+ /// to deallocate once Write returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] tag The tag identifying the operation.
+ void WriteLast(const W& msg, ::grpc::WriteOptions options, void* tag) {
+ Write(msg, options.set_last_message(), tag);
+ }
+};
+
} // namespace internal
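
(Illustrative sketch, not part of the diff: all of the interfaces above follow the same completion-queue discipline. Each Read/Write/Finish takes a void* tag, and the caller must wait for that tag to come back from grpc::CompletionQueue::Next before issuing the next operation of the same kind. A minimal helper demonstrating that discipline, assuming operations are issued one at a time:)

#include <cassert>
#include <grpcpp/grpcpp.h>

// Block until the expected tag is delivered and report whether the operation
// it identified completed successfully (ok == false on a Read means the
// stream has no more messages).
bool WaitForTag(grpc::CompletionQueue* cq, void* expected) {
  void* tag = nullptr;
  bool ok = false;
  if (!cq->Next(&tag, &ok)) return false;  // completion queue was shut down
  assert(tag == expected);                 // only one operation outstanding here
  return ok;
}

With such a helper, the "only one write may be outstanding" rule documented above becomes: Write(msg, tag); WaitForTag(&cq, tag); and only then the next Write, WriteLast, or WritesDone.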
template <class R>
-class ClientAsyncReaderInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncReaderInterface<R> {};
+class ClientAsyncReaderInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncReaderInterface<R> {};
-namespace internal {
+namespace internal {
template <class R>
-class ClientAsyncReaderFactory {
- public:
- /// Create a stream object.
- /// Write the first request out if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started and
- /// \a request has been written out. If \a start is not set, \a tag must be
- /// nullptr and the actual call must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- template <class W>
- static ClientAsyncReader<R>* Create(::grpc::ChannelInterface* channel,
- ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const W& request, bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncReader<R>)))
- ClientAsyncReader<R>(call, context, request, start, tag);
- }
-};
-} // namespace internal
-
-/// Async client-side API for doing server-streaming RPCs,
-/// where the incoming message stream coming from the server has
-/// messages of type \a R.
-template <class R>
-class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReader));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
- /// method for semantics.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server,
- /// the \a ClientContext associated with this call is updated, and the
- /// calling code can access the received metadata through the
- /// \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- read_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- read_ops_.RecvInitialMetadata(context_);
- }
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata received from the server.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncReaderFactory<R>;
- template <class W>
- ClientAsyncReader(::grpc::internal::Call call, ::grpc::ClientContext* context,
- const W& request, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
- init_ops_.ClientSendClose();
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- init_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- init_ops_.set_output_tag(tag);
- call_.PerformOps(&init_ops_);
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- init_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
-/// Common interface for client side asynchronous writing.
+class ClientAsyncReaderFactory {
+ public:
+ /// Create a stream object.
+ /// Write the first request out if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started and
+ /// \a request has been written out. If \a start is not set, \a tag must be
+ /// nullptr and the actual call must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ template <class W>
+ static ClientAsyncReader<R>* Create(::grpc::ChannelInterface* channel,
+ ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const W& request, bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncReader<R>)))
+ ClientAsyncReader<R>(call, context, request, start, tag);
+ }
+};
+} // namespace internal
+
+/// Async client-side API for doing server-streaming RPCs,
+/// where the incoming message stream coming from the server has
+/// messages of type \a R.
+template <class R>
+class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReader));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
+ /// method for semantics.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server,
+ /// the \a ClientContext associated with this call is updated, and the
+ /// calling code can access the received metadata through the
+ /// \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ read_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ read_ops_.RecvInitialMetadata(context_);
+ }
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata received from the server.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncReaderFactory<R>;
+ template <class W>
+ ClientAsyncReader(::grpc::internal::Call call, ::grpc::ClientContext* context,
+ const W& request, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
+ init_ops_.ClientSendClose();
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ init_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ init_ops_.set_output_tag(tag);
+ call_.PerformOps(&init_ops_);
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ init_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
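
(Illustrative sketch, not part of the diff: a ClientAsyncReader<R> as created above is normally obtained from a generated stub. The stub type MyService::Stub, its PrepareAsyncList method, and the Item/ListRequest messages below are placeholders invented for the example.)

#include <memory>
#include <grpcpp/grpcpp.h>

// Drain a server-streaming RPC: Read until ok == false, then collect status.
void ReadAll(MyService::Stub* stub, const ListRequest& request) {
  grpc::ClientContext ctx;
  grpc::CompletionQueue cq;
  std::unique_ptr<grpc::ClientAsyncReader<Item>> reader =
      stub->PrepareAsyncList(&ctx, request, &cq);   // prepared, not yet started
  void* tag = nullptr;
  bool ok = false;

  reader->StartCall(reinterpret_cast<void*>(1));    // required after Prepare*
  cq.Next(&tag, &ok);

  Item item;
  while (true) {
    reader->Read(&item, reinterpret_cast<void*>(2));
    cq.Next(&tag, &ok);
    if (!ok) break;                                 // no more messages
    // ... consume item ...
  }

  grpc::Status status;
  reader->Finish(&status, reinterpret_cast<void*>(3));
  cq.Next(&tag, &ok);                               // status is now populated
}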
+/// Common interface for client side asynchronous writing.
template <class W>
-class ClientAsyncWriterInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W> {
- public:
- /// Signal the client is done with the writes (half-close the client stream).
- /// Thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// \param[in] tag The tag identifying the operation.
- virtual void WritesDone(void* tag) = 0;
-};
-
-namespace internal {
-template <class W>
-class ClientAsyncWriterFactory {
- public:
- /// Create a stream object.
- /// Start the RPC if \a start is set
- /// \a tag will be notified on \a cq when the call has been started (i.e.
- /// intitial metadata sent) and \a request has been written out.
- /// If \a start is not set, \a tag must be nullptr and the actual call
- /// must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- /// \a response will be filled in with the single expected response
- /// message from the server upon a successful call to the \a Finish
- /// method of this instance.
- template <class R>
- static ClientAsyncWriter<W>* Create(::grpc::ChannelInterface* channel,
- ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- R* response, bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncWriter<W>)))
- ClientAsyncWriter<W>(call, context, response, start, tag);
- }
-};
-} // namespace internal
-
-/// Async API on the client side for doing client-streaming RPCs,
-/// where the outgoing message stream going to the server contains
-/// messages of type \a W.
+class ClientAsyncWriterInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W> {
+ public:
+ /// Signal the client is done with the writes (half-close the client stream).
+ /// Thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WritesDone(void* tag) = 0;
+};
+
+namespace internal {
template <class W>
-class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncWriter));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
- /// semantics.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server, the \a ClientContext
- /// associated with this call is updated, and the calling code can access
- /// the received metadata through the \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- write_ops_.ClientSendClose();
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata received from the server.
- /// - attempts to fill in the \a response parameter passed to this class's
- /// constructor with the server's response message.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncWriterFactory<W>;
- template <class R>
- ClientAsyncWriter(::grpc::internal::Call call, ::grpc::ClientContext* context,
- R* response, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- // if corked bit is set in context, we just keep the initial metadata
- // buffered up to coalesce with later message send. No op is performed.
- if (!context_->initial_metadata_corked_) {
- write_ops_.set_output_tag(tag);
- call_.PerformOps(&write_ops_);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpGenericRecvMessage,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
-/// Async client-side interface for bi-directional streaming,
-/// where the client-to-server message stream has messages of type \a W,
-/// and the server-to-client message stream has messages of type \a R.
-template <class W, class R>
-class ClientAsyncReaderWriterInterface
- : public internal::ClientAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W>,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Signal the client is done with the writes (half-close the client stream).
- /// Thread-safe with respect to \a AsyncReaderInterface::Read
- ///
- /// \param[in] tag The tag identifying the operation.
- virtual void WritesDone(void* tag) = 0;
-};
-
-namespace internal {
+class ClientAsyncWriterFactory {
+ public:
+ /// Create a stream object.
+ /// Start the RPC if \a start is set
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+ /// intitial metadata sent) and \a request has been written out.
+ /// If \a start is not set, \a tag must be nullptr and the actual call
+ /// must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ /// \a response will be filled in with the single expected response
+ /// message from the server upon a successful call to the \a Finish
+ /// method of this instance.
+ template <class R>
+ static ClientAsyncWriter<W>* Create(::grpc::ChannelInterface* channel,
+ ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ R* response, bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncWriter<W>)))
+ ClientAsyncWriter<W>(call, context, response, start, tag);
+ }
+};
+} // namespace internal
+
+/// Async API on the client side for doing client-streaming RPCs,
+/// where the outgoing message stream going to the server contains
+/// messages of type \a W.
+template <class W>
+class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncWriter));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server, the \a ClientContext
+ /// associated with this call is updated, and the calling code can access
+ /// the received metadata through the \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ write_ops_.ClientSendClose();
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata received from the server.
+ /// - attempts to fill in the \a response parameter passed to this class's
+ /// constructor with the server's response message.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncWriterFactory<W>;
+ template <class R>
+ ClientAsyncWriter(::grpc::internal::Call call, ::grpc::ClientContext* context,
+ R* response, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ // if corked bit is set in context, we just keep the initial metadata
+ // buffered up to coalesce with later message send. No op is performed.
+ if (!context_->initial_metadata_corked_) {
+ write_ops_.set_output_tag(tag);
+ call_.PerformOps(&write_ops_);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpGenericRecvMessage,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
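
(Illustrative sketch, not part of the diff: using ClientAsyncWriter<W> for a client-streaming RPC. The stub method AsyncRecord and the Point/Summary messages are placeholders; note that the response object handed to the factory is only filled in once Finish completes, as documented above.)

#include <memory>
#include <vector>
#include <grpcpp/grpcpp.h>

// Stream a batch of messages to the server, half-close, then read the status
// and the single response message.
void RecordAll(MyService::Stub* stub, const std::vector<Point>& points) {
  grpc::ClientContext ctx;
  grpc::CompletionQueue cq;
  Summary summary;                                  // filled in by Finish
  std::unique_ptr<grpc::ClientAsyncWriter<Point>> writer =
      stub->AsyncRecord(&ctx, &summary, &cq, reinterpret_cast<void*>(1));
  void* tag = nullptr;
  bool ok = false;
  cq.Next(&tag, &ok);                               // call started

  for (const Point& p : points) {                   // one outstanding write at a time
    writer->Write(p, reinterpret_cast<void*>(2));
    cq.Next(&tag, &ok);
    if (!ok) break;
  }
  writer->WritesDone(reinterpret_cast<void*>(3));   // half-close the client stream
  cq.Next(&tag, &ok);

  grpc::Status status;
  writer->Finish(&status, reinterpret_cast<void*>(4));
  cq.Next(&tag, &ok);                               // summary is valid if status.ok()
}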
+/// Async client-side interface for bi-directional streaming,
+/// where the client-to-server message stream has messages of type \a W,
+/// and the server-to-client message stream has messages of type \a R.
template <class W, class R>
-class ClientAsyncReaderWriterFactory {
- public:
- /// Create a stream object.
- /// Start the RPC request if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started (i.e.
- /// intitial metadata sent). If \a start is not set, \a tag must be
- /// nullptr and the actual call must be initiated by StartCall
- /// Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- static ClientAsyncReaderWriter<W, R>* Create(
- ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
- bool start, void* tag) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
-
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
- ClientAsyncReaderWriter<W, R>(call, context, start, tag);
- }
-};
-} // namespace internal
-
-/// Async client-side interface for bi-directional streaming,
-/// where the outgoing message stream going to the server
-/// has messages of type \a W, and the incoming message stream coming
-/// from the server has messages of type \a R.
+class ClientAsyncReaderWriterInterface
+ : public internal::ClientAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W>,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Signal the client is done with the writes (half-close the client stream).
+ /// Thread-safe with respect to \a AsyncReaderInterface::Read
+ ///
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WritesDone(void* tag) = 0;
+};
+
+namespace internal {
template <class W, class R>
-class ClientAsyncReaderWriter final
- : public ClientAsyncReaderWriterInterface<W, R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReaderWriter));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall(void* tag) override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal(tag);
- }
-
- /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
- /// for semantics of this method.
- ///
- /// Side effect:
- /// - upon receiving initial metadata from the server, the \a ClientContext
- /// is updated with it, and then the receiving initial metadata can
- /// be accessed through this \a ClientContext.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.RecvInitialMetadata(context_);
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- read_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- read_ops_.RecvInitialMetadata(context_);
- }
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- write_ops_.set_output_tag(tag);
- write_ops_.ClientSendClose();
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
- /// Side effect
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void Finish(::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- finish_ops_.set_output_tag(tag);
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class internal::ClientAsyncReaderWriterFactory<W, R>;
- ClientAsyncReaderWriter(::grpc::internal::Call call,
- ::grpc::ClientContext* context, bool start, void* tag)
- : context_(context), call_(call), started_(start) {
- if (start) {
- StartCallInternal(tag);
- } else {
- GPR_CODEGEN_ASSERT(tag == nullptr);
- }
- }
-
- void StartCallInternal(void* tag) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- // if corked bit is set in context, we just keep the initial metadata
- // buffered up to coalesce with later message send. No op is performed.
- if (!context_->initial_metadata_corked_) {
- write_ops_.set_output_tag(tag);
- call_.PerformOps(&write_ops_);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::Call call_;
- bool started_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
-};
-
+class ClientAsyncReaderWriterFactory {
+ public:
+ /// Create a stream object.
+ /// Start the RPC request if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+ /// intitial metadata sent). If \a start is not set, \a tag must be
+ /// nullptr and the actual call must be initiated by StartCall
+ /// Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ static ClientAsyncReaderWriter<W, R>* Create(
+ ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ bool start, void* tag) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncReaderWriter<W, R>)))
+ ClientAsyncReaderWriter<W, R>(call, context, start, tag);
+ }
+};
+} // namespace internal
+
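
(Illustrative sketch, not part of the diff: a strictly alternating write/read loop on a ClientAsyncReaderWriter<W, R>. The stub method AsyncChat and the Note message are placeholders; a real client may keep one read and one write in flight concurrently, since the two directions are documented above as thread-safe with respect to each other.)

#include <memory>
#include <vector>
#include <grpcpp/grpcpp.h>

// Bidirectional streaming: send each note, read one reply, then shut down.
void Chat(MyService::Stub* stub, const std::vector<Note>& outgoing) {
  grpc::ClientContext ctx;
  grpc::CompletionQueue cq;
  std::unique_ptr<grpc::ClientAsyncReaderWriter<Note, Note>> stream =
      stub->AsyncChat(&ctx, &cq, reinterpret_cast<void*>(1));
  void* tag = nullptr;
  bool ok = false;
  cq.Next(&tag, &ok);                               // call started

  Note reply;
  for (const Note& n : outgoing) {
    stream->Write(n, reinterpret_cast<void*>(2));
    cq.Next(&tag, &ok);
    stream->Read(&reply, reinterpret_cast<void*>(3));
    cq.Next(&tag, &ok);
    if (!ok) break;                                 // server ended the stream
  }
  stream->WritesDone(reinterpret_cast<void*>(4));
  cq.Next(&tag, &ok);

  grpc::Status status;
  stream->Finish(&status, reinterpret_cast<void*>(5));
  cq.Next(&tag, &ok);
}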
+/// Async client-side interface for bi-directional streaming,
+/// where the outgoing message stream going to the server
+/// has messages of type \a W, and the incoming message stream coming
+/// from the server has messages of type \a R.
template <class W, class R>
-class ServerAsyncReaderInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Indicate that the stream is to be finished with a certain status code
- /// and also send out \a msg response to the client.
- /// Request notification for when the server has sent the response and the
- /// appropriate signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when:
- /// * all messages from the client have been received (either known
- /// implictly, or explicitly because a previous
- /// \a AsyncReaderInterface::Read operation with a non-ok result,
- /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if
- /// some failure occurred when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg or \a status, so it
- /// is safe to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- /// \param[in] msg To be sent to the client as the response for this call.
- virtual void Finish(const W& msg, const ::grpc::Status& status,
- void* tag) = 0;
-
- /// Indicate that the stream is to be finished with a certain
- /// non-OK status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// This call is meant to end the call with some error, and can be called at
- /// any point that the server would like to "fail" the call (though note
- /// this shouldn't be called concurrently with any other "sending" call, like
- /// \a AsyncWriterInterface::Write).
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), and status, or if some failure occurred
- /// when trying to do so.
- ///
- /// gRPC doesn't take ownership or a reference to \a status, so it is safe to
- /// to deallocate once FinishWithError returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- /// - Note: \a status must have a non-OK code.
- virtual void FinishWithError(const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing client-streaming RPCs,
-/// where the incoming message stream from the client has messages of type \a R,
-/// and the single response message sent from the server is type \a W.
+class ClientAsyncReaderWriter final
+ : public ClientAsyncReaderWriterInterface<W, R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReaderWriter));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+  // https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall(void* tag) override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal(tag);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
+ /// for semantics of this method.
+ ///
+ /// Side effect:
+ /// - upon receiving initial metadata from the server, the \a ClientContext
+  ///     is updated with it, and then the received initial metadata can
+ /// be accessed through this \a ClientContext.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.RecvInitialMetadata(context_);
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ read_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ read_ops_.RecvInitialMetadata(context_);
+ }
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ write_ops_.set_output_tag(tag);
+ write_ops_.ClientSendClose();
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ClientAsyncStreamingInterface.Finish method for semantics.
+ /// Side effect
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void Finish(::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ finish_ops_.set_output_tag(tag);
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class internal::ClientAsyncReaderWriterFactory<W, R>;
+ ClientAsyncReaderWriter(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, bool start, void* tag)
+ : context_(context), call_(call), started_(start) {
+ if (start) {
+ StartCallInternal(tag);
+ } else {
+ GPR_CODEGEN_ASSERT(tag == nullptr);
+ }
+ }
+
+ void StartCallInternal(void* tag) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ // if corked bit is set in context, we just keep the initial metadata
+ // buffered up to coalesce with later message send. No op is performed.
+ if (!context_->initial_metadata_corked_) {
+ write_ops_.set_output_tag(tag);
+ call_.PerformOps(&write_ops_);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+};
+
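Illustrative aside (not part of the header being diffed): a minimal sketch of driving this bi-directional streaming API from a completion queue. The stub type Echo::Stub, its PrepareAsyncChat method, the EchoMsg message, and its set_text setter are hypothetical stand-ins for protoc-generated code; only the grpc:: classes and the one-tag-per-operation pattern come from this header.

    // Hypothetical generated header (assumption, not part of this diff):
    // #include "echo.grpc.pb.h"
    #include <memory>
    #include <grpcpp/grpcpp.h>

    void ChatOnce(Echo::Stub* stub) {
      grpc::ClientContext ctx;
      grpc::CompletionQueue cq;
      void* tag;
      bool ok;

      // Prepare but do not start; StartCall(tag) then issues the initial-metadata op.
      std::unique_ptr<grpc::ClientAsyncReaderWriter<EchoMsg, EchoMsg>> rw =
          stub->PrepareAsyncChat(&ctx, &cq);
      rw->StartCall(reinterpret_cast<void*>(1));
      cq.Next(&tag, &ok);  // StartCall completed

      EchoMsg request, reply;
      request.set_text("ping");  // hypothetical protobuf setter
      rw->Write(request, reinterpret_cast<void*>(2));
      cq.Next(&tag, &ok);  // write completed

      rw->Read(&reply, reinterpret_cast<void*>(3));
      cq.Next(&tag, &ok);  // ok == false would mean the stream ended without a message

      rw->WritesDone(reinterpret_cast<void*>(4));
      cq.Next(&tag, &ok);

      grpc::Status status;
      rw->Finish(&status, reinterpret_cast<void*>(5));
      cq.Next(&tag, &ok);  // status now holds the call result

      cq.Shutdown();
      while (cq.Next(&tag, &ok)) {
      }  // drain before the queue goes out of scope
    }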
template <class W, class R>
-class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
- public:
- explicit ServerAsyncReader(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- read_ops_.set_output_tag(tag);
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- /// See the \a ServerAsyncReaderInterface.Read method for semantics
- ///
- /// Side effect:
-  ///   - also sends initial metadata if not already sent.
- /// - uses the \a ServerContext associated with this call to send possible
- /// initial and trailing metadata.
- ///
- /// Note: \a msg is not sent if \a status has a non-OK code.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once Finish returns.
- void Finish(const W& msg, const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- // The response is dropped if the status is not OK.
- if (status.ok()) {
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
- finish_ops_.SendMessage(msg));
- } else {
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- }
- call_.PerformOps(&finish_ops_);
- }
-
- /// See the \a ServerAsyncReaderInterface.Read method for semantics
- ///
- /// Side effect:
-  ///   - also sends initial metadata if not already sent.
- /// - uses the \a ServerContext associated with this call to send possible
- /// initial and trailing metadata.
- ///
-  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once FinishWithError returns.
- void FinishWithError(const ::grpc::Status& status, void* tag) override {
- GPR_CODEGEN_ASSERT(!status.ok());
- finish_ops_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
+class ServerAsyncReaderInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code
+ /// and also send out \a msg response to the client.
+ /// Request notification for when the server has sent the response and the
+ /// appropriate signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when:
+ /// * all messages from the client have been received (either known
+  ///     implicitly, or explicitly because a previous
+  ///     \a AsyncReaderInterface::Read operation returned a non-ok result,
+ /// e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if
+ /// some failure occurred when trying to do so.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg or \a status, so it
+ /// is safe to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ /// \param[in] msg To be sent to the client as the response for this call.
+ virtual void Finish(const W& msg, const ::grpc::Status& status,
+ void* tag) = 0;
+
+ /// Indicate that the stream is to be finished with a certain
+ /// non-OK status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// This call is meant to end the call with some error, and can be called at
+ /// any point that the server would like to "fail" the call (though note
+ /// this shouldn't be called concurrently with any other "sending" call, like
+ /// \a AsyncWriterInterface::Write).
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), and status, or if some failure occurred
+ /// when trying to do so.
+ ///
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once FinishWithError returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ /// - Note: \a status must have a non-OK code.
+ virtual void FinishWithError(const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing client-streaming RPCs,
+/// where the incoming message stream from the client has messages of type \a R,
+/// and the single response message sent from the server is type \a W.
+template <class W, class R>
+class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
+ public:
+ explicit ServerAsyncReader(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ read_ops_.set_output_tag(tag);
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderInterface.Read method for semantics
+ ///
+ /// Side effect:
+  ///   - also sends initial metadata if not already sent.
+ /// - uses the \a ServerContext associated with this call to send possible
+ /// initial and trailing metadata.
+ ///
+ /// Note: \a msg is not sent if \a status has a non-OK code.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once Finish returns.
+ void Finish(const W& msg, const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ // The response is dropped if the status is not OK.
+ if (status.ok()) {
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
+ finish_ops_.SendMessage(msg));
+ } else {
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ }
+ call_.PerformOps(&finish_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderInterface.Read method for semantics
+ ///
+ /// Side effect:
+  ///   - also sends initial metadata if not already sent.
+ /// - uses the \a ServerContext associated with this call to send possible
+ /// initial and trailing metadata.
+ ///
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once FinishWithError returns.
+ void FinishWithError(const ::grpc::Status& status, void* tag) override {
+ GPR_CODEGEN_ASSERT(!status.ok());
+ finish_ops_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
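Illustrative aside: a sketch of serving one client-streaming call with ServerAsyncReader. Echo::AsyncService, RequestAccumulate, EchoMsg, EchoSummary and set_count are assumed generated names, and the ServerCompletionQueue is assumed to come from ServerBuilder::AddCompletionQueue in surrounding server setup that is omitted here.

    void HandleOneAccumulate(Echo::AsyncService* service,
                             grpc::ServerCompletionQueue* cq) {
      grpc::ServerContext ctx;
      grpc::ServerAsyncReader<EchoSummary, EchoMsg> reader(&ctx);
      void* tag;
      bool ok;

      // Ask the runtime for the next incoming Accumulate call.
      service->RequestAccumulate(&ctx, &reader, cq, cq,
                                 reinterpret_cast<void*>(1));
      cq->Next(&tag, &ok);

      // Drain the request stream; a failed Read (ok == false) means the client
      // has finished sending.
      EchoMsg msg;
      int count = 0;
      for (;;) {
        reader.Read(&msg, reinterpret_cast<void*>(2));
        cq->Next(&tag, &ok);
        if (!ok) break;
        ++count;
      }

      // Send the single response together with the trailing status.
      EchoSummary summary;
      summary.set_count(count);  // hypothetical protobuf setter
      reader.Finish(summary, grpc::Status::OK, reinterpret_cast<void*>(3));
      cq->Next(&tag, &ok);
    }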
template <class W>
-class ServerAsyncWriterInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W> {
- public:
- /// Indicate that the stream is to be finished with a certain status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when either:
- /// * all messages from the client have been received (either known
-  ///     implicitly, or explicitly because a previous \a
-  ///     AsyncReaderInterface::Read operation returned a non-ok
-  ///     result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
- /// * it is desired to end the call early with some non-OK status code.
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if
- /// some failure occurred when trying to do so.
- ///
-  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with trailing metadata which
- /// contains \a status, using WriteOptions options with
- /// identifying tag \a tag.
- ///
-  /// WriteAndFinish is equivalent to performing WriteLast and Finish
- /// in a single step.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] status The Status that server returns to client.
- /// \param[in] tag The tag identifying the operation.
- virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing server streaming RPCs,
-/// where the outgoing message stream from the server has messages of type \a W.
+class ServerAsyncWriterInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when either:
+ /// * all messages from the client have been received (either known
+  ///     implicitly, or explicitly because a previous \a
+  ///     AsyncReaderInterface::Read operation returned a non-ok
+  ///     result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
+ /// * it is desired to end the call early with some non-OK status code.
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if
+ /// some failure occurred when trying to do so.
+ ///
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with trailing metadata which
+ /// contains \a status, using WriteOptions options with
+ /// identifying tag \a tag.
+ ///
+  /// WriteAndFinish is equivalent to performing WriteLast and Finish
+ /// in a single step.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] status The Status that server returns to client.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing server streaming RPCs,
+/// where the outgoing message stream from the server has messages of type \a W.
template <class W>
-class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
- public:
- explicit ServerAsyncWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
-
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncWriterInterface.WriteAndFinish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used
- /// for sending trailing (and initial) metadata to the client.
- ///
- /// Note: \a status must have an OK code.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- options.set_buffer_hint();
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncWriterInterface.Finish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used for sending
- /// trailing (and initial if not already sent) metadata to the client.
- ///
-  /// Note: there are no restrictions on the code of
-  /// \a status; it may be non-OK.
- ///
-  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- void Finish(const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&finish_ops_);
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- template <class T>
- void EnsureInitialMetadataSent(T* ops) {
- if (!ctx_->sent_initial_metadata_) {
- ops->SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops->set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
-/// Server-side interface for asynchronous bi-directional streaming.
+class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
+ public:
+ explicit ServerAsyncWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncWriterInterface.WriteAndFinish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used
+ /// for sending trailing (and initial) metadata to the client.
+ ///
+ /// Note: \a status must have an OK code.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ options.set_buffer_hint();
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncWriterInterface.Finish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used for sending
+ /// trailing (and initial if not already sent) metadata to the client.
+ ///
+  /// Note: there are no restrictions on the code of
+  /// \a status; it may be non-OK.
+ ///
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ void Finish(const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&finish_ops_);
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ template <class T>
+ void EnsureInitialMetadataSent(T* ops) {
+ if (!ctx_->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops->set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
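Illustrative aside: a sketch of one server-streaming call handled with ServerAsyncWriter. Echo::AsyncService, RequestListItems, ItemRequest, Item and set_index are assumed generated names; server and completion-queue setup is omitted.

    void HandleOneListItems(Echo::AsyncService* service,
                            grpc::ServerCompletionQueue* cq) {
      grpc::ServerContext ctx;
      ItemRequest request;
      grpc::ServerAsyncWriter<Item> writer(&ctx);
      void* tag;
      bool ok;

      // Wait for the next ListItems call; the request message arrives with it.
      service->RequestListItems(&ctx, &request, &writer, cq, cq,
                                reinterpret_cast<void*>(1));
      cq->Next(&tag, &ok);

      // Stream a few responses; each Write must complete before the next starts.
      for (int i = 0; i < 3; ++i) {
        Item item;
        item.set_index(i);  // hypothetical protobuf setter
        writer.Write(item, reinterpret_cast<void*>(2));
        cq->Next(&tag, &ok);
      }

      // Close the stream with a trailing status.
      writer.Finish(grpc::Status::OK, reinterpret_cast<void*>(3));
      cq->Next(&tag, &ok);
    }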
+/// Server-side interface for asynchronous bi-directional streaming.
template <class W, class R>
-class ServerAsyncReaderWriterInterface
- : public ::grpc::internal::ServerAsyncStreamingInterface,
- public internal::AsyncWriterInterface<W>,
- public internal::AsyncReaderInterface<R> {
- public:
- /// Indicate that the stream is to be finished with a certain status code.
- /// Request notification for when the server has sent the appropriate
- /// signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// It is appropriate to call this method when either:
- /// * all messages from the client have been received (either known
-  ///     implicitly, or explicitly because a previous \a
-  ///     AsyncReaderInterface::Read operation returned
-  ///     a non-ok result (e.g., cq->Next(&read_tag, &ok) filled in 'ok'
-  ///     with 'false').
- /// * it is desired to end the call early with some non-OK status code.
- ///
- /// This operation will end when the server has finished sending out initial
- /// metadata (if not sent already), response message, and status, or if some
- /// failure occurred when trying to do so.
- ///
-  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of this call.
- virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
-
- /// Request the writing of \a msg and coalesce it with trailing metadata which
- /// contains \a status, using WriteOptions options with
- /// identifying tag \a tag.
- ///
-  /// WriteAndFinish is equivalent to performing WriteLast and Finish in a
- /// single step.
- ///
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- ///
- /// \param[in] msg The message to be written.
- /// \param[in] options The WriteOptions to be used to write this message.
- /// \param[in] status The Status that server returns to client.
- /// \param[in] tag The tag identifying the operation.
- virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) = 0;
-};
-
-/// Async server-side API for doing bidirectional streaming RPCs,
-/// where the incoming message stream coming from the client has messages of
-/// type \a R, and the outgoing message stream coming from the server has
-/// messages of type \a W.
+class ServerAsyncReaderWriterInterface
+ : public ::grpc::internal::ServerAsyncStreamingInterface,
+ public internal::AsyncWriterInterface<W>,
+ public internal::AsyncReaderInterface<R> {
+ public:
+ /// Indicate that the stream is to be finished with a certain status code.
+ /// Request notification for when the server has sent the appropriate
+ /// signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// It is appropriate to call this method when either:
+ /// * all messages from the client have been received (either known
+  ///     implicitly, or explicitly because a previous \a
+  ///     AsyncReaderInterface::Read operation returned
+  ///     a non-ok result (e.g., cq->Next(&read_tag, &ok) filled in 'ok'
+  ///     with 'false').
+ /// * it is desired to end the call early with some non-OK status code.
+ ///
+ /// This operation will end when the server has finished sending out initial
+ /// metadata (if not sent already), response message, and status, or if some
+ /// failure occurred when trying to do so.
+ ///
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of this call.
+ virtual void Finish(const ::grpc::Status& status, void* tag) = 0;
+
+ /// Request the writing of \a msg and coalesce it with trailing metadata which
+ /// contains \a status, using WriteOptions options with
+ /// identifying tag \a tag.
+ ///
+  /// WriteAndFinish is equivalent to performing WriteLast and Finish in a
+ /// single step.
+ ///
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ ///
+ /// \param[in] msg The message to be written.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ /// \param[in] status The Status that server returns to client.
+ /// \param[in] tag The tag identifying the operation.
+ virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) = 0;
+};
+
+/// Async server-side API for doing bidirectional streaming RPCs,
+/// where the incoming message stream coming from the client has messages of
+/// type \a R, and the outgoing message stream coming from the server has
+/// messages of type \a W.
template <class W, class R>
-class ServerAsyncReaderWriter final
- : public ServerAsyncReaderWriterInterface<W, R> {
- public:
- explicit ServerAsyncReaderWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Implicit input parameter:
- /// - The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_ops_.set_output_tag(tag);
- meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_ops_);
- }
-
- void Read(R* msg, void* tag) override {
- read_ops_.set_output_tag(tag);
- read_ops_.RecvMessage(msg);
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const W& msg, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
- call_.PerformOps(&write_ops_);
- }
-
- void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
- write_ops_.set_output_tag(tag);
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
- EnsureInitialMetadataSent(&write_ops_);
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncReaderWriterInterface.WriteAndFinish
- /// method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used
- /// for sending trailing (and initial) metadata to the client.
- ///
- /// Note: \a status must have an OK code.
- //
- /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
- /// is safe to deallocate once WriteAndFinish returns.
- void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
- const ::grpc::Status& status, void* tag) override {
- write_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&write_ops_);
- options.set_buffer_hint();
- GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
- write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&write_ops_);
- }
-
- /// See the \a ServerAsyncReaderWriterInterface.Finish method for semantics.
- ///
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call is used for sending
- /// trailing (and initial if not already sent) metadata to the client.
- ///
-  /// Note: there are no restrictions on the code of \a status;
-  /// it may be non-OK.
- //
-  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
- /// to deallocate once Finish returns.
- void Finish(const ::grpc::Status& status, void* tag) override {
- finish_ops_.set_output_tag(tag);
- EnsureInitialMetadataSent(&finish_ops_);
-
- finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class ::grpc::Server;
-
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- template <class T>
- void EnsureInitialMetadataSent(T* ops) {
- if (!ctx_->sent_initial_metadata_) {
- ops->SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops->set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- write_ops_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- finish_ops_;
-};
-
+class ServerAsyncReaderWriter final
+ : public ServerAsyncReaderWriterInterface<W, R> {
+ public:
+ explicit ServerAsyncReaderWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_ops_.set_output_tag(tag);
+ meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_ops_);
+ }
+
+ void Read(R* msg, void* tag) override {
+ read_ops_.set_output_tag(tag);
+ read_ops_.RecvMessage(msg);
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const W& msg, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+ EnsureInitialMetadataSent(&write_ops_);
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderWriterInterface.WriteAndFinish
+ /// method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used
+ /// for sending trailing (and initial) metadata to the client.
+ ///
+ /// Note: \a status must have an OK code.
+ //
+ /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
+ /// is safe to deallocate once WriteAndFinish returns.
+ void WriteAndFinish(const W& msg, ::grpc::WriteOptions options,
+ const ::grpc::Status& status, void* tag) override {
+ write_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&write_ops_);
+ options.set_buffer_hint();
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok());
+ write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&write_ops_);
+ }
+
+ /// See the \a ServerAsyncReaderWriterInterface.Finish method for semantics.
+ ///
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call is used for sending
+ /// trailing (and initial if not already sent) metadata to the client.
+ ///
+  /// Note: there are no restrictions on the code of \a status;
+  /// it may be non-OK.
+ //
+  /// gRPC doesn't take ownership or a reference to \a status, so it is safe
+ /// to deallocate once Finish returns.
+ void Finish(const ::grpc::Status& status, void* tag) override {
+ finish_ops_.set_output_tag(tag);
+ EnsureInitialMetadataSent(&finish_ops_);
+
+ finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class ::grpc::Server;
+
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ template <class T>
+ void EnsureInitialMetadataSent(T* ops) {
+ if (!ctx_->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops->set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ write_ops_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_ops_;
+};
+
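Illustrative aside: a sketch of a bidirectional echo loop over ServerAsyncReaderWriter. As above, Echo::AsyncService, RequestChat and EchoMsg are assumed generated names; server setup is omitted.

    void HandleOneChat(Echo::AsyncService* service,
                       grpc::ServerCompletionQueue* cq) {
      grpc::ServerContext ctx;
      grpc::ServerAsyncReaderWriter<EchoMsg, EchoMsg> stream(&ctx);
      void* tag;
      bool ok;

      // Wait for the next incoming Chat call.
      service->RequestChat(&ctx, &stream, cq, cq, reinterpret_cast<void*>(1));
      cq->Next(&tag, &ok);

      // Echo every message back until the client half-closes (Read fails).
      EchoMsg msg;
      for (;;) {
        stream.Read(&msg, reinterpret_cast<void*>(2));
        cq->Next(&tag, &ok);
        if (!ok) break;
        stream.Write(msg, reinterpret_cast<void*>(3));
        cq->Next(&tag, &ok);
      }

      // End the call with a trailing status.
      stream.Finish(grpc::Status::OK, reinterpret_cast<void*>(4));
      cq->Next(&tag, &ok);
    }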
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
index 3deeda8c7f..0fccae0033 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h
@@ -19,296 +19,296 @@
#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
#define GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
-/// An interface relevant for async client side unary RPCs (which send
-/// one request message to a server and receive one response message).
+/// An interface relevant for async client side unary RPCs (which send
+/// one request message to a server and receive one response message).
template <class R>
-class ClientAsyncResponseReaderInterface {
- public:
- virtual ~ClientAsyncResponseReaderInterface() {}
+class ClientAsyncResponseReaderInterface {
+ public:
+ virtual ~ClientAsyncResponseReaderInterface() {}
- /// Start the call that was set up by the constructor, but only if the
- /// constructor was invoked through the "Prepare" API which doesn't actually
- /// start the call
- virtual void StartCall() = 0;
+ /// Start the call that was set up by the constructor, but only if the
+ /// constructor was invoked through the "Prepare" API which doesn't actually
+ /// start the call
+ virtual void StartCall() = 0;
- /// Request notification of the reading of initial metadata. Completion
- /// will be notified by \a tag on the associated completion queue.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// \param[in] tag Tag identifying this request.
- virtual void ReadInitialMetadata(void* tag) = 0;
-
- /// Request to receive the server's response \a msg and final \a status for
- /// the call, and to notify \a tag on this call's completion queue when
- /// finished.
- ///
- /// This function will return when either:
- /// - when the server's response message and status have been received.
- /// - when the server has returned a non-OK status (no message expected in
- /// this case).
- /// - when the call failed for some reason and the library generated a
- /// non-OK status.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[out] status To be updated with the operation status.
- /// \param[out] msg To be filled in with the server's response message.
- virtual void Finish(R* msg, ::grpc::Status* status, void* tag) = 0;
-};
+ /// Request notification of the reading of initial metadata. Completion
+ /// will be notified by \a tag on the associated completion queue.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ virtual void ReadInitialMetadata(void* tag) = 0;
+ /// Request to receive the server's response \a msg and final \a status for
+ /// the call, and to notify \a tag on this call's completion queue when
+ /// finished.
+ ///
+ /// This function will return when either:
+ /// - when the server's response message and status have been received.
+ /// - when the server has returned a non-OK status (no message expected in
+ /// this case).
+ /// - when the call failed for some reason and the library generated a
+ /// non-OK status.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[out] status To be updated with the operation status.
+ /// \param[out] msg To be filled in with the server's response message.
+ virtual void Finish(R* msg, ::grpc::Status* status, void* tag) = 0;
+};
+
namespace internal {
-template <class R>
-class ClientAsyncResponseReaderFactory {
- public:
- /// Start a call and write the request out if \a start is set.
- /// \a tag will be notified on \a cq when the call has been started (i.e.
-  /// initial metadata sent) and \a request has been written out.
-  /// If \a start is not set, the actual call must be initiated by StartCall.
- /// Note that \a context will be used to fill in custom initial metadata
-  /// to send to the server when starting the call.
- template <class W>
- static ClientAsyncResponseReader<R>* Create(
- ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
- const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
- const W& request, bool start) {
- ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
- return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientAsyncResponseReader<R>)))
- ClientAsyncResponseReader<R>(call, context, request, start);
- }
-};
-} // namespace internal
+template <class R>
+class ClientAsyncResponseReaderFactory {
+ public:
+ /// Start a call and write the request out if \a start is set.
+ /// \a tag will be notified on \a cq when the call has been started (i.e.
+  /// initial metadata sent) and \a request has been written out.
+  /// If \a start is not set, the actual call must be initiated by StartCall.
+ /// Note that \a context will be used to fill in custom initial metadata
+  /// to send to the server when starting the call.
+ template <class W>
+ static ClientAsyncResponseReader<R>* Create(
+ ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq,
+ const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context,
+ const W& request, bool start) {
+ ::grpc::internal::Call call = channel->CreateCall(method, context, cq);
+ return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientAsyncResponseReader<R>)))
+ ClientAsyncResponseReader<R>(call, context, request, start);
+ }
+};
+} // namespace internal
-/// Async API for client-side unary RPCs, where the message response
-/// received from the server is of type \a R.
+/// Async API for client-side unary RPCs, where the message response
+/// received from the server is of type \a R.
template <class R>
-class ClientAsyncResponseReader final
- : public ClientAsyncResponseReaderInterface<R> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncResponseReader));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
-  // https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- GPR_CODEGEN_ASSERT(!started_);
- started_ = true;
- StartCallInternal();
- }
-
- /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
- /// semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void ReadInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- single_buf.set_output_tag(tag);
- single_buf.RecvInitialMetadata(context_);
- call_.PerformOps(&single_buf);
- initial_metadata_read_ = true;
- }
-
-  /// See \a ClientAsyncResponseReaderInterface::Finish for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible initial and trailing metadata sent from the server.
- void Finish(R* msg, ::grpc::Status* status, void* tag) override {
- GPR_CODEGEN_ASSERT(started_);
- if (initial_metadata_read_) {
- finish_buf.set_output_tag(tag);
- finish_buf.RecvMessage(msg);
- finish_buf.AllowNoMessage();
- finish_buf.ClientRecvStatus(context_, status);
- call_.PerformOps(&finish_buf);
- } else {
- single_buf.set_output_tag(tag);
- single_buf.RecvInitialMetadata(context_);
- single_buf.RecvMessage(msg);
- single_buf.AllowNoMessage();
- single_buf.ClientRecvStatus(context_, status);
- call_.PerformOps(&single_buf);
- }
- }
-
- private:
- friend class internal::ClientAsyncResponseReaderFactory<R>;
- ::grpc::ClientContext* const context_;
- ::grpc::internal::Call call_;
- bool started_;
- bool initial_metadata_read_ = false;
-
- template <class W>
- ClientAsyncResponseReader(::grpc::internal::Call call,
- ::grpc::ClientContext* context, const W& request,
- bool start)
- : context_(context), call_(call), started_(start) {
- // Bind the metadata at time of StartCallInternal but set up the rest here
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(single_buf.SendMessage(request).ok());
- single_buf.ClientSendClose();
- if (start) StartCallInternal();
- }
-
- void StartCallInternal() {
- single_buf.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
-
- // disable operator new
- static void* operator new(std::size_t size);
- static void* operator new(std::size_t /*size*/, void* p) { return p; }
+class ClientAsyncResponseReader final
+ : public ClientAsyncResponseReaderInterface<R> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncResponseReader));
+ }
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose,
- ::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>,
- ::grpc::internal::CallOpClientRecvStatus>
- single_buf;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_buf;
-};
-
-/// Async server-side API for handling unary calls, where the single
-/// response message sent to the client is of type \a W.
-template <class W>
-class ServerAsyncResponseWriter final
- : public ::grpc::internal::ServerAsyncStreamingInterface {
- public:
- explicit ServerAsyncResponseWriter(::grpc::ServerContext* ctx)
- : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
- /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
- ///
- /// Side effect:
- /// The initial metadata that will be sent to the client from this op will
- /// be taken from the \a ServerContext associated with the call.
- ///
- /// \param[in] tag Tag identifying this request.
- void SendInitialMetadata(void* tag) override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- meta_buf_.set_output_tag(tag);
- meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- meta_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_.PerformOps(&meta_buf_);
- }
-
- /// Indicate that the stream is to be finished and request notification
- /// when the server has sent the appropriate signals to the client to
- /// end the call. Should not be used concurrently with other operations.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of the call.
- /// \param[in] msg Message to be sent to the client.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent (using the
- /// \a ServerContext associated with this call).
- ///
- /// Note: if \a status has a non-OK code, then \a msg will not be sent,
- /// and the client will receive only the status with possible trailing
- /// metadata.
- void Finish(const W& msg, const ::grpc::Status& status, void* tag) {
- finish_buf_.set_output_tag(tag);
- finish_buf_.set_core_cq_tag(&finish_buf_);
- if (!ctx_->sent_initial_metadata_) {
- finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- // The response is dropped if the status is not OK.
- if (status.ok()) {
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_,
- finish_buf_.SendMessage(msg));
- } else {
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- }
- call_.PerformOps(&finish_buf_);
- }
-
- /// Indicate that the stream is to be finished with a non-OK status,
- /// and request notification for when the server has finished sending the
- /// appropriate signals to the client to end the call.
- /// Should not be used concurrently with other operations.
- ///
- /// \param[in] tag Tag identifying this request.
- /// \param[in] status To be sent to the client as the result of the call.
- /// - Note: \a status must have a non-OK code.
- ///
- /// Side effect:
- /// - also sends initial metadata if not already sent (using the
- /// \a ServerContext associated with this call).
- void FinishWithError(const ::grpc::Status& status, void* tag) {
- GPR_CODEGEN_ASSERT(!status.ok());
- finish_buf_.set_output_tag(tag);
- if (!ctx_->sent_initial_metadata_) {
- finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- finish_buf_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
- call_.PerformOps(&finish_buf_);
- }
-
- private:
- void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
-
- ::grpc::internal::Call call_;
- ::grpc::ServerContext* ctx_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- meta_buf_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- finish_buf_;
-};
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+  // https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+ void StartCall() override {
+ GPR_CODEGEN_ASSERT(!started_);
+ started_ = true;
+ StartCallInternal();
+ }
+
+ /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void ReadInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ single_buf.set_output_tag(tag);
+ single_buf.RecvInitialMetadata(context_);
+ call_.PerformOps(&single_buf);
+ initial_metadata_read_ = true;
+ }
+
+  /// See \a ClientAsyncResponseReaderInterface::Finish for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible initial and trailing metadata sent from the server.
+ void Finish(R* msg, ::grpc::Status* status, void* tag) override {
+ GPR_CODEGEN_ASSERT(started_);
+ if (initial_metadata_read_) {
+ finish_buf.set_output_tag(tag);
+ finish_buf.RecvMessage(msg);
+ finish_buf.AllowNoMessage();
+ finish_buf.ClientRecvStatus(context_, status);
+ call_.PerformOps(&finish_buf);
+ } else {
+ single_buf.set_output_tag(tag);
+ single_buf.RecvInitialMetadata(context_);
+ single_buf.RecvMessage(msg);
+ single_buf.AllowNoMessage();
+ single_buf.ClientRecvStatus(context_, status);
+ call_.PerformOps(&single_buf);
+ }
+ }
+
+ private:
+ friend class internal::ClientAsyncResponseReaderFactory<R>;
+ ::grpc::ClientContext* const context_;
+ ::grpc::internal::Call call_;
+ bool started_;
+ bool initial_metadata_read_ = false;
+
+ template <class W>
+ ClientAsyncResponseReader(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, const W& request,
+ bool start)
+ : context_(context), call_(call), started_(start) {
+ // Bind the metadata at time of StartCallInternal but set up the rest here
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(single_buf.SendMessage(request).ok());
+ single_buf.ClientSendClose();
+ if (start) StartCallInternal();
+ }
+
+ void StartCallInternal() {
+ single_buf.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+
+ // disable operator new
+ static void* operator new(std::size_t size);
+ static void* operator new(std::size_t /*size*/, void* p) { return p; }
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose,
+ ::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>,
+ ::grpc::internal::CallOpClientRecvStatus>
+ single_buf;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_buf;
+};
+
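For orientation, a minimal usage sketch of the reader defined above, driven from a CompletionQueue. EchoService, EchoRequest, EchoReply and the AsyncEcho stub method are hypothetical stand-ins for generated code, not part of this header:

#include <grpcpp/grpcpp.h>

// Sketch only: assumes a generated stub whose AsyncEcho returns a
// std::unique_ptr<grpc::ClientAsyncResponseReader<EchoReply>> that is
// already started (start == true in the factory).
void CallEchoAsync(EchoService::Stub* stub, grpc::CompletionQueue* cq) {
  grpc::ClientContext context;
  EchoRequest request;
  EchoReply reply;
  grpc::Status status;

  auto rpc = stub->AsyncEcho(&context, request, cq);

  // Request the response message and the final status; tag 1 identifies
  // this batch on the completion queue.
  rpc->Finish(&reply, &status, reinterpret_cast<void*>(1));

  void* tag = nullptr;
  bool ok = false;
  if (cq->Next(&tag, &ok) && ok && tag == reinterpret_cast<void*>(1)) {
    // reply and status are now populated.
  }
}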
+/// Async server-side API for handling unary calls, where the single
+/// response message sent to the client is of type \a W.
+template <class W>
+class ServerAsyncResponseWriter final
+ : public ::grpc::internal::ServerAsyncStreamingInterface {
+ public:
+ explicit ServerAsyncResponseWriter(::grpc::ServerContext* ctx)
+ : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+ /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics.
+ ///
+ /// Side effect:
+ /// The initial metadata that will be sent to the client from this op will
+ /// be taken from the \a ServerContext associated with the call.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ void SendInitialMetadata(void* tag) override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ meta_buf_.set_output_tag(tag);
+ meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ meta_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_.PerformOps(&meta_buf_);
+ }
+
+ /// Indicate that the stream is to be finished and request notification
+ /// when the server has sent the appropriate signals to the client to
+ /// end the call. Should not be used concurrently with other operations.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of the call.
+ /// \param[in] msg Message to be sent to the client.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent (using the
+ /// \a ServerContext associated with this call).
+ ///
+ /// Note: if \a status has a non-OK code, then \a msg will not be sent,
+ /// and the client will receive only the status with possible trailing
+ /// metadata.
+ void Finish(const W& msg, const ::grpc::Status& status, void* tag) {
+ finish_buf_.set_output_tag(tag);
+ finish_buf_.set_core_cq_tag(&finish_buf_);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ // The response is dropped if the status is not OK.
+ if (status.ok()) {
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_,
+ finish_buf_.SendMessage(msg));
+ } else {
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ }
+ call_.PerformOps(&finish_buf_);
+ }
+
+ /// Indicate that the stream is to be finished with a non-OK status,
+ /// and request notification for when the server has finished sending the
+ /// appropriate signals to the client to end the call.
+ /// Should not be used concurrently with other operations.
+ ///
+ /// \param[in] tag Tag identifying this request.
+ /// \param[in] status To be sent to the client as the result of the call.
+ /// - Note: \a status must have a non-OK code.
+ ///
+ /// Side effect:
+ /// - also sends initial metadata if not already sent (using the
+ /// \a ServerContext associated with this call).
+ void FinishWithError(const ::grpc::Status& status, void* tag) {
+ GPR_CODEGEN_ASSERT(!status.ok());
+ finish_buf_.set_output_tag(tag);
+ if (!ctx_->sent_initial_metadata_) {
+ finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ finish_buf_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status);
+ call_.PerformOps(&finish_buf_);
+ }
+
+ private:
+ void BindCall(::grpc::internal::Call* call) override { call_ = *call; }
+
+ ::grpc::internal::Call call_;
+ ::grpc::ServerContext* ctx_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ meta_buf_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ finish_buf_;
+};
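For symmetry, a minimal sketch of the server side of an async unary call using the writer defined above. EchoService::AsyncService, EchoRequest, EchoReply and RequestEcho are hypothetical generated names; shutdown and error handling are omitted:

#include <grpcpp/grpcpp.h>

// Sketch: accept one Echo call and answer it through the async API.
void HandleOneEcho(EchoService::AsyncService* service,
                   grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  EchoRequest request;
  grpc::ServerAsyncResponseWriter<EchoReply> responder(&ctx);

  // Ask the runtime to deliver one incoming Echo call into request/responder.
  service->RequestEcho(&ctx, &request, &responder, cq, cq,
                       reinterpret_cast<void*>(1));

  void* tag = nullptr;
  bool ok = false;
  cq->Next(&tag, &ok);  // wait for the incoming call (tag 1)

  EchoReply reply;
  // Finish sends initial metadata (if not yet sent), the message and the
  // status as one batch, then completes with tag 2.
  responder.Finish(reply, grpc::Status::OK, reinterpret_cast<void*>(2));
  cq->Next(&tag, &ok);  // wait for the send to complete (tag 2)
}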
+
} // namespace grpc
-namespace std {
-template <class R>
-class default_delete<::grpc::ClientAsyncResponseReader<R>> {
- public:
- void operator()(void* /*p*/) {}
-};
-template <class R>
-class default_delete<::grpc::ClientAsyncResponseReaderInterface<R>> {
- public:
- void operator()(void* /*p*/) {}
-};
-} // namespace std
-
+namespace std {
+template <class R>
+class default_delete<::grpc::ClientAsyncResponseReader<R>> {
+ public:
+ void operator()(void* /*p*/) {}
+};
+template <class R>
+class default_delete<::grpc::ClientAsyncResponseReaderInterface<R>> {
+ public:
+ void operator()(void* /*p*/) {}
+};
+} // namespace std
+
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H
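The std::default_delete specializations just above exist because ClientAsyncResponseReader objects are allocated on the call arena (their regular operator new is disabled), so the unique_ptr handed out by generated async stubs must not run a real delete. A small illustration, with AsyncEcho again a hypothetical generated stub method:

// Illustration only: destroying the unique_ptr is a no-op; the reader's
// storage is reclaimed together with the call arena, not here.
void DeleterIsANoOp(EchoService::Stub* stub, const EchoRequest& request,
                    grpc::CompletionQueue* cq) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientAsyncResponseReader<EchoReply>> rpc =
      stub->AsyncEcho(&context, request, cq);
  // ... drive the RPC via rpc->Finish(...) and the completion queue ...
}  // rpc goes out of scope here without freeing any heap memory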
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
index 6e64ec9981..59f1d71759 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h
@@ -29,12 +29,12 @@
#include <vector>
-namespace grpc {
-
-class ServerInterface;
-class ByteBuffer;
-class ServerInterface;
-
+namespace grpc {
+
+class ServerInterface;
+class ByteBuffer;
+class ServerInterface;
+
namespace internal {
template <class RequestType, class ResponseType>
class CallbackUnaryHandler;
@@ -163,15 +163,15 @@ class ByteBuffer final {
friend class internal::CallOpRecvMessage;
friend class internal::CallOpGenericRecvMessage;
template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::RpcMethodHandler;
+ friend class internal::RpcMethodHandler;
template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ServerStreamingHandler;
+ friend class internal::ServerStreamingHandler;
template <class RequestType, class ResponseType>
- friend class internal::CallbackUnaryHandler;
+ friend class internal::CallbackUnaryHandler;
template <class RequestType, class ResponseType>
- friend class internal::CallbackServerStreamingHandler;
+ friend class internal::CallbackServerStreamingHandler;
template <StatusCode code>
- friend class internal::ErrorMethodHandler;
+ friend class internal::ErrorMethodHandler;
template <class R>
friend class internal::DeserializeFuncType;
friend class ProtoBufferReader;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
index b229286215..bddb179401 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h
@@ -21,7 +21,7 @@
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/call_hook.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
namespace experimental {
class ClientRpcInfo;
@@ -40,13 +40,13 @@ class Call final {
call_(nullptr),
max_receive_message_size_(-1) {}
/** call is owned by the caller */
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq)
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq)
: call_hook_(call_hook),
cq_(cq),
call_(call),
max_receive_message_size_(-1) {}
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
experimental::ClientRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@@ -54,7 +54,7 @@ class Call final {
max_receive_message_size_(-1),
client_rpc_info_(rpc_info) {}
- Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
+ Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq,
int max_receive_message_size, experimental::ServerRpcInfo* rpc_info)
: call_hook_(call_hook),
cq_(cq),
@@ -67,7 +67,7 @@ class Call final {
}
grpc_call* call() const { return call_; }
- ::grpc::CompletionQueue* cq() const { return cq_; }
+ ::grpc::CompletionQueue* cq() const { return cq_; }
int max_receive_message_size() const { return max_receive_message_size_; }
@@ -81,7 +81,7 @@ class Call final {
private:
CallHook* call_hook_;
- ::grpc::CompletionQueue* cq_;
+ ::grpc::CompletionQueue* cq_;
grpc_call* call_;
int max_receive_message_size_;
experimental::ClientRpcInfo* client_rpc_info_ = nullptr;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
index 379333164a..f47b69c61b 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h
@@ -33,8 +33,8 @@
#include <grpcpp/impl/codegen/call.h>
#include <grpcpp/impl/codegen/call_hook.h>
#include <grpcpp/impl/codegen/call_op_set_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
#include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
@@ -55,8 +55,8 @@ class CallHook;
// TODO(yangg) if the map is changed before we send, the pointers will be a
// mess. Make sure it does not happen.
inline grpc_metadata* FillMetadataArray(
- const std::multimap<TString, TString>& metadata,
- size_t* metadata_count, const TString& optional_error_details) {
+ const std::multimap<TString, TString>& metadata,
+ size_t* metadata_count, const TString& optional_error_details) {
*metadata_count = metadata.size() + (optional_error_details.empty() ? 0 : 1);
if (*metadata_count == 0) {
return nullptr;
@@ -202,10 +202,10 @@ class WriteOptions {
namespace internal {
-/// Default argument for CallOpSet. The Unused parameter is unused by
-/// the class, but can be used for generating multiple names for the
-/// same thing.
-template <int Unused>
+/// Default argument for CallOpSet. The Unused parameter is unused by
+/// the class, but can be used for generating multiple names for the
+/// same thing.
+template <int Unused>
class CallNoOp {
protected:
void AddOp(grpc_op* /*ops*/, size_t* /*nops*/) {}
@@ -224,7 +224,7 @@ class CallOpSendInitialMetadata {
maybe_compression_level_.is_set = false;
}
- void SendInitialMetadata(std::multimap<TString, TString>* metadata,
+ void SendInitialMetadata(std::multimap<TString, TString>* metadata,
uint32_t flags) {
maybe_compression_level_.is_set = false;
send_ = true;
@@ -280,7 +280,7 @@ class CallOpSendInitialMetadata {
bool send_;
uint32_t flags_;
size_t initial_metadata_count_;
- std::multimap<TString, TString>* metadata_map_;
+ std::multimap<TString, TString>* metadata_map_;
grpc_metadata* initial_metadata_;
struct {
bool is_set;
@@ -431,7 +431,7 @@ class CallOpRecvMessage {
// Do not change status if no message is received.
void AllowNoMessage() { allow_not_getting_message_ = true; }
- bool got_message = false;
+ bool got_message = false;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
@@ -444,7 +444,7 @@ class CallOpRecvMessage {
}
void FinishOp(bool* status) {
- if (message_ == nullptr) return;
+ if (message_ == nullptr) return;
if (recv_buf_.Valid()) {
if (*status) {
got_message = *status =
@@ -455,24 +455,24 @@ class CallOpRecvMessage {
got_message = false;
recv_buf_.Clear();
}
- } else if (hijacked_) {
- if (hijacked_recv_message_failed_) {
- FinishOpRecvMessageFailureHandler(status);
- } else {
- // The op was hijacked and it was successful. There is no further action
- // to be performed since the message is already in its non-serialized
- // form.
- }
+ } else if (hijacked_) {
+ if (hijacked_recv_message_failed_) {
+ FinishOpRecvMessageFailureHandler(status);
+ } else {
+ // The op was hijacked and it was successful. There is no further action
+ // to be performed since the message is already in its non-serialized
+ // form.
+ }
} else {
- FinishOpRecvMessageFailureHandler(status);
+ FinishOpRecvMessageFailureHandler(status);
}
}
void SetInterceptionHookPoint(
InterceptorBatchMethodsImpl* interceptor_methods) {
if (message_ == nullptr) return;
- interceptor_methods->SetRecvMessage(message_,
- &hijacked_recv_message_failed_);
+ interceptor_methods->SetRecvMessage(message_,
+ &hijacked_recv_message_failed_);
}
void SetFinishInterceptionHookPoint(
@@ -491,19 +491,19 @@ class CallOpRecvMessage {
}
private:
- // Sets got_message and \a status for a failed recv message op
- void FinishOpRecvMessageFailureHandler(bool* status) {
- got_message = false;
- if (!allow_not_getting_message_) {
- *status = false;
- }
- }
-
- R* message_ = nullptr;
+ // Sets got_message and \a status for a failed recv message op
+ void FinishOpRecvMessageFailureHandler(bool* status) {
+ got_message = false;
+ if (!allow_not_getting_message_) {
+ *status = false;
+ }
+ }
+
+ R* message_ = nullptr;
ByteBuffer recv_buf_;
- bool allow_not_getting_message_ = false;
+ bool allow_not_getting_message_ = false;
bool hijacked_ = false;
- bool hijacked_recv_message_failed_ = false;
+ bool hijacked_recv_message_failed_ = false;
};
class DeserializeFunc {
@@ -540,7 +540,7 @@ class CallOpGenericRecvMessage {
// Do not change status if no message is received.
void AllowNoMessage() { allow_not_getting_message_ = true; }
- bool got_message = false;
+ bool got_message = false;
protected:
void AddOp(grpc_op* ops, size_t* nops) {
@@ -553,7 +553,7 @@ class CallOpGenericRecvMessage {
}
void FinishOp(bool* status) {
- if (!deserialize_) return;
+ if (!deserialize_) return;
if (recv_buf_.Valid()) {
if (*status) {
got_message = true;
@@ -563,14 +563,14 @@ class CallOpGenericRecvMessage {
got_message = false;
recv_buf_.Clear();
}
- } else if (hijacked_) {
- if (hijacked_recv_message_failed_) {
- FinishOpRecvMessageFailureHandler(status);
- } else {
- // The op was hijacked and it was successful. There is no further action
- // to be performed since the message is already in its non-serialized
- // form.
- }
+ } else if (hijacked_) {
+ if (hijacked_recv_message_failed_) {
+ FinishOpRecvMessageFailureHandler(status);
+ } else {
+ // The op was hijacked and it was successful. There is no further action
+ // to be performed since the message is already in its non-serialized
+ // form.
+ }
} else {
got_message = false;
if (!allow_not_getting_message_) {
@@ -582,8 +582,8 @@ class CallOpGenericRecvMessage {
void SetInterceptionHookPoint(
InterceptorBatchMethodsImpl* interceptor_methods) {
if (!deserialize_) return;
- interceptor_methods->SetRecvMessage(message_,
- &hijacked_recv_message_failed_);
+ interceptor_methods->SetRecvMessage(message_,
+ &hijacked_recv_message_failed_);
}
void SetFinishInterceptionHookPoint(
@@ -603,20 +603,20 @@ class CallOpGenericRecvMessage {
}
private:
- // Sets got_message and \a status for a failed recv message op
- void FinishOpRecvMessageFailureHandler(bool* status) {
- got_message = false;
- if (!allow_not_getting_message_) {
- *status = false;
- }
- }
-
- void* message_ = nullptr;
+ // Sets got_message and \a status for a failed recv message op
+ void FinishOpRecvMessageFailureHandler(bool* status) {
+ got_message = false;
+ if (!allow_not_getting_message_) {
+ *status = false;
+ }
+ }
+
+ void* message_ = nullptr;
std::unique_ptr<DeserializeFunc> deserialize_;
ByteBuffer recv_buf_;
- bool allow_not_getting_message_ = false;
- bool hijacked_ = false;
- bool hijacked_recv_message_failed_ = false;
+ bool allow_not_getting_message_ = false;
+ bool hijacked_ = false;
+ bool hijacked_recv_message_failed_ = false;
};
class CallOpClientSendClose {
@@ -659,7 +659,7 @@ class CallOpServerSendStatus {
CallOpServerSendStatus() : send_status_available_(false) {}
void ServerSendStatus(
- std::multimap<TString, TString>* trailing_metadata,
+ std::multimap<TString, TString>* trailing_metadata,
const Status& status) {
send_error_details_ = status.error_details();
metadata_map_ = trailing_metadata;
@@ -713,10 +713,10 @@ class CallOpServerSendStatus {
bool hijacked_ = false;
bool send_status_available_;
grpc_status_code send_status_code_;
- TString send_error_details_;
- TString send_error_message_;
+ TString send_error_details_;
+ TString send_error_message_;
size_t trailing_metadata_count_;
- std::multimap<TString, TString>* metadata_map_;
+ std::multimap<TString, TString>* metadata_map_;
grpc_metadata* trailing_metadata_;
grpc_slice error_message_slice_;
};
@@ -725,7 +725,7 @@ class CallOpRecvInitialMetadata {
public:
CallOpRecvInitialMetadata() : metadata_map_(nullptr) {}
- void RecvInitialMetadata(::grpc::ClientContext* context) {
+ void RecvInitialMetadata(::grpc::ClientContext* context) {
context->initial_metadata_received_ = true;
metadata_map_ = &context->recv_initial_metadata_;
}
@@ -774,7 +774,7 @@ class CallOpClientRecvStatus {
CallOpClientRecvStatus()
: recv_status_(nullptr), debug_error_string_(nullptr) {}
- void ClientRecvStatus(::grpc::ClientContext* context, Status* status) {
+ void ClientRecvStatus(::grpc::ClientContext* context, Status* status) {
client_context_ = context;
metadata_map_ = &client_context_->trailing_metadata_;
recv_status_ = status;
@@ -803,9 +803,9 @@ class CallOpClientRecvStatus {
*recv_status_ =
Status(static_cast<StatusCode>(status_code_),
GRPC_SLICE_IS_EMPTY(error_message_)
- ? TString()
- : TString(reinterpret_cast<const char*>GRPC_SLICE_START_PTR(error_message_),
- reinterpret_cast<const char*>GRPC_SLICE_END_PTR(error_message_)),
+ ? TString()
+ : TString(reinterpret_cast<const char*>GRPC_SLICE_START_PTR(error_message_),
+ reinterpret_cast<const char*>GRPC_SLICE_END_PTR(error_message_)),
metadata_map_->GetBinaryErrorDetails());
if (debug_error_string_ != nullptr) {
client_context_->set_debug_error_string(debug_error_string_);
@@ -840,7 +840,7 @@ class CallOpClientRecvStatus {
private:
bool hijacked_ = false;
- ::grpc::ClientContext* client_context_;
+ ::grpc::ClientContext* client_context_;
MetadataMap* metadata_map_;
Status* recv_status_;
const char* debug_error_string_;
@@ -858,7 +858,7 @@ class CallOpSet;
/// the maximum count of ops we'll need in a set. We leverage the
/// empty base class optimization to slim this class (especially
/// when there are many unused slots used). To avoid duplicate base classes,
-/// the template parameter for CallNoOp is varied by argument position.
+/// the template parameter for CallNoOp is varied by argument position.
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
class CallOpSet : public CallOpSetInterface,
public Op1,
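Each CallOpSet instantiation mixes in up to six op classes (unused slots fall back to CallNoOp) and is submitted to Call::PerformOps as a single batch. A sketch of the op combination already used for a client unary call (the same shape as ClientAsyncResponseReader::single_buf earlier in this diff); the alias name UnaryCallOps is illustrative only:

// Sketch: compose the six ops of a client unary call into one batch type.
template <class R>
using UnaryCallOps = ::grpc::internal::CallOpSet<
    ::grpc::internal::CallOpSendInitialMetadata,  // send client metadata
    ::grpc::internal::CallOpSendMessage,          // send the request
    ::grpc::internal::CallOpClientSendClose,      // half-close the write side
    ::grpc::internal::CallOpRecvInitialMetadata,  // receive server metadata
    ::grpc::internal::CallOpRecvMessage<R>,       // receive the response
    ::grpc::internal::CallOpClientRecvStatus>;    // receive the final status
// An instance is filled in (SendMessage, RecvMessage, ClientRecvStatus, ...)
// and then handed to call.PerformOps(&ops) exactly once per batch.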
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
index ea0752d90e..0666ee056e 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h
@@ -28,7 +28,7 @@
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/time.h>
-namespace grpc {
+namespace grpc {
template <class R>
class ClientReader;
template <class W>
@@ -56,8 +56,8 @@ class ClientCallbackUnaryFactory;
} // namespace internal
class ChannelInterface;
-class ClientContext;
-class CompletionQueue;
+class ClientContext;
+class CompletionQueue;
namespace experimental {
class DelegatingChannel;
@@ -84,7 +84,7 @@ class ChannelInterface {
/// deadline expires. \a GetState needs to called to get the current state.
template <typename T>
void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline,
- ::grpc::CompletionQueue* cq, void* tag) {
+ ::grpc::CompletionQueue* cq, void* tag) {
TimePoint<T> deadline_tp(deadline);
NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag);
}
@@ -109,42 +109,42 @@ class ChannelInterface {
private:
template <class R>
- friend class ::grpc::ClientReader;
+ friend class ::grpc::ClientReader;
template <class W>
- friend class ::grpc::ClientWriter;
+ friend class ::grpc::ClientWriter;
template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
+ friend class ::grpc::ClientReaderWriter;
template <class R>
- friend class ::grpc::internal::ClientAsyncReaderFactory;
+ friend class ::grpc::internal::ClientAsyncReaderFactory;
template <class W>
- friend class ::grpc::internal::ClientAsyncWriterFactory;
+ friend class ::grpc::internal::ClientAsyncWriterFactory;
template <class W, class R>
- friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
+ friend class ::grpc::internal::ClientAsyncReaderWriterFactory;
template <class R>
- friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
+ friend class ::grpc::internal::ClientAsyncResponseReaderFactory;
template <class W, class R>
- friend class ::grpc::internal::ClientCallbackReaderWriterFactory;
+ friend class ::grpc::internal::ClientCallbackReaderWriterFactory;
template <class R>
- friend class ::grpc::internal::ClientCallbackReaderFactory;
+ friend class ::grpc::internal::ClientCallbackReaderFactory;
template <class W>
- friend class ::grpc::internal::ClientCallbackWriterFactory;
- friend class ::grpc::internal::ClientCallbackUnaryFactory;
+ friend class ::grpc::internal::ClientCallbackWriterFactory;
+ friend class ::grpc::internal::ClientCallbackUnaryFactory;
template <class InputMessage, class OutputMessage>
friend class ::grpc::internal::BlockingUnaryCallImpl;
template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::CallbackUnaryCallImpl;
+ friend class ::grpc::internal::CallbackUnaryCallImpl;
friend class ::grpc::internal::RpcMethod;
friend class ::grpc::experimental::DelegatingChannel;
friend class ::grpc::internal::InterceptedChannel;
virtual internal::Call CreateCall(const internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq) = 0;
+ ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq) = 0;
virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) = 0;
virtual void* RegisterMethod(const char* method) = 0;
virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) = 0;
virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) = 0;
@@ -157,8 +157,8 @@ class ChannelInterface {
// method and adding a new pure method to an interface would be a breaking
// change (even though this is private and non-API)
virtual internal::Call CreateCallInternal(
- const internal::RpcMethod& /*method*/, ::grpc::ClientContext* /*context*/,
- ::grpc::CompletionQueue* /*cq*/, size_t /*interceptor_pos*/) {
+ const internal::RpcMethod& /*method*/, ::grpc::ClientContext* /*context*/,
+ ::grpc::CompletionQueue* /*cq*/, size_t /*interceptor_pos*/) {
return internal::Call();
}
@@ -170,7 +170,7 @@ class ChannelInterface {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
- virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
+ virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
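NotifyOnStateChange above only converts the templated deadline and forwards to NotifyOnStateChangeImpl. A minimal sketch of how it is typically driven against a concrete grpc::Channel; the five second deadline and tag value are arbitrary:

#include <chrono>
#include <memory>
#include <grpcpp/grpcpp.h>

// Sketch: block on a CompletionQueue until the channel leaves its current
// connectivity state or the deadline expires.
void WatchConnectivity(const std::shared_ptr<grpc::Channel>& channel,
                       grpc::CompletionQueue* cq) {
  grpc_connectivity_state last = channel->GetState(/*try_to_connect=*/true);
  auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(5);
  channel->NotifyOnStateChange(last, deadline, cq, reinterpret_cast<void*>(1));

  void* tag = nullptr;
  bool ok = false;
  cq->Next(&tag, &ok);
  // ok == true  -> the state changed before the deadline
  // ok == false -> the deadline expired; call GetState again to re-observe
}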
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
index 90c817ceaa..6a08838423 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,1203 +17,1203 @@
#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
#define GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
-#include <atomic>
-#include <functional>
+#include <atomic>
+#include <functional>
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
-class Channel;
-class ClientContext;
-
-namespace internal {
-class RpcMethod;
-
-/// Perform a callback-based unary call
-/// TODO(vjpai): Combine as much as possible with the blocking unary call code
-template <class InputMessage, class OutputMessage>
-void CallbackUnaryCall(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const InputMessage* request, OutputMessage* result,
- std::function<void(::grpc::Status)> on_completion) {
- CallbackUnaryCallImpl<InputMessage, OutputMessage> x(
- channel, method, context, request, result, on_completion);
-}
-
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl {
- public:
- CallbackUnaryCallImpl(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const InputMessage* request, OutputMessage* result,
- std::function<void(::grpc::Status)> on_completion) {
- ::grpc::CompletionQueue* cq = channel->CallbackCQ();
- GPR_CODEGEN_ASSERT(cq != nullptr);
- grpc::internal::Call call(channel->CreateCall(method, context, cq));
-
- using FullCallOpSet = grpc::internal::CallOpSet<
- ::grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpRecvInitialMetadata,
- grpc::internal::CallOpRecvMessage<OutputMessage>,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpClientRecvStatus>;
-
- struct OpSetAndTag {
- FullCallOpSet opset;
- grpc::internal::CallbackWithStatusTag tag;
- };
- const size_t alloc_sz = sizeof(OpSetAndTag);
- auto* const alloced = static_cast<OpSetAndTag*>(
- ::grpc::g_core_codegen_interface->grpc_call_arena_alloc(call.call(),
- alloc_sz));
- auto* ops = new (&alloced->opset) FullCallOpSet;
- auto* tag = new (&alloced->tag)
- grpc::internal::CallbackWithStatusTag(call.call(), on_completion, ops);
-
- // TODO(vjpai): Unify code with sync API as much as possible
- ::grpc::Status s = ops->SendMessagePtr(request);
- if (!s.ok()) {
- tag->force_run(s);
- return;
- }
- ops->SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- ops->RecvInitialMetadata(context);
- ops->RecvMessage(result);
- ops->AllowNoMessage();
- ops->ClientSendClose();
- ops->ClientRecvStatus(context, tag->status_ptr());
- ops->set_core_cq_tag(tag);
- call.PerformOps(ops);
- }
-};
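CallbackUnaryCall above is the library-side entry point; application code normally reaches it through the generated stub's callback interface. A hedged sketch, assuming hypothetical Echo types and a stub accessor named experimental_async() (the name used by gRPC releases of this vintage; later releases call it async()):

#include <grpcpp/grpcpp.h>

// Sketch only: EchoService, EchoRequest, EchoReply are hypothetical
// generated types. The lambda runs on a library thread when the RPC ends,
// so everything it touches must outlive the call.
void CallEchoWithCallback(EchoService::Stub* stub) {
  auto* context = new grpc::ClientContext;
  auto* request = new EchoRequest;
  auto* reply = new EchoReply;
  stub->experimental_async()->Echo(
      context, request, reply,
      [context, request, reply](grpc::Status s) {
        if (s.ok()) {
          // use *reply
        }
        delete reply;
        delete request;
        delete context;
      });
}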
-
-// Base class for public API classes.
-class ClientReactor {
- public:
- /// Called by the library when all operations associated with this RPC have
- /// completed and all Holds have been removed. OnDone provides the RPC status
- /// outcome for both successful and failed RPCs. If it is never called on an
- /// RPC, it indicates an application-level problem (like failure to remove a
- /// hold).
- ///
- /// \param[in] s The status outcome of this RPC
- virtual void OnDone(const ::grpc::Status& /*s*/) = 0;
-
- /// InternalScheduleOnDone is not part of the API and is not meant to be
- /// overridden. It is virtual to allow successful builds for certain bazel
- /// build users that only want to depend on gRPC codegen headers and not the
- /// full library (although this is not a generally-supported option). Although
- /// the virtual call is slower than a direct call, this function is
- /// heavyweight and the cost of the virtual call is not much in comparison.
- /// This function may be removed or devirtualized in the future.
- virtual void InternalScheduleOnDone(::grpc::Status s);
-};
-
-} // namespace internal
-
-// Forward declarations
-template <class Request, class Response>
-class ClientBidiReactor;
-template <class Response>
-class ClientReadReactor;
-template <class Request>
-class ClientWriteReactor;
-class ClientUnaryReactor;
-
-// NOTE: The streaming objects are not actually implemented in the public API.
-// These interfaces are provided for mocking only. Typical applications
-// will interact exclusively with the reactors that they define.
-template <class Request, class Response>
-class ClientCallbackReaderWriter {
- public:
- virtual ~ClientCallbackReaderWriter() {}
- virtual void StartCall() = 0;
- virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
- virtual void WritesDone() = 0;
- virtual void Read(Response* resp) = 0;
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientBidiReactor<Request, Response>* reactor) {
- reactor->BindStream(this);
- }
-};
-
-template <class Response>
-class ClientCallbackReader {
- public:
- virtual ~ClientCallbackReader() {}
- virtual void StartCall() = 0;
- virtual void Read(Response* resp) = 0;
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientReadReactor<Response>* reactor) {
- reactor->BindReader(this);
- }
-};
-
-template <class Request>
-class ClientCallbackWriter {
- public:
- virtual ~ClientCallbackWriter() {}
- virtual void StartCall() = 0;
- void Write(const Request* req) { Write(req, ::grpc::WriteOptions()); }
- virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
- void WriteLast(const Request* req, ::grpc::WriteOptions options) {
- Write(req, options.set_last_message());
- }
- virtual void WritesDone() = 0;
-
- virtual void AddHold(int holds) = 0;
- virtual void RemoveHold() = 0;
-
- protected:
- void BindReactor(ClientWriteReactor<Request>* reactor) {
- reactor->BindWriter(this);
- }
-};
-
-class ClientCallbackUnary {
- public:
- virtual ~ClientCallbackUnary() {}
- virtual void StartCall() = 0;
-
- protected:
- void BindReactor(ClientUnaryReactor* reactor);
-};
-
-// The following classes are the reactor interfaces that are to be implemented
-// by the user. They are passed in to the library as an argument to a call on a
-// stub (either a codegen-ed call or a generic call). The streaming RPC is
-// activated by calling StartCall, possibly after initiating StartRead,
-// StartWrite, or AddHold operations on the streaming object. Note that none of
-// the classes are pure; all reactions have a default empty reaction so that the
-// user class only needs to override those classes that it cares about.
-// The reactor must be passed to the stub invocation before any of the below
-// operations can be called.
-
-/// \a ClientBidiReactor is the interface for a bidirectional streaming RPC.
-template <class Request, class Response>
-class ClientBidiReactor : public internal::ClientReactor {
- public:
- virtual ~ClientBidiReactor() {}
-
- /// Activate the RPC and initiate any reads or writes that have been Start'ed
- /// before this call. All streaming RPCs issued by the client MUST have
- /// StartCall invoked on them (even if they are canceled) as this call is the
- /// activation of their lifecycle.
- void StartCall() { stream_->StartCall(); }
-
- /// Initiate a read operation (or post it for later initiation if StartCall
- /// has not yet been invoked).
- ///
- /// \param[out] resp Where to eventually store the read message. Valid when
- /// the library calls OnReadDone
- void StartRead(Response* resp) { stream_->Read(resp); }
-
- /// Initiate a write operation (or post it for later initiation if StartCall
- /// has not yet been invoked).
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- void StartWrite(const Request* req) {
- StartWrite(req, ::grpc::WriteOptions());
- }
-
- /// Initiate/post a write operation with specified options.
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWrite(const Request* req, ::grpc::WriteOptions options) {
- stream_->Write(req, std::move(options));
- }
-
- /// Initiate/post a write operation with specified options and an indication
- /// that this is the last write (like StartWrite and StartWritesDone, merged).
- /// Note that calling this means that no more calls to StartWrite,
- /// StartWriteLast, or StartWritesDone are allowed.
- ///
- /// \param[in] req The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
- StartWrite(req, std::move(options.set_last_message()));
- }
-
- /// Indicate that the RPC will have no more write operations. This can only be
- /// issued once for a given RPC. This is not required or allowed if
- /// StartWriteLast is used since that already has the same implication.
- /// Note that calling this means that no more calls to StartWrite,
- /// StartWriteLast, or StartWritesDone are allowed.
- void StartWritesDone() { stream_->WritesDone(); }
-
- /// Holds are needed if (and only if) this stream has operations that take
- /// place on it after StartCall but from outside one of the reactions
- /// (OnReadDone, etc). This is _not_ a common use of the streaming API.
- ///
- /// Holds must be added before calling StartCall. If a stream still has a hold
- /// in place, its resources will not be destroyed even if the status has
- /// already come in from the wire and there are currently no active callbacks
- /// outstanding. Similarly, the stream will not call OnDone if there are still
- /// holds on it.
- ///
- /// For example, if a StartRead or StartWrite operation is going to be
- /// initiated from elsewhere in the application, the application should call
- /// AddHold or AddMultipleHolds before StartCall. If there is going to be,
- /// for example, a read-flow and a write-flow taking place outside the
- /// reactions, then call AddMultipleHolds(2) before StartCall. When the
- /// application knows that it won't issue any more read operations (such as
- /// when a read comes back as not ok), it should issue a RemoveHold(). It
- /// should also call RemoveHold() again after it does StartWriteLast or
- /// StartWritesDone that indicates that there will be no more write ops.
- /// The number of RemoveHold calls must match the total number of AddHold
- /// calls plus the number of holds added by AddMultipleHolds.
- /// The argument to AddMultipleHolds must be positive.
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- stream_->AddHold(holds);
- }
- void RemoveHold() { stream_->RemoveHold(); }
-
- /// Notifies the application that all operations associated with this RPC
- /// have completed and all Holds have been removed. OnDone provides the RPC
- /// status outcome for both successful and failed RPCs and will be called in
- /// all cases. If it is not called, it indicates an application-level problem
- /// (like failure to remove a hold).
- ///
- /// \param[in] s The status outcome of this RPC
- void OnDone(const ::grpc::Status& /*s*/) override {}
-
- /// Notifies the application that a read of initial metadata from the
- /// server is done. If the application chooses not to implement this method,
- /// it can assume that the initial metadata has been read before the first
- /// call of OnReadDone or OnDone.
- ///
- /// \param[in] ok Was the initial metadata read successfully? If false, no
- /// new read/write operation will succeed, and any further
- /// Start* operations should not be called.
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartRead operation completed.
- ///
- /// \param[in] ok Was it successful? If false, no new read/write operation
- /// will succeed, and any further Start* should not be called.
- virtual void OnReadDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWrite or StartWriteLast operation
- /// completed.
- ///
- /// \param[in] ok Was it successful? If false, no new read/write operation
- /// will succeed, and any further Start* should not be called.
- virtual void OnWriteDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWritesDone operation completed. Note
- /// that this is only used on explicit StartWritesDone operations and not for
- /// those that are implicitly invoked as part of a StartWriteLast.
- ///
- /// \param[in] ok Was it successful? If false, the application will later see
- /// the failure reflected as a bad status in OnDone and no
- /// further Start* should be called.
- virtual void OnWritesDoneDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackReaderWriter<Request, Response>;
- void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) {
- stream_ = stream;
- }
- ClientCallbackReaderWriter<Request, Response>* stream_;
-};
-
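A compact sketch of a user-defined bidirectional reactor built on the interface above: it queues one write and one read before StartCall, keeps reading until the server finishes, and lets the caller block for the final status. EchoService, EchoRequest, EchoReply and the BidiEcho stub method are hypothetical, and experimental_async() is the stub accessor of this gRPC vintage:

#include <condition_variable>
#include <mutex>
#include <grpcpp/grpcpp.h>

// Sketch: send one request, drain responses, surface the final status.
class EchoBidiReactor : public grpc::ClientBidiReactor<EchoRequest, EchoReply> {
 public:
  explicit EchoBidiReactor(EchoService::Stub* stub) {
    stub->experimental_async()->BidiEcho(&context_, this);  // binds the reactor
    StartWriteLast(&request_, grpc::WriteOptions());  // one write + half-close
    StartRead(&response_);
    StartCall();  // activates the RPC and the operations queued above
  }
  void OnReadDone(bool ok) override {
    if (ok) StartRead(&response_);  // keep reading until the stream ends
  }
  void OnDone(const grpc::Status& s) override {
    std::lock_guard<std::mutex> lock(mu_);
    status_ = s;
    done_ = true;
    cv_.notify_one();
  }
  grpc::Status Await() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return done_; });
    return status_;
  }

 private:
  grpc::ClientContext context_;
  EchoRequest request_;
  EchoReply response_;
  std::mutex mu_;
  std::condition_variable cv_;
  bool done_ = false;
  grpc::Status status_;
};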
-/// \a ClientReadReactor is the interface for a server-streaming RPC.
-/// All public methods behave as in ClientBidiReactor.
-template <class Response>
-class ClientReadReactor : public internal::ClientReactor {
- public:
- virtual ~ClientReadReactor() {}
-
- void StartCall() { reader_->StartCall(); }
- void StartRead(Response* resp) { reader_->Read(resp); }
-
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- reader_->AddHold(holds);
- }
- void RemoveHold() { reader_->RemoveHold(); }
-
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
- virtual void OnReadDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackReader<Response>;
- void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; }
- ClientCallbackReader<Response>* reader_;
-};
-
-/// \a ClientWriteReactor is the interface for a client-streaming RPC.
-/// All public methods behave as in ClientBidiReactor.
-template <class Request>
-class ClientWriteReactor : public internal::ClientReactor {
- public:
- virtual ~ClientWriteReactor() {}
-
- void StartCall() { writer_->StartCall(); }
- void StartWrite(const Request* req) {
- StartWrite(req, ::grpc::WriteOptions());
- }
- void StartWrite(const Request* req, ::grpc::WriteOptions options) {
- writer_->Write(req, std::move(options));
- }
- void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
- StartWrite(req, std::move(options.set_last_message()));
- }
- void StartWritesDone() { writer_->WritesDone(); }
-
- void AddHold() { AddMultipleHolds(1); }
- void AddMultipleHolds(int holds) {
- GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
- writer_->AddHold(holds);
- }
- void RemoveHold() { writer_->RemoveHold(); }
-
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
- virtual void OnWriteDone(bool /*ok*/) {}
- virtual void OnWritesDoneDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackWriter<Request>;
- void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; }
-
- ClientCallbackWriter<Request>* writer_;
-};
-
-/// \a ClientUnaryReactor is a reactor-style interface for a unary RPC.
-/// This is _not_ a common way of invoking a unary RPC. In practice, this
-/// option should be used only if the unary RPC wants to receive initial
-/// metadata without waiting for the response to complete. Most deployments of
-/// RPC systems do not use this option, but it is needed for generality.
-/// All public methods behave as in ClientBidiReactor.
-/// StartCall is included for consistency with the other reactor flavors: even
-/// though there are no StartRead or StartWrite operations to queue before the
-/// call (that is part of the unary call itself) and there is no reactor object
-/// being created as a result of this call, we keep a consistent 2-phase
-/// initiation API among all the reactor flavors.
-class ClientUnaryReactor : public internal::ClientReactor {
- public:
- virtual ~ClientUnaryReactor() {}
-
- void StartCall() { call_->StartCall(); }
- void OnDone(const ::grpc::Status& /*s*/) override {}
- virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
-
- private:
- friend class ClientCallbackUnary;
- void BindCall(ClientCallbackUnary* call) { call_ = call; }
- ClientCallbackUnary* call_;
-};
-
-// Define function out-of-line from class to avoid forward declaration issue
-inline void ClientCallbackUnary::BindReactor(ClientUnaryReactor* reactor) {
- reactor->BindCall(this);
-}
-
-namespace internal {
-
-// Forward declare factory classes for friendship
-template <class Request, class Response>
-class ClientCallbackReaderWriterFactory;
-template <class Response>
-class ClientCallbackReaderFactory;
-template <class Request>
-class ClientCallbackWriterFactory;
-
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl
- : public ClientCallbackReaderWriter<Request, Response> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderWriterImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any read backlog
- // 3. Any write backlog
- // 4. Recv trailing metadata (unless corked)
- if (!start_corked_) {
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
-
- call_.PerformOps(&start_ops_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
-
- if (backlog_.read_ops) {
- call_.PerformOps(&read_ops_);
- }
- if (backlog_.write_ops) {
- call_.PerformOps(&write_ops_);
- }
- if (backlog_.writes_done_ops) {
- call_.PerformOps(&writes_done_ops_);
- }
- call_.PerformOps(&finish_ops_);
- // The last thing in this critical section is to set started_ so that it
- // can be used lock-free as well.
- started_.store(true, std::memory_order_release);
- }
- // MaybeFinish outside the lock to make sure that destruction of this object
- // doesn't take place while holding the lock (which would cause the lock to
- // be released after destruction)
- this->MaybeFinish(/*from_reaction=*/false);
- }
-
- void Read(Response* msg) override {
- read_ops_.RecvMessage(msg);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.read_ops = true;
- return;
- }
- }
- call_.PerformOps(&read_ops_);
- }
-
- void Write(const Request* msg, ::grpc::WriteOptions options) override {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(corked_write_needed_)) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.write_ops = true;
- return;
- }
- }
- call_.PerformOps(&write_ops_);
- }
- void WritesDone() override {
- writes_done_ops_.ClientSendClose();
- writes_done_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWritesDoneDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &writes_done_ops_, /*can_inline=*/false);
- writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(corked_write_needed_)) {
- writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.writes_done_ops = true;
- return;
- }
- }
- call_.PerformOps(&writes_done_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackReaderWriterFactory<Request, Response>;
-
- ClientCallbackReaderWriterImpl(grpc::internal::Call call,
- ::grpc::ClientContext* context,
- ClientBidiReactor<Request, Response>* reactor)
- : context_(context),
- call_(call),
- reactor_(reactor),
- start_corked_(context_->initial_metadata_corked_),
- corked_write_needed_(start_corked_) {
- this->BindReactor(reactor);
-
- // Set up the unchanging parts of the start, read, and write tags and ops.
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
-
- write_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWriteDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &write_ops_, /*can_inline=*/false);
- write_ops_.set_core_cq_tag(&write_tag_);
-
- read_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &read_ops_, /*can_inline=*/false);
- read_ops_.set_core_cq_tag(&read_tag_);
-
- // Also set up the Finish tag and op set.
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- }
-
- // MaybeFinish can be called from reactions or from user-initiated operations
- // like StartCall or RemoveHold. If this is the last operation or hold on this
- // object, it will invoke the OnDone reaction. If MaybeFinish was called from
- // a reaction, it can call OnDone directly. If not, it would need to schedule
- // OnDone onto an executor thread to avoid the possibility of deadlocking with
- // any locks in the user code that invoked it.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackReaderWriterImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientBidiReactor<Request, Response>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
- const bool start_corked_;
- bool corked_write_needed_; // no lock needed since only accessed in
- // Write/WritesDone which cannot be concurrent
-
- grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose>
- write_ops_;
- grpc::internal::CallbackWithSuccessTag write_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpClientSendClose>
- writes_done_ops_;
- grpc::internal::CallbackWithSuccessTag writes_done_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
- read_ops_;
- grpc::internal::CallbackWithSuccessTag read_tag_;
-
- struct StartCallBacklog {
- bool write_ops = false;
- bool writes_done_ops = false;
- bool read_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
- std::atomic<intptr_t> callbacks_outstanding_{3};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Request, class Response>
-class ClientCallbackReaderWriterFactory {
- public:
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- ClientBidiReactor<Request, Response>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackReaderWriterImpl<Request, Response>)))
- ClientCallbackReaderWriterImpl<Request, Response>(call, context,
- reactor);
- }
-};
-
-template <class Response>
-class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any backlog
- // 3. Recv trailing metadata
-
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
- call_.PerformOps(&start_ops_);
-
- // Also set up the read tag so it doesn't have to be set up each time
- read_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &read_ops_, /*can_inline=*/false);
- read_ops_.set_core_cq_tag(&read_tag_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
- if (backlog_.read_ops) {
- call_.PerformOps(&read_ops_);
- }
- started_.store(true, std::memory_order_release);
- }
-
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_, /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- call_.PerformOps(&finish_ops_);
- }
-
- void Read(Response* msg) override {
- read_ops_.RecvMessage(msg);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.read_ops = true;
- return;
- }
- }
- call_.PerformOps(&read_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackReaderFactory<Response>;
-
- template <class Request>
- ClientCallbackReaderImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Request* request,
- ClientReadReactor<Response>* reactor)
- : context_(context), call_(call), reactor_(reactor) {
- this->BindReactor(reactor);
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
- start_ops_.ClientSendClose();
- }
-
- // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackReaderImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientReadReactor<Response>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
- read_ops_;
- grpc::internal::CallbackWithSuccessTag read_tag_;
-
- struct StartCallBacklog {
- bool read_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 2 callbacks to pre-register for start and finish
- std::atomic<intptr_t> callbacks_outstanding_{2};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Response>
-class ClientCallbackReaderFactory {
- public:
- template <class Request>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const Request* request,
- ClientReadReactor<Response>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackReaderImpl<Response>)))
- ClientCallbackReaderImpl<Response>(call, context, request, reactor);
- }
-};
-
-template <class Request>
-class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackWriterImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, plus any backlog, each with a callback
- // 1. Send initial metadata (unless corked) + recv initial metadata
- // 2. Any backlog
- // 3. Recv trailing metadata
-
- if (!start_corked_) {
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- }
- call_.PerformOps(&start_ops_);
-
- {
- grpc::internal::MutexLock lock(&start_mu_);
-
- if (backlog_.write_ops) {
- call_.PerformOps(&write_ops_);
- }
- if (backlog_.writes_done_ops) {
- call_.PerformOps(&writes_done_ops_);
- }
- call_.PerformOps(&finish_ops_);
- // The last thing in this critical section is to set started_ so that it
- // can be used lock-free as well.
- started_.store(true, std::memory_order_release);
- }
- // MaybeFinish outside the lock to make sure that destruction of this object
- // doesn't take place while holding the lock (which would cause the lock to
- // be released after destruction)
- this->MaybeFinish(/*from_reaction=*/false);
- }
-
- void Write(const Request* msg, ::grpc::WriteOptions options) override {
- if (GPR_UNLIKELY(options.is_last_message())) {
- options.set_buffer_hint();
- write_ops_.ClientSendClose();
- }
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-
- if (GPR_UNLIKELY(corked_write_needed_)) {
- write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.write_ops = true;
- return;
- }
- }
- call_.PerformOps(&write_ops_);
- }
-
- void WritesDone() override {
- writes_done_ops_.ClientSendClose();
- writes_done_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWritesDoneDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &writes_done_ops_, /*can_inline=*/false);
- writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
- callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-
- if (GPR_UNLIKELY(corked_write_needed_)) {
- writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- corked_write_needed_ = false;
- }
-
- if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
- grpc::internal::MutexLock lock(&start_mu_);
- if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
- backlog_.writes_done_ops = true;
- return;
- }
- }
- call_.PerformOps(&writes_done_ops_);
- }
-
- void AddHold(int holds) override {
- callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
- }
- void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
-
- private:
- friend class ClientCallbackWriterFactory<Request>;
-
- template <class Response>
- ClientCallbackWriterImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Response* response,
- ClientWriteReactor<Request>* reactor)
- : context_(context),
- call_(call),
- reactor_(reactor),
- start_corked_(context_->initial_metadata_corked_),
- corked_write_needed_(start_corked_) {
- this->BindReactor(reactor);
-
- // Set up the unchanging parts of the start and write tags and ops.
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
-
- write_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnWriteDone(ok);
- MaybeFinish(/*from_reaction=*/true);
- },
- &write_ops_, /*can_inline=*/false);
- write_ops_.set_core_cq_tag(&write_tag_);
-
- // Also set up the Finish tag and op set.
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- finish_tag_.Set(
- call_.call(),
- [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- }
-
- // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
- void MaybeFinish(bool from_reaction) {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackWriterImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- if (GPR_LIKELY(from_reaction)) {
- reactor->OnDone(s);
- } else {
- reactor->InternalScheduleOnDone(std::move(s));
- }
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientWriteReactor<Request>* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
- const bool start_corked_;
- bool corked_write_needed_; // no lock needed since only accessed in
- // Write/WritesDone which cannot be concurrent
-
- grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
- grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose>
- write_ops_;
- grpc::internal::CallbackWithSuccessTag write_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpClientSendClose>
- writes_done_ops_;
- grpc::internal::CallbackWithSuccessTag writes_done_tag_;
-
- struct StartCallBacklog {
- bool write_ops = false;
- bool writes_done_ops = false;
- };
- StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
-
- // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
- std::atomic<intptr_t> callbacks_outstanding_{3};
- std::atomic_bool started_{false};
- grpc::internal::Mutex start_mu_;
-};
-
-template <class Request>
-class ClientCallbackWriterFactory {
- public:
- template <class Response>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, Response* response,
- ClientWriteReactor<Request>* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackWriterImpl<Request>)))
- ClientCallbackWriterImpl<Request>(call, context, response, reactor);
- }
-};
-
-class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
- public:
- // always allocated against a call arena, no memory free required
- static void operator delete(void* /*ptr*/, std::size_t size) {
- GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackUnaryImpl));
- }
-
- // This operator should never be called as the memory should be freed as part
- // of the arena destruction. It only exists to provide a matching operator
- // delete to the operator new so that some compilers will not complain (see
- // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
- // there are no tests catching the compiler warning.
- static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
-
- void StartCall() override {
- // This call initiates two batches, each with a callback
- // 1. Send initial metadata + write + writes done + recv initial metadata
- // 2. Read message, recv trailing metadata
-
- start_tag_.Set(call_.call(),
- [this](bool ok) {
- reactor_->OnReadInitialMetadataDone(ok);
- MaybeFinish();
- },
- &start_ops_, /*can_inline=*/false);
- start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- start_ops_.RecvInitialMetadata(context_);
- start_ops_.set_core_cq_tag(&start_tag_);
- call_.PerformOps(&start_ops_);
-
- finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
- &finish_ops_,
- /*can_inline=*/false);
- finish_ops_.ClientRecvStatus(context_, &finish_status_);
- finish_ops_.set_core_cq_tag(&finish_tag_);
- call_.PerformOps(&finish_ops_);
- }
-
- private:
- friend class ClientCallbackUnaryFactory;
-
- template <class Request, class Response>
- ClientCallbackUnaryImpl(::grpc::internal::Call call,
- ::grpc::ClientContext* context, Request* request,
- Response* response, ClientUnaryReactor* reactor)
- : context_(context), call_(call), reactor_(reactor) {
- this->BindReactor(reactor);
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
- start_ops_.ClientSendClose();
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
- }
-
- // In the unary case, MaybeFinish is only ever invoked from a
- // library-initiated reaction, so it will just directly call OnDone if this is
- // the last reaction for this RPC.
- void MaybeFinish() {
- if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1)) {
- ::grpc::Status s = std::move(finish_status_);
- auto* reactor = reactor_;
- auto* call = call_.call();
- this->~ClientCallbackUnaryImpl();
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- reactor->OnDone(s);
- }
- }
-
- ::grpc::ClientContext* const context_;
- grpc::internal::Call call_;
- ClientUnaryReactor* const reactor_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
- grpc::internal::CallOpSendMessage,
- grpc::internal::CallOpClientSendClose,
- grpc::internal::CallOpRecvInitialMetadata>
- start_ops_;
- grpc::internal::CallbackWithSuccessTag start_tag_;
-
- grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
- grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::Status finish_status_;
-
- // This call will have 2 callbacks: start and finish
- std::atomic<intptr_t> callbacks_outstanding_{2};
-};
-
-class ClientCallbackUnaryFactory {
- public:
- template <class Request, class Response>
- static void Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const Request* request,
- Response* response, ClientUnaryReactor* reactor) {
- grpc::internal::Call call =
- channel->CreateCall(method, context, channel->CallbackCQ());
-
- ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
-
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call.call(), sizeof(ClientCallbackUnaryImpl)))
- ClientCallbackUnaryImpl(call, context, request, response, reactor);
- }
-};
-
-} // namespace internal
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+class Channel;
+class ClientContext;
+
+namespace internal {
+class RpcMethod;
+
+/// Perform a callback-based unary call
+/// TODO(vjpai): Combine as much as possible with the blocking unary call code
+template <class InputMessage, class OutputMessage>
+void CallbackUnaryCall(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const InputMessage* request, OutputMessage* result,
+ std::function<void(::grpc::Status)> on_completion) {
+ CallbackUnaryCallImpl<InputMessage, OutputMessage> x(
+ channel, method, context, request, result, on_completion);
+}
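
For reference, a minimal sketch of how an application typically reaches this code path: the generated callback stub forwards a unary RPC into CallbackUnaryCall with a completion std::function. All names below (Greeter, SayHello, HelloRequest, HelloReply, the generated header) are placeholders, and depending on the gRPC release the callback stub is exposed as stub->async() or stub->experimental_async(); this is a usage sketch, not part of this header.

#include <grpcpp/grpcpp.h>
#include "greeter.grpc.pb.h"  // hypothetical generated header

void SayHelloCallback(Greeter::Stub* stub) {
  // Per-RPC state must outlive the completion callback, so heap-allocate it.
  auto* ctx = new grpc::ClientContext;
  auto* req = new HelloRequest;
  auto* resp = new HelloReply;
  req->set_name("world");
  stub->async()->SayHello(ctx, req, resp, [ctx, req, resp](grpc::Status s) {
    // Runs on a library-managed thread once the RPC has finished.
    if (s.ok()) { /* consume *resp */ }
    delete resp;
    delete req;
    delete ctx;
  });
}
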
+
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl {
+ public:
+ CallbackUnaryCallImpl(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const InputMessage* request, OutputMessage* result,
+ std::function<void(::grpc::Status)> on_completion) {
+ ::grpc::CompletionQueue* cq = channel->CallbackCQ();
+ GPR_CODEGEN_ASSERT(cq != nullptr);
+ grpc::internal::Call call(channel->CreateCall(method, context, cq));
+
+ using FullCallOpSet = grpc::internal::CallOpSet<
+ ::grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpRecvInitialMetadata,
+ grpc::internal::CallOpRecvMessage<OutputMessage>,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpClientRecvStatus>;
+
+ struct OpSetAndTag {
+ FullCallOpSet opset;
+ grpc::internal::CallbackWithStatusTag tag;
+ };
+ const size_t alloc_sz = sizeof(OpSetAndTag);
+ auto* const alloced = static_cast<OpSetAndTag*>(
+ ::grpc::g_core_codegen_interface->grpc_call_arena_alloc(call.call(),
+ alloc_sz));
+ auto* ops = new (&alloced->opset) FullCallOpSet;
+ auto* tag = new (&alloced->tag)
+ grpc::internal::CallbackWithStatusTag(call.call(), on_completion, ops);
+
+ // TODO(vjpai): Unify code with sync API as much as possible
+ ::grpc::Status s = ops->SendMessagePtr(request);
+ if (!s.ok()) {
+ tag->force_run(s);
+ return;
+ }
+ ops->SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ ops->RecvInitialMetadata(context);
+ ops->RecvMessage(result);
+ ops->AllowNoMessage();
+ ops->ClientSendClose();
+ ops->ClientRecvStatus(context, tag->status_ptr());
+ ops->set_core_cq_tag(tag);
+ call.PerformOps(ops);
+ }
+};
+
+// Base class for public API classes.
+class ClientReactor {
+ public:
+ /// Called by the library when all operations associated with this RPC have
+ /// completed and all Holds have been removed. OnDone provides the RPC status
+ /// outcome for both successful and failed RPCs. If it is never called on an
+ /// RPC, it indicates an application-level problem (like failure to remove a
+ /// hold).
+ ///
+ /// \param[in] s The status outcome of this RPC
+ virtual void OnDone(const ::grpc::Status& /*s*/) = 0;
+
+ /// InternalScheduleOnDone is not part of the API and is not meant to be
+ /// overridden. It is virtual to allow successful builds for certain bazel
+ /// build users that only want to depend on gRPC codegen headers and not the
+ /// full library (although this is not a generally-supported option). Although
+ /// the virtual call is slower than a direct call, this function is
+ /// heavyweight and the cost of the virtual call is not much in comparison.
+ /// This function may be removed or devirtualized in the future.
+ virtual void InternalScheduleOnDone(::grpc::Status s);
+};
+
+} // namespace internal
+
+// Forward declarations
+template <class Request, class Response>
+class ClientBidiReactor;
+template <class Response>
+class ClientReadReactor;
+template <class Request>
+class ClientWriteReactor;
+class ClientUnaryReactor;
+
+// NOTE: The streaming objects are not actually implemented in the public API.
+// These interfaces are provided for mocking only. Typical applications
+// will interact exclusively with the reactors that they define.
+template <class Request, class Response>
+class ClientCallbackReaderWriter {
+ public:
+ virtual ~ClientCallbackReaderWriter() {}
+ virtual void StartCall() = 0;
+ virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
+ virtual void WritesDone() = 0;
+ virtual void Read(Response* resp) = 0;
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientBidiReactor<Request, Response>* reactor) {
+ reactor->BindStream(this);
+ }
+};
+
+template <class Response>
+class ClientCallbackReader {
+ public:
+ virtual ~ClientCallbackReader() {}
+ virtual void StartCall() = 0;
+ virtual void Read(Response* resp) = 0;
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientReadReactor<Response>* reactor) {
+ reactor->BindReader(this);
+ }
+};
+
+template <class Request>
+class ClientCallbackWriter {
+ public:
+ virtual ~ClientCallbackWriter() {}
+ virtual void StartCall() = 0;
+ void Write(const Request* req) { Write(req, ::grpc::WriteOptions()); }
+ virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0;
+ void WriteLast(const Request* req, ::grpc::WriteOptions options) {
+ Write(req, options.set_last_message());
+ }
+ virtual void WritesDone() = 0;
+
+ virtual void AddHold(int holds) = 0;
+ virtual void RemoveHold() = 0;
+
+ protected:
+ void BindReactor(ClientWriteReactor<Request>* reactor) {
+ reactor->BindWriter(this);
+ }
+};
+
+class ClientCallbackUnary {
+ public:
+ virtual ~ClientCallbackUnary() {}
+ virtual void StartCall() = 0;
+
+ protected:
+ void BindReactor(ClientUnaryReactor* reactor);
+};
+
+// The following classes are the reactor interfaces that are to be implemented
+// by the user. They are passed in to the library as an argument to a call on a
+// stub (either a codegen-ed call or a generic call). The streaming RPC is
+// activated by calling StartCall, possibly after initiating StartRead,
+// StartWrite, or AddHold operations on the streaming object. Note that none of
+// the classes are pure; all reactions have a default empty reaction so that the
+// user class only needs to override those classes that it cares about.
+// The reactor must be passed to the stub invocation before any of the below
+// operations can be called.
+
+/// \a ClientBidiReactor is the interface for a bidirectional streaming RPC.
+template <class Request, class Response>
+class ClientBidiReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientBidiReactor() {}
+
+ /// Activate the RPC and initiate any reads or writes that have been Start'ed
+ /// before this call. All streaming RPCs issued by the client MUST have
+ /// StartCall invoked on them (even if they are canceled) as this call is the
+ /// activation of their lifecycle.
+ void StartCall() { stream_->StartCall(); }
+
+ /// Initiate a read operation (or post it for later initiation if StartCall
+ /// has not yet been invoked).
+ ///
+ /// \param[out] resp Where to eventually store the read message. Valid when
+ /// the library calls OnReadDone
+ void StartRead(Response* resp) { stream_->Read(resp); }
+
+ /// Initiate a write operation (or post it for later initiation if StartCall
+ /// has not yet been invoked).
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ void StartWrite(const Request* req) {
+ StartWrite(req, ::grpc::WriteOptions());
+ }
+
+ /// Initiate/post a write operation with specified options.
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWrite(const Request* req, ::grpc::WriteOptions options) {
+ stream_->Write(req, std::move(options));
+ }
+
+ /// Initiate/post a write operation with specified options and an indication
+ /// that this is the last write (like StartWrite and StartWritesDone, merged).
+ /// Note that calling this means that no more calls to StartWrite,
+ /// StartWriteLast, or StartWritesDone are allowed.
+ ///
+ /// \param[in] req The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
+ StartWrite(req, std::move(options.set_last_message()));
+ }
+
+ /// Indicate that the RPC will have no more write operations. This can only be
+ /// issued once for a given RPC. This is not required or allowed if
+ /// StartWriteLast is used since that already has the same implication.
+ /// Note that calling this means that no more calls to StartWrite,
+ /// StartWriteLast, or StartWritesDone are allowed.
+ void StartWritesDone() { stream_->WritesDone(); }
+
+ /// Holds are needed if (and only if) this stream has operations that take
+ /// place on it after StartCall but from outside one of the reactions
+ /// (OnReadDone, etc). This is _not_ a common use of the streaming API.
+ ///
+ /// Holds must be added before calling StartCall. If a stream still has a hold
+ /// in place, its resources will not be destroyed even if the status has
+ /// already come in from the wire and there are currently no active callbacks
+ /// outstanding. Similarly, the stream will not call OnDone if there are still
+ /// holds on it.
+ ///
+ /// For example, if a StartRead or StartWrite operation is going to be
+ /// initiated from elsewhere in the application, the application should call
+ /// AddHold or AddMultipleHolds before StartCall. If there is going to be,
+ /// for example, a read-flow and a write-flow taking place outside the
+ /// reactions, then call AddMultipleHolds(2) before StartCall. When the
+ /// application knows that it won't issue any more read operations (such as
+ /// when a read comes back as not ok), it should issue a RemoveHold(). It
+ /// should also call RemoveHold() again after it does StartWriteLast or
+ /// StartWritesDone that indicates that there will be no more write ops.
+ /// The number of RemoveHold calls must match the total number of AddHold
+ /// calls plus the number of holds added by AddMultipleHolds.
+ /// The argument to AddMultipleHolds must be positive.
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ stream_->AddHold(holds);
+ }
+ void RemoveHold() { stream_->RemoveHold(); }
+
+ /// Notifies the application that all operations associated with this RPC
+ /// have completed and all Holds have been removed. OnDone provides the RPC
+ /// status outcome for both successful and failed RPCs and will be called in
+ /// all cases. If it is not called, it indicates an application-level problem
+ /// (like failure to remove a hold).
+ ///
+ /// \param[in] s The status outcome of this RPC
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+
+ /// Notifies the application that a read of initial metadata from the
+ /// server is done. If the application chooses not to implement this method,
+ /// it can assume that the initial metadata has been read before the first
+ /// call of OnReadDone or OnDone.
+ ///
+ /// \param[in] ok Was the initial metadata read successfully? If false, no
+ /// new read/write operation will succeed, and any further
+ /// Start* operations should not be called.
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartRead operation completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no new read/write operation
+ /// will succeed, and any further Start* should not be called.
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWrite or StartWriteLast operation
+ /// completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no new read/write operation
+ /// will succeed, and any further Start* should not be called.
+ virtual void OnWriteDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWritesDone operation completed. Note
+ /// that this is only used on explicit StartWritesDone operations and not for
+ /// those that are implicitly invoked as part of a StartWriteLast.
+ ///
+ /// \param[in] ok Was it successful? If false, the application will later see
+ /// the failure reflected as a bad status in OnDone and no
+ /// further Start* should be called.
+ virtual void OnWritesDoneDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackReaderWriter<Request, Response>;
+ void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) {
+ stream_ = stream;
+ }
+ ClientCallbackReaderWriter<Request, Response>* stream_;
+};
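
As a concrete illustration of the reactor flow documented above, here is a sketch of a user-defined bidirectional reactor. The service, method, and message names (Echo, Chat, EchoRequest, EchoResponse, set_text) are placeholders for a generated callback stub; the pattern shown (queueing StartRead/StartWrite before StartCall, re-arming reads from OnReadDone) follows the comments above but is only one possible usage.

class ChatReactor : public grpc::ClientBidiReactor<EchoRequest, EchoResponse> {
 public:
  explicit ChatReactor(Echo::Stub* stub) {
    stub->async()->Chat(&ctx_, this);  // bind this reactor to the RPC
    req_.set_text("hello");
    StartWrite(&req_);   // backlogged until StartCall activates the RPC
    StartRead(&resp_);   // likewise backlogged
    StartCall();         // activation; backlogged ops are issued now
  }
  void OnWriteDone(bool ok) override {
    if (ok) StartWritesDone();  // only one write in this sketch
  }
  void OnReadDone(bool ok) override {
    if (ok) StartRead(&resp_);  // keep reading until the stream ends
  }
  void OnDone(const grpc::Status& s) override {
    // Final status; all reads and writes have completed by this point.
  }
 private:
  grpc::ClientContext ctx_;
  EchoRequest req_;
  EchoResponse resp_;
};
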
+
+/// \a ClientReadReactor is the interface for a server-streaming RPC.
+/// All public methods behave as in ClientBidiReactor.
+template <class Response>
+class ClientReadReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientReadReactor() {}
+
+ void StartCall() { reader_->StartCall(); }
+ void StartRead(Response* resp) { reader_->Read(resp); }
+
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ reader_->AddHold(holds);
+ }
+ void RemoveHold() { reader_->RemoveHold(); }
+
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackReader<Response>;
+ void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; }
+ ClientCallbackReader<Response>* reader_;
+};
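
Similarly, a sketch of a server-streaming read reactor, assuming a hypothetical generated service Feed with method Subscribe, messages SubscribeRequest/FeedItem, and an application hook Consume; each completed read re-arms the next one.

class SubscribeReactor : public grpc::ClientReadReactor<FeedItem> {
 public:
  SubscribeReactor(Feed::Stub* stub, const SubscribeRequest& req) : req_(req) {
    stub->async()->Subscribe(&ctx_, &req_, this);  // bind the reactor
    StartRead(&item_);  // queue the first read before activation
    StartCall();        // activate the RPC
  }
  void OnReadDone(bool ok) override {
    if (ok) {
      Consume(item_);     // hypothetical application hook
      StartRead(&item_);  // request the next message from the stream
    }
  }
  void OnDone(const grpc::Status& s) override {
    // Final status for the stream; per-RPC state may be released here.
  }
 private:
  grpc::ClientContext ctx_;
  SubscribeRequest req_;  // kept alive for the duration of the call start
  FeedItem item_;
};
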
+
+/// \a ClientWriteReactor is the interface for a client-streaming RPC.
+/// All public methods behave as in ClientBidiReactor.
+template <class Request>
+class ClientWriteReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientWriteReactor() {}
+
+ void StartCall() { writer_->StartCall(); }
+ void StartWrite(const Request* req) {
+ StartWrite(req, ::grpc::WriteOptions());
+ }
+ void StartWrite(const Request* req, ::grpc::WriteOptions options) {
+ writer_->Write(req, std::move(options));
+ }
+ void StartWriteLast(const Request* req, ::grpc::WriteOptions options) {
+ StartWrite(req, std::move(options.set_last_message()));
+ }
+ void StartWritesDone() { writer_->WritesDone(); }
+
+ void AddHold() { AddMultipleHolds(1); }
+ void AddMultipleHolds(int holds) {
+ GPR_CODEGEN_DEBUG_ASSERT(holds > 0);
+ writer_->AddHold(holds);
+ }
+ void RemoveHold() { writer_->RemoveHold(); }
+
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnWriteDone(bool /*ok*/) {}
+ virtual void OnWritesDoneDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackWriter<Request>;
+ void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; }
+
+ ClientCallbackWriter<Request>* writer_;
+};
+
+/// \a ClientUnaryReactor is a reactor-style interface for a unary RPC.
+/// This is _not_ a common way of invoking a unary RPC. In practice, this
+/// option should be used only if the unary RPC wants to receive initial
+/// metadata without waiting for the response to complete. Most deployments of
+/// RPC systems do not use this option, but it is needed for generality.
+/// All public methods behave as in ClientBidiReactor.
+/// StartCall is included for consistency with the other reactor flavors: even
+/// though there are no StartRead or StartWrite operations to queue before the
+/// call (that is part of the unary call itself) and there is no reactor object
+/// being created as a result of this call, we keep a consistent 2-phase
+/// initiation API among all the reactor flavors.
+class ClientUnaryReactor : public internal::ClientReactor {
+ public:
+ virtual ~ClientUnaryReactor() {}
+
+ void StartCall() { call_->StartCall(); }
+ void OnDone(const ::grpc::Status& /*s*/) override {}
+ virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
+
+ private:
+ friend class ClientCallbackUnary;
+ void BindCall(ClientCallbackUnary* call) { call_ = call; }
+ ClientCallbackUnary* call_;
+};
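
A sketch of the unary-reactor flavor, reusing the hypothetical Greeter names from earlier; as noted above, it is mainly useful when the client wants OnReadInitialMetadataDone before the response arrives.

class HelloReactor : public grpc::ClientUnaryReactor {
 public:
  HelloReactor(Greeter::Stub* stub, const HelloRequest& req) {
    // req is serialized while StartCall() runs, so the caller's request
    // only needs to remain valid until this constructor returns.
    stub->async()->SayHello(&ctx_, &req, &resp_, this);
    StartCall();
  }
  void OnReadInitialMetadataDone(bool ok) override {
    // ctx_.GetServerInitialMetadata() is usable here (before OnDone) if ok.
  }
  void OnDone(const grpc::Status& s) override {
    // s carries the final status; resp_ is meaningful only if s.ok().
  }
 private:
  grpc::ClientContext ctx_;
  HelloReply resp_;
};
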
+
+// Define function out-of-line from class to avoid forward declaration issue
+inline void ClientCallbackUnary::BindReactor(ClientUnaryReactor* reactor) {
+ reactor->BindCall(this);
+}
+
+namespace internal {
+
+// Forward declare factory classes for friendship
+template <class Request, class Response>
+class ClientCallbackReaderWriterFactory;
+template <class Response>
+class ClientCallbackReaderFactory;
+template <class Request>
+class ClientCallbackWriterFactory;
+
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl
+ : public ClientCallbackReaderWriter<Request, Response> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderWriterImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any read backlog
+ // 3. Any write backlog
+ // 4. Recv trailing metadata (unless corked)
+ if (!start_corked_) {
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+
+ call_.PerformOps(&start_ops_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+
+ if (backlog_.read_ops) {
+ call_.PerformOps(&read_ops_);
+ }
+ if (backlog_.write_ops) {
+ call_.PerformOps(&write_ops_);
+ }
+ if (backlog_.writes_done_ops) {
+ call_.PerformOps(&writes_done_ops_);
+ }
+ call_.PerformOps(&finish_ops_);
+ // The last thing in this critical section is to set started_ so that it
+ // can be used lock-free as well.
+ started_.store(true, std::memory_order_release);
+ }
+ // MaybeFinish outside the lock to make sure that destruction of this object
+ // doesn't take place while holding the lock (which would cause the lock to
+ // be released after destruction)
+ this->MaybeFinish(/*from_reaction=*/false);
+ }
+
+ void Read(Response* msg) override {
+ read_ops_.RecvMessage(msg);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.read_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&read_ops_);
+ }
+
+ void Write(const Request* msg, ::grpc::WriteOptions options) override {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.write_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&write_ops_);
+ }
+ void WritesDone() override {
+ writes_done_ops_.ClientSendClose();
+ writes_done_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWritesDoneDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &writes_done_ops_, /*can_inline=*/false);
+ writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.writes_done_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&writes_done_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackReaderWriterFactory<Request, Response>;
+
+ ClientCallbackReaderWriterImpl(grpc::internal::Call call,
+ ::grpc::ClientContext* context,
+ ClientBidiReactor<Request, Response>* reactor)
+ : context_(context),
+ call_(call),
+ reactor_(reactor),
+ start_corked_(context_->initial_metadata_corked_),
+ corked_write_needed_(start_corked_) {
+ this->BindReactor(reactor);
+
+ // Set up the unchanging parts of the start, read, and write tags and ops.
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+
+ write_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWriteDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
+ write_ops_.set_core_cq_tag(&write_tag_);
+
+ read_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &read_ops_, /*can_inline=*/false);
+ read_ops_.set_core_cq_tag(&read_tag_);
+
+ // Also set up the Finish tag and op set.
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ }
+
+ // MaybeFinish can be called from reactions or from user-initiated operations
+ // like StartCall or RemoveHold. If this is the last operation or hold on this
+ // object, it will invoke the OnDone reaction. If MaybeFinish was called from
+ // a reaction, it can call OnDone directly. If not, it would need to schedule
+ // OnDone onto an executor thread to avoid the possibility of deadlocking with
+ // any locks in the user code that invoked it.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackReaderWriterImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientBidiReactor<Request, Response>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+ const bool start_corked_;
+ bool corked_write_needed_; // no lock needed since only accessed in
+ // Write/WritesDone which cannot be concurrent
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ grpc::internal::CallbackWithSuccessTag write_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpClientSendClose>
+ writes_done_ops_;
+ grpc::internal::CallbackWithSuccessTag writes_done_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
+ read_ops_;
+ grpc::internal::CallbackWithSuccessTag read_tag_;
+
+ struct StartCallBacklog {
+ bool write_ops = false;
+ bool writes_done_ops = false;
+ bool read_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
+ std::atomic<intptr_t> callbacks_outstanding_{3};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Request, class Response>
+class ClientCallbackReaderWriterFactory {
+ public:
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ ClientBidiReactor<Request, Response>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackReaderWriterImpl<Request, Response>)))
+ ClientCallbackReaderWriterImpl<Request, Response>(call, context,
+ reactor);
+ }
+};
+
+template <class Response>
+class ClientCallbackReaderImpl : public ClientCallbackReader<Response> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any backlog
+ // 3. Recv trailing metadata
+
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+ call_.PerformOps(&start_ops_);
+
+ // Also set up the read tag so it doesn't have to be set up each time
+ read_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &read_ops_, /*can_inline=*/false);
+ read_ops_.set_core_cq_tag(&read_tag_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (backlog_.read_ops) {
+ call_.PerformOps(&read_ops_);
+ }
+ started_.store(true, std::memory_order_release);
+ }
+
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_, /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ void Read(Response* msg) override {
+ read_ops_.RecvMessage(msg);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.read_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&read_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackReaderFactory<Response>;
+
+ template <class Request>
+ ClientCallbackReaderImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Request* request,
+ ClientReadReactor<Response>* reactor)
+ : context_(context), call_(call), reactor_(reactor) {
+ this->BindReactor(reactor);
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
+ start_ops_.ClientSendClose();
+ }
+
+ // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackReaderImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientReadReactor<Response>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>>
+ read_ops_;
+ grpc::internal::CallbackWithSuccessTag read_tag_;
+
+ struct StartCallBacklog {
+ bool read_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 2 callbacks to pre-register for start and finish
+ std::atomic<intptr_t> callbacks_outstanding_{2};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Response>
+class ClientCallbackReaderFactory {
+ public:
+ template <class Request>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const Request* request,
+ ClientReadReactor<Response>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackReaderImpl<Response>)))
+ ClientCallbackReaderImpl<Response>(call, context, request, reactor);
+ }
+};
+
+template <class Request>
+class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackWriterImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, plus any backlog, each with a callback
+ // 1. Send initial metadata (unless corked) + recv initial metadata
+ // 2. Any backlog
+ // 3. Recv trailing metadata
+
+ if (!start_corked_) {
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ }
+ call_.PerformOps(&start_ops_);
+
+ {
+ grpc::internal::MutexLock lock(&start_mu_);
+
+ if (backlog_.write_ops) {
+ call_.PerformOps(&write_ops_);
+ }
+ if (backlog_.writes_done_ops) {
+ call_.PerformOps(&writes_done_ops_);
+ }
+ call_.PerformOps(&finish_ops_);
+ // The last thing in this critical section is to set started_ so that it
+ // can be used lock-free as well.
+ started_.store(true, std::memory_order_release);
+ }
+ // MaybeFinish outside the lock to make sure that destruction of this object
+ // doesn't take place while holding the lock (which would cause the lock to
+ // be released after destruction)
+ this->MaybeFinish(/*from_reaction=*/false);
+ }
+
+ void Write(const Request* msg, ::grpc::WriteOptions options) override {
+ if (GPR_UNLIKELY(options.is_last_message())) {
+ options.set_buffer_hint();
+ write_ops_.ClientSendClose();
+ }
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok());
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ write_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.write_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&write_ops_);
+ }
+
+ void WritesDone() override {
+ writes_done_ops_.ClientSendClose();
+ writes_done_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWritesDoneDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &writes_done_ops_, /*can_inline=*/false);
+ writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
+ callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
+
+ if (GPR_UNLIKELY(corked_write_needed_)) {
+ writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ corked_write_needed_ = false;
+ }
+
+ if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) {
+ grpc::internal::MutexLock lock(&start_mu_);
+ if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) {
+ backlog_.writes_done_ops = true;
+ return;
+ }
+ }
+ call_.PerformOps(&writes_done_ops_);
+ }
+
+ void AddHold(int holds) override {
+ callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed);
+ }
+ void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); }
+
+ private:
+ friend class ClientCallbackWriterFactory<Request>;
+
+ template <class Response>
+ ClientCallbackWriterImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Response* response,
+ ClientWriteReactor<Request>* reactor)
+ : context_(context),
+ call_(call),
+ reactor_(reactor),
+ start_corked_(context_->initial_metadata_corked_),
+ corked_write_needed_(start_corked_) {
+ this->BindReactor(reactor);
+
+ // Set up the unchanging parts of the start and write tags and ops.
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+
+ write_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnWriteDone(ok);
+ MaybeFinish(/*from_reaction=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
+ write_ops_.set_core_cq_tag(&write_tag_);
+
+ // Also set up the Finish tag and op set.
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ finish_tag_.Set(
+ call_.call(),
+ [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ }
+
+ // MaybeFinish behaves as in ClientCallbackReaderWriterImpl.
+ void MaybeFinish(bool from_reaction) {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackWriterImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ if (GPR_LIKELY(from_reaction)) {
+ reactor->OnDone(s);
+ } else {
+ reactor->InternalScheduleOnDone(std::move(s));
+ }
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientWriteReactor<Request>* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+ const bool start_corked_;
+ bool corked_write_needed_; // no lock needed since only accessed in
+ // Write/WritesDone which cannot be concurrent
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
+ grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose>
+ write_ops_;
+ grpc::internal::CallbackWithSuccessTag write_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpClientSendClose>
+ writes_done_ops_;
+ grpc::internal::CallbackWithSuccessTag writes_done_tag_;
+
+ struct StartCallBacklog {
+ bool write_ops = false;
+ bool writes_done_ops = false;
+ };
+ StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */;
+
+ // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish
+ std::atomic<intptr_t> callbacks_outstanding_{3};
+ std::atomic_bool started_{false};
+ grpc::internal::Mutex start_mu_;
+};
+
+template <class Request>
+class ClientCallbackWriterFactory {
+ public:
+ template <class Response>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, Response* response,
+ ClientWriteReactor<Request>* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackWriterImpl<Request>)))
+ ClientCallbackWriterImpl<Request>(call, context, response, reactor);
+ }
+};
+
+class ClientCallbackUnaryImpl final : public ClientCallbackUnary {
+ public:
+ // always allocated against a call arena, no memory free required
+ static void operator delete(void* /*ptr*/, std::size_t size) {
+ GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackUnaryImpl));
+ }
+
+ // This operator should never be called as the memory should be freed as part
+ // of the arena destruction. It only exists to provide a matching operator
+ // delete to the operator new so that some compilers will not complain (see
+ // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
+ // there are no tests catching the compiler warning.
+ static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); }
+
+ void StartCall() override {
+ // This call initiates two batches, each with a callback
+ // 1. Send initial metadata + write + writes done + recv initial metadata
+ // 2. Read message, recv trailing metadata
+
+ start_tag_.Set(call_.call(),
+ [this](bool ok) {
+ reactor_->OnReadInitialMetadataDone(ok);
+ MaybeFinish();
+ },
+ &start_ops_, /*can_inline=*/false);
+ start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ start_ops_.RecvInitialMetadata(context_);
+ start_ops_.set_core_cq_tag(&start_tag_);
+ call_.PerformOps(&start_ops_);
+
+ finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
+ &finish_ops_,
+ /*can_inline=*/false);
+ finish_ops_.ClientRecvStatus(context_, &finish_status_);
+ finish_ops_.set_core_cq_tag(&finish_tag_);
+ call_.PerformOps(&finish_ops_);
+ }
+
+ private:
+ friend class ClientCallbackUnaryFactory;
+
+ template <class Request, class Response>
+ ClientCallbackUnaryImpl(::grpc::internal::Call call,
+ ::grpc::ClientContext* context, Request* request,
+ Response* response, ClientUnaryReactor* reactor)
+ : context_(context), call_(call), reactor_(reactor) {
+ this->BindReactor(reactor);
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok());
+ start_ops_.ClientSendClose();
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+ }
+
+ // In the unary case, MaybeFinish is only ever invoked from a
+ // library-initiated reaction, so it will just directly call OnDone if this is
+ // the last reaction for this RPC.
+ void MaybeFinish() {
+ if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1)) {
+ ::grpc::Status s = std::move(finish_status_);
+ auto* reactor = reactor_;
+ auto* call = call_.call();
+ this->~ClientCallbackUnaryImpl();
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ reactor->OnDone(s);
+ }
+ }
+
+ ::grpc::ClientContext* const context_;
+ grpc::internal::Call call_;
+ ClientUnaryReactor* const reactor_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
+ grpc::internal::CallOpSendMessage,
+ grpc::internal::CallOpClientSendClose,
+ grpc::internal::CallOpRecvInitialMetadata>
+ start_ops_;
+ grpc::internal::CallbackWithSuccessTag start_tag_;
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage,
+ grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ grpc::internal::CallbackWithSuccessTag finish_tag_;
+ ::grpc::Status finish_status_;
+
+ // This call will have 2 callbacks: start and finish
+ std::atomic<intptr_t> callbacks_outstanding_{2};
+};
+
+class ClientCallbackUnaryFactory {
+ public:
+ template <class Request, class Response>
+ static void Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const Request* request,
+ Response* response, ClientUnaryReactor* reactor) {
+ grpc::internal::Call call =
+ channel->CreateCall(method, context, channel->CallbackCQ());
+
+ ::grpc::g_core_codegen_interface->grpc_call_ref(call.call());
+
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call.call(), sizeof(ClientCallbackUnaryImpl)))
+ ClientCallbackUnaryImpl(call, context, request, response, reactor);
+ }
+};
+
+} // namespace internal
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
template <class Response>
-using ClientCallbackReader = ::grpc::ClientCallbackReader<Response>;
+using ClientCallbackReader = ::grpc::ClientCallbackReader<Response>;
template <class Request>
-using ClientCallbackWriter = ::grpc::ClientCallbackWriter<Request>;
+using ClientCallbackWriter = ::grpc::ClientCallbackWriter<Request>;
template <class Request, class Response>
using ClientCallbackReaderWriter =
- ::grpc::ClientCallbackReaderWriter<Request, Response>;
+ ::grpc::ClientCallbackReaderWriter<Request, Response>;
template <class Response>
-using ClientReadReactor = ::grpc::ClientReadReactor<Response>;
+using ClientReadReactor = ::grpc::ClientReadReactor<Response>;
template <class Request>
-using ClientWriteReactor = ::grpc::ClientWriteReactor<Request>;
+using ClientWriteReactor = ::grpc::ClientWriteReactor<Request>;
template <class Request, class Response>
-using ClientBidiReactor = ::grpc::ClientBidiReactor<Request, Response>;
-
-typedef ::grpc::ClientUnaryReactor ClientUnaryReactor;
+using ClientBidiReactor = ::grpc::ClientBidiReactor<Request, Response>;
+typedef ::grpc::ClientUnaryReactor ClientUnaryReactor;
+
} // namespace experimental
-
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
index a4e58f34c5..85be2853ef 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,509 +16,509 @@
*
*/
-/// A ClientContext allows the person implementing a service client to:
-///
-/// - Add custom metadata key-value pairs that will propagated to the server
-/// side.
-/// - Control call settings such as compression and authentication.
-/// - Initial and trailing metadata coming from the server.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call they are invoked with, that
-/// is to say, they aren't sticky. Some of these settings, such as the
-/// compression options, can be made persistent at channel construction time
-/// (see \a grpc::CreateCustomChannel).
-///
-/// \warning ClientContext instances should \em not be reused across rpcs.
-
+/// A ClientContext allows the person implementing a service client to:
+///
+/// - Add custom metadata key-value pairs that will be propagated to the server
+/// side.
+/// - Control call settings such as compression and authentication.
+/// - Initial and trailing metadata coming from the server.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call they are invoked with, that
+/// is to say, they aren't sticky. Some of these settings, such as the
+/// compression options, can be made persistent at channel construction time
+/// (see \a grpc::CreateCustomChannel).
+///
+/// \warning ClientContext instances should \em not be reused across rpcs.
+
#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
#define GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
-#include <map>
-#include <memory>
-#include <util/generic/string.h>
-
-#include <grpc/impl/codegen/compression_types.h>
-#include <grpc/impl/codegen/propagation_bits.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/create_auth_context.h>
-#include <grpcpp/impl/codegen/metadata_map.h>
-#include <grpcpp/impl/codegen/rpc_method.h>
-#include <grpcpp/impl/codegen/security/auth_context.h>
-#include <grpcpp/impl/codegen/slice.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/string_ref.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct census_context;
-struct grpc_call;
-
+#include <map>
+#include <memory>
+#include <util/generic/string.h>
+
+#include <grpc/impl/codegen/compression_types.h>
+#include <grpc/impl/codegen/propagation_bits.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/create_auth_context.h>
+#include <grpcpp/impl/codegen/metadata_map.h>
+#include <grpcpp/impl/codegen/rpc_method.h>
+#include <grpcpp/impl/codegen/security/auth_context.h>
+#include <grpcpp/impl/codegen/slice.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/string_ref.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct census_context;
+struct grpc_call;
+
namespace grpc {
-class ServerContext;
-class ServerContextBase;
-class CallbackServerContext;
-
-namespace internal {
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl;
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl;
-template <class Response>
-class ClientCallbackReaderImpl;
-template <class Request>
-class ClientCallbackWriterImpl;
-class ClientCallbackUnaryImpl;
-class ClientContextAccessor;
-} // namespace internal
-
-template <class R>
-class ClientReader;
-template <class W>
-class ClientWriter;
-template <class W, class R>
-class ClientReaderWriter;
-template <class R>
-class ClientAsyncReader;
-template <class W>
-class ClientAsyncWriter;
-template <class W, class R>
-class ClientAsyncReaderWriter;
-template <class R>
-class ClientAsyncResponseReader;
-
-namespace testing {
-class InteropClientContextInspector;
-} // namespace testing
-
-namespace internal {
-class RpcMethod;
-template <class InputMessage, class OutputMessage>
-class BlockingUnaryCallImpl;
-class CallOpClientRecvStatus;
-class CallOpRecvInitialMetadata;
-class ServerContextImpl;
-template <class InputMessage, class OutputMessage>
-class CallbackUnaryCallImpl;
-template <class Request, class Response>
-class ClientCallbackReaderWriterImpl;
-template <class Response>
-class ClientCallbackReaderImpl;
-template <class Request>
-class ClientCallbackWriterImpl;
-class ClientCallbackUnaryImpl;
-class ClientContextAccessor;
-} // namespace internal
-
-class CallCredentials;
-class Channel;
-class ChannelInterface;
-class CompletionQueue;
-
-/// Options for \a ClientContext::FromServerContext specifying which traits from
-/// the \a ServerContext to propagate (copy) from it into a new \a
-/// ClientContext.
-///
-/// \see ClientContext::FromServerContext
-class PropagationOptions {
- public:
- PropagationOptions() : propagate_(GRPC_PROPAGATE_DEFAULTS) {}
-
- PropagationOptions& enable_deadline_propagation() {
- propagate_ |= GRPC_PROPAGATE_DEADLINE;
- return *this;
- }
-
- PropagationOptions& disable_deadline_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_DEADLINE;
- return *this;
- }
-
- PropagationOptions& enable_census_stats_propagation() {
- propagate_ |= GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
- return *this;
- }
-
- PropagationOptions& disable_census_stats_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
- return *this;
- }
-
- PropagationOptions& enable_census_tracing_propagation() {
- propagate_ |= GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
- return *this;
- }
-
- PropagationOptions& disable_census_tracing_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
- return *this;
- }
-
- PropagationOptions& enable_cancellation_propagation() {
- propagate_ |= GRPC_PROPAGATE_CANCELLATION;
- return *this;
- }
-
- PropagationOptions& disable_cancellation_propagation() {
- propagate_ &= ~GRPC_PROPAGATE_CANCELLATION;
- return *this;
- }
-
- uint32_t c_bitmask() const { return propagate_; }
-
- private:
- uint32_t propagate_;
-};
-
-/// A ClientContext allows the person implementing a service client to:
-///
-/// - Add custom metadata key-value pairs that will propagated to the server
-/// side.
-/// - Control call settings such as compression and authentication.
-/// - Initial and trailing metadata coming from the server.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call they are invoked with, that
-/// is to say, they aren't sticky. Some of these settings, such as the
-/// compression options, can be made persistent at channel construction time
-/// (see \a grpc::CreateCustomChannel).
-///
-/// \warning ClientContext instances should \em not be reused across rpcs.
-/// \warning The ClientContext instance used for creating an rpc must remain
-/// alive and valid for the lifetime of the rpc.
-class ClientContext {
- public:
- ClientContext();
- ~ClientContext();
-
- /// Create a new \a ClientContext as a child of an incoming server call,
- /// according to \a options (\see PropagationOptions).
- ///
- /// \param server_context The source server context to use as the basis for
- /// constructing the client context.
- /// \param options The options controlling what to copy from the \a
- /// server_context.
- ///
- /// \return A newly constructed \a ClientContext instance based on \a
- /// server_context, with traits propagated (copied) according to \a options.
- static std::unique_ptr<ClientContext> FromServerContext(
- const grpc::ServerContext& server_context,
- PropagationOptions options = PropagationOptions());
- static std::unique_ptr<ClientContext> FromCallbackServerContext(
- const grpc::CallbackServerContext& server_context,
- PropagationOptions options = PropagationOptions());
-
- /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with
- /// a client call. These are made available at the server side by the \a
- /// grpc::ServerContext::client_metadata() method.
- ///
- /// \warning This method should only be called before invoking the rpc.
- ///
- /// \param meta_key The metadata key. If \a meta_value is binary data, it must
- /// end in "-bin".
- /// \param meta_value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddMetadata(const TString& meta_key, const TString& meta_value);
-
- /// Return a collection of initial metadata key-value pairs. Note that keys
- /// may happen more than once (ie, a \a std::multimap is returned).
- ///
- /// \warning This method should only be called after initial metadata has been
- /// received. For streaming calls, see \a
- /// ClientReaderInterface::WaitForInitialMetadata().
- ///
- /// \return A multimap of initial metadata key-value pairs from the server.
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- GetServerInitialMetadata() const {
- GPR_CODEGEN_ASSERT(initial_metadata_received_);
- return *recv_initial_metadata_.map();
- }
-
- /// Return a collection of trailing metadata key-value pairs. Note that keys
- /// may happen more than once (ie, a \a std::multimap is returned).
- ///
- /// \warning This method is only callable once the stream has finished.
- ///
- /// \return A multimap of metadata trailing key-value pairs from the server.
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- GetServerTrailingMetadata() const {
- // TODO(yangg) check finished
- return *trailing_metadata_.map();
- }
-
- /// Set the deadline for the client call.
- ///
- /// \warning This method should only be called before invoking the rpc.
- ///
- /// \param deadline the deadline for the client call. Units are determined by
- /// the type used. The deadline is an absolute (not relative) time.
- template <typename T>
- void set_deadline(const T& deadline) {
- grpc::TimePoint<T> deadline_tp(deadline);
- deadline_ = deadline_tp.raw_time();
- }
-
- /// EXPERIMENTAL: Indicate that this request is idempotent.
- /// By default, RPCs are assumed to <i>not</i> be idempotent.
- ///
- /// If true, the gRPC library assumes that it's safe to initiate
- /// this RPC multiple times.
- void set_idempotent(bool idempotent) { idempotent_ = idempotent; }
-
- /// EXPERIMENTAL: Set this request to be cacheable.
- /// If set, grpc is free to use the HTTP GET verb for sending the request,
- /// with the possibility of receiving a cached response.
- void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
-
- /// EXPERIMENTAL: Trigger wait-for-ready or not on this request.
- /// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
- /// If set, if an RPC is made when a channel's connectivity state is
- /// TRANSIENT_FAILURE or CONNECTING, the call will not "fail fast",
- /// and the channel will wait until the channel is READY before making the
- /// call.
- void set_wait_for_ready(bool wait_for_ready) {
- wait_for_ready_ = wait_for_ready;
- wait_for_ready_explicitly_set_ = true;
- }
-
- /// DEPRECATED: Use set_wait_for_ready() instead.
- void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
-
- /// Return the deadline for the client call.
- std::chrono::system_clock::time_point deadline() const {
- return grpc::Timespec2Timepoint(deadline_);
- }
-
- /// Return a \a gpr_timespec representation of the client call's deadline.
- gpr_timespec raw_deadline() const { return deadline_; }
-
- /// Set the per call authority header (see
- /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3).
- void set_authority(const TString& authority) { authority_ = authority; }
-
- /// Return the authentication context for the associated client call.
- /// It is only valid to call this during the lifetime of the client call.
- ///
- /// \see grpc::AuthContext.
- std::shared_ptr<const grpc::AuthContext> auth_context() const {
- if (auth_context_.get() == nullptr) {
- auth_context_ = grpc::CreateAuthContext(call_);
- }
- return auth_context_;
- }
-
- /// Set credentials for the client call.
- ///
- /// A credentials object encapsulates all the state needed by a client to
- /// authenticate with a server and make various assertions, e.g., about the
- /// client’s identity, role, or whether it is authorized to make a particular
- /// call.
- ///
- /// It is legal to call this only before initial metadata is sent.
- ///
- /// \see https://grpc.io/docs/guides/auth.html
- void set_credentials(const std::shared_ptr<grpc::CallCredentials>& creds);
-
- /// EXPERIMENTAL debugging API
- ///
- /// Returns the credentials for the client call. This should be used only in
- /// tests and for diagnostic purposes, and should not be used by application
- /// logic.
- std::shared_ptr<grpc::CallCredentials> credentials() { return creds_; }
-
- /// Return the compression algorithm the client call will request be used.
- /// Note that the gRPC runtime may decide to ignore this request, for example,
- /// due to resource constraints.
- grpc_compression_algorithm compression_algorithm() const {
- return compression_algorithm_;
- }
-
- /// Set \a algorithm to be the compression algorithm used for the client call.
- ///
- /// \param algorithm The compression algorithm used for the client call.
- void set_compression_algorithm(grpc_compression_algorithm algorithm);
-
- /// Flag whether the initial metadata should be \a corked
- ///
- /// If \a corked is true, then the initial metadata will be coalesced with the
- /// write of first message in the stream. As a result, any tag set for the
- /// initial metadata operation (starting a client-streaming or bidi-streaming
- /// RPC) will not actually be sent to the completion queue or delivered
- /// via Next.
- ///
- /// \param corked The flag indicating whether the initial metadata is to be
- /// corked or not.
- void set_initial_metadata_corked(bool corked) {
- initial_metadata_corked_ = corked;
- }
-
- /// Return the peer uri in a string.
- /// It is only valid to call this during the lifetime of the client call.
- ///
- /// \warning This value is never authenticated or subject to any security
- /// related code. It must not be used for any authentication related
- /// functionality. Instead, use auth_context.
- ///
- /// \return The call's peer URI.
- TString peer() const;
-
- /// Sets the census context.
- /// It is only valid to call this before the client call is created. A common
- /// place of setting census context is from within the DefaultConstructor
- /// method of GlobalCallbacks.
- void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
-
- /// Returns the census context that has been set, or nullptr if not set.
- struct census_context* census_context() const {
- return census_context_;
- }
-
- /// Send a best-effort out-of-band cancel on the call associated with
- /// this client context. The call could be in any stage; e.g., if it is
- /// already finished, it may still return success.
- ///
- /// There is no guarantee the call will be cancelled.
- ///
- /// Note that TryCancel() does not change any of the tags that are pending
- /// on the completion queue. All pending tags will still be delivered
- /// (though their ok result may reflect the effect of cancellation).
- void TryCancel();
-
- /// Global Callbacks
- ///
- /// Can be set exactly once per application to install hooks whenever
- /// a client context is constructed and destructed.
- class GlobalCallbacks {
- public:
- virtual ~GlobalCallbacks() {}
- virtual void DefaultConstructor(ClientContext* context) = 0;
- virtual void Destructor(ClientContext* context) = 0;
- };
- static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
-
- /// Should be used for framework-level extensions only.
- /// Applications never need to call this method.
- grpc_call* c_call() { return call_; }
-
- /// EXPERIMENTAL debugging API
- ///
- /// if status is not ok() for an RPC, this will return a detailed string
- /// of the gRPC Core error that led to the failure. It should not be relied
- /// upon for anything other than gaining more debug data in failure cases.
- TString debug_error_string() const { return debug_error_string_; }
-
- private:
- // Disallow copy and assign.
- ClientContext(const ClientContext&);
- ClientContext& operator=(const ClientContext&);
-
- friend class ::grpc::testing::InteropClientContextInspector;
- friend class ::grpc::internal::CallOpClientRecvStatus;
- friend class ::grpc::internal::CallOpRecvInitialMetadata;
- friend class ::grpc::Channel;
- template <class R>
- friend class ::grpc::ClientReader;
- template <class W>
- friend class ::grpc::ClientWriter;
- template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
- template <class R>
- friend class ::grpc::ClientAsyncReader;
- template <class W>
- friend class ::grpc::ClientAsyncWriter;
- template <class W, class R>
- friend class ::grpc::ClientAsyncReaderWriter;
- template <class R>
- friend class ::grpc::ClientAsyncResponseReader;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::BlockingUnaryCallImpl;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::CallbackUnaryCallImpl;
- template <class Request, class Response>
- friend class ::grpc::internal::ClientCallbackReaderWriterImpl;
- template <class Response>
- friend class ::grpc::internal::ClientCallbackReaderImpl;
- template <class Request>
- friend class ::grpc::internal::ClientCallbackWriterImpl;
- friend class ::grpc::internal::ClientCallbackUnaryImpl;
- friend class ::grpc::internal::ClientContextAccessor;
-
- // Used by friend class CallOpClientRecvStatus
- void set_debug_error_string(const TString& debug_error_string) {
- debug_error_string_ = debug_error_string;
- }
-
- grpc_call* call() const { return call_; }
- void set_call(grpc_call* call,
- const std::shared_ptr<::grpc::Channel>& channel);
-
- grpc::experimental::ClientRpcInfo* set_client_rpc_info(
- const char* method, grpc::internal::RpcMethod::RpcType type,
- grpc::ChannelInterface* channel,
- const std::vector<std::unique_ptr<
- grpc::experimental::ClientInterceptorFactoryInterface>>& creators,
- size_t interceptor_pos) {
- rpc_info_ = grpc::experimental::ClientRpcInfo(this, type, method, channel);
- rpc_info_.RegisterInterceptors(creators, interceptor_pos);
- return &rpc_info_;
- }
-
- uint32_t initial_metadata_flags() const {
- return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) |
- (wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) |
- (cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) |
- (wait_for_ready_explicitly_set_
- ? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
- : 0) |
- (initial_metadata_corked_ ? GRPC_INITIAL_METADATA_CORKED : 0);
- }
-
- TString authority() { return authority_; }
-
- void SendCancelToInterceptors();
-
- static std::unique_ptr<ClientContext> FromInternalServerContext(
- const grpc::ServerContextBase& server_context,
- PropagationOptions options);
-
- bool initial_metadata_received_;
- bool wait_for_ready_;
- bool wait_for_ready_explicitly_set_;
- bool idempotent_;
- bool cacheable_;
- std::shared_ptr<::grpc::Channel> channel_;
- grpc::internal::Mutex mu_;
- grpc_call* call_;
- bool call_canceled_;
- gpr_timespec deadline_;
- grpc::string authority_;
- std::shared_ptr<grpc::CallCredentials> creds_;
- mutable std::shared_ptr<const grpc::AuthContext> auth_context_;
- struct census_context* census_context_;
- std::multimap<TString, TString> send_initial_metadata_;
- mutable grpc::internal::MetadataMap recv_initial_metadata_;
- mutable grpc::internal::MetadataMap trailing_metadata_;
-
- grpc_call* propagate_from_call_;
- PropagationOptions propagation_options_;
-
- grpc_compression_algorithm compression_algorithm_;
- bool initial_metadata_corked_;
-
- TString debug_error_string_;
-
- grpc::experimental::ClientRpcInfo rpc_info_;
-};
-
+class ServerContext;
+class ServerContextBase;
+class CallbackServerContext;
+
+namespace internal {
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl;
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl;
+template <class Response>
+class ClientCallbackReaderImpl;
+template <class Request>
+class ClientCallbackWriterImpl;
+class ClientCallbackUnaryImpl;
+class ClientContextAccessor;
+} // namespace internal
+
+template <class R>
+class ClientReader;
+template <class W>
+class ClientWriter;
+template <class W, class R>
+class ClientReaderWriter;
+template <class R>
+class ClientAsyncReader;
+template <class W>
+class ClientAsyncWriter;
+template <class W, class R>
+class ClientAsyncReaderWriter;
+template <class R>
+class ClientAsyncResponseReader;
+
+namespace testing {
+class InteropClientContextInspector;
+} // namespace testing
+
+namespace internal {
+class RpcMethod;
+template <class InputMessage, class OutputMessage>
+class BlockingUnaryCallImpl;
+class CallOpClientRecvStatus;
+class CallOpRecvInitialMetadata;
+class ServerContextImpl;
+template <class InputMessage, class OutputMessage>
+class CallbackUnaryCallImpl;
+template <class Request, class Response>
+class ClientCallbackReaderWriterImpl;
+template <class Response>
+class ClientCallbackReaderImpl;
+template <class Request>
+class ClientCallbackWriterImpl;
+class ClientCallbackUnaryImpl;
+class ClientContextAccessor;
+} // namespace internal
+
+class CallCredentials;
+class Channel;
+class ChannelInterface;
+class CompletionQueue;
+
+/// Options for \a ClientContext::FromServerContext specifying which traits from
+/// the \a ServerContext to propagate (copy) from it into a new \a
+/// ClientContext.
+///
+/// \see ClientContext::FromServerContext
+class PropagationOptions {
+ public:
+ PropagationOptions() : propagate_(GRPC_PROPAGATE_DEFAULTS) {}
+
+ PropagationOptions& enable_deadline_propagation() {
+ propagate_ |= GRPC_PROPAGATE_DEADLINE;
+ return *this;
+ }
+
+ PropagationOptions& disable_deadline_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_DEADLINE;
+ return *this;
+ }
+
+ PropagationOptions& enable_census_stats_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& disable_census_stats_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CENSUS_STATS_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& enable_census_tracing_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& disable_census_tracing_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT;
+ return *this;
+ }
+
+ PropagationOptions& enable_cancellation_propagation() {
+ propagate_ |= GRPC_PROPAGATE_CANCELLATION;
+ return *this;
+ }
+
+ PropagationOptions& disable_cancellation_propagation() {
+ propagate_ &= ~GRPC_PROPAGATE_CANCELLATION;
+ return *this;
+ }
+
+ uint32_t c_bitmask() const { return propagate_; }
+
+ private:
+ uint32_t propagate_;
+};
+
+/// A ClientContext allows the person implementing a service client to:
+///
+/// - Add custom metadata key-value pairs that will be propagated to the server
+/// side.
+/// - Control call settings such as compression and authentication.
+/// - Initial and trailing metadata coming from the server.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call they are invoked with, that
+/// is to say, they aren't sticky. Some of these settings, such as the
+/// compression options, can be made persistent at channel construction time
+/// (see \a grpc::CreateCustomChannel).
+///
+/// \warning ClientContext instances should \em not be reused across rpcs.
+/// \warning The ClientContext instance used for creating an rpc must remain
+/// alive and valid for the lifetime of the rpc.
+class ClientContext {
+ public:
+ ClientContext();
+ ~ClientContext();
+
+ /// Create a new \a ClientContext as a child of an incoming server call,
+ /// according to \a options (\see PropagationOptions).
+ ///
+ /// \param server_context The source server context to use as the basis for
+ /// constructing the client context.
+ /// \param options The options controlling what to copy from the \a
+ /// server_context.
+ ///
+ /// \return A newly constructed \a ClientContext instance based on \a
+ /// server_context, with traits propagated (copied) according to \a options.
+ static std::unique_ptr<ClientContext> FromServerContext(
+ const grpc::ServerContext& server_context,
+ PropagationOptions options = PropagationOptions());
+ static std::unique_ptr<ClientContext> FromCallbackServerContext(
+ const grpc::CallbackServerContext& server_context,
+ PropagationOptions options = PropagationOptions());
+
+ /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with
+ /// a client call. These are made available at the server side by the \a
+ /// grpc::ServerContext::client_metadata() method.
+ ///
+ /// \warning This method should only be called before invoking the rpc.
+ ///
+ /// \param meta_key The metadata key. If \a meta_value is binary data, it must
+ /// end in "-bin".
+ /// \param meta_value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddMetadata(const TString& meta_key, const TString& meta_value);
+
+ /// Return a collection of initial metadata key-value pairs. Note that keys
+ /// may happen more than once (ie, a \a std::multimap is returned).
+ ///
+ /// \warning This method should only be called after initial metadata has been
+ /// received. For streaming calls, see \a
+ /// ClientReaderInterface::WaitForInitialMetadata().
+ ///
+ /// \return A multimap of initial metadata key-value pairs from the server.
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ GetServerInitialMetadata() const {
+ GPR_CODEGEN_ASSERT(initial_metadata_received_);
+ return *recv_initial_metadata_.map();
+ }
+
+ /// Return a collection of trailing metadata key-value pairs. Note that keys
+ /// may happen more than once (ie, a \a std::multimap is returned).
+ ///
+ /// \warning This method is only callable once the stream has finished.
+ ///
+ /// \return A multimap of metadata trailing key-value pairs from the server.
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ GetServerTrailingMetadata() const {
+ // TODO(yangg) check finished
+ return *trailing_metadata_.map();
+ }
+
+ /// Set the deadline for the client call.
+ ///
+ /// \warning This method should only be called before invoking the rpc.
+ ///
+ /// \param deadline the deadline for the client call. Units are determined by
+ /// the type used. The deadline is an absolute (not relative) time.
+ template <typename T>
+ void set_deadline(const T& deadline) {
+ grpc::TimePoint<T> deadline_tp(deadline);
+ deadline_ = deadline_tp.raw_time();
+ }
+
+ /// EXPERIMENTAL: Indicate that this request is idempotent.
+ /// By default, RPCs are assumed to <i>not</i> be idempotent.
+ ///
+ /// If true, the gRPC library assumes that it's safe to initiate
+ /// this RPC multiple times.
+ void set_idempotent(bool idempotent) { idempotent_ = idempotent; }
+
+ /// EXPERIMENTAL: Set this request to be cacheable.
+ /// If set, grpc is free to use the HTTP GET verb for sending the request,
+ /// with the possibility of receiving a cached response.
+ void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
+
+ /// EXPERIMENTAL: Trigger wait-for-ready or not on this request.
+ /// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+ /// If set, if an RPC is made when a channel's connectivity state is
+ /// TRANSIENT_FAILURE or CONNECTING, the call will not "fail fast",
+ /// and the channel will wait until the channel is READY before making the
+ /// call.
+ void set_wait_for_ready(bool wait_for_ready) {
+ wait_for_ready_ = wait_for_ready;
+ wait_for_ready_explicitly_set_ = true;
+ }
+
+ /// DEPRECATED: Use set_wait_for_ready() instead.
+ void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
+
+ /// Return the deadline for the client call.
+ std::chrono::system_clock::time_point deadline() const {
+ return grpc::Timespec2Timepoint(deadline_);
+ }
+
+ /// Return a \a gpr_timespec representation of the client call's deadline.
+ gpr_timespec raw_deadline() const { return deadline_; }
+
+ /// Set the per call authority header (see
+ /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3).
+ void set_authority(const TString& authority) { authority_ = authority; }
+
+ /// Return the authentication context for the associated client call.
+ /// It is only valid to call this during the lifetime of the client call.
+ ///
+ /// \see grpc::AuthContext.
+ std::shared_ptr<const grpc::AuthContext> auth_context() const {
+ if (auth_context_.get() == nullptr) {
+ auth_context_ = grpc::CreateAuthContext(call_);
+ }
+ return auth_context_;
+ }
+
+ /// Set credentials for the client call.
+ ///
+ /// A credentials object encapsulates all the state needed by a client to
+ /// authenticate with a server and make various assertions, e.g., about the
+ /// client’s identity, role, or whether it is authorized to make a particular
+ /// call.
+ ///
+ /// It is legal to call this only before initial metadata is sent.
+ ///
+ /// \see https://grpc.io/docs/guides/auth.html
+ void set_credentials(const std::shared_ptr<grpc::CallCredentials>& creds);
+
+ /// EXPERIMENTAL debugging API
+ ///
+ /// Returns the credentials for the client call. This should be used only in
+ /// tests and for diagnostic purposes, and should not be used by application
+ /// logic.
+ std::shared_ptr<grpc::CallCredentials> credentials() { return creds_; }
+
+ /// Return the compression algorithm the client call will request be used.
+ /// Note that the gRPC runtime may decide to ignore this request, for example,
+ /// due to resource constraints.
+ grpc_compression_algorithm compression_algorithm() const {
+ return compression_algorithm_;
+ }
+
+ /// Set \a algorithm to be the compression algorithm used for the client call.
+ ///
+ /// \param algorithm The compression algorithm used for the client call.
+ void set_compression_algorithm(grpc_compression_algorithm algorithm);
+
+ /// Flag whether the initial metadata should be \a corked
+ ///
+ /// If \a corked is true, then the initial metadata will be coalesced with the
+ /// write of first message in the stream. As a result, any tag set for the
+ /// initial metadata operation (starting a client-streaming or bidi-streaming
+ /// RPC) will not actually be sent to the completion queue or delivered
+ /// via Next.
+ ///
+ /// \param corked The flag indicating whether the initial metadata is to be
+ /// corked or not.
+ void set_initial_metadata_corked(bool corked) {
+ initial_metadata_corked_ = corked;
+ }
+
+ /// Return the peer uri in a string.
+ /// It is only valid to call this during the lifetime of the client call.
+ ///
+ /// \warning This value is never authenticated or subject to any security
+ /// related code. It must not be used for any authentication related
+ /// functionality. Instead, use auth_context.
+ ///
+ /// \return The call's peer URI.
+ TString peer() const;
+
+ /// Sets the census context.
+ /// It is only valid to call this before the client call is created. A common
+ /// place of setting census context is from within the DefaultConstructor
+ /// method of GlobalCallbacks.
+ void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
+
+ /// Returns the census context that has been set, or nullptr if not set.
+ struct census_context* census_context() const {
+ return census_context_;
+ }
+
+ /// Send a best-effort out-of-band cancel on the call associated with
+ /// this client context. The call could be in any stage; e.g., if it is
+ /// already finished, it may still return success.
+ ///
+ /// There is no guarantee the call will be cancelled.
+ ///
+ /// Note that TryCancel() does not change any of the tags that are pending
+ /// on the completion queue. All pending tags will still be delivered
+ /// (though their ok result may reflect the effect of cancellation).
+ void TryCancel();
+
+ /// Global Callbacks
+ ///
+ /// Can be set exactly once per application to install hooks whenever
+ /// a client context is constructed and destructed.
+ class GlobalCallbacks {
+ public:
+ virtual ~GlobalCallbacks() {}
+ virtual void DefaultConstructor(ClientContext* context) = 0;
+ virtual void Destructor(ClientContext* context) = 0;
+ };
+ static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
+
+ /// Should be used for framework-level extensions only.
+ /// Applications never need to call this method.
+ grpc_call* c_call() { return call_; }
+
+ /// EXPERIMENTAL debugging API
+ ///
+ /// if status is not ok() for an RPC, this will return a detailed string
+ /// of the gRPC Core error that led to the failure. It should not be relied
+ /// upon for anything other than gaining more debug data in failure cases.
+ TString debug_error_string() const { return debug_error_string_; }
+
+ private:
+ // Disallow copy and assign.
+ ClientContext(const ClientContext&);
+ ClientContext& operator=(const ClientContext&);
+
+ friend class ::grpc::testing::InteropClientContextInspector;
+ friend class ::grpc::internal::CallOpClientRecvStatus;
+ friend class ::grpc::internal::CallOpRecvInitialMetadata;
+ friend class ::grpc::Channel;
+ template <class R>
+ friend class ::grpc::ClientReader;
+ template <class W>
+ friend class ::grpc::ClientWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientReaderWriter;
+ template <class R>
+ friend class ::grpc::ClientAsyncReader;
+ template <class W>
+ friend class ::grpc::ClientAsyncWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientAsyncReaderWriter;
+ template <class R>
+ friend class ::grpc::ClientAsyncResponseReader;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::BlockingUnaryCallImpl;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::CallbackUnaryCallImpl;
+ template <class Request, class Response>
+ friend class ::grpc::internal::ClientCallbackReaderWriterImpl;
+ template <class Response>
+ friend class ::grpc::internal::ClientCallbackReaderImpl;
+ template <class Request>
+ friend class ::grpc::internal::ClientCallbackWriterImpl;
+ friend class ::grpc::internal::ClientCallbackUnaryImpl;
+ friend class ::grpc::internal::ClientContextAccessor;
+
+ // Used by friend class CallOpClientRecvStatus
+ void set_debug_error_string(const TString& debug_error_string) {
+ debug_error_string_ = debug_error_string;
+ }
+
+ grpc_call* call() const { return call_; }
+ void set_call(grpc_call* call,
+ const std::shared_ptr<::grpc::Channel>& channel);
+
+ grpc::experimental::ClientRpcInfo* set_client_rpc_info(
+ const char* method, grpc::internal::RpcMethod::RpcType type,
+ grpc::ChannelInterface* channel,
+ const std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>& creators,
+ size_t interceptor_pos) {
+ rpc_info_ = grpc::experimental::ClientRpcInfo(this, type, method, channel);
+ rpc_info_.RegisterInterceptors(creators, interceptor_pos);
+ return &rpc_info_;
+ }
+
+ uint32_t initial_metadata_flags() const {
+ return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) |
+ (wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) |
+ (cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) |
+ (wait_for_ready_explicitly_set_
+ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+ : 0) |
+ (initial_metadata_corked_ ? GRPC_INITIAL_METADATA_CORKED : 0);
+ }
+
+ TString authority() { return authority_; }
+
+ void SendCancelToInterceptors();
+
+ static std::unique_ptr<ClientContext> FromInternalServerContext(
+ const grpc::ServerContextBase& server_context,
+ PropagationOptions options);
+
+ bool initial_metadata_received_;
+ bool wait_for_ready_;
+ bool wait_for_ready_explicitly_set_;
+ bool idempotent_;
+ bool cacheable_;
+ std::shared_ptr<::grpc::Channel> channel_;
+ grpc::internal::Mutex mu_;
+ grpc_call* call_;
+ bool call_canceled_;
+ gpr_timespec deadline_;
+ grpc::string authority_;
+ std::shared_ptr<grpc::CallCredentials> creds_;
+ mutable std::shared_ptr<const grpc::AuthContext> auth_context_;
+ struct census_context* census_context_;
+ std::multimap<TString, TString> send_initial_metadata_;
+ mutable grpc::internal::MetadataMap recv_initial_metadata_;
+ mutable grpc::internal::MetadataMap trailing_metadata_;
+
+ grpc_call* propagate_from_call_;
+ PropagationOptions propagation_options_;
+
+ grpc_compression_algorithm compression_algorithm_;
+ bool initial_metadata_corked_;
+
+ TString debug_error_string_;
+
+ grpc::experimental::ClientRpcInfo rpc_info_;
+};
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H
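
Taken together, the per-call settings above are normally applied in a few lines before the RPC is started, and the metadata accessors are read only after the corresponding events (initial metadata received, stream finished). A minimal sketch, assuming a hypothetical generated stub echo::Echo::Stub with a blocking unary Echo method:

#include <chrono>

#include <grpcpp/grpcpp.h>
#include "echo.grpc.pb.h"  // hypothetical generated header

grpc::Status CallEcho(echo::Echo::Stub* stub, const echo::EchoRequest& request,
                      echo::EchoReply* reply) {
  grpc::ClientContext ctx;  // one context per RPC; never reused across calls
  // Everything below must be set before the RPC is invoked.
  ctx.AddMetadata("x-request-id", "42");  // ASCII header; binary keys need a "-bin" suffix
  ctx.set_deadline(std::chrono::system_clock::now() +
                   std::chrono::seconds(2));          // absolute, not relative
  ctx.set_wait_for_ready(true);                       // queue instead of failing fast while connecting
  ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);  // a request the runtime may ignore

  grpc::Status status = stub->Echo(&ctx, request, reply);  // blocking unary call

  if (status.ok()) {
    // Valid only after the call has finished; keys may repeat and the
    // string_ref values point into memory owned by the context.
    for (const auto& kv : ctx.GetServerTrailingMetadata()) {
      (void)kv;
    }
  }
  return status;
}

When the client call originates inside a server handler, ClientContext::FromServerContext(server_context, PropagationOptions()) builds the child context instead, copying deadline, census and cancellation state according to the selected propagation bits.
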
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
index 78be1f7597..359f03560f 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/string_ref.h>
-namespace grpc {
+namespace grpc {
class Channel;
class ClientContext;
@@ -92,7 +92,7 @@ class ClientRpcInfo {
/// Return a pointer to the underlying ClientContext structure associated
/// with the RPC to support features that apply to it
- grpc::ClientContext* client_context() { return ctx_; }
+ grpc::ClientContext* client_context() { return ctx_; }
/// Return the type of the RPC (unary or a streaming flavor)
Type type() const { return type_; }
@@ -115,8 +115,8 @@ class ClientRpcInfo {
ClientRpcInfo() = default;
// Constructor will only be called from ClientContext
- ClientRpcInfo(grpc::ClientContext* ctx, internal::RpcMethod::RpcType type,
- const char* method, grpc::ChannelInterface* channel)
+ ClientRpcInfo(grpc::ClientContext* ctx, internal::RpcMethod::RpcType type,
+ const char* method, grpc::ChannelInterface* channel)
: ctx_(ctx),
type_(static_cast<Type>(type)),
method_(method),
@@ -158,7 +158,7 @@ class ClientRpcInfo {
}
}
- grpc::ClientContext* ctx_ = nullptr;
+ grpc::ClientContext* ctx_ = nullptr;
// TODO(yashykt): make type_ const once move-assignment is deleted
Type type_{Type::UNKNOWN};
const char* method_ = nullptr;
@@ -168,7 +168,7 @@ class ClientRpcInfo {
size_t hijacked_interceptor_ = 0;
friend class internal::InterceptorBatchMethodsImpl;
- friend class grpc::ClientContext;
+ friend class grpc::ClientContext;
};
// PLEASE DO NOT USE THIS. ALWAYS PREFER PER CHANNEL INTERCEPTORS OVER A GLOBAL
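
The per-channel registration that the comment above recommends goes through ClientInterceptorFactoryInterface: the channel invokes CreateClientInterceptor once per RPC and hands the new interceptor the ClientRpcInfo whose client_context() accessor appears in this header. A minimal logging-style sketch (the class names and the chosen hook point are illustrative only):

#include <grpcpp/grpcpp.h>
#include <grpcpp/impl/codegen/client_interceptor.h>

// One instance is created per RPC on the channel it is registered with.
class LoggingInterceptor : public grpc::experimental::Interceptor {
 public:
  explicit LoggingInterceptor(grpc::experimental::ClientRpcInfo* info) : info_(info) {}

  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
      // info_->method() names the RPC; info_->client_context() is the caller's context.
    }
    methods->Proceed();  // always forward the batch to the next interceptor or the call
  }

 private:
  grpc::experimental::ClientRpcInfo* info_;
};

class LoggingInterceptorFactory
    : public grpc::experimental::ClientInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateClientInterceptor(
      grpc::experimental::ClientRpcInfo* info) override {
    return new LoggingInterceptor(info);  // owned by the call; destroyed when the RPC ends
  }
};

A vector of such factories is attached when the channel is built, typically via grpc::experimental::CreateCustomChannelWithInterceptors(target, credentials, args, std::move(factories)); ClientRpcInfo::RegisterInterceptors (seen in client_context.h above) then instantiates them in order for every call on that channel.
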
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
index 098bb50ee2..8b4afe52ca 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h
@@ -25,7 +25,7 @@
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class ClientContext;
namespace internal {
@@ -33,7 +33,7 @@ class RpcMethod;
/// Wrapper that performs a blocking unary call
template <class InputMessage, class OutputMessage>
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
- grpc::ClientContext* context,
+ grpc::ClientContext* context,
const InputMessage& request, OutputMessage* result) {
return BlockingUnaryCallImpl<InputMessage, OutputMessage>(
channel, method, context, request, result)
@@ -44,9 +44,9 @@ template <class InputMessage, class OutputMessage>
class BlockingUnaryCallImpl {
public:
BlockingUnaryCallImpl(ChannelInterface* channel, const RpcMethod& method,
- grpc::ClientContext* context,
+ grpc::ClientContext* context,
const InputMessage& request, OutputMessage* result) {
- ::grpc::CompletionQueue cq(grpc_completion_queue_attributes{
+ ::grpc::CompletionQueue cq(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
nullptr}); // Pluckable completion queue
::grpc::internal::Call call(channel->CreateCall(method, context, &cq));
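
BlockingUnaryCall above is the single funnel behind every generated synchronous unary stub method: it builds a pluck-only CompletionQueue on the stack, runs one op set (send initial metadata, send message, receive initial metadata, receive message, receive status) and blocks until the status arrives. Roughly, a generated stub body reduces to the sketch below; the echo names are placeholders, and real generated code constructs its RpcMethod once in the stub constructor and registers it with the channel.

#include <grpcpp/grpcpp.h>
#include <grpcpp/impl/codegen/client_unary_call.h>
#include "echo.grpc.pb.h"  // hypothetical generated messages

// Hand-rolled equivalent of a generated synchronous unary stub method.
grpc::Status EchoUnary(grpc::ChannelInterface* channel,
                       grpc::ClientContext* context,
                       const echo::EchoRequest& request,
                       echo::EchoReply* reply) {
  static const grpc::internal::RpcMethod kMethod(
      "/echo.Echo/Echo", grpc::internal::RpcMethod::NORMAL_RPC);
  // Serializes the request, runs the full op set on a pluckable queue, and
  // blocks the calling thread until the server's status has been received.
  return grpc::internal::BlockingUnaryCall<echo::EchoRequest, echo::EchoReply>(
      channel, kMethod, context, request, reply);
}

This goes through internal API and is shown only to make the control flow concrete; applications should call the generated stub, which does exactly this on their behalf.
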
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
index ca0c77276a..44f3a938be 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h
@@ -16,433 +16,433 @@
*
*/
-/// A completion queue implements a concurrent producer-consumer queue, with
-/// two main API-exposed methods: \a Next and \a AsyncNext. These
-/// methods are the essential component of the gRPC C++ asynchronous API.
-/// There is also a \a Shutdown method to indicate that a given completion queue
-/// will no longer have regular events. This must be called before the
-/// completion queue is destroyed.
-/// All completion queue APIs are thread-safe and may be used concurrently with
-/// any other completion queue API invocation; it is acceptable to have
-/// multiple threads calling \a Next or \a AsyncNext on the same or different
-/// completion queues, or to call these methods concurrently with a \a Shutdown
-/// elsewhere.
-/// \remark{All other API calls on completion queue should be completed before
-/// a completion queue destructor is called.}
+/// A completion queue implements a concurrent producer-consumer queue, with
+/// two main API-exposed methods: \a Next and \a AsyncNext. These
+/// methods are the essential component of the gRPC C++ asynchronous API.
+/// There is also a \a Shutdown method to indicate that a given completion queue
+/// will no longer have regular events. This must be called before the
+/// completion queue is destroyed.
+/// All completion queue APIs are thread-safe and may be used concurrently with
+/// any other completion queue API invocation; it is acceptable to have
+/// multiple threads calling \a Next or \a AsyncNext on the same or different
+/// completion queues, or to call these methods concurrently with a \a Shutdown
+/// elsewhere.
+/// \remark{All other API calls on completion queue should be completed before
+/// a completion queue destructor is called.}
#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
-#include <list>
-
-#include <grpc/impl/codegen/atm.h>
-#include <grpcpp/impl/codegen/completion_queue_tag.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/grpc_library.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct grpc_completion_queue;
-
+#include <list>
+
+#include <grpc/impl/codegen/atm.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/grpc_library.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct grpc_completion_queue;
+
namespace grpc {
-template <class R>
-class ClientReader;
-template <class W>
-class ClientWriter;
-template <class W, class R>
-class ClientReaderWriter;
-template <class R>
-class ServerReader;
-template <class W>
-class ServerWriter;
-namespace internal {
-template <class W, class R>
-class ServerReaderWriterBody;
-
-template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler;
-template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler;
-template <::grpc::StatusCode code>
-class ErrorMethodHandler;
-} // namespace internal
-
-class Channel;
-class ChannelInterface;
-class Server;
-class ServerBuilder;
-class ServerContextBase;
-class ServerInterface;
-
-namespace internal {
-class CompletionQueueTag;
-class RpcMethod;
-template <class InputMessage, class OutputMessage>
-class BlockingUnaryCallImpl;
-template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
-class CallOpSet;
-} // namespace internal
-
-extern CoreCodegenInterface* g_core_codegen_interface;
-
-/// A thin wrapper around \ref grpc_completion_queue (see \ref
-/// src/core/lib/surface/completion_queue.h).
-/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
-/// performance servers.
-class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
- public:
- /// Default constructor. Implicitly creates a \a grpc_completion_queue
- /// instance.
- CompletionQueue()
- : CompletionQueue(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
- nullptr}) {}
-
- /// Wrap \a take, taking ownership of the instance.
- ///
- /// \param take The completion queue instance to wrap. Ownership is taken.
- explicit CompletionQueue(grpc_completion_queue* take);
-
- /// Destructor. Destroys the owned wrapped completion queue / instance.
- ~CompletionQueue() {
- ::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
- }
-
- /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
- enum NextStatus {
- SHUTDOWN, ///< The completion queue has been shutdown and fully-drained
- GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
- ///< associated value; \a ok indicating its success.
- TIMEOUT ///< deadline was reached.
- };
-
- /// Read from the queue, blocking until an event is available or the queue is
- /// shutting down.
- ///
- /// \param tag [out] Updated to point to the read event's tag.
- /// \param ok [out] true if read a successful event, false otherwise.
- ///
- /// Note that each tag sent to the completion queue (through RPC operations
- /// or alarms) will be delivered out of the completion queue by a call to
- /// Next (or a related method), regardless of whether the operation succeeded
- /// or not. Success here means that this operation completed in the normal
- /// valid manner.
- ///
- /// Server-side RPC request: \a ok indicates that the RPC has indeed
- /// been started. If it is false, the server has been Shutdown
- /// before this particular call got matched to an incoming RPC.
- ///
- /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
- /// going to go to the wire. If it is false, it not going to the wire. This
- /// would happen if the channel is either permanently broken or
- /// transiently broken but with the fail-fast option. (Note that async unary
- /// RPCs don't post a CQ tag at this point, nor do client-streaming
- /// or bidi-streaming RPCs that have the initial metadata corked option set.)
- ///
- /// Client-side Write, Client-side WritesDone, Server-side Write,
- /// Server-side Finish, Server-side SendInitialMetadata (which is
- /// typically included in Write or Finish when not done explicitly):
- /// \a ok means that the data/metadata/status/etc is going to go to the
- /// wire. If it is false, it not going to the wire because the call
- /// is already dead (i.e., canceled, deadline expired, other side
- /// dropped the channel, etc).
- ///
- /// Client-side Read, Server-side Read, Client-side
- /// RecvInitialMetadata (which is typically included in Read if not
- /// done explicitly): \a ok indicates whether there is a valid message
- /// that got read. If not, you know that there are certainly no more
- /// messages that can ever be read from this stream. For the client-side
- /// operations, this only happens because the call is dead. For the
- /// server-sider operation, though, this could happen because the client
- /// has done a WritesDone already.
- ///
- /// Client-side Finish: \a ok should always be true
- ///
- /// Server-side AsyncNotifyWhenDone: \a ok should always be true
- ///
- /// Alarm: \a ok is true if it expired, false if it was canceled
- ///
- /// \return true if got an event, false if the queue is fully drained and
- /// shut down.
- bool Next(void** tag, bool* ok) {
- return (AsyncNextInternal(tag, ok,
- ::grpc::g_core_codegen_interface->gpr_inf_future(
- GPR_CLOCK_REALTIME)) != SHUTDOWN);
- }
-
- /// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
- /// Both \a tag and \a ok are updated upon success (if an event is available
- /// within the \a deadline). A \a tag points to an arbitrary location usually
- /// employed to uniquely identify an event.
- ///
- /// \param tag [out] Upon success, updated to point to the event's tag.
- /// \param ok [out] Upon success, true if a successful event, false otherwise
- /// See documentation for CompletionQueue::Next for explanation of ok
- /// \param deadline [in] How long to block in wait for an event.
- ///
- /// \return The type of event read.
- template <typename T>
- NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
- ::grpc::TimePoint<T> deadline_tp(deadline);
- return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
- }
-
- /// EXPERIMENTAL
- /// First executes \a F, then reads from the queue, blocking up to
- /// \a deadline (or the queue's shutdown).
- /// Both \a tag and \a ok are updated upon success (if an event is available
- /// within the \a deadline). A \a tag points to an arbitrary location usually
- /// employed to uniquely identify an event.
- ///
- /// \param f [in] Function to execute before calling AsyncNext on this queue.
- /// \param tag [out] Upon success, updated to point to the event's tag.
- /// \param ok [out] Upon success, true if read a regular event, false
- /// otherwise.
- /// \param deadline [in] How long to block in wait for an event.
- ///
- /// \return The type of event read.
- template <typename T, typename F>
- NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
- CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
- f();
- if (cache.Flush(tag, ok)) {
- return GOT_EVENT;
- } else {
- return AsyncNext(tag, ok, deadline);
- }
- }
-
- /// Request the shutdown of the queue.
- ///
- /// \warning This method must be called at some point if this completion queue
- /// is accessed with Next or AsyncNext. \a Next will not return false
- /// until this method has been called and all pending tags have been drained.
- /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
- /// Only once either one of these methods does that (that is, once the queue
- /// has been \em drained) can an instance of this class be destroyed.
- /// Also note that applications must ensure that no work is enqueued on this
- /// completion queue after this method is called.
- void Shutdown();
-
- /// Returns a \em raw pointer to the underlying \a grpc_completion_queue
- /// instance.
- ///
- /// \warning Remember that the returned instance is owned. No transfer of
- /// owership is performed.
- grpc_completion_queue* cq() { return cq_; }
-
- protected:
- /// Private constructor of CompletionQueue only visible to friend classes
- CompletionQueue(const grpc_completion_queue_attributes& attributes) {
- cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create(
- ::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup(
- &attributes),
- &attributes, NULL);
- InitialAvalanching(); // reserve this for the future shutdown
- }
-
- private:
- // Friends for access to server registration lists that enable checking and
- // logging on shutdown
- friend class ::grpc::ServerBuilder;
- friend class ::grpc::Server;
-
- // Friend synchronous wrappers so that they can access Pluck(), which is
- // a semi-private API geared towards the synchronous implementation.
- template <class R>
- friend class ::grpc::ClientReader;
- template <class W>
- friend class ::grpc::ClientWriter;
- template <class W, class R>
- friend class ::grpc::ClientReaderWriter;
- template <class R>
- friend class ::grpc::ServerReader;
- template <class W>
- friend class ::grpc::ServerWriter;
- template <class W, class R>
- friend class ::grpc::internal::ServerReaderWriterBody;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::RpcMethodHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ClientStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ServerStreamingHandler;
- template <class Streamer, bool WriteNeeded>
- friend class ::grpc::internal::TemplatedBidiStreamingHandler;
- template <::grpc::StatusCode code>
- friend class ::grpc::internal::ErrorMethodHandler;
- friend class ::grpc::ServerContextBase;
- friend class ::grpc::ServerInterface;
- template <class InputMessage, class OutputMessage>
- friend class ::grpc::internal::BlockingUnaryCallImpl;
-
- // Friends that need access to constructor for callback CQ
- friend class ::grpc::Channel;
-
- // For access to Register/CompleteAvalanching
- template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
- friend class ::grpc::internal::CallOpSet;
-
- /// EXPERIMENTAL
- /// Creates a Thread Local cache to store the first event
- /// On this completion queue queued from this thread. Once
- /// initialized, it must be flushed on the same thread.
- class CompletionQueueTLSCache {
- public:
- CompletionQueueTLSCache(CompletionQueue* cq);
- ~CompletionQueueTLSCache();
- bool Flush(void** tag, bool* ok);
-
- private:
- CompletionQueue* cq_;
- bool flushed_;
- };
-
- NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
-
- /// Wraps \a grpc_completion_queue_pluck.
- /// \warning Must not be mixed with calls to \a Next.
- bool Pluck(::grpc::internal::CompletionQueueTag* tag) {
- auto deadline =
- ::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
- while (true) {
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- bool ok = ev.success != 0;
- void* ignored = tag;
- if (tag->FinalizeResult(&ignored, &ok)) {
- GPR_CODEGEN_ASSERT(ignored == tag);
- return ok;
- }
- }
- }
-
- /// Performs a single polling pluck on \a tag.
- /// \warning Must not be mixed with calls to \a Next.
- ///
- /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
- /// shutdown. This is most likely a bug and if it is a bug, then change this
- /// implementation to simple call the other TryPluck function with a zero
- /// timeout. i.e:
- /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
- void TryPluck(::grpc::internal::CompletionQueueTag* tag) {
- auto deadline =
- ::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- if (ev.type == GRPC_QUEUE_TIMEOUT) return;
- bool ok = ev.success != 0;
- void* ignored = tag;
- // the tag must be swallowed if using TryPluck
- GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
- }
-
- /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
- /// the pluck() was successful and returned the tag.
- ///
- /// This exects tag->FinalizeResult (if called) to return 'false' i.e expects
- /// that the tag is internal not something that is returned to the user.
- void TryPluck(::grpc::internal::CompletionQueueTag* tag,
- gpr_timespec deadline) {
- auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
- cq_, tag, deadline, nullptr);
- if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
- return;
- }
-
- bool ok = ev.success != 0;
- void* ignored = tag;
- GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
- }
-
- /// Manage state of avalanching operations : completion queue tags that
- /// trigger other completion queue operations. The underlying core completion
- /// queue should not really shutdown until all avalanching operations have
- /// been finalized. Note that we maintain the requirement that an avalanche
- /// registration must take place before CQ shutdown (which must be maintained
- /// elsehwere)
- void InitialAvalanching() {
- gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
- }
- void RegisterAvalanching() {
- gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
- static_cast<gpr_atm>(1));
- }
- void CompleteAvalanching() {
- if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
- static_cast<gpr_atm>(-1)) == 1) {
- ::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
- }
- }
-
- void RegisterServer(const ::grpc::Server* server) {
- (void)server;
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- server_list_.push_back(server);
-#endif
- }
- void UnregisterServer(const ::grpc::Server* server) {
- (void)server;
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- server_list_.remove(server);
-#endif
- }
- bool ServerListEmpty() const {
-#ifndef NDEBUG
- grpc::internal::MutexLock l(&server_list_mutex_);
- return server_list_.empty();
-#endif
- return true;
- }
-
- grpc_completion_queue* cq_; // owned
-
- gpr_atm avalanches_in_flight_;
-
- // List of servers associated with this CQ. Even though this is only used in
- // debug builds (when NDEBUG is not defined), instantiate it in all cases since
- // otherwise the size will be inconsistent.
- mutable grpc::internal::Mutex server_list_mutex_;
- std::list<const ::grpc::Server*>
- server_list_ /* GUARDED_BY(server_list_mutex_) */;
-};
-
-/// A specific type of completion queue used by the processing of notifications
-/// by servers. Instantiated by \a ServerBuilder or Server (for health checker).
-class ServerCompletionQueue : public CompletionQueue {
- public:
- bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
-
- protected:
- /// Default constructor
- ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
-
- private:
- /// \param completion_type indicates whether this is a NEXT or CALLBACK
- /// completion queue.
- /// \param polling_type Informs the GRPC library about the type of polling
- /// allowed on this completion queue. See grpc_cq_polling_type's description
- /// in grpc_types.h for more details.
- /// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
- ServerCompletionQueue(grpc_cq_completion_type completion_type,
- grpc_cq_polling_type polling_type,
- grpc_experimental_completion_queue_functor* shutdown_cb)
- : CompletionQueue(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
- shutdown_cb}),
- polling_type_(polling_type) {}
-
- grpc_cq_polling_type polling_type_;
- friend class ::grpc::ServerBuilder;
- friend class ::grpc::Server;
-};
-
+template <class R>
+class ClientReader;
+template <class W>
+class ClientWriter;
+template <class W, class R>
+class ClientReaderWriter;
+template <class R>
+class ServerReader;
+template <class W>
+class ServerWriter;
+namespace internal {
+template <class W, class R>
+class ServerReaderWriterBody;
+
+template <class ServiceType, class RequestType, class ResponseType>
+class RpcMethodHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ClientStreamingHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ServerStreamingHandler;
+template <class Streamer, bool WriteNeeded>
+class TemplatedBidiStreamingHandler;
+template <::grpc::StatusCode code>
+class ErrorMethodHandler;
+} // namespace internal
+
+class Channel;
+class ChannelInterface;
+class Server;
+class ServerBuilder;
+class ServerContextBase;
+class ServerInterface;
+
+namespace internal {
+class CompletionQueueTag;
+class RpcMethod;
+template <class InputMessage, class OutputMessage>
+class BlockingUnaryCallImpl;
+template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+class CallOpSet;
+} // namespace internal
+
+extern CoreCodegenInterface* g_core_codegen_interface;
+
+/// A thin wrapper around \ref grpc_completion_queue (see \ref
+/// src/core/lib/surface/completion_queue.h).
+/// See \ref doc/cpp/perf_notes.md for notes on best practices for high
+/// performance servers.
+class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
+ public:
+ /// Default constructor. Implicitly creates a \a grpc_completion_queue
+ /// instance.
+ CompletionQueue()
+ : CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}) {}
+
+ /// Wrap \a take, taking ownership of the instance.
+ ///
+ /// \param take The completion queue instance to wrap. Ownership is taken.
+ explicit CompletionQueue(grpc_completion_queue* take);
+
+ /// Destructor. Destroys the owned wrapped completion queue / instance.
+ ~CompletionQueue() {
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_);
+ }
+
+ /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT.
+ enum NextStatus {
+ SHUTDOWN, ///< The completion queue has been shut down and fully drained
+ GOT_EVENT, ///< Got a new event; \a tag will be filled in with its
+ ///< associated value; \a ok indicating its success.
+ TIMEOUT ///< deadline was reached.
+ };
+
+ /// Read from the queue, blocking until an event is available or the queue is
+ /// shutting down.
+ ///
+ /// \param tag [out] Updated to point to the read event's tag.
+ /// \param ok [out] true if read a successful event, false otherwise.
+ ///
+ /// Note that each tag sent to the completion queue (through RPC operations
+ /// or alarms) will be delivered out of the completion queue by a call to
+ /// Next (or a related method), regardless of whether the operation succeeded
+ /// or not. Success here means that this operation completed in the normal
+ /// valid manner.
+ ///
+ /// Server-side RPC request: \a ok indicates that the RPC has indeed
+ /// been started. If it is false, the server has been Shutdown
+ /// before this particular call got matched to an incoming RPC.
+ ///
+ /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is
+ /// going to go to the wire. If it is false, it is not going to the wire. This
+ /// would happen if the channel is either permanently broken or
+ /// transiently broken but with the fail-fast option. (Note that async unary
+ /// RPCs don't post a CQ tag at this point, nor do client-streaming
+ /// or bidi-streaming RPCs that have the initial metadata corked option set.)
+ ///
+ /// Client-side Write, Client-side WritesDone, Server-side Write,
+ /// Server-side Finish, Server-side SendInitialMetadata (which is
+ /// typically included in Write or Finish when not done explicitly):
+ /// \a ok means that the data/metadata/status/etc is going to go to the
+ /// wire. If it is false, it is not going to the wire because the call
+ /// is already dead (i.e., canceled, deadline expired, other side
+ /// dropped the channel, etc).
+ ///
+ /// Client-side Read, Server-side Read, Client-side
+ /// RecvInitialMetadata (which is typically included in Read if not
+ /// done explicitly): \a ok indicates whether there is a valid message
+ /// that got read. If not, you know that there are certainly no more
+ /// messages that can ever be read from this stream. For the client-side
+ /// operations, this only happens because the call is dead. For the
+ /// server-side operation, though, this could happen because the client
+ /// has done a WritesDone already.
+ ///
+ /// Client-side Finish: \a ok should always be true
+ ///
+ /// Server-side AsyncNotifyWhenDone: \a ok should always be true
+ ///
+ /// Alarm: \a ok is true if it expired, false if it was canceled
+ ///
+ /// \return true if got an event, false if the queue is fully drained and
+ /// shut down.
+ bool Next(void** tag, bool* ok) {
+ return (AsyncNextInternal(tag, ok,
+ ::grpc::g_core_codegen_interface->gpr_inf_future(
+ GPR_CLOCK_REALTIME)) != SHUTDOWN);
+ }
+
+ /// Read from the queue, blocking up to \a deadline (or the queue's shutdown).
+ /// Both \a tag and \a ok are updated upon success (if an event is available
+ /// within the \a deadline). A \a tag points to an arbitrary location usually
+ /// employed to uniquely identify an event.
+ ///
+ /// \param tag [out] Upon success, updated to point to the event's tag.
+ /// \param ok [out] Upon success, true if a successful event, false otherwise
+ /// See documentation for CompletionQueue::Next for explanation of ok
+ /// \param deadline [in] How long to block in wait for an event.
+ ///
+ /// \return The type of event read.
+ template <typename T>
+ NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) {
+ ::grpc::TimePoint<T> deadline_tp(deadline);
+ return AsyncNextInternal(tag, ok, deadline_tp.raw_time());
+ }
+
+ /// EXPERIMENTAL
+ /// First executes \a F, then reads from the queue, blocking up to
+ /// \a deadline (or the queue's shutdown).
+ /// Both \a tag and \a ok are updated upon success (if an event is available
+ /// within the \a deadline). A \a tag points to an arbitrary location usually
+ /// employed to uniquely identify an event.
+ ///
+ /// \param f [in] Function to execute before calling AsyncNext on this queue.
+ /// \param tag [out] Upon success, updated to point to the event's tag.
+ /// \param ok [out] Upon success, true if read a regular event, false
+ /// otherwise.
+ /// \param deadline [in] How long to block in wait for an event.
+ ///
+ /// \return The type of event read.
+ template <typename T, typename F>
+ NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) {
+ CompletionQueueTLSCache cache = CompletionQueueTLSCache(this);
+ f();
+ if (cache.Flush(tag, ok)) {
+ return GOT_EVENT;
+ } else {
+ return AsyncNext(tag, ok, deadline);
+ }
+ }
+
+ /// Request the shutdown of the queue.
+ ///
+ /// \warning This method must be called at some point if this completion queue
+ /// is accessed with Next or AsyncNext. \a Next will not return false
+ /// until this method has been called and all pending tags have been drained.
+ /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .)
+ /// Only once either one of these methods does that (that is, once the queue
+ /// has been \em drained) can an instance of this class be destroyed.
+ /// Also note that applications must ensure that no work is enqueued on this
+ /// completion queue after this method is called.
+ void Shutdown();
+
+ /// Returns a \em raw pointer to the underlying \a grpc_completion_queue
+ /// instance.
+ ///
+ /// \warning Remember that the returned instance is owned. No transfer of
+ /// ownership is performed.
+ grpc_completion_queue* cq() { return cq_; }
+
+ protected:
+ /// Private constructor of CompletionQueue only visible to friend classes
+ CompletionQueue(const grpc_completion_queue_attributes& attributes) {
+ cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create(
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup(
+ &attributes),
+ &attributes, NULL);
+ InitialAvalanching(); // reserve this for the future shutdown
+ }
+
+ private:
+ // Friends for access to server registration lists that enable checking and
+ // logging on shutdown
+ friend class ::grpc::ServerBuilder;
+ friend class ::grpc::Server;
+
+ // Friend synchronous wrappers so that they can access Pluck(), which is
+ // a semi-private API geared towards the synchronous implementation.
+ template <class R>
+ friend class ::grpc::ClientReader;
+ template <class W>
+ friend class ::grpc::ClientWriter;
+ template <class W, class R>
+ friend class ::grpc::ClientReaderWriter;
+ template <class R>
+ friend class ::grpc::ServerReader;
+ template <class W>
+ friend class ::grpc::ServerWriter;
+ template <class W, class R>
+ friend class ::grpc::internal::ServerReaderWriterBody;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::RpcMethodHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ClientStreamingHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ServerStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class ::grpc::internal::TemplatedBidiStreamingHandler;
+ template <::grpc::StatusCode code>
+ friend class ::grpc::internal::ErrorMethodHandler;
+ friend class ::grpc::ServerContextBase;
+ friend class ::grpc::ServerInterface;
+ template <class InputMessage, class OutputMessage>
+ friend class ::grpc::internal::BlockingUnaryCallImpl;
+
+ // Friends that need access to constructor for callback CQ
+ friend class ::grpc::Channel;
+
+ // For access to Register/CompleteAvalanching
+ template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+ friend class ::grpc::internal::CallOpSet;
+
+ /// EXPERIMENTAL
+ /// Creates a thread-local cache to store the first event
+ /// queued on this completion queue from this thread. Once
+ /// initialized, it must be flushed on the same thread.
+ class CompletionQueueTLSCache {
+ public:
+ CompletionQueueTLSCache(CompletionQueue* cq);
+ ~CompletionQueueTLSCache();
+ bool Flush(void** tag, bool* ok);
+
+ private:
+ CompletionQueue* cq_;
+ bool flushed_;
+ };
+
+ NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline);
+
+ /// Wraps \a grpc_completion_queue_pluck.
+ /// \warning Must not be mixed with calls to \a Next.
+ bool Pluck(::grpc::internal::CompletionQueueTag* tag) {
+ auto deadline =
+ ::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME);
+ while (true) {
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ if (tag->FinalizeResult(&ignored, &ok)) {
+ GPR_CODEGEN_ASSERT(ignored == tag);
+ return ok;
+ }
+ }
+ }
+
+ /// Performs a single polling pluck on \a tag.
+ /// \warning Must not be mixed with calls to \a Next.
+ ///
+ /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already
+ /// shut down. This is most likely a bug, and if it is a bug, then change this
+ /// implementation to simply call the other TryPluck function with a zero
+ /// timeout, i.e.:
+ /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME))
+ void TryPluck(::grpc::internal::CompletionQueueTag* tag) {
+ auto deadline =
+ ::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME);
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ if (ev.type == GRPC_QUEUE_TIMEOUT) return;
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ // the tag must be swallowed if using TryPluck
+ GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
+ }
+
+ /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if
+ /// the pluck() was successful and returned the tag.
+ ///
+ /// This expects tag->FinalizeResult (if called) to return 'false', i.e. it
+ /// expects that the tag is internal, not something that is returned to the user.
+ void TryPluck(::grpc::internal::CompletionQueueTag* tag,
+ gpr_timespec deadline) {
+ auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck(
+ cq_, tag, deadline, nullptr);
+ if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) {
+ return;
+ }
+
+ bool ok = ev.success != 0;
+ void* ignored = tag;
+ GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok));
+ }
+
+ /// Manage state of avalanching operations: completion queue tags that
+ /// trigger other completion queue operations. The underlying core completion
+ /// queue should not really shut down until all avalanching operations have
+ /// been finalized. Note that we maintain the requirement that an avalanche
+ /// registration must take place before CQ shutdown (which must be maintained
+ /// elsewhere)
+ void InitialAvalanching() {
+ gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1));
+ }
+ void RegisterAvalanching() {
+ gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
+ static_cast<gpr_atm>(1));
+ }
+ void CompleteAvalanching() {
+ if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
+ static_cast<gpr_atm>(-1)) == 1) {
+ ::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
+ }
+ }
+
+ void RegisterServer(const ::grpc::Server* server) {
+ (void)server;
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ server_list_.push_back(server);
+#endif
+ }
+ void UnregisterServer(const ::grpc::Server* server) {
+ (void)server;
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ server_list_.remove(server);
+#endif
+ }
+ bool ServerListEmpty() const {
+#ifndef NDEBUG
+ grpc::internal::MutexLock l(&server_list_mutex_);
+ return server_list_.empty();
+#endif
+ return true;
+ }
+
+ grpc_completion_queue* cq_; // owned
+
+ gpr_atm avalanches_in_flight_;
+
+ // List of servers associated with this CQ. Even though this is only used in
+ // debug builds (when NDEBUG is not defined), instantiate it in all cases since
+ // otherwise the size will be inconsistent.
+ mutable grpc::internal::Mutex server_list_mutex_;
+ std::list<const ::grpc::Server*>
+ server_list_ /* GUARDED_BY(server_list_mutex_) */;
+};
+
+/// A specific type of completion queue used by the processing of notifications
+/// by servers. Instantiated by \a ServerBuilder or Server (for health checker).
+class ServerCompletionQueue : public CompletionQueue {
+ public:
+ bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; }
+
+ protected:
+ /// Default constructor
+ ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {}
+
+ private:
+ /// \param completion_type indicates whether this is a NEXT or CALLBACK
+ /// completion queue.
+ /// \param polling_type Informs the GRPC library about the type of polling
+ /// allowed on this completion queue. See grpc_cq_polling_type's description
+ /// in grpc_types.h for more details.
+ /// \param shutdown_cb is the shutdown callback used for CALLBACK api queues
+ ServerCompletionQueue(grpc_cq_completion_type completion_type,
+ grpc_cq_polling_type polling_type,
+ grpc_experimental_completion_queue_functor* shutdown_cb)
+ : CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, completion_type, polling_type,
+ shutdown_cb}),
+ polling_type_(polling_type) {}
+
+ grpc_cq_polling_type polling_type_;
+ friend class ::grpc::ServerBuilder;
+ friend class ::grpc::Server;
+};
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H
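
A minimal, self-contained sketch of the CompletionQueue contract documented above (Next, AsyncNext, Shutdown and drain). It uses grpc::Alarm only as a convenient source of completion-queue tags, so no generated stubs are required; the 100 ms and 10 ms deadlines are arbitrary.

#include <chrono>
#include <iostream>

#include <grpcpp/alarm.h>
#include <grpcpp/grpcpp.h>

int main() {
  grpc::CompletionQueue cq;

  // Post a tag that becomes ready ~100 ms from now. Per the header comments,
  // ok is true when the alarm expires and false if the alarm is cancelled.
  grpc::Alarm alarm;
  int tag_storage = 1;  // any stable address can serve as a tag
  alarm.Set(&cq,
            std::chrono::system_clock::now() + std::chrono::milliseconds(100),
            &tag_storage);

  void* tag = nullptr;
  bool ok = false;

  // AsyncNext with a short deadline returns TIMEOUT until the alarm fires.
  if (cq.AsyncNext(&tag, &ok,
                   std::chrono::system_clock::now() +
                       std::chrono::milliseconds(10)) ==
      grpc::CompletionQueue::TIMEOUT) {
    std::cout << "no event yet\n";
  }

  // Next blocks until the alarm's tag is delivered.
  if (cq.Next(&tag, &ok) && tag == &tag_storage) {
    std::cout << "alarm fired, ok=" << ok << "\n";
  }

  // Shutdown, then drain: Next returns false only once the queue is fully
  // drained, and only then may the CompletionQueue be destroyed.
  cq.Shutdown();
  while (cq.Next(&tag, &ok)) {
  }
  return 0;
}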
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
index 87f9914273..3b214c9b9b 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
@@ -19,7 +19,7 @@
#ifndef GRPCPP_IMPL_CODEGEN_CONFIG_H
#define GRPCPP_IMPL_CODEGEN_CONFIG_H
-#include <util/generic/string.h>
+#include <util/generic/string.h>
/// The following macros are deprecated and appear only for users
/// with PB files generated using gRPC 1.0.x plugins. They should
@@ -27,16 +27,16 @@
#define GRPC_OVERRIDE override // deprecated
#define GRPC_FINAL final // deprecated
-#ifdef GRPC_CUSTOM_STRING
-#warning GRPC_CUSTOM_STRING is no longer supported. Please use TString.
-#endif
-
+#ifdef GRPC_CUSTOM_STRING
+#warning GRPC_CUSTOM_STRING is no longer supported. Please use TString.
+#endif
+
namespace grpc {
-// Using grpc::string and grpc::to_string is discouraged in favor of
-// TString and ToString. This is only for legacy code using
-// them explicitly.
-typedef TString string; // deprecated
+// Using grpc::string and grpc::to_string is discouraged in favor of
+// TString and ToString. This is only for legacy code using
+// them explicitly.
+typedef TString string; // deprecated
} // namespace grpc
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
index 1a3bbd3349..cb987e2e8a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h
@@ -40,7 +40,7 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
private:
internal::Call CreateCall(const internal::RpcMethod& method,
ClientContext* context,
- ::grpc::CompletionQueue* cq) final {
+ ::grpc::CompletionQueue* cq) final {
return delegate_channel()->CreateCall(method, context, cq);
}
@@ -55,7 +55,7 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) override {
delegate_channel()->NotifyOnStateChangeImpl(last_observed, deadline, cq,
tag);
@@ -68,13 +68,13 @@ class DelegatingChannel : public ::grpc::ChannelInterface {
internal::Call CreateCallInternal(const internal::RpcMethod& method,
ClientContext* context,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
size_t interceptor_pos) final {
return delegate_channel()->CreateCallInternal(method, context, cq,
interceptor_pos);
}
- ::grpc::CompletionQueue* CallbackCQ() final {
+ ::grpc::CompletionQueue* CallbackCQ() final {
return delegate_channel()->CallbackCQ();
}
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
index c729970ca8..46af38512d 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
@@ -21,7 +21,7 @@
#include <grpcpp/impl/codegen/channel_interface.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
namespace internal {
@@ -46,8 +46,8 @@ class InterceptedChannel : public ChannelInterface {
InterceptedChannel(ChannelInterface* channel, size_t pos)
: channel_(channel), interceptor_pos_(pos) {}
- Call CreateCall(const RpcMethod& method, ::grpc::ClientContext* context,
- ::grpc::CompletionQueue* cq) override {
+ Call CreateCall(const RpcMethod& method, ::grpc::ClientContext* context,
+ ::grpc::CompletionQueue* cq) override {
return channel_->CreateCallInternal(method, context, cq, interceptor_pos_);
}
@@ -60,7 +60,7 @@ class InterceptedChannel : public ChannelInterface {
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
- ::grpc::CompletionQueue* cq,
+ ::grpc::CompletionQueue* cq,
void* tag) override {
return channel_->NotifyOnStateChangeImpl(last_observed, deadline, cq, tag);
}
@@ -69,7 +69,7 @@ class InterceptedChannel : public ChannelInterface {
return channel_->WaitForStateChangeImpl(last_observed, deadline);
}
- ::grpc::CompletionQueue* CallbackCQ() override {
+ ::grpc::CompletionQueue* CallbackCQ() override {
return channel_->CallbackCQ();
}
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
index d0afa03a17..457d5393f5 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h
@@ -19,8 +19,8 @@
#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H
#define GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H
-#include <memory>
-
+#include <memory>
+
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/config.h>
@@ -157,7 +157,7 @@ class InterceptorBatchMethods {
/// Returns a modifiable multimap of the initial metadata to be sent. Valid
/// for PRE_SEND_INITIAL_METADATA interceptions. A value of nullptr indicates
/// that this field is not valid.
- virtual std::multimap<TString, TString>* GetSendInitialMetadata() = 0;
+ virtual std::multimap<TString, TString>* GetSendInitialMetadata() = 0;
/// Returns the status to be sent. Valid for PRE_SEND_STATUS interceptions.
virtual Status GetSendStatus() = 0;
@@ -169,7 +169,7 @@ class InterceptorBatchMethods {
/// Returns a modifiable multimap of the trailing metadata to be sent. Valid
/// for PRE_SEND_STATUS interceptions. A value of nullptr indicates
/// that this field is not valid.
- virtual std::multimap<TString, TString>*
+ virtual std::multimap<TString, TString>*
GetSendTrailingMetadata() = 0;
/// Returns a pointer to the modifiable received message. Note that the
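
A small sketch of a client interceptor built on the hook points above: it adds one metadata entry at PRE_SEND_INITIAL_METADATA and then hands control back with Proceed(). The interceptor API is experimental in this gRPC version; the target address localhost:50051, the header name, and the exact public header paths are assumptions and may vary slightly between releases.

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

#include <grpcpp/grpcpp.h>
#include <grpcpp/support/client_interceptor.h>

// Adds one metadata entry to every outgoing RPC on the channel.
class HeaderAddingInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::
                PRE_SEND_INITIAL_METADATA)) {
      auto* metadata = methods->GetSendInitialMetadata();
      if (metadata != nullptr) {
        metadata->insert({"x-example-header", "1"});  // placeholder key/value
      }
    }
    methods->Proceed();  // always hand control back to the batch
  }
};

class HeaderAddingInterceptorFactory
    : public grpc::experimental::ClientInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateClientInterceptor(
      grpc::experimental::ClientRpcInfo* /*info*/) override {
    return new HeaderAddingInterceptor();
  }
};

int main() {
  std::vector<
      std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
      creators;
  creators.push_back(std::make_unique<HeaderAddingInterceptorFactory>());

  auto channel = grpc::experimental::CreateCustomChannelWithInterceptors(
      "localhost:50051", grpc::InsecureChannelCredentials(),
      grpc::ChannelArguments(), std::move(creators));
  std::cout << (channel ? "channel created\n" : "channel creation failed\n");
  return 0;
}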
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
index 714351f543..32fbe9b883 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h
@@ -104,7 +104,7 @@ class InterceptorBatchMethodsImpl
bool GetSendMessageStatus() override { return !*fail_send_message_; }
- std::multimap<TString, TString>* GetSendInitialMetadata() override {
+ std::multimap<TString, TString>* GetSendInitialMetadata() override {
return send_initial_metadata_;
}
@@ -119,7 +119,7 @@ class InterceptorBatchMethodsImpl
*error_message_ = status.error_message();
}
- std::multimap<TString, TString>* GetSendTrailingMetadata() override {
+ std::multimap<TString, TString>* GetSendTrailingMetadata() override {
return send_trailing_metadata_;
}
@@ -153,25 +153,25 @@ class InterceptorBatchMethodsImpl
}
void SetSendInitialMetadata(
- std::multimap<TString, TString>* metadata) {
+ std::multimap<TString, TString>* metadata) {
send_initial_metadata_ = metadata;
}
- void SetSendStatus(grpc_status_code* code, TString* error_details,
- TString* error_message) {
+ void SetSendStatus(grpc_status_code* code, TString* error_details,
+ TString* error_message) {
code_ = code;
error_details_ = error_details;
error_message_ = error_message;
}
void SetSendTrailingMetadata(
- std::multimap<TString, TString>* metadata) {
+ std::multimap<TString, TString>* metadata) {
send_trailing_metadata_ = metadata;
}
- void SetRecvMessage(void* message, bool* hijacked_recv_message_failed) {
+ void SetRecvMessage(void* message, bool* hijacked_recv_message_failed) {
recv_message_ = message;
- hijacked_recv_message_failed_ = hijacked_recv_message_failed;
+ hijacked_recv_message_failed_ = hijacked_recv_message_failed;
}
void SetRecvInitialMetadata(MetadataMap* map) {
@@ -198,7 +198,7 @@ class InterceptorBatchMethodsImpl
void FailHijackedRecvMessage() override {
GPR_CODEGEN_ASSERT(hooks_[static_cast<size_t>(
experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)]);
- *hijacked_recv_message_failed_ = true;
+ *hijacked_recv_message_failed_ = true;
}
// Clears all state
@@ -401,16 +401,16 @@ class InterceptorBatchMethodsImpl
const void** orig_send_message_ = nullptr;
std::function<Status(const void*)> serializer_;
- std::multimap<TString, TString>* send_initial_metadata_;
+ std::multimap<TString, TString>* send_initial_metadata_;
grpc_status_code* code_ = nullptr;
- TString* error_details_ = nullptr;
- TString* error_message_ = nullptr;
+ TString* error_details_ = nullptr;
+ TString* error_message_ = nullptr;
- std::multimap<TString, TString>* send_trailing_metadata_ = nullptr;
+ std::multimap<TString, TString>* send_trailing_metadata_ = nullptr;
void* recv_message_ = nullptr;
- bool* hijacked_recv_message_failed_ = nullptr;
+ bool* hijacked_recv_message_failed_ = nullptr;
MetadataMap* recv_initial_metadata_ = nullptr;
@@ -475,7 +475,7 @@ class CancelInterceptorBatchMethods
"has a Cancel notification");
}
- std::multimap<TString, TString>* GetSendInitialMetadata() override {
+ std::multimap<TString, TString>* GetSendInitialMetadata() override {
GPR_CODEGEN_ASSERT(false &&
"It is illegal to call GetSendInitialMetadata on a "
"method which has a Cancel notification");
@@ -496,7 +496,7 @@ class CancelInterceptorBatchMethods
return;
}
- std::multimap<TString, TString>* GetSendTrailingMetadata() override {
+ std::multimap<TString, TString>* GetSendTrailingMetadata() override {
GPR_CODEGEN_ASSERT(false &&
"It is illegal to call GetSendTrailingMetadata on a "
"method which has a Cancel notification");
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
index 4048ea1197..0299bb675d 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h
@@ -20,9 +20,9 @@
#define GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H
namespace grpc {
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
// NOTE: This is an API for advanced users who need custom allocators.
// Per rpc struct for the allocator. This is the interface to return to user.
@@ -69,25 +69,25 @@ class MessageAllocator {
virtual MessageHolder<RequestT, ResponseT>* AllocateMessages() = 0;
};
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-#endif
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
-namespace experimental {
-
-using ::grpc::RpcAllocatorState;
-
-template <typename RequestT, typename ResponseT>
-using MessageHolder = ::grpc::MessageHolder<RequestT, ResponseT>;
-
-template <typename RequestT, typename ResponseT>
-using MessageAllocator = ::grpc::MessageAllocator<RequestT, ResponseT>;
-
-} // namespace experimental
-#endif
-
+#endif
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+
+using ::grpc::RpcAllocatorState;
+
+template <typename RequestT, typename ResponseT>
+using MessageHolder = ::grpc::MessageHolder<RequestT, ResponseT>;
+
+template <typename RequestT, typename ResponseT>
+using MessageAllocator = ::grpc::MessageAllocator<RequestT, ResponseT>;
+
+} // namespace experimental
+#endif
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H
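
A sketch of a custom allocator against the interface above, with hypothetical EchoRequest/EchoResponse structs standing in for generated protobuf messages. The grpc::experimental:: spelling is used because it is valid whether or not GRPC_CALLBACK_API_NONEXPERIMENTAL is defined (an alias is provided in that case); wiring the allocator into a real callback service goes through the generated SetMessageAllocatorFor_<Method>() setter, which is not shown here.

#include <grpcpp/grpcpp.h>
#include <grpcpp/impl/codegen/message_allocator.h>

// Hypothetical stand-ins for generated protobuf message types.
struct EchoRequest {};
struct EchoResponse {};

// One heap allocation per RPC: the request and response objects live inside
// the holder, which frees itself when the library calls Release().
class EchoMessageHolder
    : public grpc::experimental::MessageHolder<EchoRequest, EchoResponse> {
 public:
  EchoMessageHolder() {
    set_request(&request_);
    set_response(&response_);
  }
  void Release() override { delete this; }

 private:
  EchoRequest request_;
  EchoResponse response_;
};

class EchoMessageAllocator
    : public grpc::experimental::MessageAllocator<EchoRequest, EchoResponse> {
 public:
  grpc::experimental::MessageHolder<EchoRequest, EchoResponse>*
  AllocateMessages() override {
    return new EchoMessageHolder();
  }
};

int main() {
  EchoMessageAllocator allocator;
  auto* holder = allocator.AllocateMessages();
  holder->Release();  // in a real server the gRPC library calls this
  return 0;
}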
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
index 03afc0781a..1471153676 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h
@@ -36,12 +36,12 @@ class MetadataMap {
~MetadataMap() { Destroy(); }
- TString GetBinaryErrorDetails() {
+ TString GetBinaryErrorDetails() {
// if filled_, extract from the multimap for O(log(n))
if (filled_) {
auto iter = map_.find(kBinaryErrorDetailsKey);
if (iter != map_.end()) {
- return TString(iter->second.begin(), iter->second.length());
+ return TString(iter->second.begin(), iter->second.length());
}
}
// if not yet filled, take the O(n) lookup to avoid allocating the
@@ -54,13 +54,13 @@ class MetadataMap {
GRPC_SLICE_START_PTR(arr_.metadata[i].key)),
kBinaryErrorDetailsKey,
GRPC_SLICE_LENGTH(arr_.metadata[i].key)) == 0) {
- return TString(reinterpret_cast<const char*>(
- GRPC_SLICE_START_PTR(arr_.metadata[i].value)),
- GRPC_SLICE_LENGTH(arr_.metadata[i].value));
+ return TString(reinterpret_cast<const char*>(
+ GRPC_SLICE_START_PTR(arr_.metadata[i].value)),
+ GRPC_SLICE_LENGTH(arr_.metadata[i].value));
}
}
}
- return TString();
+ return TString();
}
std::multimap<grpc::string_ref, grpc::string_ref>* map() {
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
index 0033936b04..c2b1c3a924 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,362 +19,362 @@
#ifndef GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H
#define GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H
-#include <grpcpp/impl/codegen/byte_buffer.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/sync_stream.h>
+#include <grpcpp/impl/codegen/byte_buffer.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/rpc_service_method.h>
+#include <grpcpp/impl/codegen/sync_stream.h>
namespace grpc {
namespace internal {
-// Invoke the method handler, fill in the status, and
-// return whether or not we finished safely (without an exception).
-// Note that exception handling is 0-cost in most compiler/library
-// implementations (except when an exception is actually thrown),
-// so this process doesn't require additional overhead in the common case.
-// Additionally, we don't need to return if we caught an exception or not;
-// the handling is the same in either case.
-template <class Callable>
-::grpc::Status CatchingFunctionHandler(Callable&& handler) {
-#if GRPC_ALLOW_EXCEPTIONS
- try {
- return handler();
- } catch (...) {
- return ::grpc::Status(::grpc::StatusCode::UNKNOWN,
- "Unexpected error in RPC handling");
- }
-#else // GRPC_ALLOW_EXCEPTIONS
- return handler();
-#endif // GRPC_ALLOW_EXCEPTIONS
-}
-
-/// A wrapper class of an application provided rpc method handler.
+// Invoke the method handler, fill in the status, and
+// return whether or not we finished safely (without an exception).
+// Note that exception handling is 0-cost in most compiler/library
+// implementations (except when an exception is actually thrown),
+// so this process doesn't require additional overhead in the common case.
+// Additionally, we don't need to return if we caught an exception or not;
+// the handling is the same in either case.
+template <class Callable>
+::grpc::Status CatchingFunctionHandler(Callable&& handler) {
+#if GRPC_ALLOW_EXCEPTIONS
+ try {
+ return handler();
+ } catch (...) {
+ return ::grpc::Status(::grpc::StatusCode::UNKNOWN,
+ "Unexpected error in RPC handling");
+ }
+#else // GRPC_ALLOW_EXCEPTIONS
+ return handler();
+#endif // GRPC_ALLOW_EXCEPTIONS
+}
+
+/// A wrapper class of an application provided rpc method handler.
template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler : public ::grpc::internal::MethodHandler {
- public:
- RpcMethodHandler(
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ResponseType*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ResponseType rsp;
- ::grpc::Status status = param.status;
- if (status.ok()) {
- status = CatchingFunctionHandler([this, &param, &rsp] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- static_cast<RequestType*>(param.request), &rsp);
- });
- static_cast<RequestType*>(param.request)->~RequestType();
- }
-
- GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- if (status.ok()) {
- status = ops.SendMessagePtr(&rsp);
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
- ::grpc::Status* status, void** /*handler_data*/) final {
- ::grpc::ByteBuffer buf;
- buf.set_buffer(req);
- auto* request =
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call, sizeof(RequestType))) RequestType();
- *status =
- ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
- buf.Release();
- if (status->ok()) {
- return request;
- }
- request->~RequestType();
- return nullptr;
- }
-
- private:
- /// Application provided rpc handler function.
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ResponseType*)>
- func_;
- // The class the above handler function lives in.
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided client streaming handler.
+class RpcMethodHandler : public ::grpc::internal::MethodHandler {
+ public:
+ RpcMethodHandler(
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ResponseType*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ResponseType rsp;
+ ::grpc::Status status = param.status;
+ if (status.ok()) {
+ status = CatchingFunctionHandler([this, &param, &rsp] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ static_cast<RequestType*>(param.request), &rsp);
+ });
+ static_cast<RequestType*>(param.request)->~RequestType();
+ }
+
+ GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_);
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ if (status.ok()) {
+ status = ops.SendMessagePtr(&rsp);
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+ ::grpc::Status* status, void** /*handler_data*/) final {
+ ::grpc::ByteBuffer buf;
+ buf.set_buffer(req);
+ auto* request =
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call, sizeof(RequestType))) RequestType();
+ *status =
+ ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+ buf.Release();
+ if (status->ok()) {
+ return request;
+ }
+ request->~RequestType();
+ return nullptr;
+ }
+
+ private:
+ /// Application provided rpc handler function.
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ResponseType*)>
+ func_;
+ // The class the above handler function lives in.
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application provided client streaming handler.
template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- ClientStreamingHandler(
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- ServerReader<RequestType>*, ResponseType*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ServerReader<RequestType> reader(
- param.call, static_cast<::grpc::ServerContext*>(param.server_context));
- ResponseType rsp;
- ::grpc::Status status = CatchingFunctionHandler([this, &param, &reader,
- &rsp] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- &reader, &rsp);
- });
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- }
- if (status.ok()) {
- status = ops.SendMessagePtr(&rsp);
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- private:
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- ServerReader<RequestType>*, ResponseType*)>
- func_;
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided server streaming handler.
+class ClientStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ ClientStreamingHandler(
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ ServerReader<RequestType>*, ResponseType*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ServerReader<RequestType> reader(
+ param.call, static_cast<::grpc::ServerContext*>(param.server_context));
+ ResponseType rsp;
+ ::grpc::Status status = CatchingFunctionHandler([this, &param, &reader,
+ &rsp] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ &reader, &rsp);
+ });
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ }
+ if (status.ok()) {
+ status = ops.SendMessagePtr(&rsp);
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ private:
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ ServerReader<RequestType>*, ResponseType*)>
+ func_;
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application provided server streaming handler.
template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- ServerStreamingHandler(std::function<::grpc::Status(
- ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ServerWriter<ResponseType>*)>
- func,
- ServiceType* service)
- : func_(func), service_(service) {}
-
- void RunHandler(const HandlerParameter& param) final {
- ::grpc::Status status = param.status;
- if (status.ok()) {
- ServerWriter<ResponseType> writer(
- param.call,
- static_cast<::grpc::ServerContext*>(param.server_context));
- status = CatchingFunctionHandler([this, &param, &writer] {
- return func_(service_,
- static_cast<::grpc::ServerContext*>(param.server_context),
- static_cast<RequestType*>(param.request), &writer);
- });
- static_cast<RequestType*>(param.request)->~RequestType();
- }
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- if (param.server_context->has_pending_ops_) {
- param.call->cq()->Pluck(&param.server_context->pending_ops_);
- }
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
- ::grpc::Status* status, void** /*handler_data*/) final {
- ::grpc::ByteBuffer buf;
- buf.set_buffer(req);
- auto* request =
- new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
- call, sizeof(RequestType))) RequestType();
- *status =
- ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
- buf.Release();
- if (status->ok()) {
- return request;
- }
- request->~RequestType();
- return nullptr;
- }
-
- private:
- std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
- const RequestType*, ServerWriter<ResponseType>*)>
- func_;
- ServiceType* service_;
-};
-
-/// A wrapper class of an application provided bidi-streaming handler.
-/// This also applies to server-streamed implementation of a unary method
-/// with the additional requirement that such methods must have done a
-/// write for status to be ok
-/// Since this is used by more than 1 class, the service is not passed in.
-/// Instead, it is expected to be an implicitly-captured argument of func
-/// (through bind or something along those lines)
+class ServerStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ ServerStreamingHandler(std::function<::grpc::Status(
+ ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ServerWriter<ResponseType>*)>
+ func,
+ ServiceType* service)
+ : func_(func), service_(service) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ ::grpc::Status status = param.status;
+ if (status.ok()) {
+ ServerWriter<ResponseType> writer(
+ param.call,
+ static_cast<::grpc::ServerContext*>(param.server_context));
+ status = CatchingFunctionHandler([this, &param, &writer] {
+ return func_(service_,
+ static_cast<::grpc::ServerContext*>(param.server_context),
+ static_cast<RequestType*>(param.request), &writer);
+ });
+ static_cast<RequestType*>(param.request)->~RequestType();
+ }
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ if (param.server_context->has_pending_ops_) {
+ param.call->cq()->Pluck(&param.server_context->pending_ops_);
+ }
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+ ::grpc::Status* status, void** /*handler_data*/) final {
+ ::grpc::ByteBuffer buf;
+ buf.set_buffer(req);
+ auto* request =
+ new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+ call, sizeof(RequestType))) RequestType();
+ *status =
+ ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+ buf.Release();
+ if (status->ok()) {
+ return request;
+ }
+ request->~RequestType();
+ return nullptr;
+ }
+
+ private:
+ std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*,
+ const RequestType*, ServerWriter<ResponseType>*)>
+ func_;
+ ServiceType* service_;
+};
+
+/// A wrapper class of an application provided bidi-streaming handler.
+/// This also applies to server-streamed implementation of a unary method
+/// with the additional requirement that such methods must have done a
+/// write for status to be ok
+/// Since this is used by more than 1 class, the service is not passed in.
+/// Instead, it is expected to be an implicitly-captured argument of func
+/// (through bind or something along those lines)
template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler {
- public:
- TemplatedBidiStreamingHandler(
- std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func)
- : func_(func), write_needed_(WriteNeeded) {}
-
- void RunHandler(const HandlerParameter& param) final {
- Streamer stream(param.call,
- static_cast<::grpc::ServerContext*>(param.server_context));
- ::grpc::Status status = CatchingFunctionHandler([this, &param, &stream] {
- return func_(static_cast<::grpc::ServerContext*>(param.server_context),
- &stream);
- });
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- if (!param.server_context->sent_initial_metadata_) {
- ops.SendInitialMetadata(&param.server_context->initial_metadata_,
- param.server_context->initial_metadata_flags());
- if (param.server_context->compression_level_set()) {
- ops.set_compression_level(param.server_context->compression_level());
- }
- if (write_needed_ && status.ok()) {
- // If we needed a write but never did one, we need to mark the
- // status as a fail
- status = ::grpc::Status(::grpc::StatusCode::INTERNAL,
- "Service did not provide response message");
- }
- }
- ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
- param.call->PerformOps(&ops);
- if (param.server_context->has_pending_ops_) {
- param.call->cq()->Pluck(&param.server_context->pending_ops_);
- }
- param.call->cq()->Pluck(&ops);
- }
-
- private:
- std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func_;
- const bool write_needed_;
-};
-
-template <class ServiceType, class RequestType, class ResponseType>
-class BidiStreamingHandler
- : public TemplatedBidiStreamingHandler<
- ServerReaderWriter<ResponseType, RequestType>, false> {
- public:
- BidiStreamingHandler(std::function<::grpc::Status(
- ServiceType*, ::grpc::ServerContext*,
- ServerReaderWriter<ResponseType, RequestType>*)>
- func,
- ServiceType* service)
- // TODO(vjpai): When gRPC supports C++14, move-capture func in the below
- : TemplatedBidiStreamingHandler<
- ServerReaderWriter<ResponseType, RequestType>, false>(
- [func, service](
- ::grpc::ServerContext* ctx,
- ServerReaderWriter<ResponseType, RequestType>* streamer) {
- return func(service, ctx, streamer);
- }) {}
-};
-
+class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+ TemplatedBidiStreamingHandler(
+ std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func)
+ : func_(func), write_needed_(WriteNeeded) {}
+
+ void RunHandler(const HandlerParameter& param) final {
+ Streamer stream(param.call,
+ static_cast<::grpc::ServerContext*>(param.server_context));
+ ::grpc::Status status = CatchingFunctionHandler([this, &param, &stream] {
+ return func_(static_cast<::grpc::ServerContext*>(param.server_context),
+ &stream);
+ });
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ if (!param.server_context->sent_initial_metadata_) {
+ ops.SendInitialMetadata(&param.server_context->initial_metadata_,
+ param.server_context->initial_metadata_flags());
+ if (param.server_context->compression_level_set()) {
+ ops.set_compression_level(param.server_context->compression_level());
+ }
+ if (write_needed_ && status.ok()) {
+ // If we needed a write but never did one, we need to mark the
+ // status as a fail
+ status = ::grpc::Status(::grpc::StatusCode::INTERNAL,
+ "Service did not provide response message");
+ }
+ }
+ ops.ServerSendStatus(&param.server_context->trailing_metadata_, status);
+ param.call->PerformOps(&ops);
+ if (param.server_context->has_pending_ops_) {
+ param.call->cq()->Pluck(&param.server_context->pending_ops_);
+ }
+ param.call->cq()->Pluck(&ops);
+ }
+
+ private:
+ std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func_;
+ const bool write_needed_;
+};
+
+template <class ServiceType, class RequestType, class ResponseType>
+class BidiStreamingHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false> {
+ public:
+ BidiStreamingHandler(std::function<::grpc::Status(
+ ServiceType*, ::grpc::ServerContext*,
+ ServerReaderWriter<ResponseType, RequestType>*)>
+ func,
+ ServiceType* service)
+ // TODO(vjpai): When gRPC supports C++14, move-capture func in the below
+ : TemplatedBidiStreamingHandler<
+ ServerReaderWriter<ResponseType, RequestType>, false>(
+ [func, service](
+ ::grpc::ServerContext* ctx,
+ ServerReaderWriter<ResponseType, RequestType>* streamer) {
+ return func(service, ctx, streamer);
+ }) {}
+};
+
template <class RequestType, class ResponseType>
-class StreamedUnaryHandler
- : public TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true> {
- public:
- explicit StreamedUnaryHandler(
- std::function<
- ::grpc::Status(::grpc::ServerContext*,
- ServerUnaryStreamer<RequestType, ResponseType>*)>
- func)
- : TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true>(
- std::move(func)) {}
-};
+class StreamedUnaryHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true> {
+ public:
+ explicit StreamedUnaryHandler(
+ std::function<
+ ::grpc::Status(::grpc::ServerContext*,
+ ServerUnaryStreamer<RequestType, ResponseType>*)>
+ func)
+ : TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true>(
+ std::move(func)) {}
+};
template <class RequestType, class ResponseType>
-class SplitServerStreamingHandler
- : public TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false> {
- public:
- explicit SplitServerStreamingHandler(
- std::function<
- ::grpc::Status(::grpc::ServerContext*,
- ServerSplitStreamer<RequestType, ResponseType>*)>
- func)
- : TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false>(
- std::move(func)) {}
-};
-
-/// General method handler class for errors that prevent real method use
-/// e.g., handle unknown method by returning UNIMPLEMENTED error.
-template <::grpc::StatusCode code>
-class ErrorMethodHandler : public ::grpc::internal::MethodHandler {
- public:
- template <class T>
- static void FillOps(::grpc::ServerContextBase* context, T* ops) {
- ::grpc::Status status(code, "");
- if (!context->sent_initial_metadata_) {
- ops->SendInitialMetadata(&context->initial_metadata_,
- context->initial_metadata_flags());
- if (context->compression_level_set()) {
- ops->set_compression_level(context->compression_level());
- }
- context->sent_initial_metadata_ = true;
- }
- ops->ServerSendStatus(&context->trailing_metadata_, status);
- }
-
- void RunHandler(const HandlerParameter& param) final {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpServerSendStatus>
- ops;
- FillOps(param.server_context, &ops);
- param.call->PerformOps(&ops);
- param.call->cq()->Pluck(&ops);
- }
-
- void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req,
- ::grpc::Status* /*status*/, void** /*handler_data*/) final {
- // We have to destroy any request payload
- if (req != nullptr) {
- ::grpc::g_core_codegen_interface->grpc_byte_buffer_destroy(req);
- }
- return nullptr;
- }
-};
-
-typedef ErrorMethodHandler<::grpc::StatusCode::UNIMPLEMENTED>
- UnknownMethodHandler;
-typedef ErrorMethodHandler<::grpc::StatusCode::RESOURCE_EXHAUSTED>
- ResourceExhaustedHandler;
-
+class SplitServerStreamingHandler
+ : public TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false> {
+ public:
+ explicit SplitServerStreamingHandler(
+ std::function<
+ ::grpc::Status(::grpc::ServerContext*,
+ ServerSplitStreamer<RequestType, ResponseType>*)>
+ func)
+ : TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false>(
+ std::move(func)) {}
+};
+
+/// General method handler class for errors that prevent real method use
+/// e.g., handle unknown method by returning UNIMPLEMENTED error.
+template <::grpc::StatusCode code>
+class ErrorMethodHandler : public ::grpc::internal::MethodHandler {
+ public:
+ template <class T>
+ static void FillOps(::grpc::ServerContextBase* context, T* ops) {
+ ::grpc::Status status(code, "");
+ if (!context->sent_initial_metadata_) {
+ ops->SendInitialMetadata(&context->initial_metadata_,
+ context->initial_metadata_flags());
+ if (context->compression_level_set()) {
+ ops->set_compression_level(context->compression_level());
+ }
+ context->sent_initial_metadata_ = true;
+ }
+ ops->ServerSendStatus(&context->trailing_metadata_, status);
+ }
+
+ void RunHandler(const HandlerParameter& param) final {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpServerSendStatus>
+ ops;
+ FillOps(param.server_context, &ops);
+ param.call->PerformOps(&ops);
+ param.call->cq()->Pluck(&ops);
+ }
+
+ void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req,
+ ::grpc::Status* /*status*/, void** /*handler_data*/) final {
+ // We have to destroy any request payload
+ if (req != nullptr) {
+ ::grpc::g_core_codegen_interface->grpc_byte_buffer_destroy(req);
+ }
+ return nullptr;
+ }
+};
+
+typedef ErrorMethodHandler<::grpc::StatusCode::UNIMPLEMENTED>
+ UnknownMethodHandler;
+typedef ErrorMethodHandler<::grpc::StatusCode::RESOURCE_EXHAUSTED>
+ ResourceExhaustedHandler;
+
} // namespace internal
} // namespace grpc
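
These handlers are the glue that generated code installs around ordinary synchronous service methods. For context, the canonical shape of such a method is sketched below, assuming a hypothetical helloworld.Greeter service with a generated header helloworld.grpc.pb.h; the generated Greeter::Service base registers SayHello through RpcMethodHandler, and CatchingFunctionHandler turns any escaping exception into StatusCode::UNKNOWN when exceptions are enabled.

#include <memory>

#include <grpcpp/grpcpp.h>

// Hypothetical generated header for a `Greeter` service with a unary
// `SayHello(HelloRequest) returns (HelloReply)` method.
#include "helloworld.grpc.pb.h"

// The generated Greeter::Service wraps this override in an
// internal::RpcMethodHandler at registration time.
class GreeterServiceImpl final : public helloworld::Greeter::Service {
  grpc::Status SayHello(grpc::ServerContext* /*context*/,
                        const helloworld::HelloRequest* request,
                        helloworld::HelloReply* reply) override {
    reply->set_message("Hello, " + request->name());
    return grpc::Status::OK;
  }
};

int main() {
  GreeterServiceImpl service;
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  builder.RegisterService(&service);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}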
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
index 2e102135a3..62c5dd5ea6 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h
@@ -49,7 +49,7 @@ Status GenericSerialize(const grpc::protobuf::MessageLite& msg, ByteBuffer* bb,
"ProtoBufferWriter must be a subclass of "
"::protobuf::io::ZeroCopyOutputStream");
*own_buffer = true;
- int byte_size = msg.ByteSizeLong();
+ int byte_size = msg.ByteSizeLong();
if ((size_t)byte_size <= GRPC_SLICE_INLINED_SIZE) {
Slice slice(byte_size);
// We serialize directly into the allocated slices memory
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
index 4fcc211243..8366537360 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
@@ -31,7 +31,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class ServerContextBase;
namespace internal {
/// Base class for running an RPC handler.
@@ -49,8 +49,8 @@ class MethodHandler {
/// \param requester : used only by the callback API. It is a function
/// called by the RPC Controller to request another RPC (and also
/// to set up the state required to make that request possible)
- HandlerParameter(Call* c, ::grpc::ServerContextBase* context, void* req,
- Status req_status, void* handler_data,
+ HandlerParameter(Call* c, ::grpc::ServerContextBase* context, void* req,
+ Status req_status, void* handler_data,
std::function<void()> requester)
: call(c),
server_context(context),
@@ -60,7 +60,7 @@ class MethodHandler {
call_requester(std::move(requester)) {}
~HandlerParameter() {}
Call* const call;
- ::grpc::ServerContextBase* const server_context;
+ ::grpc::ServerContextBase* const server_context;
void* const request;
const Status status;
void* const internal_data;
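
The rpc_service_method.h change is whitespace-only, but HandlerParameter itself is a plain parameter object: it bundles the call, the server context, the deserialized request and its status, opaque handler data, and a call_requester closure that the callback API invokes to ask for the next RPC of this kind. A minimal sketch of that shape, with invented FakeCall/FakeContext/FakeStatus placeholders instead of the real gRPC types, is below.

```cpp
#include <functional>
#include <string>
#include <utility>

// Invented placeholders for grpc::internal::Call / ServerContextBase / Status.
struct FakeCall {};
struct FakeContext {};
struct FakeStatus { bool ok = true; std::string message; };

// Mirrors HandlerParameter: everything a handler needs for one RPC, plus a
// closure the callback API can invoke to request the next RPC of this kind.
struct HandlerParameter {
  HandlerParameter(FakeCall* c, FakeContext* ctx, void* req, FakeStatus st,
                   void* data, std::function<void()> requester)
      : call(c),
        server_context(ctx),
        request(req),
        status(std::move(st)),
        internal_data(data),
        call_requester(std::move(requester)) {}

  FakeCall* const call;
  FakeContext* const server_context;
  void* const request;
  const FakeStatus status;
  void* const internal_data;
  const std::function<void()> call_requester;
};
```
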
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
index 220b78f2eb..94f58b180b 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h
@@ -74,18 +74,18 @@ class AuthContext {
/// It is, in general, comprised of one or more properties (in which case they
/// have the same name).
virtual std::vector<grpc::string_ref> GetPeerIdentity() const = 0;
- virtual TString GetPeerIdentityPropertyName() const = 0;
+ virtual TString GetPeerIdentityPropertyName() const = 0;
/// Returns all the property values with the given name.
virtual std::vector<grpc::string_ref> FindPropertyValues(
- const TString& name) const = 0;
+ const TString& name) const = 0;
/// Iteration over all the properties.
virtual AuthPropertyIterator begin() const = 0;
virtual AuthPropertyIterator end() const = 0;
/// Mutation functions: should only be used by an AuthMetadataProcessor.
- virtual void AddProperty(const TString& key, const string_ref& value) = 0;
+ virtual void AddProperty(const TString& key, const string_ref& value) = 0;
virtual bool SetPeerIdentityPropertyName(const string& name) = 0;
};
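
The auth_context.h hunk only swaps std::string-like types for the vendored TString, but the interface is worth an example: GetPeerIdentity()/GetPeerIdentityPropertyName() describe who the peer is, FindPropertyValues() returns every value stored under a property name, and AddProperty()/SetPeerIdentityPropertyName() are reserved for an AuthMetadataProcessor. The sketch below assumes it runs inside a service handler that already has a grpc::ServerContext and uses only those methods plus ServerContext::auth_context(); treat it as illustrative, not a drop-in, and note that "x509_common_name" is only populated for TLS-authenticated peers.

```cpp
#include <cstdio>
#include <memory>
#include <string>

#include <grpcpp/security/auth_context.h>
#include <grpcpp/server_context.h>

// Rough sketch: dump the peer's authentication properties for debugging.
void DumpPeerAuth(const grpc::ServerContext& ctx) {
  std::shared_ptr<const grpc::AuthContext> auth = ctx.auth_context();
  if (!auth) return;

  // The property name that carries the peer identity, and its value(s).
  const auto id_key = auth->GetPeerIdentityPropertyName();
  for (const grpc::string_ref& id : auth->GetPeerIdentity()) {
    std::printf("peer %.*s = %.*s\n", static_cast<int>(id_key.size()),
                id_key.data(), static_cast<int>(id.length()), id.data());
  }

  // All values recorded under one specific property name (example name only).
  for (const grpc::string_ref& v :
       auth->FindPropertyValues("x509_common_name")) {
    std::printf("x509_common_name: %.*s\n", static_cast<int>(v.length()),
                v.data());
  }
}
```
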
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
index 3794a9ffa7..d43f2a4e2c 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,777 +18,777 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
-#include <atomic>
-#include <functional>
-#include <type_traits>
-
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/message_allocator.h>
-#include <grpcpp/impl/codegen/status.h>
-
+#include <atomic>
+#include <functional>
+#include <type_traits>
+
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/message_allocator.h>
+#include <grpcpp/impl/codegen/status.h>
+
namespace grpc {
-
-// Declare base class of all reactors as internal
-namespace internal {
-
-// Forward declarations
-template <class Request, class Response>
-class CallbackUnaryHandler;
-template <class Request, class Response>
-class CallbackClientStreamingHandler;
-template <class Request, class Response>
-class CallbackServerStreamingHandler;
-template <class Request, class Response>
-class CallbackBidiHandler;
-
-class ServerReactor {
- public:
- virtual ~ServerReactor() = default;
- virtual void OnDone() = 0;
- virtual void OnCancel() = 0;
-
- // The following is not API. It is for internal use only and specifies whether
- // all reactions of this Reactor can be run without an extra executor
- // scheduling. This should only be used for internally-defined reactors with
- // trivial reactions.
- virtual bool InternalInlineable() { return false; }
-
- private:
- template <class Request, class Response>
- friend class CallbackUnaryHandler;
- template <class Request, class Response>
- friend class CallbackClientStreamingHandler;
- template <class Request, class Response>
- friend class CallbackServerStreamingHandler;
- template <class Request, class Response>
- friend class CallbackBidiHandler;
-};
-
-/// The base class of ServerCallbackUnary etc.
-class ServerCallbackCall {
- public:
- virtual ~ServerCallbackCall() {}
-
- // This object is responsible for tracking when it is safe to call OnDone and
- // OnCancel. OnDone should not be called until the method handler is complete,
- // Finish has been called, the ServerContext CompletionOp (which tracks
- // cancellation or successful completion) has completed, and all outstanding
- // Read/Write actions have seen their reactions. OnCancel should not be called
- // until after the method handler is done and the RPC has completed with a
- // cancellation. This is tracked by counting how many of these conditions have
- // been met and calling OnCancel when none remain unmet.
-
- // Public versions of MaybeDone: one where we don't know the reactor in
- // advance (used for the ServerContext CompletionOp), and one for where we
- // know the inlineability of the OnDone reaction. You should set the inline
- // flag to true if either the Reactor is InternalInlineable() or if this
- // callback is already being forced to run dispatched to an executor
- // (typically because it contains additional work than just the MaybeDone).
-
- void MaybeDone() {
- if (GPR_UNLIKELY(Unref() == 1)) {
- ScheduleOnDone(reactor()->InternalInlineable());
- }
- }
-
- void MaybeDone(bool inline_ondone) {
- if (GPR_UNLIKELY(Unref() == 1)) {
- ScheduleOnDone(inline_ondone);
- }
- }
-
- // Fast version called with known reactor passed in, used from derived
- // classes, typically in non-cancel case
- void MaybeCallOnCancel(ServerReactor* reactor) {
- if (GPR_UNLIKELY(UnblockCancellation())) {
- CallOnCancel(reactor);
- }
- }
-
- // Slower version called from object that doesn't know the reactor a priori
- // (such as the ServerContext CompletionOp which is formed before the
- // reactor). This is used in cancel cases only, so it's ok to be slower and
- // invoke a virtual function.
- void MaybeCallOnCancel() {
- if (GPR_UNLIKELY(UnblockCancellation())) {
- CallOnCancel(reactor());
- }
- }
-
- protected:
- /// Increases the reference count
- void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
-
- private:
- virtual ServerReactor* reactor() = 0;
-
- // CallOnDone performs the work required at completion of the RPC: invoking
- // the OnDone function and doing all necessary cleanup. This function is only
- // ever invoked on a fully-Unref'fed ServerCallbackCall.
- virtual void CallOnDone() = 0;
-
- // If the OnDone reaction is inlineable, execute it inline. Otherwise send it
- // to an executor.
- void ScheduleOnDone(bool inline_ondone);
-
- // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
- // it to an executor.
- void CallOnCancel(ServerReactor* reactor);
-
- // Implement the cancellation constraint counter. Return true if OnCancel
- // should be called, false otherwise.
- bool UnblockCancellation() {
- return on_cancel_conditions_remaining_.fetch_sub(
- 1, std::memory_order_acq_rel) == 1;
- }
-
- /// Decreases the reference count and returns the previous value
- int Unref() {
- return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
- }
-
- std::atomic_int on_cancel_conditions_remaining_{2};
- std::atomic_int callbacks_outstanding_{
- 3}; // reserve for start, Finish, and CompletionOp
-};
-
-template <class Request, class Response>
-class DefaultMessageHolder
- : public ::grpc::experimental::MessageHolder<Request, Response> {
- public:
- DefaultMessageHolder() {
- this->set_request(&request_obj_);
- this->set_response(&response_obj_);
- }
- void Release() override {
- // the object is allocated in the call arena.
- this->~DefaultMessageHolder<Request, Response>();
- }
-
- private:
- Request request_obj_;
- Response response_obj_;
-};
-
-} // namespace internal
-
-// Forward declarations
-class ServerUnaryReactor;
-template <class Request>
-class ServerReadReactor;
-template <class Response>
-class ServerWriteReactor;
-template <class Request, class Response>
-class ServerBidiReactor;
-
-// NOTE: The actual call/stream object classes are provided as API only to
-// support mocking. There are no implementations of these class interfaces in
-// the API.
-class ServerCallbackUnary : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackUnary() {}
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
-
- protected:
- // Use a template rather than explicitly specifying ServerUnaryReactor to
- // delay binding and avoid a circular forward declaration issue
- template <class Reactor>
- void BindReactor(Reactor* reactor) {
- reactor->InternalBindCall(this);
- }
-};
-
-template <class Request>
-class ServerCallbackReader : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackReader() {}
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Read(Request* msg) = 0;
-
- protected:
- void BindReactor(ServerReadReactor<Request>* reactor) {
- reactor->InternalBindReader(this);
- }
-};
-
-template <class Response>
-class ServerCallbackWriter : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackWriter() {}
-
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
- virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
- ::grpc::Status s) = 0;
-
- protected:
- void BindReactor(ServerWriteReactor<Response>* reactor) {
- reactor->InternalBindWriter(this);
- }
-};
-
-template <class Request, class Response>
-class ServerCallbackReaderWriter : public internal::ServerCallbackCall {
- public:
- virtual ~ServerCallbackReaderWriter() {}
-
- virtual void Finish(::grpc::Status s) = 0;
- virtual void SendInitialMetadata() = 0;
- virtual void Read(Request* msg) = 0;
- virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
- virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
- ::grpc::Status s) = 0;
-
- protected:
- void BindReactor(ServerBidiReactor<Request, Response>* reactor) {
- reactor->InternalBindStream(this);
- }
-};
-
-// The following classes are the reactor interfaces that are to be implemented
-// by the user, returned as the output parameter of the method handler for a
-// callback method. Note that none of the classes are pure; all reactions have a
-// default empty reaction so that the user class only needs to override those
-// classes that it cares about.
-
-/// \a ServerBidiReactor is the interface for a bidirectional streaming RPC.
-template <class Request, class Response>
-class ServerBidiReactor : public internal::ServerReactor {
- public:
- // NOTE: Initializing stream_ as a constructor initializer rather than a
- // default initializer because gcc-4.x requires a copy constructor for
- // default initializing a templated member, which isn't ok for atomic.
- // TODO(vjpai): Switch to default constructor and default initializer when
- // gcc-4.x is no longer supported
- ServerBidiReactor() : stream_(nullptr) {}
- ~ServerBidiReactor() = default;
-
- /// Send any initial metadata stored in the RPC context. If not invoked,
- /// any initial metadata will be passed along with the first Write or the
- /// Finish (if there are no writes).
- void StartSendInitialMetadata() {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- stream->SendInitialMetadata();
- }
-
- /// Initiate a read operation.
- ///
- /// \param[out] req Where to eventually store the read message. Valid when
- /// the library calls OnReadDone
- void StartRead(Request* req) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.read_wanted = req;
- return;
- }
- }
- stream->Read(req);
- }
-
- /// Initiate a write operation.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- void StartWrite(const Response* resp) {
- StartWrite(resp, ::grpc::WriteOptions());
- }
-
- /// Initiate a write operation with specified options.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- return;
- }
- }
- stream->Write(resp, std::move(options));
- }
-
- /// Initiate a write operation with specified options and final RPC Status,
- /// which also causes any trailing metadata for this RPC to be sent out.
- /// StartWriteAndFinish is like merging StartWriteLast and Finish into a
- /// single step. A key difference, though, is that this operation doesn't have
- /// an OnWriteDone reaction - it is considered complete only when OnDone is
- /// available. An RPC can either have StartWriteAndFinish or Finish, but not
- /// both.
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- /// \param[in] s The status outcome of this RPC
- void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
- ::grpc::Status s) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.write_and_finish_wanted = true;
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- stream->WriteAndFinish(resp, std::move(options), std::move(s));
- }
-
- /// Inform system of a planned write operation with specified options, but
- /// allow the library to schedule the actual write coalesced with the writing
- /// of trailing metadata (which takes place on a Finish call).
- ///
- /// \param[in] resp The message to be written. The library does not take
- /// ownership but the caller must ensure that the message is
- /// not deleted or modified until OnWriteDone is called.
- /// \param[in] options The WriteOptions to use for writing this message
- void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
- StartWrite(resp, std::move(options.set_last_message()));
- }
-
- /// Indicate that the stream is to be finished and the trailing metadata and
- /// RPC status are to be sent. Every RPC MUST be finished using either Finish
- /// or StartWriteAndFinish (but not both), even if the RPC is already
- /// cancelled.
- ///
- /// \param[in] s The status outcome of this RPC
- void Finish(::grpc::Status s) {
- ServerCallbackReaderWriter<Request, Response>* stream =
- stream_.load(std::memory_order_acquire);
- if (stream == nullptr) {
- grpc::internal::MutexLock l(&stream_mu_);
- stream = stream_.load(std::memory_order_relaxed);
- if (stream == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- stream->Finish(std::move(s));
- }
-
- /// Notifies the application that an explicit StartSendInitialMetadata
- /// operation completed. Not used when the sending of initial metadata
- /// piggybacks onto the first write.
- ///
- /// \param[in] ok Was it successful? If false, no further write-side operation
- /// will succeed.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartRead operation completed.
- ///
- /// \param[in] ok Was it successful? If false, no further read-side operation
- /// will succeed.
- virtual void OnReadDone(bool /*ok*/) {}
-
- /// Notifies the application that a StartWrite (or StartWriteLast) operation
- /// completed.
- ///
- /// \param[in] ok Was it successful? If false, no further write-side operation
- /// will succeed.
- virtual void OnWriteDone(bool /*ok*/) {}
-
- /// Notifies the application that all operations associated with this RPC
- /// have completed. This is an override (from the internal base class) but
- /// still abstract, so derived classes MUST override it to be instantiated.
- void OnDone() override = 0;
-
- /// Notifies the application that this RPC has been cancelled. This is an
- /// override (from the internal base class) but not final, so derived classes
- /// should override it if they want to take action.
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackReaderWriter<Request, Response>;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindStream(
- ServerCallbackReaderWriter<Request, Response>* stream) {
- grpc::internal::MutexLock l(&stream_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- stream->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
- stream->Read(backlog_.read_wanted);
- }
- if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
- stream->WriteAndFinish(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted),
- std::move(backlog_.status_wanted));
- } else {
- if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
- stream->Write(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted));
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- stream->Finish(std::move(backlog_.status_wanted));
- }
- }
- // Set stream_ last so that other functions can use it lock-free
- stream_.store(stream, std::memory_order_release);
- }
-
- grpc::internal::Mutex stream_mu_;
- // TODO(vjpai): Make stream_or_backlog_ into a std::variant or y_absl::variant
- // once C++17 or ABSL is supported since stream and backlog are
- // mutually exclusive in this class. Do likewise with the
- // remaining reactor classes and their backlogs as well.
- std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool write_and_finish_wanted = false;
- bool finish_wanted = false;
- Request* read_wanted = nullptr;
- const Response* write_wanted = nullptr;
- ::grpc::WriteOptions write_options_wanted;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(stream_mu_) */;
-};
-
-/// \a ServerReadReactor is the interface for a client-streaming RPC.
-template <class Request>
-class ServerReadReactor : public internal::ServerReactor {
- public:
- ServerReadReactor() : reader_(nullptr) {}
- ~ServerReadReactor() = default;
-
- /// The following operation initiations are exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- reader->SendInitialMetadata();
- }
- void StartRead(Request* req) {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.read_wanted = req;
- return;
- }
- }
- reader->Read(req);
- }
- void Finish(::grpc::Status s) {
- ServerCallbackReader<Request>* reader =
- reader_.load(std::memory_order_acquire);
- if (reader == nullptr) {
- grpc::internal::MutexLock l(&reader_mu_);
- reader = reader_.load(std::memory_order_relaxed);
- if (reader == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- reader->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- virtual void OnReadDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackReader<Request>;
-
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindReader(ServerCallbackReader<Request>* reader) {
- grpc::internal::MutexLock l(&reader_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- reader->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
- reader->Read(backlog_.read_wanted);
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- reader->Finish(std::move(backlog_.status_wanted));
- }
- // Set reader_ last so that other functions can use it lock-free
- reader_.store(reader, std::memory_order_release);
- }
-
- grpc::internal::Mutex reader_mu_;
- std::atomic<ServerCallbackReader<Request>*> reader_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool finish_wanted = false;
- Request* read_wanted = nullptr;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(reader_mu_) */;
-};
-
-/// \a ServerWriteReactor is the interface for a server-streaming RPC.
-template <class Response>
-class ServerWriteReactor : public internal::ServerReactor {
- public:
- ServerWriteReactor() : writer_(nullptr) {}
- ~ServerWriteReactor() = default;
-
- /// The following operation initiations are exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- writer->SendInitialMetadata();
- }
- void StartWrite(const Response* resp) {
- StartWrite(resp, ::grpc::WriteOptions());
- }
- void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- return;
- }
- }
- writer->Write(resp, std::move(options));
- }
- void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
- ::grpc::Status s) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.write_and_finish_wanted = true;
- backlog_.write_wanted = resp;
- backlog_.write_options_wanted = std::move(options);
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- writer->WriteAndFinish(resp, std::move(options), std::move(s));
- }
- void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
- StartWrite(resp, std::move(options.set_last_message()));
- }
- void Finish(::grpc::Status s) {
- ServerCallbackWriter<Response>* writer =
- writer_.load(std::memory_order_acquire);
- if (writer == nullptr) {
- grpc::internal::MutexLock l(&writer_mu_);
- writer = writer_.load(std::memory_order_relaxed);
- if (writer == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- writer->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- virtual void OnWriteDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackWriter<Response>;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) {
- grpc::internal::MutexLock l(&writer_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- writer->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
- writer->WriteAndFinish(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted),
- std::move(backlog_.status_wanted));
- } else {
- if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
- writer->Write(backlog_.write_wanted,
- std::move(backlog_.write_options_wanted));
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- writer->Finish(std::move(backlog_.status_wanted));
- }
- }
- // Set writer_ last so that other functions can use it lock-free
- writer_.store(writer, std::memory_order_release);
- }
-
- grpc::internal::Mutex writer_mu_;
- std::atomic<ServerCallbackWriter<Response>*> writer_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool write_and_finish_wanted = false;
- bool finish_wanted = false;
- const Response* write_wanted = nullptr;
- ::grpc::WriteOptions write_options_wanted;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(writer_mu_) */;
-};
-
-class ServerUnaryReactor : public internal::ServerReactor {
- public:
- ServerUnaryReactor() : call_(nullptr) {}
- ~ServerUnaryReactor() = default;
-
- /// StartSendInitialMetadata is exactly like ServerBidiReactor.
- void StartSendInitialMetadata() {
- ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
- if (call == nullptr) {
- grpc::internal::MutexLock l(&call_mu_);
- call = call_.load(std::memory_order_relaxed);
- if (call == nullptr) {
- backlog_.send_initial_metadata_wanted = true;
- return;
- }
- }
- call->SendInitialMetadata();
- }
- /// Finish is similar to ServerBidiReactor except for one detail.
- /// If the status is non-OK, any message will not be sent. Instead,
- /// the client will only receive the status and any trailing metadata.
- void Finish(::grpc::Status s) {
- ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
- if (call == nullptr) {
- grpc::internal::MutexLock l(&call_mu_);
- call = call_.load(std::memory_order_relaxed);
- if (call == nullptr) {
- backlog_.finish_wanted = true;
- backlog_.status_wanted = std::move(s);
- return;
- }
- }
- call->Finish(std::move(s));
- }
-
- /// The following notifications are exactly like ServerBidiReactor.
- virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
- void OnDone() override = 0;
- void OnCancel() override {}
-
- private:
- friend class ServerCallbackUnary;
- // May be overridden by internal implementation details. This is not a public
- // customization point.
- virtual void InternalBindCall(ServerCallbackUnary* call) {
- grpc::internal::MutexLock l(&call_mu_);
-
- if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
- call->SendInitialMetadata();
- }
- if (GPR_UNLIKELY(backlog_.finish_wanted)) {
- call->Finish(std::move(backlog_.status_wanted));
- }
- // Set call_ last so that other functions can use it lock-free
- call_.store(call, std::memory_order_release);
- }
-
- grpc::internal::Mutex call_mu_;
- std::atomic<ServerCallbackUnary*> call_{nullptr};
- struct PreBindBacklog {
- bool send_initial_metadata_wanted = false;
- bool finish_wanted = false;
- ::grpc::Status status_wanted;
- };
- PreBindBacklog backlog_ /* GUARDED_BY(call_mu_) */;
-};
-
-namespace internal {
-
-template <class Base>
-class FinishOnlyReactor : public Base {
- public:
- explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); }
- void OnDone() override { this->~FinishOnlyReactor(); }
-};
-
-using UnimplementedUnaryReactor = FinishOnlyReactor<ServerUnaryReactor>;
-template <class Request>
-using UnimplementedReadReactor = FinishOnlyReactor<ServerReadReactor<Request>>;
-template <class Response>
-using UnimplementedWriteReactor =
- FinishOnlyReactor<ServerWriteReactor<Response>>;
-template <class Request, class Response>
-using UnimplementedBidiReactor =
- FinishOnlyReactor<ServerBidiReactor<Request, Response>>;
-
-} // namespace internal
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+
+// Declare base class of all reactors as internal
+namespace internal {
+
+// Forward declarations
+template <class Request, class Response>
+class CallbackUnaryHandler;
+template <class Request, class Response>
+class CallbackClientStreamingHandler;
+template <class Request, class Response>
+class CallbackServerStreamingHandler;
+template <class Request, class Response>
+class CallbackBidiHandler;
+
+class ServerReactor {
+ public:
+ virtual ~ServerReactor() = default;
+ virtual void OnDone() = 0;
+ virtual void OnCancel() = 0;
+
+ // The following is not API. It is for internal use only and specifies whether
+ // all reactions of this Reactor can be run without an extra executor
+ // scheduling. This should only be used for internally-defined reactors with
+ // trivial reactions.
+ virtual bool InternalInlineable() { return false; }
+
+ private:
+ template <class Request, class Response>
+ friend class CallbackUnaryHandler;
+ template <class Request, class Response>
+ friend class CallbackClientStreamingHandler;
+ template <class Request, class Response>
+ friend class CallbackServerStreamingHandler;
+ template <class Request, class Response>
+ friend class CallbackBidiHandler;
+};
+
+/// The base class of ServerCallbackUnary etc.
+class ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackCall() {}
+
+ // This object is responsible for tracking when it is safe to call OnDone and
+ // OnCancel. OnDone should not be called until the method handler is complete,
+ // Finish has been called, the ServerContext CompletionOp (which tracks
+ // cancellation or successful completion) has completed, and all outstanding
+ // Read/Write actions have seen their reactions. OnCancel should not be called
+ // until after the method handler is done and the RPC has completed with a
+ // cancellation. This is tracked by counting how many of these conditions have
+ // been met and calling OnCancel when none remain unmet.
+
+ // Public versions of MaybeDone: one where we don't know the reactor in
+ // advance (used for the ServerContext CompletionOp), and one for where we
+ // know the inlineability of the OnDone reaction. You should set the inline
+ // flag to true if either the Reactor is InternalInlineable() or if this
+ // callback is already being forced to run dispatched to an executor
+ // (typically because it contains additional work than just the MaybeDone).
+
+ void MaybeDone() {
+ if (GPR_UNLIKELY(Unref() == 1)) {
+ ScheduleOnDone(reactor()->InternalInlineable());
+ }
+ }
+
+ void MaybeDone(bool inline_ondone) {
+ if (GPR_UNLIKELY(Unref() == 1)) {
+ ScheduleOnDone(inline_ondone);
+ }
+ }
+
+ // Fast version called with known reactor passed in, used from derived
+ // classes, typically in non-cancel case
+ void MaybeCallOnCancel(ServerReactor* reactor) {
+ if (GPR_UNLIKELY(UnblockCancellation())) {
+ CallOnCancel(reactor);
+ }
+ }
+
+ // Slower version called from object that doesn't know the reactor a priori
+ // (such as the ServerContext CompletionOp which is formed before the
+ // reactor). This is used in cancel cases only, so it's ok to be slower and
+ // invoke a virtual function.
+ void MaybeCallOnCancel() {
+ if (GPR_UNLIKELY(UnblockCancellation())) {
+ CallOnCancel(reactor());
+ }
+ }
+
+ protected:
+ /// Increases the reference count
+ void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+ virtual ServerReactor* reactor() = 0;
+
+ // CallOnDone performs the work required at completion of the RPC: invoking
+ // the OnDone function and doing all necessary cleanup. This function is only
+ // ever invoked on a fully-Unref'fed ServerCallbackCall.
+ virtual void CallOnDone() = 0;
+
+ // If the OnDone reaction is inlineable, execute it inline. Otherwise send it
+ // to an executor.
+ void ScheduleOnDone(bool inline_ondone);
+
+ // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
+ // it to an executor.
+ void CallOnCancel(ServerReactor* reactor);
+
+ // Implement the cancellation constraint counter. Return true if OnCancel
+ // should be called, false otherwise.
+ bool UnblockCancellation() {
+ return on_cancel_conditions_remaining_.fetch_sub(
+ 1, std::memory_order_acq_rel) == 1;
+ }
+
+ /// Decreases the reference count and returns the previous value
+ int Unref() {
+ return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
+ }
+
+ std::atomic_int on_cancel_conditions_remaining_{2};
+ std::atomic_int callbacks_outstanding_{
+ 3}; // reserve for start, Finish, and CompletionOp
+};
+
+template <class Request, class Response>
+class DefaultMessageHolder
+ : public ::grpc::experimental::MessageHolder<Request, Response> {
+ public:
+ DefaultMessageHolder() {
+ this->set_request(&request_obj_);
+ this->set_response(&response_obj_);
+ }
+ void Release() override {
+ // the object is allocated in the call arena.
+ this->~DefaultMessageHolder<Request, Response>();
+ }
+
+ private:
+ Request request_obj_;
+ Response response_obj_;
+};
+
+} // namespace internal
+
+// Forward declarations
+class ServerUnaryReactor;
+template <class Request>
+class ServerReadReactor;
+template <class Response>
+class ServerWriteReactor;
+template <class Request, class Response>
+class ServerBidiReactor;
+
+// NOTE: The actual call/stream object classes are provided as API only to
+// support mocking. There are no implementations of these class interfaces in
+// the API.
+class ServerCallbackUnary : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackUnary() {}
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+
+ protected:
+ // Use a template rather than explicitly specifying ServerUnaryReactor to
+ // delay binding and avoid a circular forward declaration issue
+ template <class Reactor>
+ void BindReactor(Reactor* reactor) {
+ reactor->InternalBindCall(this);
+ }
+};
+
+template <class Request>
+class ServerCallbackReader : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackReader() {}
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Read(Request* msg) = 0;
+
+ protected:
+ void BindReactor(ServerReadReactor<Request>* reactor) {
+ reactor->InternalBindReader(this);
+ }
+};
+
+template <class Response>
+class ServerCallbackWriter : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackWriter() {}
+
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
+ virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
+ ::grpc::Status s) = 0;
+
+ protected:
+ void BindReactor(ServerWriteReactor<Response>* reactor) {
+ reactor->InternalBindWriter(this);
+ }
+};
+
+template <class Request, class Response>
+class ServerCallbackReaderWriter : public internal::ServerCallbackCall {
+ public:
+ virtual ~ServerCallbackReaderWriter() {}
+
+ virtual void Finish(::grpc::Status s) = 0;
+ virtual void SendInitialMetadata() = 0;
+ virtual void Read(Request* msg) = 0;
+ virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
+ virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
+ ::grpc::Status s) = 0;
+
+ protected:
+ void BindReactor(ServerBidiReactor<Request, Response>* reactor) {
+ reactor->InternalBindStream(this);
+ }
+};
+
+// The following classes are the reactor interfaces that are to be implemented
+// by the user, returned as the output parameter of the method handler for a
+// callback method. Note that none of the classes are pure; all reactions have a
+// default empty reaction so that the user class only needs to override those
+// classes that it cares about.
+
+/// \a ServerBidiReactor is the interface for a bidirectional streaming RPC.
+template <class Request, class Response>
+class ServerBidiReactor : public internal::ServerReactor {
+ public:
+ // NOTE: Initializing stream_ as a constructor initializer rather than a
+ // default initializer because gcc-4.x requires a copy constructor for
+ // default initializing a templated member, which isn't ok for atomic.
+ // TODO(vjpai): Switch to default constructor and default initializer when
+ // gcc-4.x is no longer supported
+ ServerBidiReactor() : stream_(nullptr) {}
+ ~ServerBidiReactor() = default;
+
+ /// Send any initial metadata stored in the RPC context. If not invoked,
+ /// any initial metadata will be passed along with the first Write or the
+ /// Finish (if there are no writes).
+ void StartSendInitialMetadata() {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ stream->SendInitialMetadata();
+ }
+
+ /// Initiate a read operation.
+ ///
+ /// \param[out] req Where to eventually store the read message. Valid when
+ /// the library calls OnReadDone
+ void StartRead(Request* req) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.read_wanted = req;
+ return;
+ }
+ }
+ stream->Read(req);
+ }
+
+ /// Initiate a write operation.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ void StartWrite(const Response* resp) {
+ StartWrite(resp, ::grpc::WriteOptions());
+ }
+
+ /// Initiate a write operation with specified options.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ return;
+ }
+ }
+ stream->Write(resp, std::move(options));
+ }
+
+ /// Initiate a write operation with specified options and final RPC Status,
+ /// which also causes any trailing metadata for this RPC to be sent out.
+ /// StartWriteAndFinish is like merging StartWriteLast and Finish into a
+ /// single step. A key difference, though, is that this operation doesn't have
+ /// an OnWriteDone reaction - it is considered complete only when OnDone is
+ /// available. An RPC can either have StartWriteAndFinish or Finish, but not
+ /// both.
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ /// \param[in] s The status outcome of this RPC
+ void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
+ ::grpc::Status s) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.write_and_finish_wanted = true;
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ stream->WriteAndFinish(resp, std::move(options), std::move(s));
+ }
+
+ /// Inform system of a planned write operation with specified options, but
+ /// allow the library to schedule the actual write coalesced with the writing
+ /// of trailing metadata (which takes place on a Finish call).
+ ///
+ /// \param[in] resp The message to be written. The library does not take
+ /// ownership but the caller must ensure that the message is
+ /// not deleted or modified until OnWriteDone is called.
+ /// \param[in] options The WriteOptions to use for writing this message
+ void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
+ StartWrite(resp, std::move(options.set_last_message()));
+ }
+
+ /// Indicate that the stream is to be finished and the trailing metadata and
+ /// RPC status are to be sent. Every RPC MUST be finished using either Finish
+ /// or StartWriteAndFinish (but not both), even if the RPC is already
+ /// cancelled.
+ ///
+ /// \param[in] s The status outcome of this RPC
+ void Finish(::grpc::Status s) {
+ ServerCallbackReaderWriter<Request, Response>* stream =
+ stream_.load(std::memory_order_acquire);
+ if (stream == nullptr) {
+ grpc::internal::MutexLock l(&stream_mu_);
+ stream = stream_.load(std::memory_order_relaxed);
+ if (stream == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ stream->Finish(std::move(s));
+ }
+
+ /// Notifies the application that an explicit StartSendInitialMetadata
+ /// operation completed. Not used when the sending of initial metadata
+ /// piggybacks onto the first write.
+ ///
+ /// \param[in] ok Was it successful? If false, no further write-side operation
+ /// will succeed.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartRead operation completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no further read-side operation
+ /// will succeed.
+ virtual void OnReadDone(bool /*ok*/) {}
+
+ /// Notifies the application that a StartWrite (or StartWriteLast) operation
+ /// completed.
+ ///
+ /// \param[in] ok Was it successful? If false, no further write-side operation
+ /// will succeed.
+ virtual void OnWriteDone(bool /*ok*/) {}
+
+ /// Notifies the application that all operations associated with this RPC
+ /// have completed. This is an override (from the internal base class) but
+ /// still abstract, so derived classes MUST override it to be instantiated.
+ void OnDone() override = 0;
+
+ /// Notifies the application that this RPC has been cancelled. This is an
+ /// override (from the internal base class) but not final, so derived classes
+ /// should override it if they want to take action.
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackReaderWriter<Request, Response>;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindStream(
+ ServerCallbackReaderWriter<Request, Response>* stream) {
+ grpc::internal::MutexLock l(&stream_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ stream->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
+ stream->Read(backlog_.read_wanted);
+ }
+ if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
+ stream->WriteAndFinish(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted),
+ std::move(backlog_.status_wanted));
+ } else {
+ if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
+ stream->Write(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted));
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ stream->Finish(std::move(backlog_.status_wanted));
+ }
+ }
+ // Set stream_ last so that other functions can use it lock-free
+ stream_.store(stream, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex stream_mu_;
+ // TODO(vjpai): Make stream_or_backlog_ into a std::variant or y_absl::variant
+ // once C++17 or ABSL is supported since stream and backlog are
+ // mutually exclusive in this class. Do likewise with the
+ // remaining reactor classes and their backlogs as well.
+ std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool write_and_finish_wanted = false;
+ bool finish_wanted = false;
+ Request* read_wanted = nullptr;
+ const Response* write_wanted = nullptr;
+ ::grpc::WriteOptions write_options_wanted;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(stream_mu_) */;
+};
+
+/// \a ServerReadReactor is the interface for a client-streaming RPC.
+template <class Request>
+class ServerReadReactor : public internal::ServerReactor {
+ public:
+ ServerReadReactor() : reader_(nullptr) {}
+ ~ServerReadReactor() = default;
+
+ /// The following operation initiations are exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ reader->SendInitialMetadata();
+ }
+ void StartRead(Request* req) {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.read_wanted = req;
+ return;
+ }
+ }
+ reader->Read(req);
+ }
+ void Finish(::grpc::Status s) {
+ ServerCallbackReader<Request>* reader =
+ reader_.load(std::memory_order_acquire);
+ if (reader == nullptr) {
+ grpc::internal::MutexLock l(&reader_mu_);
+ reader = reader_.load(std::memory_order_relaxed);
+ if (reader == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ reader->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnReadDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackReader<Request>;
+
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindReader(ServerCallbackReader<Request>* reader) {
+ grpc::internal::MutexLock l(&reader_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ reader->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
+ reader->Read(backlog_.read_wanted);
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ reader->Finish(std::move(backlog_.status_wanted));
+ }
+ // Set reader_ last so that other functions can use it lock-free
+ reader_.store(reader, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex reader_mu_;
+ std::atomic<ServerCallbackReader<Request>*> reader_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool finish_wanted = false;
+ Request* read_wanted = nullptr;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(reader_mu_) */;
+};
+
+/// \a ServerWriteReactor is the interface for a server-streaming RPC.
+template <class Response>
+class ServerWriteReactor : public internal::ServerReactor {
+ public:
+ ServerWriteReactor() : writer_(nullptr) {}
+ ~ServerWriteReactor() = default;
+
+ /// The following operation initiations are exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ writer->SendInitialMetadata();
+ }
+ void StartWrite(const Response* resp) {
+ StartWrite(resp, ::grpc::WriteOptions());
+ }
+ void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ return;
+ }
+ }
+ writer->Write(resp, std::move(options));
+ }
+ void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
+ ::grpc::Status s) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.write_and_finish_wanted = true;
+ backlog_.write_wanted = resp;
+ backlog_.write_options_wanted = std::move(options);
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ writer->WriteAndFinish(resp, std::move(options), std::move(s));
+ }
+ void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
+ StartWrite(resp, std::move(options.set_last_message()));
+ }
+ void Finish(::grpc::Status s) {
+ ServerCallbackWriter<Response>* writer =
+ writer_.load(std::memory_order_acquire);
+ if (writer == nullptr) {
+ grpc::internal::MutexLock l(&writer_mu_);
+ writer = writer_.load(std::memory_order_relaxed);
+ if (writer == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ writer->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ virtual void OnWriteDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackWriter<Response>;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) {
+ grpc::internal::MutexLock l(&writer_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ writer->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) {
+ writer->WriteAndFinish(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted),
+ std::move(backlog_.status_wanted));
+ } else {
+ if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) {
+ writer->Write(backlog_.write_wanted,
+ std::move(backlog_.write_options_wanted));
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ writer->Finish(std::move(backlog_.status_wanted));
+ }
+ }
+ // Set writer_ last so that other functions can use it lock-free
+ writer_.store(writer, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex writer_mu_;
+ std::atomic<ServerCallbackWriter<Response>*> writer_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool write_and_finish_wanted = false;
+ bool finish_wanted = false;
+ const Response* write_wanted = nullptr;
+ ::grpc::WriteOptions write_options_wanted;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(writer_mu_) */;
+};
+
+class ServerUnaryReactor : public internal::ServerReactor {
+ public:
+ ServerUnaryReactor() : call_(nullptr) {}
+ ~ServerUnaryReactor() = default;
+
+ /// StartSendInitialMetadata is exactly like ServerBidiReactor.
+ void StartSendInitialMetadata() {
+ ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+ if (call == nullptr) {
+ grpc::internal::MutexLock l(&call_mu_);
+ call = call_.load(std::memory_order_relaxed);
+ if (call == nullptr) {
+ backlog_.send_initial_metadata_wanted = true;
+ return;
+ }
+ }
+ call->SendInitialMetadata();
+ }
+ /// Finish is similar to ServerBidiReactor except for one detail.
+ /// If the status is non-OK, any message will not be sent. Instead,
+ /// the client will only receive the status and any trailing metadata.
+ void Finish(::grpc::Status s) {
+ ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+ if (call == nullptr) {
+ grpc::internal::MutexLock l(&call_mu_);
+ call = call_.load(std::memory_order_relaxed);
+ if (call == nullptr) {
+ backlog_.finish_wanted = true;
+ backlog_.status_wanted = std::move(s);
+ return;
+ }
+ }
+ call->Finish(std::move(s));
+ }
+
+ /// The following notifications are exactly like ServerBidiReactor.
+ virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+ void OnDone() override = 0;
+ void OnCancel() override {}
+
+ private:
+ friend class ServerCallbackUnary;
+ // May be overridden by internal implementation details. This is not a public
+ // customization point.
+ virtual void InternalBindCall(ServerCallbackUnary* call) {
+ grpc::internal::MutexLock l(&call_mu_);
+
+ if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
+ call->SendInitialMetadata();
+ }
+ if (GPR_UNLIKELY(backlog_.finish_wanted)) {
+ call->Finish(std::move(backlog_.status_wanted));
+ }
+ // Set call_ last so that other functions can use it lock-free
+ call_.store(call, std::memory_order_release);
+ }
+
+ grpc::internal::Mutex call_mu_;
+ std::atomic<ServerCallbackUnary*> call_{nullptr};
+ struct PreBindBacklog {
+ bool send_initial_metadata_wanted = false;
+ bool finish_wanted = false;
+ ::grpc::Status status_wanted;
+ };
+ PreBindBacklog backlog_ /* GUARDED_BY(call_mu_) */;
+};
+
+namespace internal {
+
+template <class Base>
+class FinishOnlyReactor : public Base {
+ public:
+ explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); }
+ void OnDone() override { this->~FinishOnlyReactor(); }
+};
+
+using UnimplementedUnaryReactor = FinishOnlyReactor<ServerUnaryReactor>;
+template <class Request>
+using UnimplementedReadReactor = FinishOnlyReactor<ServerReadReactor<Request>>;
+template <class Response>
+using UnimplementedWriteReactor =
+ FinishOnlyReactor<ServerWriteReactor<Response>>;
+template <class Request, class Response>
+using UnimplementedBidiReactor =
+ FinishOnlyReactor<ServerBidiReactor<Request, Response>>;
+
+} // namespace internal
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
-
+
template <class Request>
-using ServerReadReactor = ::grpc::ServerReadReactor<Request>;
+using ServerReadReactor = ::grpc::ServerReadReactor<Request>;
template <class Response>
-using ServerWriteReactor = ::grpc::ServerWriteReactor<Response>;
+using ServerWriteReactor = ::grpc::ServerWriteReactor<Response>;
template <class Request, class Response>
-using ServerBidiReactor = ::grpc::ServerBidiReactor<Request, Response>;
+using ServerBidiReactor = ::grpc::ServerBidiReactor<Request, Response>;
-using ServerUnaryReactor = ::grpc::ServerUnaryReactor;
+using ServerUnaryReactor = ::grpc::ServerUnaryReactor;
} // namespace experimental
-
+
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
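
Since server_callback.h above carries the whole reactor (callback) API, a usage sketch may help orient the diff: a callback unary method returns a ServerUnaryReactor*, must call Finish() exactly once, and receives OnDone() only after every outstanding reaction and the ServerContext CompletionOp have completed. The sketch below assumes a hypothetical Echo/Ping service (the message and handler names are not from this repository); it uses only members shown in this header (StartSendInitialMetadata, Finish, OnSendInitialMetadataDone, OnCancel, OnDone) and frees itself in OnDone() the same way FinishOnlyReactor destroys itself.

```cpp
#include <string>

#include <grpcpp/grpcpp.h>

// Hypothetical message types for illustration; substitute the ones generated
// from your own .proto file.
namespace echo {
struct PingRequest { std::string msg; };
struct PingReply { std::string msg; };
}  // namespace echo

// A reactor that finishes immediately and cleans itself up in OnDone(),
// mirroring the FinishOnlyReactor pattern from server_callback.h.
class PingReactor final : public grpc::ServerUnaryReactor {
 public:
  PingReactor(const echo::PingRequest& req, echo::PingReply* reply) {
    reply->msg = req.msg;        // do the actual work
    StartSendInitialMetadata();  // optional; Finish alone would also send it
    Finish(grpc::Status::OK);    // every RPC must be finished exactly once
  }

 private:
  void OnSendInitialMetadataDone(bool /*ok*/) override {}
  void OnCancel() override { /* client went away; OnDone still follows */ }
  void OnDone() override { delete this; }  // all reactions have completed
};

// Shape of a callback-API unary method: it returns the reactor and does no
// blocking work itself. (HandlePing is an assumed name, not generated code.)
grpc::ServerUnaryReactor* HandlePing(grpc::CallbackServerContext* /*ctx*/,
                                     const echo::PingRequest* req,
                                     echo::PingReply* reply) {
  return new PingReactor(*req, reply);
}
```

For streaming methods the same rules apply with the other reactors shown above: StartRead/StartWrite may be issued before the library binds the stream (the pre-bind backlog absorbs them), and the RPC ends with either Finish() or StartWriteAndFinish(), never both.
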
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
index 8120fcaf85..330d62ab37 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
@@ -20,18 +20,18 @@
#include <grpcpp/impl/codegen/message_allocator.h>
#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/server_callback.h>
-#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_context.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
namespace internal {
template <class RequestType, class ResponseType>
class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackUnaryHandler(
- std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
+ std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
const RequestType*, ResponseType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
@@ -52,7 +52,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
auto* call = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackUnaryImpl)))
ServerCallbackUnaryImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, allocator_state, std::move(param.call_requester));
param.server_context->BeginCompletionOp(
param.call, [call](bool) { call->MaybeDone(); }, call);
@@ -61,7 +61,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<ServerUnaryReactor>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
call->request(), call->response());
}
@@ -106,7 +106,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
}
private:
- std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
+ std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*,
const RequestType*, ResponseType*)>
get_reactor_;
::grpc::experimental::MessageAllocator<RequestType, ResponseType>*
@@ -115,19 +115,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
class ServerCallbackUnaryImpl : public ServerCallbackUnary {
public:
void Finish(::grpc::Status s) override {
- // A callback that only contains a call to MaybeDone can be run as an
- // inline callback regardless of whether or not OnDone is inlineable
- // because if the actual OnDone callback needs to be scheduled, MaybeDone
- // is responsible for dispatching to an executor thread if needed. Thus,
- // when setting up the finish_tag_, we can set its own callback to
- // inlineable.
+ // A callback that only contains a call to MaybeDone can be run as an
+ // inline callback regardless of whether or not OnDone is inlineable
+ // because if the actual OnDone callback needs to be scheduled, MaybeDone
+ // is responsible for dispatching to an executor thread if needed. Thus,
+ // when setting up the finish_tag_, we can set its own callback to
+ // inlineable.
finish_tag_.Set(
- call_.call(),
- [this](bool) {
- this->MaybeDone(
- reactor_.load(std::memory_order_relaxed)->InternalInlineable());
- },
- &finish_ops_, /*can_inline=*/true);
+ call_.call(),
+ [this](bool) {
+ this->MaybeDone(
+ reactor_.load(std::memory_order_relaxed)->InternalInlineable());
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -152,19 +152,19 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be marked inline because it
- // is directly invoking a user-controlled reaction
- // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
- // thread. However, any OnDone needed after that can be inlined because it
- // is already running on an executor thread.
+ // The callback for this function should not be marked inline because it
+ // is directly invoking a user-controlled reaction
+ // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor
+ // thread. However, any OnDone needed after that can be inlined because it
+ // is already running on an executor thread.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerUnaryReactor* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerUnaryReactor* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -179,7 +179,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
friend class CallbackUnaryHandler<RequestType, ResponseType>;
ServerCallbackUnaryImpl(
- ::grpc::CallbackServerContext* ctx, ::grpc::internal::Call* call,
+ ::grpc::CallbackServerContext* ctx, ::grpc::internal::Call* call,
::grpc::experimental::MessageHolder<RequestType, ResponseType>*
allocator_state,
std::function<void()> call_requester)
@@ -198,20 +198,20 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
reactor_.store(reactor, std::memory_order_relaxed);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- this->MaybeDone(reactor->InternalInlineable());
+ this->MaybeDone(reactor->InternalInlineable());
}
const RequestType* request() { return allocator_state_->request(); }
ResponseType* response() { return allocator_state_->response(); }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- allocator_state_->Release();
- this->~ServerCallbackUnaryImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ allocator_state_->Release();
+ this->~ServerCallbackUnaryImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -227,7 +227,7 @@ class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
finish_ops_;
::grpc::internal::CallbackWithSuccessTag finish_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
::grpc::experimental::MessageHolder<RequestType, ResponseType>* const
allocator_state_;
@@ -254,7 +254,7 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackClientStreamingHandler(
std::function<ServerReadReactor<RequestType>*(
- ::grpc::CallbackServerContext*, ResponseType*)>
+ ::grpc::CallbackServerContext*, ResponseType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -264,22 +264,22 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
auto* reader = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackReaderImpl)))
ServerCallbackReaderImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no read reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no read reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); },
- reader);
+ param.call,
+ [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); },
+ reader);
ServerReadReactor<RequestType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerReadReactor<RequestType>>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
reader->response());
}
@@ -295,24 +295,24 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
}
private:
- std::function<ServerReadReactor<RequestType>*(::grpc::CallbackServerContext*,
- ResponseType*)>
+ std::function<ServerReadReactor<RequestType>*(::grpc::CallbackServerContext*,
+ ResponseType*)>
get_reactor_;
class ServerCallbackReaderImpl : public ServerCallbackReader<RequestType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
-      // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no read reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+      // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no read reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
@@ -335,17 +335,17 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerReadReactor<RequestType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerReadReactor<RequestType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -365,42 +365,42 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
private:
friend class CallbackClientStreamingHandler<RequestType, ResponseType>;
- ServerCallbackReaderImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackReaderImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
std::function<void()> call_requester)
: ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
void SetupReactor(ServerReadReactor<RequestType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
read_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnReadDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ [this, reactor](bool ok) {
+ reactor->OnReadDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &read_ops_, /*can_inline=*/false);
+ &read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no read
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no read
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
~ServerCallbackReaderImpl() {}
ResponseType* response() { return &resp_; }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackReaderImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackReaderImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -420,7 +420,7 @@ class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
read_ops_;
::grpc::internal::CallbackWithSuccessTag read_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
ResponseType resp_;
std::function<void()> call_requester_;
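
The ServerCallbackReaderImpl above ultimately drives a user-supplied ServerReadReactor: read_tag_ invokes OnReadDone after each StartRead, and the reactor typically finishes once the client half-closes. A hedged application-side sketch (EchoRequest and EchoResponse are hypothetical message types; the generated client-streaming method would receive the CallbackServerContext and the response object and return this reactor, matching get_reactor_'s signature above):

    class SumReactor : public grpc::ServerReadReactor<EchoRequest> {
     public:
      explicit SumReactor(EchoResponse* response) : response_(response) {
        StartRead(&msg_);              // ask for the first client message
      }
      void OnReadDone(bool ok) override {
        if (ok) {
          total_bytes_ += msg_.message().size();
          StartRead(&msg_);            // keep reading until the client half-closes
        } else {
          response_->set_message(std::to_string(total_bytes_));
          Finish(grpc::Status::OK);    // ok == false: no more client messages
        }
      }
      void OnDone() override { delete this; }

     private:
      EchoRequest msg_;
      EchoResponse* response_;
      size_t total_bytes_ = 0;
    };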
@@ -437,7 +437,7 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackServerStreamingHandler(
std::function<ServerWriteReactor<ResponseType>*(
- ::grpc::CallbackServerContext*, const RequestType*)>
+ ::grpc::CallbackServerContext*, const RequestType*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -447,23 +447,23 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
auto* writer = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackWriterImpl)))
ServerCallbackWriterImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, static_cast<RequestType*>(param.request),
std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no write reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no write reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); },
- writer);
+ param.call,
+ [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); },
+ writer);
ServerWriteReactor<ResponseType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerWriteReactor<ResponseType>>(
get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
writer->request());
}
if (reactor == nullptr) {
@@ -496,23 +496,23 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
private:
std::function<ServerWriteReactor<ResponseType>*(
- ::grpc::CallbackServerContext*, const RequestType*)>
+ ::grpc::CallbackServerContext*, const RequestType*)>
get_reactor_;
class ServerCallbackWriterImpl : public ServerCallbackWriter<ResponseType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
-      // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no write reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+      // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no write reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -530,17 +530,17 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerWriteReactor<ResponseType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerWriteReactor<ResponseType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -573,15 +573,15 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
::grpc::Status s) override {
// This combines the write into the finish callback
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}
private:
friend class CallbackServerStreamingHandler<RequestType, ResponseType>;
- ServerCallbackWriterImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackWriterImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
const RequestType* req,
std::function<void()> call_requester)
@@ -592,34 +592,34 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
void SetupReactor(ServerWriteReactor<ResponseType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
- write_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnWriteDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
- },
- &write_ops_, /*can_inline=*/false);
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
+ write_tag_.Set(call_.call(),
+ [this, reactor](bool ok) {
+ reactor->OnWriteDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no write
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no write
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
~ServerCallbackWriterImpl() { req_->~RequestType(); }
const RequestType* request() { return req_; }
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackWriterImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackWriterImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -639,7 +639,7 @@ class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
write_ops_;
::grpc::internal::CallbackWithSuccessTag write_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
const RequestType* req_;
std::function<void()> call_requester_;
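
Symmetrically, ServerCallbackWriterImpl drives a ServerWriteReactor: write_tag_ invokes OnWriteDone after each StartWrite completes. A hedged application-side sketch (hypothetical message types; the generated server-streaming method would take the context and the request and return this reactor):

    class TickReactor : public grpc::ServerWriteReactor<EchoResponse> {
     public:
      explicit TickReactor(int count) : remaining_(count) { NextWrite(); }
      void OnWriteDone(bool ok) override {
        if (!ok) {
          Finish(grpc::Status(grpc::StatusCode::UNKNOWN, "write failed"));
          return;
        }
        NextWrite();
      }
      void OnDone() override { delete this; }

     private:
      void NextWrite() {
        if (remaining_-- > 0) {
          response_.set_message("tick");
          StartWrite(&response_);      // OnWriteDone fires when this write completes
        } else {
          Finish(grpc::Status::OK);    // no more messages to send
        }
      }
      EchoResponse response_;
      int remaining_;
    };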
@@ -656,7 +656,7 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
public:
explicit CallbackBidiHandler(
std::function<ServerBidiReactor<RequestType, ResponseType>*(
- ::grpc::CallbackServerContext*)>
+ ::grpc::CallbackServerContext*)>
get_reactor)
: get_reactor_(std::move(get_reactor)) {}
void RunHandler(const HandlerParameter& param) final {
@@ -665,22 +665,22 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
auto* stream = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
param.call->call(), sizeof(ServerCallbackReaderWriterImpl)))
ServerCallbackReaderWriterImpl(
- static_cast<::grpc::CallbackServerContext*>(param.server_context),
+ static_cast<::grpc::CallbackServerContext*>(param.server_context),
param.call, std::move(param.call_requester));
- // Inlineable OnDone can be false in the CompletionOp callback because there
- // is no bidi reactor that has an inlineable OnDone; this only applies to
- // the DefaultReactor (which is unary).
+ // Inlineable OnDone can be false in the CompletionOp callback because there
+ // is no bidi reactor that has an inlineable OnDone; this only applies to
+ // the DefaultReactor (which is unary).
param.server_context->BeginCompletionOp(
- param.call,
- [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); },
- stream);
+ param.call,
+ [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); },
+ stream);
ServerBidiReactor<RequestType, ResponseType>* reactor = nullptr;
if (param.status.ok()) {
reactor = ::grpc::internal::CatchingReactorGetter<
ServerBidiReactor<RequestType, ResponseType>>(
- get_reactor_,
- static_cast<::grpc::CallbackServerContext*>(param.server_context));
+ get_reactor_,
+ static_cast<::grpc::CallbackServerContext*>(param.server_context));
}
if (reactor == nullptr) {
@@ -697,24 +697,24 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
private:
std::function<ServerBidiReactor<RequestType, ResponseType>*(
- ::grpc::CallbackServerContext*)>
+ ::grpc::CallbackServerContext*)>
get_reactor_;
class ServerCallbackReaderWriterImpl
: public ServerCallbackReaderWriter<RequestType, ResponseType> {
public:
void Finish(::grpc::Status s) override {
- // A finish tag with only MaybeDone can have its callback inlined
-      // even if OnDone is not inlineable, because this callback just
- // checks a ref and then decides whether or not to dispatch OnDone.
- finish_tag_.Set(call_.call(),
- [this](bool) {
- // Inlineable OnDone can be false here because there is
- // no bidi reactor that has an inlineable OnDone; this
- // only applies to the DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
- },
- &finish_ops_, /*can_inline=*/true);
+ // A finish tag with only MaybeDone can have its callback inlined
+      // even if OnDone is not inlineable, because this callback just
+ // checks a ref and then decides whether or not to dispatch OnDone.
+ finish_tag_.Set(call_.call(),
+ [this](bool) {
+ // Inlineable OnDone can be false here because there is
+ // no bidi reactor that has an inlineable OnDone; this
+ // only applies to the DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
+ },
+ &finish_ops_, /*can_inline=*/true);
finish_ops_.set_core_cq_tag(&finish_tag_);
if (!ctx_->sent_initial_metadata_) {
@@ -732,17 +732,17 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
this->Ref();
- // The callback for this function should not be inlined because it invokes
- // a user-controlled reaction, but any resulting OnDone can be inlined in
- // the executor to which this callback is dispatched.
+ // The callback for this function should not be inlined because it invokes
+ // a user-controlled reaction, but any resulting OnDone can be inlined in
+ // the executor to which this callback is dispatched.
meta_tag_.Set(call_.call(),
[this](bool ok) {
- ServerBidiReactor<RequestType, ResponseType>* reactor =
- reactor_.load(std::memory_order_relaxed);
- reactor->OnSendInitialMetadataDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ ServerBidiReactor<RequestType, ResponseType>* reactor =
+ reactor_.load(std::memory_order_relaxed);
+ reactor->OnSendInitialMetadataDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &meta_ops_, /*can_inline=*/false);
+ &meta_ops_, /*can_inline=*/false);
meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
ctx_->initial_metadata_flags());
if (ctx_->compression_level_set()) {
@@ -774,8 +774,8 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
::grpc::Status s) override {
- // TODO(vjpai): don't assert
- GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+ // TODO(vjpai): don't assert
+ GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
Finish(std::move(s));
}
@@ -788,45 +788,45 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
private:
friend class CallbackBidiHandler<RequestType, ResponseType>;
- ServerCallbackReaderWriterImpl(::grpc::CallbackServerContext* ctx,
+ ServerCallbackReaderWriterImpl(::grpc::CallbackServerContext* ctx,
::grpc::internal::Call* call,
std::function<void()> call_requester)
: ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
void SetupReactor(ServerBidiReactor<RequestType, ResponseType>* reactor) {
reactor_.store(reactor, std::memory_order_relaxed);
- // The callbacks for these functions should not be inlined because they
- // invoke user-controlled reactions, but any resulting OnDones can be
- // inlined in the executor to which a callback is dispatched.
- write_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnWriteDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
- },
- &write_ops_, /*can_inline=*/false);
+ // The callbacks for these functions should not be inlined because they
+ // invoke user-controlled reactions, but any resulting OnDones can be
+ // inlined in the executor to which a callback is dispatched.
+ write_tag_.Set(call_.call(),
+ [this, reactor](bool ok) {
+ reactor->OnWriteDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
+ },
+ &write_ops_, /*can_inline=*/false);
write_ops_.set_core_cq_tag(&write_tag_);
read_tag_.Set(call_.call(),
- [this, reactor](bool ok) {
- reactor->OnReadDone(ok);
- this->MaybeDone(/*inlineable_ondone=*/true);
+ [this, reactor](bool ok) {
+ reactor->OnReadDone(ok);
+ this->MaybeDone(/*inlineable_ondone=*/true);
},
- &read_ops_, /*can_inline=*/false);
+ &read_ops_, /*can_inline=*/false);
read_ops_.set_core_cq_tag(&read_tag_);
this->BindReactor(reactor);
this->MaybeCallOnCancel(reactor);
- // Inlineable OnDone can be false here because there is no bidi
- // reactor that has an inlineable OnDone; this only applies to the
- // DefaultReactor (which is unary).
- this->MaybeDone(/*inlineable_ondone=*/false);
+ // Inlineable OnDone can be false here because there is no bidi
+ // reactor that has an inlineable OnDone; this only applies to the
+ // DefaultReactor (which is unary).
+ this->MaybeDone(/*inlineable_ondone=*/false);
}
- void CallOnDone() override {
- reactor_.load(std::memory_order_relaxed)->OnDone();
- grpc_call* call = call_.call();
- auto call_requester = std::move(call_requester_);
- this->~ServerCallbackReaderWriterImpl(); // explicitly call destructor
- ::grpc::g_core_codegen_interface->grpc_call_unref(call);
- call_requester();
+ void CallOnDone() override {
+ reactor_.load(std::memory_order_relaxed)->OnDone();
+ grpc_call* call = call_.call();
+ auto call_requester = std::move(call_requester_);
+ this->~ServerCallbackReaderWriterImpl(); // explicitly call destructor
+ ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+ call_requester();
}
ServerReactor* reactor() override {
@@ -850,7 +850,7 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
read_ops_;
::grpc::internal::CallbackWithSuccessTag read_tag_;
- ::grpc::CallbackServerContext* const ctx_;
+ ::grpc::CallbackServerContext* const ctx_;
::grpc::internal::Call call_;
std::function<void()> call_requester_;
// The memory ordering of reactor_ follows ServerCallbackUnaryImpl.
@@ -862,6 +862,6 @@ class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
};
} // namespace internal
-} // namespace grpc
+} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H
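
CallbackBidiHandler wires both a read tag and a write tag to the same reactor, so a bidirectional method is usually written as a small state machine over OnReadDone/OnWriteDone. A hedged echo sketch (hypothetical message types; one outstanding read and at most one outstanding write at a time):

    class EchoBidiReactor
        : public grpc::ServerBidiReactor<EchoRequest, EchoResponse> {
     public:
      EchoBidiReactor() { StartRead(&request_); }
      void OnReadDone(bool ok) override {
        if (!ok) {                     // client half-closed: nothing left to echo
          Finish(grpc::Status::OK);
          return;
        }
        response_.set_message(request_.message());
        StartWrite(&response_);        // echo it back; read again after the write
      }
      void OnWriteDone(bool ok) override {
        if (!ok) {
          Finish(grpc::Status(grpc::StatusCode::UNKNOWN, "write failed"));
          return;
        }
        StartRead(&request_);
      }
      void OnDone() override { delete this; }

     private:
      EchoRequest request_;
      EchoResponse response_;
    };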
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
index 685f006cda..769b1b5b5d 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,601 +19,601 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
#define GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
-#include <atomic>
-#include <cassert>
-#include <map>
-#include <memory>
-#include <type_traits>
-#include <vector>
-
-#include <grpc/impl/codegen/port_platform.h>
-
-#include <grpc/impl/codegen/compression_types.h>
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/call_op_set.h>
-#include <grpcpp/impl/codegen/callback_common.h>
-#include <grpcpp/impl/codegen/completion_queue_tag.h>
-#include <grpcpp/impl/codegen/config.h>
-#include <grpcpp/impl/codegen/create_auth_context.h>
-#include <grpcpp/impl/codegen/message_allocator.h>
-#include <grpcpp/impl/codegen/metadata_map.h>
-#include <grpcpp/impl/codegen/security/auth_context.h>
-#include <grpcpp/impl/codegen/server_callback.h>
-#include <grpcpp/impl/codegen/server_interceptor.h>
-#include <grpcpp/impl/codegen/status.h>
-#include <grpcpp/impl/codegen/string_ref.h>
-#include <grpcpp/impl/codegen/time.h>
-
-struct grpc_metadata;
-struct grpc_call;
-struct census_context;
-
+#include <atomic>
+#include <cassert>
+#include <map>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include <grpc/impl/codegen/port_platform.h>
+
+#include <grpc/impl/codegen/compression_types.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
+#include <grpcpp/impl/codegen/callback_common.h>
+#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/config.h>
+#include <grpcpp/impl/codegen/create_auth_context.h>
+#include <grpcpp/impl/codegen/message_allocator.h>
+#include <grpcpp/impl/codegen/metadata_map.h>
+#include <grpcpp/impl/codegen/security/auth_context.h>
+#include <grpcpp/impl/codegen/server_callback.h>
+#include <grpcpp/impl/codegen/server_interceptor.h>
+#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/string_ref.h>
+#include <grpcpp/impl/codegen/time.h>
+
+struct grpc_metadata;
+struct grpc_call;
+struct census_context;
+
namespace grpc {
-template <class W, class R>
-class ServerAsyncReader;
-template <class W>
-class ServerAsyncWriter;
-template <class W>
-class ServerAsyncResponseWriter;
-template <class W, class R>
-class ServerAsyncReaderWriter;
-template <class R>
-class ServerReader;
-template <class W>
-class ServerWriter;
-
-namespace internal {
-template <class ServiceType, class RequestType, class ResponseType>
-class BidiStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackUnaryHandler;
-template <class RequestType, class ResponseType>
-class CallbackClientStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackServerStreamingHandler;
-template <class RequestType, class ResponseType>
-class CallbackBidiHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class ClientStreamingHandler;
-template <class ServiceType, class RequestType, class ResponseType>
-class RpcMethodHandler;
-template <class Base>
-class FinishOnlyReactor;
-template <class W, class R>
-class ServerReaderWriterBody;
-template <class ServiceType, class RequestType, class ResponseType>
-class ServerStreamingHandler;
-class ServerReactor;
-template <class Streamer, bool WriteNeeded>
-class TemplatedBidiStreamingHandler;
-template <::grpc::StatusCode code>
-class ErrorMethodHandler;
-} // namespace internal
-
-class ClientContext;
-class CompletionQueue;
-class GenericServerContext;
-class Server;
-class ServerInterface;
-
-// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
+template <class W, class R>
+class ServerAsyncReader;
+template <class W>
+class ServerAsyncWriter;
+template <class W>
+class ServerAsyncResponseWriter;
+template <class W, class R>
+class ServerAsyncReaderWriter;
+template <class R>
+class ServerReader;
+template <class W>
+class ServerWriter;
+
+namespace internal {
+template <class ServiceType, class RequestType, class ResponseType>
+class BidiStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackUnaryHandler;
+template <class RequestType, class ResponseType>
+class CallbackClientStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackServerStreamingHandler;
+template <class RequestType, class ResponseType>
+class CallbackBidiHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class ClientStreamingHandler;
+template <class ServiceType, class RequestType, class ResponseType>
+class RpcMethodHandler;
+template <class Base>
+class FinishOnlyReactor;
+template <class W, class R>
+class ServerReaderWriterBody;
+template <class ServiceType, class RequestType, class ResponseType>
+class ServerStreamingHandler;
+class ServerReactor;
+template <class Streamer, bool WriteNeeded>
+class TemplatedBidiStreamingHandler;
+template <::grpc::StatusCode code>
+class ErrorMethodHandler;
+} // namespace internal
+
+class ClientContext;
+class CompletionQueue;
+class GenericServerContext;
+class Server;
+class ServerInterface;
+
+// TODO(vjpai): Remove namespace experimental when de-experimentalized fully.
namespace experimental {
-typedef ::grpc::ServerContextBase ServerContextBase;
-typedef ::grpc::CallbackServerContext CallbackServerContext;
-
-} // namespace experimental
+typedef ::grpc::ServerContextBase ServerContextBase;
+typedef ::grpc::CallbackServerContext CallbackServerContext;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-namespace experimental {
-#endif
-class GenericCallbackServerContext;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
} // namespace experimental
-#endif
-namespace internal {
-class Call;
-} // namespace internal
-
-namespace testing {
-class InteropServerContextInspector;
-class ServerContextTestSpouse;
-class DefaultReactorTestPeer;
-} // namespace testing
-
-/// Base class of ServerContext. Experimental until callback API is final.
-class ServerContextBase {
- public:
- virtual ~ServerContextBase();
-
- /// Return the deadline for the server call.
- std::chrono::system_clock::time_point deadline() const {
- return ::grpc::Timespec2Timepoint(deadline_);
- }
-
- /// Return a \a gpr_timespec representation of the server call's deadline.
- gpr_timespec raw_deadline() const { return deadline_; }
-
- /// Add the (\a key, \a value) pair to the initial metadata
- /// associated with a server call. These are made available at the client side
- /// by the \a grpc::ClientContext::GetServerInitialMetadata() method.
- ///
- /// \warning This method should only be called before sending initial metadata
-  /// to the client (which can happen explicitly, or implicitly when sending
- /// a response message or status to the client).
- ///
- /// \param key The metadata key. If \a value is binary data, it must
- /// end in "-bin".
- /// \param value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddInitialMetadata(const TString& key, const TString& value);
-
-  /// Add the (\a key, \a value) pair to the trailing metadata
- /// associated with a server call. These are made available at the client
- /// side by the \a grpc::ClientContext::GetServerTrailingMetadata() method.
- ///
- /// \warning This method should only be called before sending trailing
- /// metadata to the client (which happens when the call is finished and a
- /// status is sent to the client).
- ///
- /// \param key The metadata key. If \a value is binary data,
- /// it must end in "-bin".
- /// \param value The metadata value. If its value is binary, the key name
- /// must end in "-bin".
- ///
- /// Metadata must conform to the following format:
- /// Custom-Metadata -> Binary-Header / ASCII-Header
- /// Binary-Header -> {Header-Name "-bin" } {binary value}
- /// ASCII-Header -> Header-Name ASCII-Value
- /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
- /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
- void AddTrailingMetadata(const TString& key, const TString& value);
-
- /// Return whether this RPC failed before the server could provide its status
- /// back to the client. This could be because of explicit API cancellation
- /// from the client-side or server-side, because of deadline exceeded, network
- /// connection reset, HTTP/2 parameter configuration (e.g., max message size,
- /// max connection age), etc. It does NOT include failure due to a non-OK
- /// status return from the server application's request handler, including
- /// Status::CANCELLED.
- ///
- /// IsCancelled is always safe to call when using sync or callback API.
- /// When using async API, it is only safe to call IsCancelled after
- /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
- bool IsCancelled() const;
-
- /// Cancel the Call from the server. This is a best-effort API and
- /// depending on when it is called, the RPC may still appear successful to
- /// the client. For example, if TryCancel() is called on a separate thread, it
- /// might race with the server handler which might return success to the
- /// client before TryCancel() was even started by the thread.
- ///
- /// It is the caller's responsibility to prevent such races and ensure that if
-  /// TryCancel() is called, the server handler must return Status::CANCELLED.
-  /// The only exception is that if the server handler is already returning an
- /// error status code, it is ok to not return Status::CANCELLED even if
- /// TryCancel() was called.
- ///
- /// For reasons such as the above, it is generally preferred to explicitly
- /// finish an RPC by returning Status::CANCELLED rather than using TryCancel.
- ///
- /// Note that TryCancel() does not change any of the tags that are pending
- /// on the completion queue. All pending tags will still be delivered
- /// (though their ok result may reflect the effect of cancellation).
- void TryCancel() const;
-
- /// Return a collection of initial metadata key-value pairs sent from the
-  /// client. Note that keys may occur more than
-  /// once (i.e., a \a std::multimap is returned).
- ///
-  /// It is safe to use this method after initial metadata has been received.
- /// Calls always begin with the client sending initial metadata, so this is
- /// safe to access as soon as the call has begun on the server side.
- ///
-  /// \return A multimap of initial metadata key-value pairs sent by the client.
- const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
- const {
- return *client_metadata_.map();
- }
-
- /// Return the compression algorithm to be used by the server call.
- grpc_compression_level compression_level() const {
- return compression_level_;
- }
-
- /// Set \a level to be the compression level used for the server call.
- ///
- /// \param level The compression level used for the server call.
- void set_compression_level(grpc_compression_level level) {
- compression_level_set_ = true;
- compression_level_ = level;
- }
-
- /// Return a bool indicating whether the compression level for this call
- /// has been set (either implicitly or through a previous call to
-  /// \a set_compression_level).
- bool compression_level_set() const { return compression_level_set_; }
-
- /// Return the compression algorithm the server call will request be used.
- /// Note that the gRPC runtime may decide to ignore this request, for example,
- /// due to resource constraints, or if the server is aware the client doesn't
- /// support the requested algorithm.
- grpc_compression_algorithm compression_algorithm() const {
- return compression_algorithm_;
- }
- /// Set \a algorithm to be the compression algorithm used for the server call.
- ///
- /// \param algorithm The compression algorithm used for the server call.
- void set_compression_algorithm(grpc_compression_algorithm algorithm);
-
- /// Set the serialized load reporting costs in \a cost_data for the call.
- void SetLoadReportingCosts(const std::vector<TString>& cost_data);
-
- /// Return the authentication context for this server call.
- ///
- /// \see grpc::AuthContext.
- std::shared_ptr<const ::grpc::AuthContext> auth_context() const {
- if (auth_context_.get() == nullptr) {
- auth_context_ = ::grpc::CreateAuthContext(call_.call);
- }
- return auth_context_;
- }
-
- /// Return the peer uri in a string.
- /// WARNING: this value is never authenticated or subject to any security
- /// related code. It must not be used for any authentication related
- /// functionality. Instead, use auth_context.
- TString peer() const;
-
- /// Get the census context associated with this server call.
- const struct census_context* census_context() const;
-
- /// Should be used for framework-level extensions only.
- /// Applications never need to call this method.
- grpc_call* c_call() { return call_.call; }
-
- protected:
- /// Async only. Has to be called before the rpc starts.
- /// Returns the tag in completion queue when the rpc finishes.
- /// IsCancelled() can then be called to check whether the rpc was cancelled.
- /// TODO(vjpai): Fix this so that the tag is returned even if the call never
- /// starts (https://github.com/grpc/grpc/issues/10136).
- void AsyncNotifyWhenDone(void* tag) {
- has_notify_when_done_tag_ = true;
- async_notify_when_done_tag_ = tag;
- }
-
- /// NOTE: This is an API for advanced users who need custom allocators.
- /// Get and maybe mutate the allocator state associated with the current RPC.
- /// Currently only applicable for callback unary RPC methods.
- /// WARNING: This is experimental API and could be changed or removed.
- ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() {
- return message_allocator_state_;
- }
-
- /// Get a library-owned default unary reactor for use in minimal reaction
- /// cases. This supports typical unary RPC usage of providing a response and
- /// status. It supports immediate Finish (finish from within the method
- /// handler) or delayed Finish (finish called after the method handler
- /// invocation). It does not support reacting to cancellation or completion,
- /// or early sending of initial metadata. Since this is a library-owned
- /// reactor, it should not be delete'd or freed in any way. This is more
- /// efficient than creating a user-owned reactor both because of avoiding an
- /// allocation and because its minimal reactions are optimized using a core
- /// surface flag that allows their reactions to run inline without any
- /// thread-hop.
- ///
- /// This method should not be called more than once or called after return
- /// from the method handler.
- ///
- /// WARNING: This is experimental API and could be changed or removed.
- ::grpc::ServerUnaryReactor* DefaultReactor() {
- // Short-circuit the case where a default reactor was already set up by
- // the TestPeer.
- if (test_unary_ != nullptr) {
- return reinterpret_cast<Reactor*>(&default_reactor_);
- }
- new (&default_reactor_) Reactor;
-#ifndef NDEBUG
- bool old = false;
- assert(default_reactor_used_.compare_exchange_strong(
- old, true, std::memory_order_relaxed));
-#else
- default_reactor_used_.store(true, std::memory_order_relaxed);
-#endif
- return reinterpret_cast<Reactor*>(&default_reactor_);
- }
-
- /// Constructors for use by derived classes
- ServerContextBase();
- ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr);
-
- private:
- friend class ::grpc::testing::InteropServerContextInspector;
- friend class ::grpc::testing::ServerContextTestSpouse;
- friend class ::grpc::testing::DefaultReactorTestPeer;
- friend class ::grpc::ServerInterface;
- friend class ::grpc::Server;
- template <class W, class R>
- friend class ::grpc::ServerAsyncReader;
- template <class W>
- friend class ::grpc::ServerAsyncWriter;
- template <class W>
- friend class ::grpc::ServerAsyncResponseWriter;
- template <class W, class R>
- friend class ::grpc::ServerAsyncReaderWriter;
- template <class R>
- friend class ::grpc::ServerReader;
- template <class W>
- friend class ::grpc::ServerWriter;
- template <class W, class R>
- friend class ::grpc::internal::ServerReaderWriterBody;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::RpcMethodHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ClientStreamingHandler;
- template <class ServiceType, class RequestType, class ResponseType>
- friend class ::grpc::internal::ServerStreamingHandler;
- template <class Streamer, bool WriteNeeded>
- friend class ::grpc::internal::TemplatedBidiStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackUnaryHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackClientStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackServerStreamingHandler;
- template <class RequestType, class ResponseType>
- friend class ::grpc::internal::CallbackBidiHandler;
- template <::grpc::StatusCode code>
- friend class ::grpc::internal::ErrorMethodHandler;
- template <class Base>
- friend class ::grpc::internal::FinishOnlyReactor;
- friend class ::grpc::ClientContext;
- friend class ::grpc::GenericServerContext;
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- friend class ::grpc::GenericCallbackServerContext;
-#else
- friend class ::grpc::experimental::GenericCallbackServerContext;
-#endif
-
- /// Prevent copying.
- ServerContextBase(const ServerContextBase&);
- ServerContextBase& operator=(const ServerContextBase&);
-
- class CompletionOp;
-
- void BeginCompletionOp(
- ::grpc::internal::Call* call, std::function<void(bool)> callback,
- ::grpc::internal::ServerCallbackCall* callback_controller);
- /// Return the tag queued by BeginCompletionOp()
- ::grpc::internal::CompletionQueueTag* GetCompletionOpTag();
-
- void set_call(grpc_call* call) { call_.call = call; }
-
- void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr);
-
- uint32_t initial_metadata_flags() const { return 0; }
-
- ::grpc::experimental::ServerRpcInfo* set_server_rpc_info(
- const char* method, ::grpc::internal::RpcMethod::RpcType type,
- const std::vector<std::unique_ptr<
- ::grpc::experimental::ServerInterceptorFactoryInterface>>& creators) {
- if (creators.size() != 0) {
- rpc_info_ = new ::grpc::experimental::ServerRpcInfo(this, method, type);
- rpc_info_->RegisterInterceptors(creators);
- }
- return rpc_info_;
- }
-
- void set_message_allocator_state(
- ::grpc::experimental::RpcAllocatorState* allocator_state) {
- message_allocator_state_ = allocator_state;
- }
-
- struct CallWrapper {
- ~CallWrapper();
-
- grpc_call* call = nullptr;
- };
-
- // NOTE: call_ must be the first data member of this object so that its
- // destructor is the last to be called, since its destructor may unref
- // the underlying core call which holds the arena that may be used to
- // hold this object.
- CallWrapper call_;
-
- CompletionOp* completion_op_ = nullptr;
- bool has_notify_when_done_tag_ = false;
- void* async_notify_when_done_tag_ = nullptr;
- ::grpc::internal::CallbackWithSuccessTag completion_tag_;
-
- gpr_timespec deadline_;
- ::grpc::CompletionQueue* cq_ = nullptr;
- bool sent_initial_metadata_ = false;
- mutable std::shared_ptr<const ::grpc::AuthContext> auth_context_;
- mutable ::grpc::internal::MetadataMap client_metadata_;
- std::multimap<TString, TString> initial_metadata_;
- std::multimap<TString, TString> trailing_metadata_;
-
- bool compression_level_set_ = false;
- grpc_compression_level compression_level_;
- grpc_compression_algorithm compression_algorithm_;
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage>
- pending_ops_;
- bool has_pending_ops_ = false;
-
- ::grpc::experimental::ServerRpcInfo* rpc_info_ = nullptr;
- ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr;
-
- class Reactor : public ::grpc::ServerUnaryReactor {
- public:
- void OnCancel() override {}
- void OnDone() override {}
- // Override InternalInlineable for this class since its reactions are
- // trivial and thus do not need to be run from the executor (triggering a
- // thread hop). This should only be used by internal reactors (thus the
- // name) and not by user application code.
- bool InternalInlineable() override { return true; }
- };
-
- void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
- test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
- }
- bool test_status_set() const {
- return (test_unary_ != nullptr) && test_unary_->status_set();
- }
- ::grpc::Status test_status() const { return test_unary_->status(); }
-
- class TestServerCallbackUnary : public ::grpc::ServerCallbackUnary {
- public:
- TestServerCallbackUnary(ServerContextBase* ctx,
- std::function<void(::grpc::Status)> func)
- : reactor_(ctx->DefaultReactor()), func_(std::move(func)) {
- this->BindReactor(reactor_);
- }
- void Finish(::grpc::Status s) override {
- status_ = s;
- func_(std::move(s));
- status_set_.store(true, std::memory_order_release);
- }
- void SendInitialMetadata() override {}
-
- bool status_set() const {
- return status_set_.load(std::memory_order_acquire);
- }
- ::grpc::Status status() const { return status_; }
-
- private:
- void CallOnDone() override {}
- ::grpc::internal::ServerReactor* reactor() override { return reactor_; }
-
- ::grpc::ServerUnaryReactor* const reactor_;
- std::atomic_bool status_set_{false};
- ::grpc::Status status_;
- const std::function<void(::grpc::Status s)> func_;
- };
-
- typename std::aligned_storage<sizeof(Reactor), alignof(Reactor)>::type
- default_reactor_;
- std::atomic_bool default_reactor_used_{false};
- std::unique_ptr<TestServerCallbackUnary> test_unary_;
-};
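
DefaultReactor() above is the cheapest way to implement a callback unary method that only needs to set a response and a status. A hedged sketch of a handler body using it (hypothetical generated method and message types; the returned reactor is library-owned and must never be deleted by the application):

    grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* ctx,
                                   const EchoRequest* request,
                                   EchoResponse* response) override {
      response->set_message(request->message());
      grpc::ServerUnaryReactor* reactor = ctx->DefaultReactor();  // library-owned
      reactor->Finish(grpc::Status::OK);  // immediate Finish; delayed Finish is also allowed
      return reactor;
    }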
-
-/// A ServerContext or CallbackServerContext allows the code implementing a
-/// service handler to:
-///
-/// - Add custom initial and trailing metadata key-value pairs that will
- /// be propagated to the client side.
-/// - Control call settings such as compression and authentication.
-/// - Access metadata coming from the client.
- /// - Get performance metrics (i.e., census).
-///
-/// Context settings are only relevant to the call handler they are supplied to,
-/// that is to say, they aren't sticky across multiple calls. Some of these
-/// settings, such as the compression options, can be made persistent at server
-/// construction time by specifying the appropriate \a ChannelArguments
-/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
-///
-/// \warning ServerContext instances should \em not be reused across rpcs.
-class ServerContext : public ServerContextBase {
- public:
- ServerContext() {} // for async calls
-
- using ServerContextBase::AddInitialMetadata;
- using ServerContextBase::AddTrailingMetadata;
- using ServerContextBase::auth_context;
- using ServerContextBase::c_call;
- using ServerContextBase::census_context;
- using ServerContextBase::client_metadata;
- using ServerContextBase::compression_algorithm;
- using ServerContextBase::compression_level;
- using ServerContextBase::compression_level_set;
- using ServerContextBase::deadline;
- using ServerContextBase::IsCancelled;
- using ServerContextBase::peer;
- using ServerContextBase::raw_deadline;
- using ServerContextBase::set_compression_algorithm;
- using ServerContextBase::set_compression_level;
- using ServerContextBase::SetLoadReportingCosts;
- using ServerContextBase::TryCancel;
-
- // Sync/CQ-based Async ServerContext only
- using ServerContextBase::AsyncNotifyWhenDone;
-
- private:
- // Constructor for internal use by server only
- friend class ::grpc::Server;
- ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
- : ServerContextBase(deadline, arr) {}
-
- // CallbackServerContext only
- using ServerContextBase::DefaultReactor;
- using ServerContextBase::GetRpcAllocatorState;
-
- /// Prevent copying.
- ServerContext(const ServerContext&) = delete;
- ServerContext& operator=(const ServerContext&) = delete;
-};
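
In the synchronous API the same ServerContext facilities are used directly from the handler. A hedged sketch (hypothetical generated service and message types; a binary-valued metadata entry would additionally require a key ending in "-bin", per the grammar documented above):

    grpc::Status Echo(grpc::ServerContext* ctx, const EchoRequest* request,
                      EchoResponse* response) override {
      ctx->AddInitialMetadata("x-handled-by", "echo-impl");   // ASCII key and value
      ctx->AddTrailingMetadata("x-echo-result", "ok");
      ctx->set_compression_level(GRPC_COMPRESS_LEVEL_MED);
      response->set_message(request->message());
      if (ctx->IsCancelled()) {   // safe to call at any time in the sync API
        return grpc::Status(grpc::StatusCode::CANCELLED, "client cancelled");
      }
      return grpc::Status::OK;
    }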
-
-class CallbackServerContext : public ServerContextBase {
- public:
- /// Public constructors are for direct use only by mocking tests. In practice,
- /// these objects will be owned by the library.
- CallbackServerContext() {}
-
- using ServerContextBase::AddInitialMetadata;
- using ServerContextBase::AddTrailingMetadata;
- using ServerContextBase::auth_context;
- using ServerContextBase::c_call;
- using ServerContextBase::census_context;
- using ServerContextBase::client_metadata;
- using ServerContextBase::compression_algorithm;
- using ServerContextBase::compression_level;
- using ServerContextBase::compression_level_set;
- using ServerContextBase::deadline;
- using ServerContextBase::IsCancelled;
- using ServerContextBase::peer;
- using ServerContextBase::raw_deadline;
- using ServerContextBase::set_compression_algorithm;
- using ServerContextBase::set_compression_level;
- using ServerContextBase::SetLoadReportingCosts;
- using ServerContextBase::TryCancel;
-
- // CallbackServerContext only
- using ServerContextBase::DefaultReactor;
- using ServerContextBase::GetRpcAllocatorState;
-
- private:
- // Sync/CQ-based Async ServerContext only
- using ServerContextBase::AsyncNotifyWhenDone;
-
- /// Prevent copying.
- CallbackServerContext(const CallbackServerContext&) = delete;
- CallbackServerContext& operator=(const CallbackServerContext&) = delete;
-};
-
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+#endif
+class GenericCallbackServerContext;
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+} // namespace experimental
+#endif
+namespace internal {
+class Call;
+} // namespace internal
+
+namespace testing {
+class InteropServerContextInspector;
+class ServerContextTestSpouse;
+class DefaultReactorTestPeer;
+} // namespace testing
+
+/// Base class of ServerContext. Experimental until callback API is final.
+class ServerContextBase {
+ public:
+ virtual ~ServerContextBase();
+
+ /// Return the deadline for the server call.
+ std::chrono::system_clock::time_point deadline() const {
+ return ::grpc::Timespec2Timepoint(deadline_);
+ }
+
+ /// Return a \a gpr_timespec representation of the server call's deadline.
+ gpr_timespec raw_deadline() const { return deadline_; }
+
+ /// Add the (\a key, \a value) pair to the initial metadata
+ /// associated with a server call. These are made available at the client side
+ /// by the \a grpc::ClientContext::GetServerInitialMetadata() method.
+ ///
+ /// \warning This method should only be called before sending initial metadata
+ /// to the client (which can happen explicitly, or implicitly when sending a
+ /// response message or status to the client).
+ ///
+ /// \param key The metadata key. If \a value is binary data, it must
+ /// end in "-bin".
+ /// \param value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddInitialMetadata(const TString& key, const TString& value);
+
+ /// Add the (\a key, \a value) pair to the trailing metadata
+ /// associated with a server call. These are made available at the client
+ /// side by the \a grpc::ClientContext::GetServerTrailingMetadata() method.
+ ///
+ /// \warning This method should only be called before sending trailing
+ /// metadata to the client (which happens when the call is finished and a
+ /// status is sent to the client).
+ ///
+ /// \param key The metadata key. If \a value is binary data,
+ /// it must end in "-bin".
+ /// \param value The metadata value. If its value is binary, the key name
+ /// must end in "-bin".
+ ///
+ /// Metadata must conform to the following format:
+ /// Custom-Metadata -> Binary-Header / ASCII-Header
+ /// Binary-Header -> {Header-Name "-bin" } {binary value}
+ /// ASCII-Header -> Header-Name ASCII-Value
+ /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - .
+ /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII
+ void AddTrailingMetadata(const TString& key, const TString& value);
+
+ /// Return whether this RPC failed before the server could provide its status
+ /// back to the client. This could be because of explicit API cancellation
+ /// from the client-side or server-side, because of deadline exceeded, network
+ /// connection reset, HTTP/2 parameter configuration (e.g., max message size,
+ /// max connection age), etc. It does NOT include failure due to a non-OK
+ /// status return from the server application's request handler, including
+ /// Status::CANCELLED.
+ ///
+ /// IsCancelled is always safe to call when using sync or callback API.
+ /// When using async API, it is only safe to call IsCancelled after
+ /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
+ bool IsCancelled() const;
+
+ /// Cancel the Call from the server. This is a best-effort API and
+ /// depending on when it is called, the RPC may still appear successful to
+ /// the client. For example, if TryCancel() is called on a separate thread, it
+ /// might race with the server handler which might return success to the
+ /// client before TryCancel() was even started by the thread.
+ ///
+ /// It is the caller's responsibility to prevent such races and ensure that if
+ /// TryCancel() is called, the server handler must return Status::CANCELLED.
+ /// The only exception is that if the server handler is already returning an
+ /// error status code, it is ok to not return Status::CANCELLED even if
+ /// TryCancel() was called.
+ ///
+ /// For reasons such as the above, it is generally preferred to explicitly
+ /// finish an RPC by returning Status::CANCELLED rather than using TryCancel.
+ ///
+ /// Note that TryCancel() does not change any of the tags that are pending
+ /// on the completion queue. All pending tags will still be delivered
+ /// (though their ok result may reflect the effect of cancellation).
+ void TryCancel() const;
+
+ /// Return a collection of initial metadata key-value pairs sent from the
+ /// client. Note that keys may appear more than
+ /// once (i.e., a \a std::multimap is returned).
+ ///
+ /// It is safe to use this method after initial metadata has been received.
+ /// Calls always begin with the client sending initial metadata, so this is
+ /// safe to access as soon as the call has begun on the server side.
+ ///
+ /// \return A multimap of initial metadata key-value pairs from the client.
+ const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
+ const {
+ return *client_metadata_.map();
+ }
+
+ /// Return the compression level to be used by the server call.
+ grpc_compression_level compression_level() const {
+ return compression_level_;
+ }
+
+ /// Set \a level to be the compression level used for the server call.
+ ///
+ /// \param level The compression level used for the server call.
+ void set_compression_level(grpc_compression_level level) {
+ compression_level_set_ = true;
+ compression_level_ = level;
+ }
+
+ /// Return a bool indicating whether the compression level for this call
+ /// has been set (either implicitly or through a previous call to
+ /// \a set_compression_level).
+ bool compression_level_set() const { return compression_level_set_; }
+
+ /// Return the compression algorithm the server call will request be used.
+ /// Note that the gRPC runtime may decide to ignore this request, for example,
+ /// due to resource constraints, or if the server is aware the client doesn't
+ /// support the requested algorithm.
+ grpc_compression_algorithm compression_algorithm() const {
+ return compression_algorithm_;
+ }
+ /// Set \a algorithm to be the compression algorithm used for the server call.
+ ///
+ /// \param algorithm The compression algorithm used for the server call.
+ void set_compression_algorithm(grpc_compression_algorithm algorithm);
+
+ /// Set the serialized load reporting costs in \a cost_data for the call.
+ void SetLoadReportingCosts(const std::vector<TString>& cost_data);
+
+ /// Return the authentication context for this server call.
+ ///
+ /// \see grpc::AuthContext.
+ std::shared_ptr<const ::grpc::AuthContext> auth_context() const {
+ if (auth_context_.get() == nullptr) {
+ auth_context_ = ::grpc::CreateAuthContext(call_.call);
+ }
+ return auth_context_;
+ }
+
+ /// Return the peer uri in a string.
+ /// WARNING: this value is never authenticated or subject to any security
+ /// related code. It must not be used for any authentication related
+ /// functionality. Instead, use auth_context.
+ TString peer() const;
+
+ /// Get the census context associated with this server call.
+ const struct census_context* census_context() const;
+
+ /// Should be used for framework-level extensions only.
+ /// Applications never need to call this method.
+ grpc_call* c_call() { return call_.call; }
+
+ protected:
+ /// Async only. Has to be called before the rpc starts.
+ /// Returns the tag in completion queue when the rpc finishes.
+ /// IsCancelled() can then be called to check whether the rpc was cancelled.
+ /// TODO(vjpai): Fix this so that the tag is returned even if the call never
+ /// starts (https://github.com/grpc/grpc/issues/10136).
+ void AsyncNotifyWhenDone(void* tag) {
+ has_notify_when_done_tag_ = true;
+ async_notify_when_done_tag_ = tag;
+ }
+
+ /// NOTE: This is an API for advanced users who need custom allocators.
+ /// Get and maybe mutate the allocator state associated with the current RPC.
+ /// Currently only applicable for callback unary RPC methods.
+ /// WARNING: This is experimental API and could be changed or removed.
+ ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() {
+ return message_allocator_state_;
+ }
+
+ /// Get a library-owned default unary reactor for use in minimal reaction
+ /// cases. This supports typical unary RPC usage of providing a response and
+ /// status. It supports immediate Finish (finish from within the method
+ /// handler) or delayed Finish (finish called after the method handler
+ /// invocation). It does not support reacting to cancellation or completion,
+ /// or early sending of initial metadata. Since this is a library-owned
+ /// reactor, it should not be delete'd or freed in any way. This is more
+ /// efficient than creating a user-owned reactor both because of avoiding an
+ /// allocation and because its minimal reactions are optimized using a core
+ /// surface flag that allows their reactions to run inline without any
+ /// thread-hop.
+ ///
+ /// This method should not be called more than once or called after return
+ /// from the method handler.
+ ///
+ /// WARNING: This is experimental API and could be changed or removed.
+ ::grpc::ServerUnaryReactor* DefaultReactor() {
+ // Short-circuit the case where a default reactor was already set up by
+ // the TestPeer.
+ if (test_unary_ != nullptr) {
+ return reinterpret_cast<Reactor*>(&default_reactor_);
+ }
+ new (&default_reactor_) Reactor;
+#ifndef NDEBUG
+ bool old = false;
+ assert(default_reactor_used_.compare_exchange_strong(
+ old, true, std::memory_order_relaxed));
+#else
+ default_reactor_used_.store(true, std::memory_order_relaxed);
+#endif
+ return reinterpret_cast<Reactor*>(&default_reactor_);
+ }
+
+ /// Constructors for use by derived classes
+ ServerContextBase();
+ ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr);
+
+ private:
+ friend class ::grpc::testing::InteropServerContextInspector;
+ friend class ::grpc::testing::ServerContextTestSpouse;
+ friend class ::grpc::testing::DefaultReactorTestPeer;
+ friend class ::grpc::ServerInterface;
+ friend class ::grpc::Server;
+ template <class W, class R>
+ friend class ::grpc::ServerAsyncReader;
+ template <class W>
+ friend class ::grpc::ServerAsyncWriter;
+ template <class W>
+ friend class ::grpc::ServerAsyncResponseWriter;
+ template <class W, class R>
+ friend class ::grpc::ServerAsyncReaderWriter;
+ template <class R>
+ friend class ::grpc::ServerReader;
+ template <class W>
+ friend class ::grpc::ServerWriter;
+ template <class W, class R>
+ friend class ::grpc::internal::ServerReaderWriterBody;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::RpcMethodHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ClientStreamingHandler;
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class ::grpc::internal::ServerStreamingHandler;
+ template <class Streamer, bool WriteNeeded>
+ friend class ::grpc::internal::TemplatedBidiStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackUnaryHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackClientStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackServerStreamingHandler;
+ template <class RequestType, class ResponseType>
+ friend class ::grpc::internal::CallbackBidiHandler;
+ template <::grpc::StatusCode code>
+ friend class ::grpc::internal::ErrorMethodHandler;
+ template <class Base>
+ friend class ::grpc::internal::FinishOnlyReactor;
+ friend class ::grpc::ClientContext;
+ friend class ::grpc::GenericServerContext;
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ friend class ::grpc::GenericCallbackServerContext;
+#else
+ friend class ::grpc::experimental::GenericCallbackServerContext;
+#endif
+
+ /// Prevent copying.
+ ServerContextBase(const ServerContextBase&);
+ ServerContextBase& operator=(const ServerContextBase&);
+
+ class CompletionOp;
+
+ void BeginCompletionOp(
+ ::grpc::internal::Call* call, std::function<void(bool)> callback,
+ ::grpc::internal::ServerCallbackCall* callback_controller);
+ /// Return the tag queued by BeginCompletionOp()
+ ::grpc::internal::CompletionQueueTag* GetCompletionOpTag();
+
+ void set_call(grpc_call* call) { call_.call = call; }
+
+ void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr);
+
+ uint32_t initial_metadata_flags() const { return 0; }
+
+ ::grpc::experimental::ServerRpcInfo* set_server_rpc_info(
+ const char* method, ::grpc::internal::RpcMethod::RpcType type,
+ const std::vector<std::unique_ptr<
+ ::grpc::experimental::ServerInterceptorFactoryInterface>>& creators) {
+ if (creators.size() != 0) {
+ rpc_info_ = new ::grpc::experimental::ServerRpcInfo(this, method, type);
+ rpc_info_->RegisterInterceptors(creators);
+ }
+ return rpc_info_;
+ }
+
+ void set_message_allocator_state(
+ ::grpc::experimental::RpcAllocatorState* allocator_state) {
+ message_allocator_state_ = allocator_state;
+ }
+
+ struct CallWrapper {
+ ~CallWrapper();
+
+ grpc_call* call = nullptr;
+ };
+
+ // NOTE: call_ must be the first data member of this object so that its
+ // destructor is the last to be called, since its destructor may unref
+ // the underlying core call which holds the arena that may be used to
+ // hold this object.
+ CallWrapper call_;
+
+ CompletionOp* completion_op_ = nullptr;
+ bool has_notify_when_done_tag_ = false;
+ void* async_notify_when_done_tag_ = nullptr;
+ ::grpc::internal::CallbackWithSuccessTag completion_tag_;
+
+ gpr_timespec deadline_;
+ ::grpc::CompletionQueue* cq_ = nullptr;
+ bool sent_initial_metadata_ = false;
+ mutable std::shared_ptr<const ::grpc::AuthContext> auth_context_;
+ mutable ::grpc::internal::MetadataMap client_metadata_;
+ std::multimap<TString, TString> initial_metadata_;
+ std::multimap<TString, TString> trailing_metadata_;
+
+ bool compression_level_set_ = false;
+ grpc_compression_level compression_level_;
+ grpc_compression_algorithm compression_algorithm_;
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage>
+ pending_ops_;
+ bool has_pending_ops_ = false;
+
+ ::grpc::experimental::ServerRpcInfo* rpc_info_ = nullptr;
+ ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr;
+
+ class Reactor : public ::grpc::ServerUnaryReactor {
+ public:
+ void OnCancel() override {}
+ void OnDone() override {}
+ // Override InternalInlineable for this class since its reactions are
+ // trivial and thus do not need to be run from the executor (triggering a
+ // thread hop). This should only be used by internal reactors (thus the
+ // name) and not by user application code.
+ bool InternalInlineable() override { return true; }
+ };
+
+ void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
+ test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
+ }
+ bool test_status_set() const {
+ return (test_unary_ != nullptr) && test_unary_->status_set();
+ }
+ ::grpc::Status test_status() const { return test_unary_->status(); }
+
+ class TestServerCallbackUnary : public ::grpc::ServerCallbackUnary {
+ public:
+ TestServerCallbackUnary(ServerContextBase* ctx,
+ std::function<void(::grpc::Status)> func)
+ : reactor_(ctx->DefaultReactor()), func_(std::move(func)) {
+ this->BindReactor(reactor_);
+ }
+ void Finish(::grpc::Status s) override {
+ status_ = s;
+ func_(std::move(s));
+ status_set_.store(true, std::memory_order_release);
+ }
+ void SendInitialMetadata() override {}
+
+ bool status_set() const {
+ return status_set_.load(std::memory_order_acquire);
+ }
+ ::grpc::Status status() const { return status_; }
+
+ private:
+ void CallOnDone() override {}
+ ::grpc::internal::ServerReactor* reactor() override { return reactor_; }
+
+ ::grpc::ServerUnaryReactor* const reactor_;
+ std::atomic_bool status_set_{false};
+ ::grpc::Status status_;
+ const std::function<void(::grpc::Status s)> func_;
+ };
+
+ typename std::aligned_storage<sizeof(Reactor), alignof(Reactor)>::type
+ default_reactor_;
+ std::atomic_bool default_reactor_used_{false};
+ std::unique_ptr<TestServerCallbackUnary> test_unary_;
+};
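Editor's sketch (not part of the diff): how the AddInitialMetadata/AddTrailingMetadata rules documented above are typically exercised from a synchronous handler. The EchoRequest/EchoResponse types and the Echo method are hypothetical placeholders for generated code; only the ServerContext calls come from this header.

grpc::Status Echo(grpc::ServerContext* ctx, const EchoRequest* req,
                  EchoResponse* resp) {
  // ASCII-Header: lower-case key, printable value; sent with initial metadata.
  ctx->AddInitialMetadata("x-request-id", "42");
  // Binary-Header: the key must end in "-bin"; the value may hold raw bytes.
  ctx->AddTrailingMetadata("x-trace-bin", TString("\x01\x02", 2));
  resp->set_message(req->message());
  return grpc::Status::OK;
}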
+
+/// A ServerContext or CallbackServerContext allows the code implementing a
+/// service handler to:
+///
+/// - Add custom initial and trailing metadata key-value pairs that will
+/// be propagated to the client side.
+/// - Control call settings such as compression and authentication.
+/// - Access metadata coming from the client.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call handler they are supplied to,
+/// that is to say, they aren't sticky across multiple calls. Some of these
+/// settings, such as the compression options, can be made persistent at server
+/// construction time by specifying the appropriate \a ChannelArguments
+/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
+///
+/// \warning ServerContext instances should \em not be reused across rpcs.
+class ServerContext : public ServerContextBase {
+ public:
+ ServerContext() {} // for async calls
+
+ using ServerContextBase::AddInitialMetadata;
+ using ServerContextBase::AddTrailingMetadata;
+ using ServerContextBase::auth_context;
+ using ServerContextBase::c_call;
+ using ServerContextBase::census_context;
+ using ServerContextBase::client_metadata;
+ using ServerContextBase::compression_algorithm;
+ using ServerContextBase::compression_level;
+ using ServerContextBase::compression_level_set;
+ using ServerContextBase::deadline;
+ using ServerContextBase::IsCancelled;
+ using ServerContextBase::peer;
+ using ServerContextBase::raw_deadline;
+ using ServerContextBase::set_compression_algorithm;
+ using ServerContextBase::set_compression_level;
+ using ServerContextBase::SetLoadReportingCosts;
+ using ServerContextBase::TryCancel;
+
+ // Sync/CQ-based Async ServerContext only
+ using ServerContextBase::AsyncNotifyWhenDone;
+
+ private:
+ // Constructor for internal use by server only
+ friend class ::grpc::Server;
+ ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
+ : ServerContextBase(deadline, arr) {}
+
+ // CallbackServerContext only
+ using ServerContextBase::DefaultReactor;
+ using ServerContextBase::GetRpcAllocatorState;
+
+ /// Prevent copying.
+ ServerContext(const ServerContext&) = delete;
+ ServerContext& operator=(const ServerContext&) = delete;
+};
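Editor's sketch (not part of the diff): the CQ-based async pattern behind the AsyncNotifyWhenDone()/IsCancelled() notes above. The generated AsyncService, RequestEcho, message types and the cq pointer (a std::unique_ptr<grpc::ServerCompletionQueue>) are hypothetical; the point is the ordering requirement: register the done tag before the RPC starts, and query IsCancelled() only after that tag has been delivered.

grpc::ServerContext ctx;
EchoRequest request;
grpc::ServerAsyncResponseWriter<EchoResponse> responder(&ctx);

void* done_tag = reinterpret_cast<void*>(1);
ctx.AsyncNotifyWhenDone(done_tag);  // must be called before the RPC starts
async_service.RequestEcho(&ctx, &request, &responder, cq.get(), cq.get(),
                          reinterpret_cast<void*>(2));

void* tag; bool ok;
while (cq->Next(&tag, &ok)) {
  if (tag == done_tag) {
    // Only now is IsCancelled() meaningful for the async API.
    bool cancelled = ctx.IsCancelled();
    (void)cancelled;
    break;
  }
}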
+
+class CallbackServerContext : public ServerContextBase {
+ public:
+ /// Public constructors are for direct use only by mocking tests. In practice,
+ /// these objects will be owned by the library.
+ CallbackServerContext() {}
+
+ using ServerContextBase::AddInitialMetadata;
+ using ServerContextBase::AddTrailingMetadata;
+ using ServerContextBase::auth_context;
+ using ServerContextBase::c_call;
+ using ServerContextBase::census_context;
+ using ServerContextBase::client_metadata;
+ using ServerContextBase::compression_algorithm;
+ using ServerContextBase::compression_level;
+ using ServerContextBase::compression_level_set;
+ using ServerContextBase::deadline;
+ using ServerContextBase::IsCancelled;
+ using ServerContextBase::peer;
+ using ServerContextBase::raw_deadline;
+ using ServerContextBase::set_compression_algorithm;
+ using ServerContextBase::set_compression_level;
+ using ServerContextBase::SetLoadReportingCosts;
+ using ServerContextBase::TryCancel;
+
+ // CallbackServerContext only
+ using ServerContextBase::DefaultReactor;
+ using ServerContextBase::GetRpcAllocatorState;
+
+ private:
+ // Sync/CQ-based Async ServerContext only
+ using ServerContextBase::AsyncNotifyWhenDone;
+
+ /// Prevent copying.
+ CallbackServerContext(const CallbackServerContext&) = delete;
+ CallbackServerContext& operator=(const CallbackServerContext&) = delete;
+};
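Editor's sketch (not part of the diff): the minimal-reaction callback handler that DefaultReactor() is documented for above. The generated callback service base class and the Echo signature are hypothetical; the reactor is library-owned and must not be deleted by the handler.

grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* ctx,
                               const EchoRequest* req, EchoResponse* resp) {
  resp->set_message(req->message());
  grpc::ServerUnaryReactor* reactor = ctx->DefaultReactor();
  reactor->Finish(grpc::Status::OK);  // immediate Finish from within the handler
  return reactor;
}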
+
} // namespace grpc
-static_assert(
- std::is_base_of<::grpc::ServerContextBase, ::grpc::ServerContext>::value,
- "improper base class");
-static_assert(std::is_base_of<::grpc::ServerContextBase,
- ::grpc::CallbackServerContext>::value,
- "improper base class");
-static_assert(sizeof(::grpc::ServerContextBase) ==
- sizeof(::grpc::ServerContext),
- "wrong size");
-static_assert(sizeof(::grpc::ServerContextBase) ==
- sizeof(::grpc::CallbackServerContext),
- "wrong size");
-
+static_assert(
+ std::is_base_of<::grpc::ServerContextBase, ::grpc::ServerContext>::value,
+ "improper base class");
+static_assert(std::is_base_of<::grpc::ServerContextBase,
+ ::grpc::CallbackServerContext>::value,
+ "improper base class");
+static_assert(sizeof(::grpc::ServerContextBase) ==
+ sizeof(::grpc::ServerContext),
+ "wrong size");
+static_assert(sizeof(::grpc::ServerContextBase) ==
+ sizeof(::grpc::CallbackServerContext),
+ "wrong size");
+
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
index 7598e72a40..ac0f958959 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/rpc_method.h>
#include <grpcpp/impl/codegen/string_ref.h>
-namespace grpc {
+namespace grpc {
class ServerContextBase;
namespace internal {
class InterceptorBatchMethodsImpl;
@@ -76,7 +76,7 @@ class ServerRpcInfo {
/// Return a pointer to the underlying ServerContext structure associated
/// with the RPC to support features that apply to it
- ServerContextBase* server_context() { return ctx_; }
+ ServerContextBase* server_context() { return ctx_; }
private:
static_assert(Type::UNARY ==
@@ -92,7 +92,7 @@ class ServerRpcInfo {
static_cast<Type>(internal::RpcMethod::BIDI_STREAMING),
"violated expectation about Type enum");
- ServerRpcInfo(ServerContextBase* ctx, const char* method,
+ ServerRpcInfo(ServerContextBase* ctx, const char* method,
internal::RpcMethod::RpcType type)
: ctx_(ctx), method_(method), type_(static_cast<Type>(type)) {}
@@ -123,14 +123,14 @@ class ServerRpcInfo {
}
}
- ServerContextBase* ctx_ = nullptr;
+ ServerContextBase* ctx_ = nullptr;
const char* method_ = nullptr;
const Type type_;
std::atomic<intptr_t> ref_{1};
std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_;
friend class internal::InterceptorBatchMethodsImpl;
- friend class grpc::ServerContextBase;
+ friend class grpc::ServerContextBase;
};
} // namespace experimental
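Editor's sketch (not part of the diff): how ServerRpcInfo reaches application code. A factory implementing ServerInterceptorFactoryInterface is handed the ServerRpcInfo for each RPC and returns a per-RPC interceptor; the logging behaviour here is illustrative only.

class LoggingInterceptor : public grpc::experimental::Interceptor {
 public:
  explicit LoggingInterceptor(grpc::experimental::ServerRpcInfo* info)
      : info_(info) {}
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::
                POST_RECV_INITIAL_METADATA)) {
      // info_->method() names the RPC being served; log or trace it here.
    }
    methods->Proceed();  // always continue the interception batch
  }

 private:
  grpc::experimental::ServerRpcInfo* info_;
};

class LoggingInterceptorFactory
    : public grpc::experimental::ServerInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateServerInterceptor(
      grpc::experimental::ServerRpcInfo* info) override {
    return new LoggingInterceptor(info);
  }
};
// Factories are registered via ServerBuilder::experimental().SetInterceptorCreators(...).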
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
index d97b725025..c04c1b217c 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h
@@ -19,8 +19,8 @@
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H
#define GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H
-#include <grpc/impl/codegen/port_platform.h>
-
+#include <grpc/impl/codegen/port_platform.h>
+
#include <grpc/impl/codegen/grpc_types.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/call.h>
@@ -29,14 +29,14 @@
#include <grpcpp/impl/codegen/core_codegen_interface.h>
#include <grpcpp/impl/codegen/interceptor_common.h>
#include <grpcpp/impl/codegen/rpc_service_method.h>
-#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/server_context.h>
-namespace grpc {
+namespace grpc {
-class AsyncGenericService;
+class AsyncGenericService;
class Channel;
class CompletionQueue;
-class GenericServerContext;
+class GenericServerContext;
class ServerCompletionQueue;
class ServerCredentials;
class Service;
@@ -50,15 +50,15 @@ namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
namespace experimental {
-#endif
+#endif
class CallbackGenericService;
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-} // namespace experimental
-#endif
-
-namespace experimental {
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+} // namespace experimental
+#endif
+
+namespace experimental {
class ServerInterceptorFactoryInterface;
} // namespace experimental
@@ -124,20 +124,20 @@ class ServerInterface : public internal::CallHook {
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
- virtual bool RegisterService(const TString* host, Service* service) = 0;
+ virtual bool RegisterService(const TString* host, Service* service) = 0;
/// Register a generic service. This call does not take ownership of the
/// service. The service must exist for the lifetime of the Server instance.
virtual void RegisterAsyncGenericService(AsyncGenericService* service) = 0;
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- /// Register a callback generic service. This call does not take ownership of
- /// the service. The service must exist for the lifetime of the Server
- /// instance. May not be abstract since this is a post-1.0 API addition.
-
- virtual void RegisterCallbackGenericService(CallbackGenericService*
- /*service*/) {}
-#else
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ /// Register a callback generic service. This call does not take ownership of
+ /// the service. The service must exist for the lifetime of the Server
+ /// instance. May not be abstract since this is a post-1.0 API addition.
+
+ virtual void RegisterCallbackGenericService(CallbackGenericService*
+ /*service*/) {}
+#else
/// NOTE: class experimental_registration_interface is not part of the public
/// API of this class
/// TODO(vjpai): Move these contents to public API when no longer experimental
@@ -156,7 +156,7 @@ class ServerInterface : public internal::CallHook {
virtual experimental_registration_interface* experimental_registration() {
return nullptr;
}
-#endif
+#endif
/// Tries to bind \a server to the given \a addr.
///
@@ -169,8 +169,8 @@ class ServerInterface : public internal::CallHook {
/// \return bound port number on success, 0 on failure.
///
/// \warning It's an error to call this method on an already started server.
- virtual int AddListeningPort(const TString& addr,
- ServerCredentials* creds) = 0;
+ virtual int AddListeningPort(const TString& addr,
+ ServerCredentials* creds) = 0;
/// Start the server.
///
@@ -178,7 +178,7 @@ class ServerInterface : public internal::CallHook {
/// caller is required to keep all completion queues live until the server is
/// destroyed.
/// \param num_cqs How many completion queues does \a cqs hold.
- virtual void Start(::grpc::ServerCompletionQueue** cqs, size_t num_cqs) = 0;
+ virtual void Start(::grpc::ServerCompletionQueue** cqs, size_t num_cqs) = 0;
virtual void ShutdownInternal(gpr_timespec deadline) = 0;
@@ -191,11 +191,11 @@ class ServerInterface : public internal::CallHook {
class BaseAsyncRequest : public internal::CompletionQueueTag {
public:
- BaseAsyncRequest(ServerInterface* server, ::grpc::ServerContext* context,
+ BaseAsyncRequest(ServerInterface* server, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag,
- bool delete_on_finalize);
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag,
+ bool delete_on_finalize);
virtual ~BaseAsyncRequest();
bool FinalizeResult(void** tag, bool* status) override;
@@ -205,10 +205,10 @@ class ServerInterface : public internal::CallHook {
protected:
ServerInterface* const server_;
- ::grpc::ServerContext* const context_;
+ ::grpc::ServerContext* const context_;
internal::ServerAsyncStreamingInterface* const stream_;
- ::grpc::CompletionQueue* const call_cq_;
- ::grpc::ServerCompletionQueue* const notification_cq_;
+ ::grpc::CompletionQueue* const call_cq_;
+ ::grpc::ServerCompletionQueue* const notification_cq_;
void* const tag_;
const bool delete_on_finalize_;
grpc_call* call_;
@@ -221,10 +221,10 @@ class ServerInterface : public internal::CallHook {
class RegisteredAsyncRequest : public BaseAsyncRequest {
public:
RegisteredAsyncRequest(ServerInterface* server,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, const char* name,
internal::RpcMethod::RpcType type);
@@ -242,7 +242,7 @@ class ServerInterface : public internal::CallHook {
protected:
void IssueRequest(void* registered_method, grpc_byte_buffer** payload,
- ::grpc::ServerCompletionQueue* notification_cq);
+ ::grpc::ServerCompletionQueue* notification_cq);
const char* name_;
const internal::RpcMethod::RpcType type_;
};
@@ -251,10 +251,10 @@ class ServerInterface : public internal::CallHook {
public:
NoPayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
ServerInterface* server,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
@@ -269,10 +269,10 @@ class ServerInterface : public internal::CallHook {
class PayloadAsyncRequest final : public RegisteredAsyncRequest {
public:
PayloadAsyncRequest(internal::RpcServiceMethod* registered_method,
- ServerInterface* server, ::grpc::ServerContext* context,
+ ServerInterface* server, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, Message* request)
: RegisteredAsyncRequest(
server, context, stream, call_cq, notification_cq, tag,
@@ -327,8 +327,8 @@ class ServerInterface : public internal::CallHook {
public:
GenericAsyncRequest(ServerInterface* server, GenericServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, bool delete_on_finalize);
bool FinalizeResult(void** tag, bool* status) override;
@@ -339,10 +339,10 @@ class ServerInterface : public internal::CallHook {
template <class Message>
void RequestAsyncCall(internal::RpcServiceMethod* method,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag, Message* message) {
GPR_CODEGEN_ASSERT(method);
new PayloadAsyncRequest<Message>(method, this, context, stream, call_cq,
@@ -350,21 +350,21 @@ class ServerInterface : public internal::CallHook {
}
void RequestAsyncCall(internal::RpcServiceMethod* method,
- ::grpc::ServerContext* context,
+ ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag) {
GPR_CODEGEN_ASSERT(method);
new NoPayloadAsyncRequest(method, this, context, stream, call_cq,
notification_cq, tag);
}
- void RequestAsyncGenericCall(GenericServerContext* context,
- internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
- void* tag) {
+ void RequestAsyncGenericCall(GenericServerContext* context,
+ internal::ServerAsyncStreamingInterface* stream,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
+ void* tag) {
new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
tag, true);
}
@@ -389,7 +389,7 @@ class ServerInterface : public internal::CallHook {
// Returns nullptr (rather than being pure) since this is a post-1.0 method
// and adding a new pure method to an interface would be a breaking change
// (even though this is private and non-API)
- virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
+ virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; }
};
} // namespace grpc
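Editor's sketch (not part of the diff): the public completion-queue flow that the BaseAsyncRequest/RegisteredAsyncRequest plumbing above ultimately serves. The generated AsyncService, RequestEcho and message types are hypothetical.

grpc::ServerBuilder builder;
builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
builder.RegisterService(&async_service);
std::unique_ptr<grpc::ServerCompletionQueue> cq = builder.AddCompletionQueue();
std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

grpc::ServerContext ctx;
EchoRequest request;
grpc::ServerAsyncResponseWriter<EchoResponse> responder(&ctx);
async_service.RequestEcho(&ctx, &request, &responder, cq.get(), cq.get(),
                          reinterpret_cast<void*>(1));

void* tag; bool ok;
cq->Next(&tag, &ok);                                  // tag 1: a call arrived
EchoResponse response;
responder.Finish(response, grpc::Status::OK, reinterpret_cast<void*>(2));
cq->Next(&tag, &ok);                                  // tag 2: Finish completed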
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
index 30be904a3c..dab84f5ed3 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h
@@ -26,7 +26,7 @@
#include <grpcpp/impl/codegen/server_interface.h>
#include <grpcpp/impl/codegen/status.h>
-namespace grpc {
+namespace grpc {
class CompletionQueue;
class ServerContext;
@@ -105,15 +105,15 @@ class Service {
explicit experimental_type(Service* service) : service_(service) {}
void MarkMethodCallback(int index, internal::MethodHandler* handler) {
- service_->MarkMethodCallbackInternal(index, handler);
+ service_->MarkMethodCallbackInternal(index, handler);
}
void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
- service_->MarkMethodRawCallbackInternal(index, handler);
+ service_->MarkMethodRawCallbackInternal(index, handler);
}
internal::MethodHandler* GetHandler(int index) {
- return service_->GetHandlerInternal(index);
+ return service_->GetHandlerInternal(index);
}
private:
@@ -123,11 +123,11 @@ class Service {
experimental_type experimental() { return experimental_type(this); }
template <class Message>
- void RequestAsyncUnary(int index, ::grpc::ServerContext* context,
+ void RequestAsyncUnary(int index, ::grpc::ServerContext* context,
Message* request,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq,
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq,
void* tag) {
// Typecast the index to size_t for indexing into a vector
// while preserving the API that existed before a compiler
@@ -137,29 +137,29 @@ class Service {
notification_cq, tag, request);
}
void RequestAsyncClientStreaming(
- int index, ::grpc::ServerContext* context,
+ int index, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag);
}
template <class Message>
void RequestAsyncServerStreaming(
- int index, ::grpc::ServerContext* context, Message* request,
+ int index, ::grpc::ServerContext* context, Message* request,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag, request);
}
void RequestAsyncBidiStreaming(
- int index, ::grpc::ServerContext* context,
+ int index, ::grpc::ServerContext* context,
internal::ServerAsyncStreamingInterface* stream,
- ::grpc::CompletionQueue* call_cq,
- ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::CompletionQueue* call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
size_t idx = static_cast<size_t>(index);
server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq,
notification_cq, tag);
@@ -216,55 +216,55 @@ class Service {
methods_[idx]->SetMethodType(internal::RpcMethod::BIDI_STREAMING);
}
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- void MarkMethodCallback(int index, internal::MethodHandler* handler) {
- MarkMethodCallbackInternal(index, handler);
- }
-
- void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
- MarkMethodRawCallbackInternal(index, handler);
- }
-
- internal::MethodHandler* GetHandler(int index) {
- return GetHandlerInternal(index);
- }
-#endif
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ void MarkMethodCallback(int index, internal::MethodHandler* handler) {
+ MarkMethodCallbackInternal(index, handler);
+ }
+
+ void MarkMethodRawCallback(int index, internal::MethodHandler* handler) {
+ MarkMethodRawCallbackInternal(index, handler);
+ }
+
+ internal::MethodHandler* GetHandler(int index) {
+ return GetHandlerInternal(index);
+ }
+#endif
private:
- // TODO(vjpai): migrate the Internal functions to mainline functions once
- // callback API is fully de-experimental
- void MarkMethodCallbackInternal(int index, internal::MethodHandler* handler) {
- // This does not have to be a hard error, however no one has approached us
- // with a use case yet. Please file an issue if you believe you have one.
- size_t idx = static_cast<size_t>(index);
- GPR_CODEGEN_ASSERT(
- methods_[idx].get() != nullptr &&
- "Cannot mark the method as 'callback' because it has already been "
- "marked as 'generic'.");
- methods_[idx]->SetHandler(handler);
- methods_[idx]->SetServerApiType(
- internal::RpcServiceMethod::ApiType::CALL_BACK);
- }
-
- void MarkMethodRawCallbackInternal(int index,
- internal::MethodHandler* handler) {
- // This does not have to be a hard error, however no one has approached us
- // with a use case yet. Please file an issue if you believe you have one.
- size_t idx = static_cast<size_t>(index);
- GPR_CODEGEN_ASSERT(
- methods_[idx].get() != nullptr &&
- "Cannot mark the method as 'raw callback' because it has already "
- "been marked as 'generic'.");
- methods_[idx]->SetHandler(handler);
- methods_[idx]->SetServerApiType(
- internal::RpcServiceMethod::ApiType::RAW_CALL_BACK);
- }
-
- internal::MethodHandler* GetHandlerInternal(int index) {
- size_t idx = static_cast<size_t>(index);
- return methods_[idx]->handler();
- }
-
- friend class Server;
+ // TODO(vjpai): migrate the Internal functions to mainline functions once
+ // callback API is fully de-experimental
+ void MarkMethodCallbackInternal(int index, internal::MethodHandler* handler) {
+ // This does not have to be a hard error, however no one has approached us
+ // with a use case yet. Please file an issue if you believe you have one.
+ size_t idx = static_cast<size_t>(index);
+ GPR_CODEGEN_ASSERT(
+ methods_[idx].get() != nullptr &&
+ "Cannot mark the method as 'callback' because it has already been "
+ "marked as 'generic'.");
+ methods_[idx]->SetHandler(handler);
+ methods_[idx]->SetServerApiType(
+ internal::RpcServiceMethod::ApiType::CALL_BACK);
+ }
+
+ void MarkMethodRawCallbackInternal(int index,
+ internal::MethodHandler* handler) {
+ // This does not have to be a hard error, however no one has approached us
+ // with a use case yet. Please file an issue if you believe you have one.
+ size_t idx = static_cast<size_t>(index);
+ GPR_CODEGEN_ASSERT(
+ methods_[idx].get() != nullptr &&
+ "Cannot mark the method as 'raw callback' because it has already "
+ "been marked as 'generic'.");
+ methods_[idx]->SetHandler(handler);
+ methods_[idx]->SetServerApiType(
+ internal::RpcServiceMethod::ApiType::RAW_CALL_BACK);
+ }
+
+ internal::MethodHandler* GetHandlerInternal(int index) {
+ size_t idx = static_cast<size_t>(index);
+ return methods_[idx]->handler();
+ }
+
+ friend class Server;
friend class ServerInterface;
ServerInterface* server_;
std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_;
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
index b1a24dcef8..603a500b58 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h
@@ -58,7 +58,7 @@ class Slice final {
reinterpret_cast<const char*>(buf), len)) {}
/// Construct a slice from a copied string
- Slice(const TString& str)
+ Slice(const TString& str)
: slice_(g_core_codegen_interface->grpc_slice_from_copied_buffer(
str.c_str(), str.length())) {}
@@ -123,17 +123,17 @@ inline grpc::string_ref StringRefFromSlice(const grpc_slice* slice) {
GRPC_SLICE_LENGTH(*slice));
}
-inline TString StringFromCopiedSlice(grpc_slice slice) {
- return TString(reinterpret_cast<char*>(GRPC_SLICE_START_PTR(slice)),
- GRPC_SLICE_LENGTH(slice));
+inline TString StringFromCopiedSlice(grpc_slice slice) {
+ return TString(reinterpret_cast<char*>(GRPC_SLICE_START_PTR(slice)),
+ GRPC_SLICE_LENGTH(slice));
}
-inline grpc_slice SliceReferencingString(const TString& str) {
+inline grpc_slice SliceReferencingString(const TString& str) {
return g_core_codegen_interface->grpc_slice_from_static_buffer(str.data(),
str.length());
}
-inline grpc_slice SliceFromCopiedString(const TString& str) {
+inline grpc_slice SliceFromCopiedString(const TString& str) {
return g_core_codegen_interface->grpc_slice_from_copied_buffer(str.data(),
str.length());
}
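Editor's sketch (not part of the diff): the copying Slice constructor shown above in action, round-tripping the vendored TString.

TString payload = "hello";
grpc::Slice slice(payload);  // copies the bytes into an owned grpc_slice
TString back(reinterpret_cast<const char*>(slice.begin()), slice.size());
// back == payload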
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
index a5ad6f32fe..a9c689f731 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h
@@ -88,14 +88,14 @@ class Status {
/// Construct an instance with associated \a code and \a error_message.
/// It is an error to construct an OK status with non-empty \a error_message.
- Status(StatusCode code, const TString& error_message)
+ Status(StatusCode code, const TString& error_message)
: code_(code), error_message_(error_message) {}
/// Construct an instance with \a code, \a error_message and
/// \a error_details. It is an error to construct an OK status with non-empty
/// \a error_message and/or \a error_details.
- Status(StatusCode code, const TString& error_message,
- const TString& error_details)
+ Status(StatusCode code, const TString& error_message,
+ const TString& error_details)
: code_(code),
error_message_(error_message),
binary_error_details_(error_details) {}
@@ -109,10 +109,10 @@ class Status {
/// Return the instance's error code.
StatusCode error_code() const { return code_; }
/// Return the instance's error message.
- TString error_message() const { return error_message_; }
+ TString error_message() const { return error_message_; }
/// Return the (binary) error details.
// Usually it contains a serialized google.rpc.Status proto.
- TString error_details() const { return binary_error_details_; }
+ TString error_details() const { return binary_error_details_; }
/// Is the status OK?
bool ok() const { return code_ == StatusCode::OK; }
@@ -124,8 +124,8 @@ class Status {
private:
StatusCode code_;
- TString error_message_;
- TString binary_error_details_;
+ TString error_message_;
+ TString binary_error_details_;
};
} // namespace grpc
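Editor's sketch (not part of the diff): typical use of the Status accessors shown above.

grpc::Status bad(grpc::StatusCode::INVALID_ARGUMENT, "name must not be empty");
if (!bad.ok()) {
  // bad.error_code()    == grpc::StatusCode::INVALID_ARGUMENT
  // bad.error_message() == "name must not be empty"
}
grpc::Status ok_status = grpc::Status::OK;  // canonical pre-built OK status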
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
index c5dcd31c1d..a099a9d76a 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h
@@ -28,8 +28,8 @@
#include <grpcpp/impl/codegen/config.h>
-#include <util/stream/output.h>
-
+#include <util/stream/output.h>
+
namespace grpc {
/// This class is a non owning reference to a string.
@@ -61,7 +61,7 @@ class string_ref {
string_ref(const char* s) : data_(s), length_(strlen(s)) {}
string_ref(const char* s, size_t l) : data_(s), length_(l) {}
- string_ref(const TString& s) : data_(s.data()), length_(s.length()) {}
+ string_ref(const TString& s) : data_(s.data()), length_(s.length()) {}
/// iterators
const_iterator begin() const { return data_; }
@@ -139,9 +139,9 @@ inline bool operator<=(string_ref x, string_ref y) { return x.compare(y) <= 0; }
inline bool operator>(string_ref x, string_ref y) { return x.compare(y) > 0; }
inline bool operator>=(string_ref x, string_ref y) { return x.compare(y) >= 0; }
-inline IOutputStream& operator<<(IOutputStream& out, const string_ref& string) {
- TString t(string.begin(), string.end());
- return out << t;
+inline IOutputStream& operator<<(IOutputStream& out, const string_ref& string) {
+ TString t(string.begin(), string.end());
+ return out << t;
}
} // namespace grpc
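Editor's sketch (not part of the diff): string_ref as it typically surfaces to handlers, e.g. iterating the client_metadata() multimap from server_context.h earlier in this diff. The ctx pointer is hypothetical, and Cout/Endl are assumed to come from the same util/stream library whose operator<< is defined above.

for (const auto& kv : ctx->client_metadata()) {
  grpc::string_ref key = kv.first;
  grpc::string_ref value = kv.second;
  TString copy(value.data(), value.length());  // string_ref does not own memory
  Cout << key << ": " << copy << Endl;
}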
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
index 408f42f280..be124aaeb2 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2019 gRPC authors.
+ * Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,926 +18,926 @@
#ifndef GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
#define GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
-#include <grpcpp/impl/codegen/call.h>
-#include <grpcpp/impl/codegen/channel_interface.h>
-#include <grpcpp/impl/codegen/client_context.h>
-#include <grpcpp/impl/codegen/completion_queue.h>
-#include <grpcpp/impl/codegen/core_codegen_interface.h>
-#include <grpcpp/impl/codegen/server_context.h>
-#include <grpcpp/impl/codegen/service_type.h>
-#include <grpcpp/impl/codegen/status.h>
+#include <grpcpp/impl/codegen/call.h>
+#include <grpcpp/impl/codegen/channel_interface.h>
+#include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
+#include <grpcpp/impl/codegen/core_codegen_interface.h>
+#include <grpcpp/impl/codegen/server_context.h>
+#include <grpcpp/impl/codegen/service_type.h>
+#include <grpcpp/impl/codegen/status.h>
namespace grpc {
namespace internal {
-/// Common interface for all synchronous client side streaming.
-class ClientStreamingInterface {
- public:
- virtual ~ClientStreamingInterface() {}
-
- /// Block waiting until the stream finishes and a final status of the call is
- /// available.
- ///
- /// It is appropriate to call this method exactly once when both:
- ///   * the calling code (client-side) has no more messages to send
- /// (this can be declared implicitly by calling this method, or
- /// explicitly through an earlier call to <i>WritesDone</i> method of the
- /// class in use, e.g. \a ClientWriterInterface::WritesDone or
- /// \a ClientReaderWriterInterface::WritesDone).
- /// * there are no more messages to be received from the server (which can
- /// be known implicitly, or explicitly from an earlier call to \a
- /// ReaderInterface::Read that returned "false").
- ///
- /// This function will return either:
- /// - when all incoming messages have been read and the server has
- /// returned status.
- /// - when the server has returned a non-OK status.
- /// - OR when the call failed for some reason and the library generated a
- /// status.
- ///
- /// Return values:
- /// - \a Status contains the status code, message and details for the call
- /// - the \a ClientContext associated with this call is updated with
- /// possible trailing metadata sent from the server.
- virtual ::grpc::Status Finish() = 0;
-};
-
-/// Common interface for all synchronous server side streaming.
-class ServerStreamingInterface {
- public:
- virtual ~ServerStreamingInterface() {}
-
- /// Block to send initial metadata to client.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// The initial metadata that will be sent to the client will be
- /// taken from the \a ServerContext associated with the call.
- virtual void SendInitialMetadata() = 0;
-};
-
-/// An interface that yields a sequence of messages of type \a R.
+/// Common interface for all synchronous client side streaming.
+class ClientStreamingInterface {
+ public:
+ virtual ~ClientStreamingInterface() {}
+
+ /// Block waiting until the stream finishes and a final status of the call is
+ /// available.
+ ///
+ /// It is appropriate to call this method exactly once when both:
+ ///   * the calling code (client-side) has no more messages to send
+ /// (this can be declared implicitly by calling this method, or
+ /// explicitly through an earlier call to <i>WritesDone</i> method of the
+ /// class in use, e.g. \a ClientWriterInterface::WritesDone or
+ /// \a ClientReaderWriterInterface::WritesDone).
+ /// * there are no more messages to be received from the server (which can
+ /// be known implicitly, or explicitly from an earlier call to \a
+ /// ReaderInterface::Read that returned "false").
+ ///
+ /// This function will return either:
+ /// - when all incoming messages have been read and the server has
+ /// returned status.
+ /// - when the server has returned a non-OK status.
+ /// - OR when the call failed for some reason and the library generated a
+ /// status.
+ ///
+ /// Return values:
+ /// - \a Status contains the status code, message and details for the call
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible trailing metadata sent from the server.
+ virtual ::grpc::Status Finish() = 0;
+};
+
+/// Common interface for all synchronous server side streaming.
+class ServerStreamingInterface {
+ public:
+ virtual ~ServerStreamingInterface() {}
+
+ /// Block to send initial metadata to client.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// The initial metadata that will be sent to the client will be
+ /// taken from the \a ServerContext associated with the call.
+ virtual void SendInitialMetadata() = 0;
+};
+
+/// An interface that yields a sequence of messages of type \a R.
template <class R>
-class ReaderInterface {
- public:
- virtual ~ReaderInterface() {}
-
- /// Get an upper bound on the next message size available for reading on this
- /// stream.
- virtual bool NextMessageSize(uint32_t* sz) = 0;
-
- /// Block to read a message and parse to \a msg. Returns \a true on success.
- /// This is thread-safe with respect to \a Write or \a WritesDone methods on
- /// the same stream. It should not be called concurrently with another \a
- /// Read on the same stream as the order of delivery will not be defined.
- ///
- /// \param[out] msg The read message.
- ///
- /// \return \a false when there will be no more incoming messages, either
- /// because the other side has called \a WritesDone() or the stream has failed
- /// (or been cancelled).
- virtual bool Read(R* msg) = 0;
-};
-
-/// An interface that can be fed a sequence of messages of type \a W.
+class ReaderInterface {
+ public:
+ virtual ~ReaderInterface() {}
+
+ /// Get an upper bound on the next message size available for reading on this
+ /// stream.
+ virtual bool NextMessageSize(uint32_t* sz) = 0;
+
+ /// Block to read a message and parse to \a msg. Returns \a true on success.
+ /// This is thread-safe with respect to \a Write or \a WritesDone methods on
+ /// the same stream. It should not be called concurrently with another \a
+ /// Read on the same stream as the order of delivery will not be defined.
+ ///
+ /// \param[out] msg The read message.
+ ///
+ /// \return \a false when there will be no more incoming messages, either
+ /// because the other side has called \a WritesDone() or the stream has failed
+ /// (or been cancelled).
+ virtual bool Read(R* msg) = 0;
+};
+
+/// An interface that can be fed a sequence of messages of type \a W.
template <class W>
-class WriterInterface {
- public:
- virtual ~WriterInterface() {}
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- virtual bool Write(const W& msg, ::grpc::WriteOptions options) = 0;
-
- /// Block to write \a msg to the stream with default write options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- inline bool Write(const W& msg) { return Write(msg, ::grpc::WriteOptions()); }
-
- /// Write \a msg and coalesce it with the writing of trailing metadata, using
- /// WriteOptions \a options.
- ///
- /// For client, WriteLast is equivalent to performing Write and WritesDone in
- /// a single step. \a msg and trailing metadata are coalesced and sent on wire
- /// by calling this function. For server, WriteLast buffers the \a msg.
- /// The writing of \a msg is held until the service handler returns,
- /// where \a msg and trailing metadata are coalesced and sent on wire.
- /// Note that WriteLast can only buffer \a msg up to the flow control window
- /// size. If \a msg size is larger than the window size, it will be sent on
- /// wire without buffering.
- ///
- /// \param[in] msg The message to be written to the stream.
- /// \param[in] options The WriteOptions to be used to write this message.
- void WriteLast(const W& msg, ::grpc::WriteOptions options) {
- Write(msg, options.set_last_message());
- }
-};
-
+class WriterInterface {
+ public:
+ virtual ~WriterInterface() {}
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ virtual bool Write(const W& msg, ::grpc::WriteOptions options) = 0;
+
+ /// Block to write \a msg to the stream with default write options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ inline bool Write(const W& msg) { return Write(msg, ::grpc::WriteOptions()); }
+
+ /// Write \a msg and coalesce it with the writing of trailing metadata, using
+ /// WriteOptions \a options.
+ ///
+ /// For client, WriteLast is equivalent to performing Write and WritesDone in
+ /// a single step. \a msg and trailing metadata are coalesced and sent on wire
+ /// by calling this function. For server, WriteLast buffers the \a msg.
+ /// The writing of \a msg is held until the service handler returns,
+ /// where \a msg and trailing metadata are coalesced and sent on wire.
+ /// Note that WriteLast can only buffer \a msg up to the flow control window
+ /// size. If \a msg size is larger than the window size, it will be sent on
+ /// wire without buffering.
+ ///
+ /// \param[in] msg The message to be written to the stream.
+ /// \param[in] options The WriteOptions to be used to write this message.
+ void WriteLast(const W& msg, ::grpc::WriteOptions options) {
+ Write(msg, options.set_last_message());
+ }
+};
+
} // namespace internal
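Editor's sketch (not part of the diff): the blocking client-side read loop these Reader/Writer interfaces describe, for a server-streaming RPC. The stub, List method and message types are hypothetical; the Read()-until-false-then-Finish() sequence follows the documentation above.

grpc::ClientContext ctx;
ListRequest request;
std::unique_ptr<grpc::ClientReader<ListReply>> reader(stub->List(&ctx, request));

ListReply reply;
while (reader->Read(&reply)) {
  // consume reply; Read() returns false once the server has finished (or failed)
}
grpc::Status status = reader->Finish();  // call exactly once, after the last Read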
-/// Client-side interface for streaming reads of message of type \a R.
+/// Client-side interface for streaming reads of message of type \a R.
template <class R>
-class ClientReaderInterface : public internal::ClientStreamingInterface,
- public internal::ReaderInterface<R> {
- public:
- /// Block to wait for initial metadata from server. The received metadata
- /// can only be accessed after this call returns. Should only be called before
- /// the first read. Calling this method is optional, and if it is not called
- /// the metadata will be available in ClientContext after the first read.
- virtual void WaitForInitialMetadata() = 0;
-};
-
-namespace internal {
+class ClientReaderInterface : public internal::ClientStreamingInterface,
+ public internal::ReaderInterface<R> {
+ public:
+ /// Block to wait for initial metadata from server. The received metadata
+ /// can only be accessed after this call returns. Should only be called before
+ /// the first read. Calling this method is optional, and if it is not called
+ /// the metadata will be available in ClientContext after the first read.
+ virtual void WaitForInitialMetadata() = 0;
+};
+
+namespace internal {
template <class R>
-class ClientReaderFactory {
- public:
- template <class W>
- static ClientReader<R>* Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context,
- const W& request) {
- return new ClientReader<R>(channel, method, context, request);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
-/// where the stream of messages coming from the server has messages
-/// of type \a R.
-template <class R>
-class ClientReader final : public ClientReaderInterface<R> {
- public:
- /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
- /// semantics.
- ///
- /// Side effect:
- /// Once complete, the initial metadata read from
- /// the server will be accessible through the \a ClientContext used to
- /// construct this object.
- void WaitForInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); /// status ignored
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_.max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- /// See the \a ReaderInterface.Read method for semantics.
- /// Side effect:
- /// This also receives initial metadata from the server, if not
- /// already received (if initial metadata is received, it can be then
- /// accessed through the \a ClientContext associated with this call).
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ops.RecvMessage(msg);
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops) && ops.got_message;
- }
-
- /// See the \a ClientStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// The \a ClientContext associated with this call is updated with
- /// possible metadata received from the server.
- ::grpc::Status Finish() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
- ::grpc::Status status;
- ops.ClientRecvStatus(context_, &status);
- call_.PerformOps(&ops);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
- return status;
- }
-
- private:
- friend class internal::ClientReaderFactory<R>;
- ::grpc::ClientContext* context_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-
- /// Block to create a stream and write the initial metadata and \a request
- /// out. Note that \a context will be used to fill in custom initial
- /// metadata used to send to the server when starting the call.
- template <class W>
- ClientReader(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, const W& request)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- // TODO(ctiller): don't assert
- GPR_CODEGEN_ASSERT(ops.SendMessagePtr(&request).ok());
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
-};
-
-/// Client-side interface for streaming writes of message type \a W.
-template <class W>
-class ClientWriterInterface : public internal::ClientStreamingInterface,
- public internal::WriterInterface<W> {
- public:
- /// Half close writing from the client (i.e. signal that the stream of
- /// messages coming from the client is complete).
- /// Blocks until currently-pending writes are completed.
- /// Thread safe with respect to \a ReaderInterface::Read operations only
- ///
- /// \return Whether the writes were successful.
- virtual bool WritesDone() = 0;
-};
-
-namespace internal {
+class ClientReaderFactory {
+ public:
+ template <class W>
+ static ClientReader<R>* Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context,
+ const W& request) {
+ return new ClientReader<R>(channel, method, context, request);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for doing server-streaming RPCs,
+/// where the stream of messages coming from the server has messages
+/// of type \a R.
+template <class R>
+class ClientReader final : public ClientReaderInterface<R> {
+ public:
+ /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// Once complete, the initial metadata read from
+ /// the server will be accessible through the \a ClientContext used to
+ /// construct this object.
+ void WaitForInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops); /// status ignored
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_.max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ /// See the \a ReaderInterface.Read method for semantics.
+ /// Side effect:
+ /// This also receives initial metadata from the server, if not
+ /// already received (if initial metadata is received, it can be then
+ /// accessed through the \a ClientContext associated with this call).
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ops.RecvMessage(msg);
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops) && ops.got_message;
+ }
+
+ /// See the \a ClientStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// The \a ClientContext associated with this call is updated with
+ /// possible metadata received from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops;
+ ::grpc::Status status;
+ ops.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&ops);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientReaderFactory<R>;
+ ::grpc::ClientContext* context_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+
+ /// Block to create a stream and write the initial metadata and \a request
+ /// out. Note that \a context will be used to fill in custom initial
+ /// metadata used to send to the server when starting the call.
+ template <class W>
+ ClientReader(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, const W& request)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ // TODO(ctiller): don't assert
+ GPR_CODEGEN_ASSERT(ops.SendMessagePtr(&request).ok());
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+};
+
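A minimal sketch of consuming a ClientReader obtained from a generated stub; RouteGuide, ListFeatures, Rectangle and Feature are placeholder names for any service with a server-streaming method:

#include <memory>
#include <grpcpp/grpcpp.h>

grpc::Status ReadAllFeatures(RouteGuide::Stub* stub, const Rectangle& rect) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientReader<Feature>> reader(
      stub->ListFeatures(&context, rect));
  Feature feature;
  while (reader->Read(&feature)) {
    // Consume each streamed message here.
  }
  return reader->Finish();  // trailing status; also updates trailing metadata
}
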
+/// Client-side interface for streaming writes of message type \a W.
template <class W>
-class ClientWriterFactory {
- public:
- template <class R>
- static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, R* response) {
- return new ClientWriter<W>(channel, method, context, response);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
-/// where the outgoing message stream coming from the client has messages of
-/// type \a W.
+class ClientWriterInterface : public internal::ClientStreamingInterface,
+ public internal::WriterInterface<W> {
+ public:
+ /// Half close writing from the client (i.e. signal that the stream of
+ /// messages coming from the client is complete).
+ /// Blocks until currently-pending writes are completed.
+ /// Thread safe with respect to \a ReaderInterface::Read operations only
+ ///
+ /// \return Whether the writes were successful.
+ virtual bool WritesDone() = 0;
+};
+
+namespace internal {
template <class W>
-class ClientWriter : public ClientWriterInterface<W> {
- public:
- /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
- /// semantics.
- ///
- /// Side effect:
- /// Once complete, the initial metadata read from the server will be
- /// accessible through the \a ClientContext used to construct this object.
- void WaitForInitialMetadata() {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); // status ignored
- }
-
- /// See the WriterInterface.Write(const W& msg, WriteOptions options) method
- /// for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
- /// \a ClientContext associated with this call).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
-
- if (options.is_last_message()) {
- options.set_buffer_hint();
- ops.ClientSendClose();
- }
- if (context_->initial_metadata_corked_) {
- ops.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- context_->set_initial_metadata_corked(false);
- }
- if (!ops.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
-
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- bool WritesDone() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- /// See the ClientStreamingInterface.Finish method for semantics.
- /// Side effects:
- /// - Also receives initial metadata if not already received.
- /// - Attempts to fill in the \a response parameter passed
- /// to the constructor of this instance with the response
- /// message from the server.
- ::grpc::Status Finish() override {
- ::grpc::Status status;
- if (!context_->initial_metadata_received_) {
- finish_ops_.RecvInitialMetadata(context_);
- }
- finish_ops_.ClientRecvStatus(context_, &status);
- call_.PerformOps(&finish_ops_);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&finish_ops_));
- return status;
- }
-
- private:
- friend class internal::ClientWriterFactory<W>;
-
- /// Block to create a stream (i.e. send request headers and other initial
- /// metadata to the server). Note that \a context will be used to fill
- /// in custom initial metadata. \a response will be filled in with the
- /// single expected response message from the server upon a successful
- /// call to the \a Finish method of this instance.
- template <class R>
- ClientWriter(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context, R* response)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- finish_ops_.RecvMessage(response);
- finish_ops_.AllowNoMessage();
-
- if (!context_->initial_metadata_corked_) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
- }
-
- ::grpc::ClientContext* context_;
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpGenericRecvMessage,
- ::grpc::internal::CallOpClientRecvStatus>
- finish_ops_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-};
-
-/// Client-side interface for bi-directional streaming with
-/// client-to-server stream messages of type \a W and
-/// server-to-client stream messages of type \a R.
-template <class W, class R>
-class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
- public internal::WriterInterface<W>,
- public internal::ReaderInterface<R> {
- public:
- /// Block to wait for initial metadata from server. The received metadata
- /// can only be accessed after this call returns. Should only be called before
- /// the first read. Calling this method is optional, and if it is not called
- /// the metadata will be available in ClientContext after the first read.
- virtual void WaitForInitialMetadata() = 0;
-
- /// Half close writing from the client (i.e. signal that the stream of
- /// messages coming from the client is complete).
- /// Blocks until currently-pending writes are completed.
- /// Thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \return Whether the writes were successful.
- virtual bool WritesDone() = 0;
-};
-
-namespace internal {
+class ClientWriterFactory {
+ public:
+ template <class R>
+ static ClientWriter<W>* Create(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, R* response) {
+ return new ClientWriter<W>(channel, method, context, response);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for doing client-streaming RPCs,
+/// where the outgoing message stream coming from the client has messages of
+/// type \a W.
+template <class W>
+class ClientWriter : public ClientWriterInterface<W> {
+ public:
+ /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for
+ /// semantics.
+ ///
+ /// Side effect:
+ /// Once complete, the initial metadata read from the server will be
+ /// accessible through the \a ClientContext used to construct this object.
+ void WaitForInitialMetadata() {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops); // status ignored
+ }
+
+ /// See the WriterInterface.Write(const W& msg, WriteOptions options) method
+ /// for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+ /// \a ClientContext associated with this call).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ ops.ClientSendClose();
+ }
+ if (context_->initial_metadata_corked_) {
+ ops.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ context_->set_initial_metadata_corked(false);
+ }
+ if (!ops.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ bool WritesDone() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ /// See the ClientStreamingInterface.Finish method for semantics.
+ /// Side effects:
+ /// - Also receives initial metadata if not already received.
+ /// - Attempts to fill in the \a response parameter passed
+ /// to the constructor of this instance with the response
+ /// message from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::Status status;
+ if (!context_->initial_metadata_received_) {
+ finish_ops_.RecvInitialMetadata(context_);
+ }
+ finish_ops_.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&finish_ops_);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&finish_ops_));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientWriterFactory<W>;
+
+ /// Block to create a stream (i.e. send request headers and other initial
+ /// metadata to the server). Note that \a context will be used to fill
+ /// in custom initial metadata. \a response will be filled in with the
+ /// single expected response message from the server upon a successful
+ /// call to the \a Finish method of this instance.
+ template <class R>
+ ClientWriter(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context, R* response)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ finish_ops_.RecvMessage(response);
+ finish_ops_.AllowNoMessage();
+
+ if (!context_->initial_metadata_corked_) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+ }
+
+ ::grpc::ClientContext* context_;
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpGenericRecvMessage,
+ ::grpc::internal::CallOpClientRecvStatus>
+ finish_ops_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+};
+
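A minimal sketch of driving a ClientWriter from a generated stub; RouteGuide, RecordRoute, Point and RouteSummary are placeholder names for any client-streaming method:

#include <memory>
#include <vector>
#include <grpcpp/grpcpp.h>

grpc::Status SendPoints(RouteGuide::Stub* stub, const std::vector<Point>& pts,
                        RouteSummary* summary) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientWriter<Point>> writer(
      stub->RecordRoute(&context, summary));
  for (const Point& p : pts) {
    if (!writer->Write(p)) break;  // stream was closed by the server
  }
  writer->WritesDone();            // half-close before asking for the status
  return writer->Finish();         // *summary is filled in on success
}
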
+/// Client-side interface for bi-directional streaming with
+/// client-to-server stream messages of type \a W and
+/// server-to-client stream messages of type \a R.
template <class W, class R>
-class ClientReaderWriterFactory {
- public:
- static ClientReaderWriter<W, R>* Create(
- ::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context) {
- return new ClientReaderWriter<W, R>(channel, method, context);
- }
-};
-} // namespace internal
-
-/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
-/// where the outgoing message stream coming from the client has messages of
-/// type \a W, and the incoming messages stream coming from the server has
-/// messages of type \a R.
+class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
+ public internal::WriterInterface<W>,
+ public internal::ReaderInterface<R> {
+ public:
+ /// Block to wait for initial metadata from server. The received metadata
+ /// can only be accessed after this call returns. Should only be called before
+ /// the first read. Calling this method is optional, and if it is not called
+ /// the metadata will be available in ClientContext after the first read.
+ virtual void WaitForInitialMetadata() = 0;
+
+ /// Half close writing from the client. (signal that the stream of messages
+ /// coming from the client is complete).
+ /// Blocks until currently-pending writes are completed.
+ /// Thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \return Whether the writes were successful.
+ virtual bool WritesDone() = 0;
+};
+
+namespace internal {
template <class W, class R>
-class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
- public:
- /// Block waiting to read initial metadata from the server.
- /// This call is optional, but if it is used, it cannot be used concurrently
- /// with or after the \a Finish method.
- ///
- /// Once complete, the initial metadata read from the server will be
- /// accessible through the \a ClientContext used to construct this object.
- void WaitForInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
- ops;
- ops.RecvInitialMetadata(context_);
- call_.PerformOps(&ops);
- cq_.Pluck(&ops); // status ignored
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_.max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- /// See the \a ReaderInterface.Read method for semantics.
- /// Side effect:
- /// Also receives initial metadata if not already received (updates the \a
- /// ClientContext associated with this call in that case).
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpRecvMessage<R>>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ops.RecvMessage(msg);
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops) && ops.got_message;
- }
-
- /// See the \a WriterInterface.Write method for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
- /// \a ClientContext associated with this call to fill in values).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
- ::grpc::internal::CallOpSendMessage,
- ::grpc::internal::CallOpClientSendClose>
- ops;
-
- if (options.is_last_message()) {
- options.set_buffer_hint();
- ops.ClientSendClose();
- }
- if (context_->initial_metadata_corked_) {
- ops.SendInitialMetadata(&context_->send_initial_metadata_,
- context_->initial_metadata_flags());
- context_->set_initial_metadata_corked(false);
- }
- if (!ops.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
-
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- bool WritesDone() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
- ops.ClientSendClose();
- call_.PerformOps(&ops);
- return cq_.Pluck(&ops);
- }
-
- /// See the ClientStreamingInterface.Finish method for semantics.
- ///
- /// Side effect:
- /// - the \a ClientContext associated with this call is updated with
- /// possible trailing metadata sent from the server.
- ::grpc::Status Finish() override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
- ::grpc::internal::CallOpClientRecvStatus>
- ops;
- if (!context_->initial_metadata_received_) {
- ops.RecvInitialMetadata(context_);
- }
- ::grpc::Status status;
- ops.ClientRecvStatus(context_, &status);
- call_.PerformOps(&ops);
- GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
- return status;
- }
-
- private:
- friend class internal::ClientReaderWriterFactory<W, R>;
-
- ::grpc::ClientContext* context_;
- ::grpc::CompletionQueue cq_;
- ::grpc::internal::Call call_;
-
- /// Block to create a stream and write the initial metadata and \a request
- /// out. Note that \a context will be used to fill in custom initial metadata
- /// used to send to the server when starting the call.
- ClientReaderWriter(::grpc::ChannelInterface* channel,
- const ::grpc::internal::RpcMethod& method,
- ::grpc::ClientContext* context)
- : context_(context),
- cq_(grpc_completion_queue_attributes{
- GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
- nullptr}), // Pluckable cq
- call_(channel->CreateCall(method, context, &cq_)) {
- if (!context_->initial_metadata_corked_) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&context->send_initial_metadata_,
- context->initial_metadata_flags());
- call_.PerformOps(&ops);
- cq_.Pluck(&ops);
- }
- }
-};
-
-/// Server-side interface for streaming reads of message of type \a R.
+class ClientReaderWriterFactory {
+ public:
+ static ClientReaderWriter<W, R>* Create(
+ ::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context) {
+ return new ClientReaderWriter<W, R>(channel, method, context);
+ }
+};
+} // namespace internal
+
+/// Synchronous (blocking) client-side API for bi-directional streaming RPCs,
+/// where the outgoing message stream coming from the client has messages of
+/// type \a W, and the incoming messages stream coming from the server has
+/// messages of type \a R.
+template <class W, class R>
+class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
+ public:
+ /// Block waiting to read initial metadata from the server.
+ /// This call is optional, but if it is used, it cannot be used concurrently
+ /// with or after the \a Finish method.
+ ///
+ /// Once complete, the initial metadata read from the server will be
+ /// accessible through the \a ClientContext used to construct this object.
+ void WaitForInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata>
+ ops;
+ ops.RecvInitialMetadata(context_);
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops); // status ignored
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_.max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ /// See the \a ReaderInterface.Read method for semantics.
+ /// Side effect:
+ /// Also receives initial metadata if not already received (updates the \a
+ /// ClientContext associated with this call in that case).
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpRecvMessage<R>>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ops.RecvMessage(msg);
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops) && ops.got_message;
+ }
+
+ /// See the \a WriterInterface.Write method for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+ /// \a ClientContext associated with this call to fill in values).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+ ::grpc::internal::CallOpSendMessage,
+ ::grpc::internal::CallOpClientSendClose>
+ ops;
+
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ ops.ClientSendClose();
+ }
+ if (context_->initial_metadata_corked_) {
+ ops.SendInitialMetadata(&context_->send_initial_metadata_,
+ context_->initial_metadata_flags());
+ context_->set_initial_metadata_corked(false);
+ }
+ if (!ops.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ bool WritesDone() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops;
+ ops.ClientSendClose();
+ call_.PerformOps(&ops);
+ return cq_.Pluck(&ops);
+ }
+
+ /// See the ClientStreamingInterface.Finish method for semantics.
+ ///
+ /// Side effect:
+ /// - the \a ClientContext associated with this call is updated with
+ /// possible trailing metadata sent from the server.
+ ::grpc::Status Finish() override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata,
+ ::grpc::internal::CallOpClientRecvStatus>
+ ops;
+ if (!context_->initial_metadata_received_) {
+ ops.RecvInitialMetadata(context_);
+ }
+ ::grpc::Status status;
+ ops.ClientRecvStatus(context_, &status);
+ call_.PerformOps(&ops);
+ GPR_CODEGEN_ASSERT(cq_.Pluck(&ops));
+ return status;
+ }
+
+ private:
+ friend class internal::ClientReaderWriterFactory<W, R>;
+
+ ::grpc::ClientContext* context_;
+ ::grpc::CompletionQueue cq_;
+ ::grpc::internal::Call call_;
+
+ /// Block to create a stream and write the initial metadata and \a request
+ /// out. Note that \a context will be used to fill in custom initial metadata
+ /// used to send to the server when starting the call.
+ ClientReaderWriter(::grpc::ChannelInterface* channel,
+ const ::grpc::internal::RpcMethod& method,
+ ::grpc::ClientContext* context)
+ : context_(context),
+ cq_(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
+ nullptr}), // Pluckable cq
+ call_(channel->CreateCall(method, context, &cq_)) {
+ if (!context_->initial_metadata_corked_) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&context->send_initial_metadata_,
+ context->initial_metadata_flags());
+ call_.PerformOps(&ops);
+ cq_.Pluck(&ops);
+ }
+ }
+};
+
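A minimal sketch of a blocking bidirectional stream obtained from a generated stub, assuming the generated RouteChat method returns a std::unique_ptr as usual; RouteGuide and RouteNote are placeholder names:

#include <memory>
#include <grpcpp/grpcpp.h>

grpc::Status ChatOnce(RouteGuide::Stub* stub) {
  grpc::ClientContext context;
  std::unique_ptr<grpc::ClientReaderWriter<RouteNote, RouteNote>> stream(
      stub->RouteChat(&context));
  RouteNote note;
  note.set_message("hello");
  stream->Write(note);   // first Write also sends initial metadata
  stream->WritesDone();  // half-close the client-to-server direction
  while (stream->Read(&note)) {
    // Consume server messages until the stream ends.
  }
  return stream->Finish();
}
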
+/// Server-side interface for streaming reads of message of type \a R.
template <class R>
-class ServerReaderInterface : public internal::ServerStreamingInterface,
- public internal::ReaderInterface<R> {};
+class ServerReaderInterface : public internal::ServerStreamingInterface,
+ public internal::ReaderInterface<R> {};
-/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
-/// where the incoming message stream coming from the client has messages of
-/// type \a R.
+/// Synchronous (blocking) server-side API for doing client-streaming RPCs,
+/// where the incoming message stream coming from the client has messages of
+/// type \a R.
template <class R>
-class ServerReader final : public ServerReaderInterface<R> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics. Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- bool NextMessageSize(uint32_t* sz) override {
- int result = call_->max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- bool Read(R* msg) override {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
- ops.RecvMessage(msg);
- call_->PerformOps(&ops);
- return call_->cq()->Pluck(&ops) && ops.got_message;
- }
-
- private:
- ::grpc::internal::Call* const call_;
- ServerContext* const ctx_;
-
- template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ClientStreamingHandler;
-
- ServerReader(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-};
-
-/// Server-side interface for streaming writes of message of type \a W.
+class ServerReader final : public ServerReaderInterface<R> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics. Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ int result = call_->max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ bool Read(R* msg) override {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
+ ops.RecvMessage(msg);
+ call_->PerformOps(&ops);
+ return call_->cq()->Pluck(&ops) && ops.got_message;
+ }
+
+ private:
+ ::grpc::internal::Call* const call_;
+ ServerContext* const ctx_;
+
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class internal::ClientStreamingHandler;
+
+ ServerReader(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+};
+
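A minimal sketch of a synchronous client-streaming handler built on ServerReader; RouteGuide::Service, RecordRoute, Point and RouteSummary are placeholder generated names:

#include <grpcpp/grpcpp.h>

class RouteGuideImpl final : public RouteGuide::Service {
  grpc::Status RecordRoute(grpc::ServerContext* /*context*/,
                           grpc::ServerReader<Point>* reader,
                           RouteSummary* summary) override {
    Point point;
    int count = 0;
    while (reader->Read(&point)) {  // false once the client half-closes
      ++count;
    }
    summary->set_point_count(count);
    return grpc::Status::OK;        // status and trailing metadata sent here
  }
};
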
+/// Server-side interface for streaming writes of message of type \a W.
template <class W>
-class ServerWriterInterface : public internal::ServerStreamingInterface,
- public internal::WriterInterface<W> {};
+class ServerWriterInterface : public internal::ServerStreamingInterface,
+ public internal::WriterInterface<W> {};
-/// Synchronous (blocking) server-side API for doing
-/// server-streaming RPCs, where the outgoing message stream coming from the
-/// server has messages of type \a W.
+/// Synchronous (blocking) server-side API for doing
+/// server-streaming RPCs, where the outgoing message stream coming from the
+/// server has messages of type \a W.
template <class W>
-class ServerWriter final : public ServerWriterInterface<W> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics.
- /// Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
- ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- /// See the \a WriterInterface.Write method for semantics.
- ///
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the
- /// \a ClientContext associated with this call to fill in values).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
-
- if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
- if (!ctx_->sent_initial_metadata_) {
- ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- call_->PerformOps(&ctx_->pending_ops_);
- // if this is the last message we defer the pluck until AFTER we start
- // the trailing md op. This prevents hangs. See
- // https://github.com/grpc/grpc/issues/11546
- if (options.is_last_message()) {
- ctx_->has_pending_ops_ = true;
- return true;
- }
- ctx_->has_pending_ops_ = false;
- return call_->cq()->Pluck(&ctx_->pending_ops_);
- }
-
- private:
- ::grpc::internal::Call* const call_;
- ::grpc::ServerContext* const ctx_;
-
- template <class ServiceType, class RequestType, class ResponseType>
- friend class internal::ServerStreamingHandler;
-
- ServerWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-};
-
-/// Server-side interface for bi-directional streaming.
-template <class W, class R>
-class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
- public internal::WriterInterface<W>,
- public internal::ReaderInterface<R> {};
-
-/// Actual implementation of bi-directional streaming
-namespace internal {
+class ServerWriter final : public ServerWriterInterface<W> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics.
+ /// Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+ ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ /// See the \a WriterInterface.Write method for semantics.
+ ///
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the
+ /// \a ClientContext associated with this call to fill in values).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+
+ if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+ if (!ctx_->sent_initial_metadata_) {
+ ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ call_->PerformOps(&ctx_->pending_ops_);
+ // if this is the last message we defer the pluck until AFTER we start
+ // the trailing md op. This prevents hangs. See
+ // https://github.com/grpc/grpc/issues/11546
+ if (options.is_last_message()) {
+ ctx_->has_pending_ops_ = true;
+ return true;
+ }
+ ctx_->has_pending_ops_ = false;
+ return call_->cq()->Pluck(&ctx_->pending_ops_);
+ }
+
+ private:
+ ::grpc::internal::Call* const call_;
+ ::grpc::ServerContext* const ctx_;
+
+ template <class ServiceType, class RequestType, class ResponseType>
+ friend class internal::ServerStreamingHandler;
+
+ ServerWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+};
+
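A minimal sketch of a synchronous server-streaming handler built on ServerWriter; the generated service, method and message names are placeholders:

#include <grpcpp/grpcpp.h>

class RouteGuideImpl final : public RouteGuide::Service {
  grpc::Status ListFeatures(grpc::ServerContext* /*context*/,
                            const Rectangle* /*request*/,
                            grpc::ServerWriter<Feature>* writer) override {
    Feature feature;
    feature.set_name("placeholder");
    // The first Write also sends initial metadata unless SendInitialMetadata
    // was called explicitly beforehand.
    writer->Write(feature);
    // Returning sends the status; a message buffered via WriteLast would be
    // coalesced with the trailing metadata at this point.
    return grpc::Status::OK;
  }
};
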
+/// Server-side interface for bi-directional streaming.
template <class W, class R>
-class ServerReaderWriterBody final {
- public:
- ServerReaderWriterBody(grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : call_(call), ctx_(ctx) {}
-
- void SendInitialMetadata() {
- GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+class ServerReaderWriterInterface : public internal::ServerStreamingInterface,
+ public internal::WriterInterface<W>,
+ public internal::ReaderInterface<R> {};
- grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
- ops.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ops.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- call_->PerformOps(&ops);
- call_->cq()->Pluck(&ops);
- }
-
- bool NextMessageSize(uint32_t* sz) {
- int result = call_->max_receive_message_size();
- *sz = (result > 0) ? result : UINT32_MAX;
- return true;
- }
-
- bool Read(R* msg) {
- ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
- ops.RecvMessage(msg);
- call_->PerformOps(&ops);
- return call_->cq()->Pluck(&ops) && ops.got_message;
- }
-
- bool Write(const W& msg, ::grpc::WriteOptions options) {
- if (options.is_last_message()) {
- options.set_buffer_hint();
- }
- if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
- return false;
- }
- if (!ctx_->sent_initial_metadata_) {
- ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
- ctx_->initial_metadata_flags());
- if (ctx_->compression_level_set()) {
- ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
- }
- ctx_->sent_initial_metadata_ = true;
- }
- call_->PerformOps(&ctx_->pending_ops_);
- // if this is the last message we defer the pluck until AFTER we start
- // the trailing md op. This prevents hangs. See
- // https://github.com/grpc/grpc/issues/11546
- if (options.is_last_message()) {
- ctx_->has_pending_ops_ = true;
- return true;
- }
- ctx_->has_pending_ops_ = false;
- return call_->cq()->Pluck(&ctx_->pending_ops_);
- }
-
- private:
- grpc::internal::Call* const call_;
- ::grpc::ServerContext* const ctx_;
-};
-
-} // namespace internal
-
-/// Synchronous (blocking) server-side API for a bidirectional
-/// streaming call, where the incoming message stream coming from the client has
-/// messages of type \a R, and the outgoing message stream coming from
-/// the server has messages of type \a W.
+/// Actual implementation of bi-directional streaming
+namespace internal {
template <class W, class R>
-class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
- public:
- /// See the \a ServerStreamingInterface.SendInitialMetadata method
- /// for semantics. Note that initial metadata will be affected by the
- /// \a ServerContext associated with this call.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
- bool Read(R* msg) override { return body_.Read(msg); }
-
- /// See the \a WriterInterface.Write(const W& msg, WriteOptions options)
- /// method for semantics.
- /// Side effect:
- /// Also sends initial metadata if not already sent (using the \a
- /// ServerContext associated with this call).
- using internal::WriterInterface<W>::Write;
- bool Write(const W& msg, ::grpc::WriteOptions options) override {
- return body_.Write(msg, options);
- }
-
- private:
- internal::ServerReaderWriterBody<W, R> body_;
-
- friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
- false>;
- ServerReaderWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx) {}
-};
-
-/// A class to represent a flow-controlled unary call. This is something
-/// of a hybrid between conventional unary and streaming. This is invoked
-/// through a unary call on the client side, but the server responds to it
-/// as though it were a single-ping-pong streaming call. The server can use
-/// the \a NextMessageSize method to determine an upper-bound on the size of
-/// the message. A key difference relative to streaming: ServerUnaryStreamer
-/// must have exactly 1 Read and exactly 1 Write, in that order, to function
-/// correctly. Otherwise, the RPC is in error.
+class ServerReaderWriterBody final {
+ public:
+ ServerReaderWriterBody(grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : call_(call), ctx_(ctx) {}
+
+ void SendInitialMetadata() {
+ GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+ grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
+ ops.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ops.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ call_->PerformOps(&ops);
+ call_->cq()->Pluck(&ops);
+ }
+
+ bool NextMessageSize(uint32_t* sz) {
+ int result = call_->max_receive_message_size();
+ *sz = (result > 0) ? result : UINT32_MAX;
+ return true;
+ }
+
+ bool Read(R* msg) {
+ ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
+ ops.RecvMessage(msg);
+ call_->PerformOps(&ops);
+ return call_->cq()->Pluck(&ops) && ops.got_message;
+ }
+
+ bool Write(const W& msg, ::grpc::WriteOptions options) {
+ if (options.is_last_message()) {
+ options.set_buffer_hint();
+ }
+ if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) {
+ return false;
+ }
+ if (!ctx_->sent_initial_metadata_) {
+ ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+ ctx_->initial_metadata_flags());
+ if (ctx_->compression_level_set()) {
+ ctx_->pending_ops_.set_compression_level(ctx_->compression_level());
+ }
+ ctx_->sent_initial_metadata_ = true;
+ }
+ call_->PerformOps(&ctx_->pending_ops_);
+ // if this is the last message we defer the pluck until AFTER we start
+ // the trailing md op. This prevents hangs. See
+ // https://github.com/grpc/grpc/issues/11546
+ if (options.is_last_message()) {
+ ctx_->has_pending_ops_ = true;
+ return true;
+ }
+ ctx_->has_pending_ops_ = false;
+ return call_->cq()->Pluck(&ctx_->pending_ops_);
+ }
+
+ private:
+ grpc::internal::Call* const call_;
+ ::grpc::ServerContext* const ctx_;
+};
+
+} // namespace internal
+
+/// Synchronous (blocking) server-side API for a bidirectional
+/// streaming call, where the incoming message stream coming from the client has
+/// messages of type \a R, and the outgoing message stream coming from
+/// the server has messages of type \a W.
+template <class W, class R>
+class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
+ public:
+ /// See the \a ServerStreamingInterface.SendInitialMetadata method
+ /// for semantics. Note that initial metadata will be affected by the
+ /// \a ServerContext associated with this call.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+ bool Read(R* msg) override { return body_.Read(msg); }
+
+ /// See the \a WriterInterface.Write(const W& msg, WriteOptions options)
+ /// method for semantics.
+ /// Side effect:
+ /// Also sends initial metadata if not already sent (using the \a
+ /// ServerContext associated with this call).
+ using internal::WriterInterface<W>::Write;
+ bool Write(const W& msg, ::grpc::WriteOptions options) override {
+ return body_.Write(msg, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<W, R> body_;
+
+ friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>,
+ false>;
+ ServerReaderWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx) {}
+};
+
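A minimal sketch of a synchronous bidirectional handler built on ServerReaderWriter; the generated names are placeholders:

#include <grpcpp/grpcpp.h>

class RouteGuideImpl final : public RouteGuide::Service {
  grpc::Status RouteChat(
      grpc::ServerContext* /*context*/,
      grpc::ServerReaderWriter<RouteNote, RouteNote>* stream) override {
    RouteNote note;
    while (stream->Read(&note)) {
      stream->Write(note);  // echo each incoming message back to the client
    }
    return grpc::Status::OK;
  }
};
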
+/// A class to represent a flow-controlled unary call. This is something
+/// of a hybrid between conventional unary and streaming. This is invoked
+/// through a unary call on the client side, but the server responds to it
+/// as though it were a single-ping-pong streaming call. The server can use
+/// the \a NextMessageSize method to determine an upper-bound on the size of
+/// the message. A key difference relative to streaming: ServerUnaryStreamer
+/// must have exactly 1 Read and exactly 1 Write, in that order, to function
+/// correctly. Otherwise, the RPC is in error.
template <class RequestType, class ResponseType>
-class ServerUnaryStreamer final
- : public ServerReaderWriterInterface<ResponseType, RequestType> {
- public:
- /// Block to send initial metadata to client.
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call will be used for
- /// sending initial metadata.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- /// Get an upper bound on the request message size from the client.
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
- /// Block to read a message of type \a RequestType into \a request.
- /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
- /// should not be called concurrently with other streaming APIs
- /// on the same stream. It is not meaningful to call it concurrently
- /// with another \a ReaderInterface::Read on the same stream since reads on
- /// the same stream are delivered in order.
- ///
- /// \param[out] request Where to store the read message.
- ///
- /// \return \a true on success, \a false once the single allowed read has
- /// been consumed or the stream has been closed.
- bool Read(RequestType* request) override {
- if (read_done_) {
- return false;
- }
- read_done_ = true;
- return body_.Read(request);
- }
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- using internal::WriterInterface<ResponseType>::Write;
- bool Write(const ResponseType& response,
- ::grpc::WriteOptions options) override {
- if (write_done_ || !read_done_) {
- return false;
- }
- write_done_ = true;
- return body_.Write(response, options);
- }
-
- private:
- internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
- bool read_done_;
- bool write_done_;
-
- friend class internal::TemplatedBidiStreamingHandler<
- ServerUnaryStreamer<RequestType, ResponseType>, true>;
- ServerUnaryStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx), read_done_(false), write_done_(false) {}
-};
-
-/// A class to represent a flow-controlled server-side streaming call.
-/// This is something of a hybrid between server-side and bidi streaming.
-/// This is invoked through a server-side streaming call on the client side,
-/// but the server responds to it as though it were a bidi streaming call that
-/// must first have exactly 1 Read and then any number of Writes.
+class ServerUnaryStreamer final
+ : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+ /// Block to send initial metadata to client.
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call will be used for
+ /// sending initial metadata.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ /// Get an upper bound on the request message size from the client.
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+ /// Block to read a message of type \a RequestType into \a request.
+ /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+ /// should not be called concurrently with other streaming APIs
+ /// on the same stream. It is not meaningful to call it concurrently
+ /// with another \a ReaderInterface::Read on the same stream since reads on
+ /// the same stream are delivered in order.
+ ///
+ /// \param[out] request Where to store the read message.
+ ///
+ /// \return \a true on success, \a false once the single allowed read has
+ /// been consumed or the stream has been closed.
+ bool Read(RequestType* request) override {
+ if (read_done_) {
+ return false;
+ }
+ read_done_ = true;
+ return body_.Read(request);
+ }
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ using internal::WriterInterface<ResponseType>::Write;
+ bool Write(const ResponseType& response,
+ ::grpc::WriteOptions options) override {
+ if (write_done_ || !read_done_) {
+ return false;
+ }
+ write_done_ = true;
+ return body_.Write(response, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+ bool read_done_;
+ bool write_done_;
+
+ friend class internal::TemplatedBidiStreamingHandler<
+ ServerUnaryStreamer<RequestType, ResponseType>, true>;
+ ServerUnaryStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx), read_done_(false), write_done_(false) {}
+};
+
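A minimal sketch of the one-Read-then-one-Write discipline ServerUnaryStreamer requires; the handler name, the Echo message types, and the assumption that it is registered through the generated WithStreamedUnaryMethod_* wrapper are all placeholders, not part of this header:

#include <grpcpp/grpcpp.h>

grpc::Status HandleStreamedEcho(
    grpc::ServerContext* /*context*/,
    grpc::ServerUnaryStreamer<EchoRequest, EchoResponse>* streamer) {
  EchoRequest request;
  if (!streamer->Read(&request)) {  // exactly one Read is allowed
    return grpc::Status(grpc::StatusCode::INTERNAL, "missing request");
  }
  EchoResponse response;
  response.set_message(request.message());
  // ...followed by exactly one Write; a second Write would return false.
  if (!streamer->Write(response, grpc::WriteOptions())) {
    return grpc::Status(grpc::StatusCode::INTERNAL, "write failed");
  }
  return grpc::Status::OK;
}
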
+/// A class to represent a flow-controlled server-side streaming call.
+/// This is something of a hybrid between server-side and bidi streaming.
+/// This is invoked through a server-side streaming call on the client side,
+/// but the server responds to it as though it were a bidi streaming call that
+/// must first have exactly 1 Read and then any number of Writes.
template <class RequestType, class ResponseType>
-class ServerSplitStreamer final
- : public ServerReaderWriterInterface<ResponseType, RequestType> {
- public:
- /// Block to send initial metadata to client.
- /// Implicit input parameter:
- /// - the \a ServerContext associated with this call will be used for
- /// sending initial metadata.
- void SendInitialMetadata() override { body_.SendInitialMetadata(); }
-
- /// Get an upper bound on the request message size from the client.
- bool NextMessageSize(uint32_t* sz) override {
- return body_.NextMessageSize(sz);
- }
-
- /// Block to read a message of type \a RequestType into \a request.
- /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
- /// should not be called concurrently with other streaming APIs
- /// on the same stream. It is not meaningful to call it concurrently
- /// with another \a ReaderInterface::Read on the same stream since reads on
- /// the same stream are delivered in order.
- ///
- /// \param[out] request Where to store the read message.
- ///
- /// \return \a true on success, \a false once the single allowed read has
- /// been consumed or the stream has been closed.
- bool Read(RequestType* request) override {
- if (read_done_) {
- return false;
- }
- read_done_ = true;
- return body_.Read(request);
- }
-
- /// Block to write \a msg to the stream with WriteOptions \a options.
- /// This is thread-safe with respect to \a ReaderInterface::Read
- ///
- /// \param msg The message to be written to the stream.
- /// \param options The WriteOptions affecting the write operation.
- ///
- /// \return \a true on success, \a false when the stream has been closed.
- using internal::WriterInterface<ResponseType>::Write;
- bool Write(const ResponseType& response,
- ::grpc::WriteOptions options) override {
- return read_done_ && body_.Write(response, options);
- }
-
- private:
- internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
- bool read_done_;
-
- friend class internal::TemplatedBidiStreamingHandler<
- ServerSplitStreamer<RequestType, ResponseType>, false>;
- ServerSplitStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
- : body_(call, ctx), read_done_(false) {}
-};
-
+class ServerSplitStreamer final
+ : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+ /// Block to send initial metadata to client.
+ /// Implicit input parameter:
+ /// - the \a ServerContext associated with this call will be used for
+ /// sending initial metadata.
+ void SendInitialMetadata() override { body_.SendInitialMetadata(); }
+
+ /// Get an upper bound on the request message size from the client.
+ bool NextMessageSize(uint32_t* sz) override {
+ return body_.NextMessageSize(sz);
+ }
+
+ /// Block to read a message of type \a RequestType into \a request.
+ /// This is thread-safe with respect to \a Write or \a WritesDone methods. It
+ /// should not be called concurrently with other streaming APIs
+ /// on the same stream. It is not meaningful to call it concurrently
+ /// with another \a ReaderInterface::Read on the same stream since reads on
+ /// the same stream are delivered in order.
+ ///
+ /// \param[out] request Where to store the read message.
+ ///
+ /// \return \a true on success, \a false once the single allowed read has
+ /// been consumed or the stream has been closed.
+ bool Read(RequestType* request) override {
+ if (read_done_) {
+ return false;
+ }
+ read_done_ = true;
+ return body_.Read(request);
+ }
+
+ /// Block to write \a msg to the stream with WriteOptions \a options.
+ /// This is thread-safe with respect to \a ReaderInterface::Read
+ ///
+ /// \param msg The message to be written to the stream.
+ /// \param options The WriteOptions affecting the write operation.
+ ///
+ /// \return \a true on success, \a false when the stream has been closed.
+ using internal::WriterInterface<ResponseType>::Write;
+ bool Write(const ResponseType& response,
+ ::grpc::WriteOptions options) override {
+ return read_done_ && body_.Write(response, options);
+ }
+
+ private:
+ internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+ bool read_done_;
+
+ friend class internal::TemplatedBidiStreamingHandler<
+ ServerSplitStreamer<RequestType, ResponseType>, false>;
+ ServerSplitStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+ : body_(call, ctx), read_done_(false) {}
+};
+
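A minimal sketch of the one-Read-then-many-Writes discipline ServerSplitStreamer enforces; the names are placeholders, and the assumption that it is registered through the generated WithSplitStreamingMethod_* wrapper is just that, an assumption:

#include <grpcpp/grpcpp.h>

grpc::Status HandleSplitList(
    grpc::ServerContext* /*context*/,
    grpc::ServerSplitStreamer<Rectangle, Feature>* streamer) {
  Rectangle request;
  if (!streamer->Read(&request)) {  // exactly one Read must come first
    return grpc::Status(grpc::StatusCode::INTERNAL, "missing request");
  }
  Feature feature;
  feature.set_name("placeholder");
  // Any number of Writes may follow the single Read.
  streamer->Write(feature, grpc::WriteOptions());
  streamer->Write(feature, grpc::WriteOptions());
  return grpc::Status::OK;
}
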
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
index 3a54db45bf..aa208d4f22 100644
--- a/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h
@@ -43,12 +43,12 @@ namespace grpc {
template <typename T>
class TimePoint {
public:
- // If you see the error with methods below, you may need either
- // i) using the existing types having a conversion class such as
- // gpr_timespec and std::chrono::system_clock::time_point or
- // ii) writing a new TimePoint<YourType> to address your case.
- TimePoint(const T& /*time*/) = delete;
- gpr_timespec raw_time() = delete;
+ // If you see the error with methods below, you may need either
+ // i) using the existing types having a conversion class such as
+ // gpr_timespec and std::chrono::system_clock::time_point or
+ // ii) writing a new TimePoint<YourType> to address your case.
+ TimePoint(const T& /*time*/) = delete;
+ gpr_timespec raw_time() = delete;
};
template <>