| author | Devtools Arcadia <[email protected]> | 2022-02-07 18:08:42 +0300 | 
|---|---|---|
| committer | Devtools Arcadia <[email protected]> | 2022-02-07 18:08:42 +0300 | 
| commit | 1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch) | |
| tree | e26c9fed0de5d9873cce7e00bc214573dc2195b7 | /contrib/libs/grpc/include/grpcpp/impl/codegen |
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/libs/grpc/include/grpcpp/impl/codegen')
51 files changed, 13740 insertions, 0 deletions
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md new file mode 100644 index 00000000000..ade9d054842 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/README.md @@ -0,0 +1,21 @@ +# Welcome to `include/grpcpp/impl/codegen` + +## Why is this directory here? + +This directory exists so that generated code can include selected files upon +which it depends without having to depend on the entire gRPC C++ library. This +is particularly relevant for users of bazel, particularly if they use the +multi-lingual `proto_library` target type. Generated code that uses this target +only depends on the gRPC C++ targets associated with these header files, not the +entire gRPC C++ codebase since that would make the build time of these types of +targets excessively large (particularly when they are not even C++ specific). + +## What should user code do? + +User code should *not* include anything from this directory. Only generated code +and gRPC library code should include contents from this directory. User code +should instead include contents from the main `grpcpp` directory or its +accessible subcomponents like `grpcpp/support`. It is possible that we may +remove this directory altogether if the motivations for its existence are no +longer strong enough (e.g., if most users migrate away from the `proto_library` +target type or if the additional overhead of depending on gRPC C++ is not high). diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h new file mode 100644 index 00000000000..a812b086a2a --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_generic_service.h @@ -0,0 +1,142 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H +#define GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H + +#include <grpc/impl/codegen/port_platform.h> + +#include <grpcpp/impl/codegen/async_stream.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/server_callback.h> +#include <grpcpp/impl/codegen/server_callback_handlers.h> + +struct grpc_server; + +namespace grpc { + +typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer> +    GenericServerAsyncReaderWriter; +typedef ServerAsyncResponseWriter<ByteBuffer> GenericServerAsyncResponseWriter; +typedef ServerAsyncReader<ByteBuffer, ByteBuffer> GenericServerAsyncReader; +typedef ServerAsyncWriter<ByteBuffer> GenericServerAsyncWriter; + +class GenericServerContext final : public ServerContext { + public: +  const TString& method() const { return method_; } +  const TString& host() const { return host_; } + + private: +  friend class ServerInterface; + +  TString method_; +  TString host_; +}; + +// A generic service at the server side accepts all RPC methods and hosts. It is +// typically used in proxies. 
The generic service can be registered to a server +// which also has other services. +// Sample usage: +//   ServerBuilder builder; +//   auto cq = builder.AddCompletionQueue(); +//   AsyncGenericService generic_service; +//   builder.RegisterAsyncGenericService(&generic_service); +//   auto server = builder.BuildAndStart(); +// +//   // request a new call +//   GenericServerContext context; +//   GenericServerAsyncReaderWriter stream; +//   generic_service.RequestCall(&context, &stream, cq.get(), cq.get(), tag); +// +// When tag is retrieved from cq->Next(), context.method() can be used to look +// at the method and the RPC can be handled accordingly. +class AsyncGenericService final { + public: +  AsyncGenericService() : server_(nullptr) {} + +  void RequestCall(GenericServerContext* ctx, +                   GenericServerAsyncReaderWriter* reader_writer, +                   ::grpc::CompletionQueue* call_cq, +                   ::grpc::ServerCompletionQueue* notification_cq, void* tag); + + private: +  friend class grpc::Server; +  grpc::Server* server_; +}; + +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +namespace experimental { +#endif + +/// \a ServerGenericBidiReactor is the reactor class for bidi streaming RPCs +/// invoked on a CallbackGenericService. It is just a ServerBidi reactor with +/// ByteBuffer arguments. +using ServerGenericBidiReactor = ServerBidiReactor<ByteBuffer, ByteBuffer>; + +class GenericCallbackServerContext final : public grpc::CallbackServerContext { + public: +  const TString& method() const { return method_; } +  const TString& host() const { return host_; } + + private: +  friend class ::grpc::Server; + +  TString method_; +  TString host_; +}; + +/// \a CallbackGenericService is the base class for generic services implemented +/// using the callback API and registered through the ServerBuilder using +/// RegisterCallbackGenericService. +class CallbackGenericService { + public: +  CallbackGenericService() {} +  virtual ~CallbackGenericService() {} + +  /// The "method handler" for the generic API. This function should be +  /// overridden to provide a ServerGenericBidiReactor that implements the +  /// application-level interface for this RPC. Unimplemented by default. +  virtual ServerGenericBidiReactor* CreateReactor( +      GenericCallbackServerContext* /*ctx*/) { +    class Reactor : public ServerGenericBidiReactor { +     public: +      Reactor() { this->Finish(Status(StatusCode::UNIMPLEMENTED, "")); } +      void OnDone() override { delete this; } +    }; +    return new Reactor; +  } + + private: +  friend class grpc::Server; + +  internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() { +    return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>( +        [this](::grpc::CallbackServerContext* ctx) { +          return CreateReactor(static_cast<GenericCallbackServerContext*>(ctx)); +        }); +  } + +  grpc::Server* server_{nullptr}; +}; + +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +}  // namespace experimental +#endif +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h new file mode 100644 index 00000000000..aaee93df933 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_stream.h @@ -0,0 +1,1131 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H +#define GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/server_context.h> +#include <grpcpp/impl/codegen/service_type.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +namespace internal { +/// Common interface for all client side asynchronous streaming. +class ClientAsyncStreamingInterface { + public: +  virtual ~ClientAsyncStreamingInterface() {} + +  /// Start the call that was set up by the constructor, but only if the +  /// constructor was invoked through the "Prepare" API which doesn't actually +  /// start the call +  virtual void StartCall(void* tag) = 0; + +  /// Request notification of the reading of the initial metadata. Completion +  /// will be notified by \a tag on the associated completion queue. +  /// This call is optional, but if it is used, it cannot be used concurrently +  /// with or after the \a AsyncReaderInterface::Read method. +  /// +  /// \param[in] tag Tag identifying this request. +  virtual void ReadInitialMetadata(void* tag) = 0; + +  /// Indicate that the stream is to be finished and request notification for +  /// when the call has been ended. +  /// Should not be used concurrently with other operations. +  /// +  /// It is appropriate to call this method exactly once when both: +  ///   * the client side has no more message to send +  ///     (this can be declared implicitly by calling this method, or +  ///     explicitly through an earlier call to the <i>WritesDone</i> method +  ///     of the class in use, e.g. \a ClientAsyncWriterInterface::WritesDone or +  ///     \a ClientAsyncReaderWriterInterface::WritesDone). +  ///   * there are no more messages to be received from the server (this can +  ///     be known implicitly by the calling code, or explicitly from an +  ///     earlier call to \a AsyncReaderInterface::Read that yielded a failed +  ///     result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false'). +  /// +  /// The tag will be returned when either: +  /// - all incoming messages have been read and the server has returned +  ///   a status. +  /// - the server has returned a non-OK status. +  /// - the call failed for some reason and the library generated a +  ///   status. +  /// +  /// Note that implementations of this method attempt to receive initial +  /// metadata from the server if initial metadata hasn't yet been received. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[out] status To be updated with the operation status. +  virtual void Finish(::grpc::Status* status, void* tag) = 0; +}; + +/// An interface that yields a sequence of messages of type \a R. +template <class R> +class AsyncReaderInterface { + public: +  virtual ~AsyncReaderInterface() {} + +  /// Read a message of type \a R into \a msg. 
Completion will be notified by \a +  /// tag on the associated completion queue. +  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It +  /// should not be called concurrently with other streaming APIs +  /// on the same stream. It is not meaningful to call it concurrently +  /// with another \a AsyncReaderInterface::Read on the same stream since reads +  /// on the same stream are delivered in order. +  /// +  /// \param[out] msg Where to eventually store the read message. +  /// \param[in] tag The tag identifying the operation. +  /// +  /// Side effect: note that this method attempt to receive initial metadata for +  /// a stream if it hasn't yet been received. +  virtual void Read(R* msg, void* tag) = 0; +}; + +/// An interface that can be fed a sequence of messages of type \a W. +template <class W> +class AsyncWriterInterface { + public: +  virtual ~AsyncWriterInterface() {} + +  /// Request the writing of \a msg with identifying tag \a tag. +  /// +  /// Only one write may be outstanding at any given time. This means that +  /// after calling Write, one must wait to receive \a tag from the completion +  /// queue BEFORE calling Write again. +  /// This is thread-safe with respect to \a AsyncReaderInterface::Read +  /// +  /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to +  /// to deallocate once Write returns. +  /// +  /// \param[in] msg The message to be written. +  /// \param[in] tag The tag identifying the operation. +  virtual void Write(const W& msg, void* tag) = 0; + +  /// Request the writing of \a msg using WriteOptions \a options with +  /// identifying tag \a tag. +  /// +  /// Only one write may be outstanding at any given time. This means that +  /// after calling Write, one must wait to receive \a tag from the completion +  /// queue BEFORE calling Write again. +  /// WriteOptions \a options is used to set the write options of this message. +  /// This is thread-safe with respect to \a AsyncReaderInterface::Read +  /// +  /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to +  /// to deallocate once Write returns. +  /// +  /// \param[in] msg The message to be written. +  /// \param[in] options The WriteOptions to be used to write this message. +  /// \param[in] tag The tag identifying the operation. +  virtual void Write(const W& msg, ::grpc::WriteOptions options, void* tag) = 0; + +  /// Request the writing of \a msg and coalesce it with the writing +  /// of trailing metadata, using WriteOptions \a options with +  /// identifying tag \a tag. +  /// +  /// For client, WriteLast is equivalent of performing Write and +  /// WritesDone in a single step. +  /// For server, WriteLast buffers the \a msg. The writing of \a msg is held +  /// until Finish is called, where \a msg and trailing metadata are coalesced +  /// and write is initiated. Note that WriteLast can only buffer \a msg up to +  /// the flow control window size. If \a msg size is larger than the window +  /// size, it will be sent on wire without buffering. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg, so it is safe to +  /// to deallocate once Write returns. +  /// +  /// \param[in] msg The message to be written. +  /// \param[in] options The WriteOptions to be used to write this message. +  /// \param[in] tag The tag identifying the operation. 
+  void WriteLast(const W& msg, ::grpc::WriteOptions options, void* tag) { +    Write(msg, options.set_last_message(), tag); +  } +}; + +}  // namespace internal + +template <class R> +class ClientAsyncReaderInterface +    : public internal::ClientAsyncStreamingInterface, +      public internal::AsyncReaderInterface<R> {}; + +namespace internal { +template <class R> +class ClientAsyncReaderFactory { + public: +  /// Create a stream object. +  /// Write the first request out if \a start is set. +  /// \a tag will be notified on \a cq when the call has been started and +  /// \a request has been written out. If \a start is not set, \a tag must be +  /// nullptr and the actual call must be initiated by StartCall +  /// Note that \a context will be used to fill in custom initial metadata +  /// used to send to the server when starting the call. +  template <class W> +  static ClientAsyncReader<R>* Create(::grpc::ChannelInterface* channel, +                                      ::grpc::CompletionQueue* cq, +                                      const ::grpc::internal::RpcMethod& method, +                                      ::grpc::ClientContext* context, +                                      const W& request, bool start, void* tag) { +    ::grpc::internal::Call call = channel->CreateCall(method, context, cq); +    return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientAsyncReader<R>))) +        ClientAsyncReader<R>(call, context, request, start, tag); +  } +}; +}  // namespace internal + +/// Async client-side API for doing server-streaming RPCs, +/// where the incoming message stream coming from the server has +/// messages of type \a R. +template <class R> +class ClientAsyncReader final : public ClientAsyncReaderInterface<R> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReader)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall(void* tag) override { +    GPR_CODEGEN_ASSERT(!started_); +    started_ = true; +    StartCallInternal(tag); +  } + +  /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata +  /// method for semantics. +  /// +  /// Side effect: +  ///   - upon receiving initial metadata from the server, +  ///     the \a ClientContext associated with this call is updated, and the +  ///     calling code can access the received metadata through the +  ///     \a ClientContext. 
+  void ReadInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.RecvInitialMetadata(context_); +    call_.PerformOps(&meta_ops_); +  } + +  void Read(R* msg, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    read_ops_.set_output_tag(tag); +    if (!context_->initial_metadata_received_) { +      read_ops_.RecvInitialMetadata(context_); +    } +    read_ops_.RecvMessage(msg); +    call_.PerformOps(&read_ops_); +  } + +  /// See the \a ClientAsyncStreamingInterface.Finish method for semantics. +  /// +  /// Side effect: +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible initial and trailing metadata received from the server. +  void Finish(::grpc::Status* status, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    finish_ops_.set_output_tag(tag); +    if (!context_->initial_metadata_received_) { +      finish_ops_.RecvInitialMetadata(context_); +    } +    finish_ops_.ClientRecvStatus(context_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  friend class internal::ClientAsyncReaderFactory<R>; +  template <class W> +  ClientAsyncReader(::grpc::internal::Call call, ::grpc::ClientContext* context, +                    const W& request, bool start, void* tag) +      : context_(context), call_(call), started_(start) { +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok()); +    init_ops_.ClientSendClose(); +    if (start) { +      StartCallInternal(tag); +    } else { +      GPR_CODEGEN_ASSERT(tag == nullptr); +    } +  } + +  void StartCallInternal(void* tag) { +    init_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                  context_->initial_metadata_flags()); +    init_ops_.set_output_tag(tag); +    call_.PerformOps(&init_ops_); +  } + +  ::grpc::ClientContext* context_; +  ::grpc::internal::Call call_; +  bool started_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpClientSendClose> +      init_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpRecvMessage<R>> +      read_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +}; + +/// Common interface for client side asynchronous writing. +template <class W> +class ClientAsyncWriterInterface +    : public internal::ClientAsyncStreamingInterface, +      public internal::AsyncWriterInterface<W> { + public: +  /// Signal the client is done with the writes (half-close the client stream). +  /// Thread-safe with respect to \a AsyncReaderInterface::Read +  /// +  /// \param[in] tag The tag identifying the operation. +  virtual void WritesDone(void* tag) = 0; +}; + +namespace internal { +template <class W> +class ClientAsyncWriterFactory { + public: +  /// Create a stream object. +  /// Start the RPC if \a start is set +  /// \a tag will be notified on \a cq when the call has been started (i.e. +  /// intitial metadata sent) and \a request has been written out. 
+  /// If \a start is not set, \a tag must be nullptr and the actual call +  /// must be initiated by StartCall +  /// Note that \a context will be used to fill in custom initial metadata +  /// used to send to the server when starting the call. +  /// \a response will be filled in with the single expected response +  /// message from the server upon a successful call to the \a Finish +  /// method of this instance. +  template <class R> +  static ClientAsyncWriter<W>* Create(::grpc::ChannelInterface* channel, +                                      ::grpc::CompletionQueue* cq, +                                      const ::grpc::internal::RpcMethod& method, +                                      ::grpc::ClientContext* context, +                                      R* response, bool start, void* tag) { +    ::grpc::internal::Call call = channel->CreateCall(method, context, cq); +    return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientAsyncWriter<W>))) +        ClientAsyncWriter<W>(call, context, response, start, tag); +  } +}; +}  // namespace internal + +/// Async API on the client side for doing client-streaming RPCs, +/// where the outgoing message stream going to the server contains +/// messages of type \a W. +template <class W> +class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncWriter)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall(void* tag) override { +    GPR_CODEGEN_ASSERT(!started_); +    started_ = true; +    StartCallInternal(tag); +  } + +  /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for +  /// semantics. +  /// +  /// Side effect: +  ///   - upon receiving initial metadata from the server, the \a ClientContext +  ///     associated with this call is updated, and the calling code can access +  ///     the received metadata through the \a ClientContext. 
+  void ReadInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.RecvInitialMetadata(context_); +    call_.PerformOps(&meta_ops_); +  } + +  void Write(const W& msg, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    if (options.is_last_message()) { +      options.set_buffer_hint(); +      write_ops_.ClientSendClose(); +    } +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void WritesDone(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    write_ops_.ClientSendClose(); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ClientAsyncStreamingInterface.Finish method for semantics. +  /// +  /// Side effect: +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible initial and trailing metadata received from the server. +  ///   - attempts to fill in the \a response parameter passed to this class's +  ///     constructor with the server's response message. +  void Finish(::grpc::Status* status, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    finish_ops_.set_output_tag(tag); +    if (!context_->initial_metadata_received_) { +      finish_ops_.RecvInitialMetadata(context_); +    } +    finish_ops_.ClientRecvStatus(context_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  friend class internal::ClientAsyncWriterFactory<W>; +  template <class R> +  ClientAsyncWriter(::grpc::internal::Call call, ::grpc::ClientContext* context, +                    R* response, bool start, void* tag) +      : context_(context), call_(call), started_(start) { +    finish_ops_.RecvMessage(response); +    finish_ops_.AllowNoMessage(); +    if (start) { +      StartCallInternal(tag); +    } else { +      GPR_CODEGEN_ASSERT(tag == nullptr); +    } +  } + +  void StartCallInternal(void* tag) { +    write_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                   context_->initial_metadata_flags()); +    // if corked bit is set in context, we just keep the initial metadata +    // buffered up to coalesce with later message send. No op is performed. 
+    if (!context_->initial_metadata_corked_) { +      write_ops_.set_output_tag(tag); +      call_.PerformOps(&write_ops_); +    } +  } + +  ::grpc::ClientContext* context_; +  ::grpc::internal::Call call_; +  bool started_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpClientSendClose> +      write_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpGenericRecvMessage, +                              ::grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +}; + +/// Async client-side interface for bi-directional streaming, +/// where the client-to-server message stream has messages of type \a W, +/// and the server-to-client message stream has messages of type \a R. +template <class W, class R> +class ClientAsyncReaderWriterInterface +    : public internal::ClientAsyncStreamingInterface, +      public internal::AsyncWriterInterface<W>, +      public internal::AsyncReaderInterface<R> { + public: +  /// Signal the client is done with the writes (half-close the client stream). +  /// Thread-safe with respect to \a AsyncReaderInterface::Read +  /// +  /// \param[in] tag The tag identifying the operation. +  virtual void WritesDone(void* tag) = 0; +}; + +namespace internal { +template <class W, class R> +class ClientAsyncReaderWriterFactory { + public: +  /// Create a stream object. +  /// Start the RPC request if \a start is set. +  /// \a tag will be notified on \a cq when the call has been started (i.e. +  /// intitial metadata sent). If \a start is not set, \a tag must be +  /// nullptr and the actual call must be initiated by StartCall +  /// Note that \a context will be used to fill in custom initial metadata +  /// used to send to the server when starting the call. +  static ClientAsyncReaderWriter<W, R>* Create( +      ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq, +      const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context, +      bool start, void* tag) { +    ::grpc::internal::Call call = channel->CreateCall(method, context, cq); + +    return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientAsyncReaderWriter<W, R>))) +        ClientAsyncReaderWriter<W, R>(call, context, start, tag); +  } +}; +}  // namespace internal + +/// Async client-side interface for bi-directional streaming, +/// where the outgoing message stream going to the server +/// has messages of type \a W,  and the incoming message stream coming +/// from the server has messages of type \a R. +template <class W, class R> +class ClientAsyncReaderWriter final +    : public ClientAsyncReaderWriterInterface<W, R> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncReaderWriter)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. 
+  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall(void* tag) override { +    GPR_CODEGEN_ASSERT(!started_); +    started_ = true; +    StartCallInternal(tag); +  } + +  /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method +  /// for semantics of this method. +  /// +  /// Side effect: +  ///   - upon receiving initial metadata from the server, the \a ClientContext +  ///     is updated with it, and then the receiving initial metadata can +  ///     be accessed through this \a ClientContext. +  void ReadInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.RecvInitialMetadata(context_); +    call_.PerformOps(&meta_ops_); +  } + +  void Read(R* msg, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    read_ops_.set_output_tag(tag); +    if (!context_->initial_metadata_received_) { +      read_ops_.RecvInitialMetadata(context_); +    } +    read_ops_.RecvMessage(msg); +    call_.PerformOps(&read_ops_); +  } + +  void Write(const W& msg, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    if (options.is_last_message()) { +      options.set_buffer_hint(); +      write_ops_.ClientSendClose(); +    } +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void WritesDone(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    write_ops_.set_output_tag(tag); +    write_ops_.ClientSendClose(); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ClientAsyncStreamingInterface.Finish method for semantics. +  /// Side effect +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible initial and trailing metadata sent from the server. +  void Finish(::grpc::Status* status, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    finish_ops_.set_output_tag(tag); +    if (!context_->initial_metadata_received_) { +      finish_ops_.RecvInitialMetadata(context_); +    } +    finish_ops_.ClientRecvStatus(context_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  friend class internal::ClientAsyncReaderWriterFactory<W, R>; +  ClientAsyncReaderWriter(::grpc::internal::Call call, +                          ::grpc::ClientContext* context, bool start, void* tag) +      : context_(context), call_(call), started_(start) { +    if (start) { +      StartCallInternal(tag); +    } else { +      GPR_CODEGEN_ASSERT(tag == nullptr); +    } +  } + +  void StartCallInternal(void* tag) { +    write_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                   context_->initial_metadata_flags()); +    // if corked bit is set in context, we just keep the initial metadata +    // buffered up to coalesce with later message send. No op is performed. 
+    if (!context_->initial_metadata_corked_) { +      write_ops_.set_output_tag(tag); +      call_.PerformOps(&write_ops_); +    } +  } + +  ::grpc::ClientContext* context_; +  ::grpc::internal::Call call_; +  bool started_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpRecvMessage<R>> +      read_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpClientSendClose> +      write_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +}; + +template <class W, class R> +class ServerAsyncReaderInterface +    : public ::grpc::internal::ServerAsyncStreamingInterface, +      public internal::AsyncReaderInterface<R> { + public: +  /// Indicate that the stream is to be finished with a certain status code +  /// and also send out \a msg response to the client. +  /// Request notification for when the server has sent the response and the +  /// appropriate signals to the client to end the call. +  /// Should not be used concurrently with other operations. +  /// +  /// It is appropriate to call this method when: +  ///   * all messages from the client have been received (either known +  ///     implictly, or explicitly because a previous +  ///     \a AsyncReaderInterface::Read operation with a non-ok result, +  ///     e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false'). +  /// +  /// This operation will end when the server has finished sending out initial +  /// metadata (if not sent already), response message, and status, or if +  /// some failure occurred when trying to do so. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg or \a status, so it +  /// is safe to deallocate once Finish returns. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of this call. +  /// \param[in] msg To be sent to the client as the response for this call. +  virtual void Finish(const W& msg, const ::grpc::Status& status, +                      void* tag) = 0; + +  /// Indicate that the stream is to be finished with a certain +  /// non-OK status code. +  /// Request notification for when the server has sent the appropriate +  /// signals to the client to end the call. +  /// Should not be used concurrently with other operations. +  /// +  /// This call is meant to end the call with some error, and can be called at +  /// any point that the server would like to "fail" the call (though note +  /// this shouldn't be called concurrently with any other "sending" call, like +  /// \a AsyncWriterInterface::Write). +  /// +  /// This operation will end when the server has finished sending out initial +  /// metadata (if not sent already), and status, or if some failure occurred +  /// when trying to do so. +  /// +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once FinishWithError returns. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of this call. +  ///     - Note: \a status must have a non-OK code. 
+  virtual void FinishWithError(const ::grpc::Status& status, void* tag) = 0; +}; + +/// Async server-side API for doing client-streaming RPCs, +/// where the incoming message stream from the client has messages of type \a R, +/// and the single response message sent from the server is type \a W. +template <class W, class R> +class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> { + public: +  explicit ServerAsyncReader(::grpc::ServerContext* ctx) +      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} + +  /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics. +  /// +  /// Implicit input parameter: +  ///   - The initial metadata that will be sent to the client from this op will +  ///     be taken from the \a ServerContext associated with the call. +  void SendInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                  ctx_->initial_metadata_flags()); +    if (ctx_->compression_level_set()) { +      meta_ops_.set_compression_level(ctx_->compression_level()); +    } +    ctx_->sent_initial_metadata_ = true; +    call_.PerformOps(&meta_ops_); +  } + +  void Read(R* msg, void* tag) override { +    read_ops_.set_output_tag(tag); +    read_ops_.RecvMessage(msg); +    call_.PerformOps(&read_ops_); +  } + +  /// See the \a ServerAsyncReaderInterface.Read method for semantics +  /// +  /// Side effect: +  ///   - also sends initial metadata if not alreay sent. +  ///   - uses the \a ServerContext associated with this call to send possible +  ///     initial and trailing metadata. +  /// +  /// Note: \a msg is not sent if \a status has a non-OK code. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it +  /// is safe to deallocate once Finish returns. +  void Finish(const W& msg, const ::grpc::Status& status, void* tag) override { +    finish_ops_.set_output_tag(tag); +    if (!ctx_->sent_initial_metadata_) { +      finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                      ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        finish_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    // The response is dropped if the status is not OK. +    if (status.ok()) { +      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, +                                   finish_ops_.SendMessage(msg)); +    } else { +      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    } +    call_.PerformOps(&finish_ops_); +  } + +  /// See the \a ServerAsyncReaderInterface.Read method for semantics +  /// +  /// Side effect: +  ///   - also sends initial metadata if not alreay sent. +  ///   - uses the \a ServerContext associated with this call to send possible +  ///     initial and trailing metadata. +  /// +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once FinishWithError returns. 
+  void FinishWithError(const ::grpc::Status& status, void* tag) override { +    GPR_CODEGEN_ASSERT(!status.ok()); +    finish_ops_.set_output_tag(tag); +    if (!ctx_->sent_initial_metadata_) { +      finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                      ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        finish_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  void BindCall(::grpc::internal::Call* call) override { call_ = *call; } + +  ::grpc::internal::Call call_; +  ::grpc::ServerContext* ctx_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpServerSendStatus> +      finish_ops_; +}; + +template <class W> +class ServerAsyncWriterInterface +    : public ::grpc::internal::ServerAsyncStreamingInterface, +      public internal::AsyncWriterInterface<W> { + public: +  /// Indicate that the stream is to be finished with a certain status code. +  /// Request notification for when the server has sent the appropriate +  /// signals to the client to end the call. +  /// Should not be used concurrently with other operations. +  /// +  /// It is appropriate to call this method when either: +  ///   * all messages from the client have been received (either known +  ///     implictly, or explicitly because a previous \a +  ///     AsyncReaderInterface::Read operation with a non-ok +  ///     result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' with 'false'. +  ///   * it is desired to end the call early with some non-OK status code. +  /// +  /// This operation will end when the server has finished sending out initial +  /// metadata (if not sent already), response message, and status, or if +  /// some failure occurred when trying to do so. +  /// +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once Finish returns. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of this call. +  virtual void Finish(const ::grpc::Status& status, void* tag) = 0; + +  /// Request the writing of \a msg and coalesce it with trailing metadata which +  /// contains \a status, using WriteOptions options with +  /// identifying tag \a tag. +  /// +  /// WriteAndFinish is equivalent of performing WriteLast and Finish +  /// in a single step. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it +  /// is safe to deallocate once WriteAndFinish returns. +  /// +  /// \param[in] msg The message to be written. +  /// \param[in] options The WriteOptions to be used to write this message. +  /// \param[in] status The Status that server returns to client. +  /// \param[in] tag The tag identifying the operation. 
+  virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options, +                              const ::grpc::Status& status, void* tag) = 0; +}; + +/// Async server-side API for doing server streaming RPCs, +/// where the outgoing message stream from the server has messages of type \a W. +template <class W> +class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> { + public: +  explicit ServerAsyncWriter(::grpc::ServerContext* ctx) +      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} + +  /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics. +  /// +  /// Implicit input parameter: +  ///   - The initial metadata that will be sent to the client from this op will +  ///     be taken from the \a ServerContext associated with the call. +  /// +  /// \param[in] tag Tag identifying this request. +  void SendInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                  ctx_->initial_metadata_flags()); +    if (ctx_->compression_level_set()) { +      meta_ops_.set_compression_level(ctx_->compression_level()); +    } +    ctx_->sent_initial_metadata_ = true; +    call_.PerformOps(&meta_ops_); +  } + +  void Write(const W& msg, void* tag) override { +    write_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&write_ops_); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override { +    write_ops_.set_output_tag(tag); +    if (options.is_last_message()) { +      options.set_buffer_hint(); +    } + +    EnsureInitialMetadataSent(&write_ops_); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ServerAsyncWriterInterface.WriteAndFinish method for semantics. +  /// +  /// Implicit input parameter: +  ///   - the \a ServerContext associated with this call is used +  ///     for sending trailing (and initial) metadata to the client. +  /// +  /// Note: \a status must have an OK code. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it +  /// is safe to deallocate once WriteAndFinish returns. +  void WriteAndFinish(const W& msg, ::grpc::WriteOptions options, +                      const ::grpc::Status& status, void* tag) override { +    write_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&write_ops_); +    options.set_buffer_hint(); +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ServerAsyncWriterInterface.Finish method for semantics. +  /// +  /// Implicit input parameter: +  ///   - the \a ServerContext associated with this call is used for sending +  ///     trailing (and initial if not already sent) metadata to the client. +  /// +  /// Note: there are no restrictions are the code of +  /// \a status,it may be non-OK +  /// +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once Finish returns. 
+  void Finish(const ::grpc::Status& status, void* tag) override { +    finish_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&finish_ops_); +    finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  void BindCall(::grpc::internal::Call* call) override { call_ = *call; } + +  template <class T> +  void EnsureInitialMetadataSent(T* ops) { +    if (!ctx_->sent_initial_metadata_) { +      ops->SendInitialMetadata(&ctx_->initial_metadata_, +                               ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        ops->set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +  } + +  ::grpc::internal::Call call_; +  ::grpc::ServerContext* ctx_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpServerSendStatus> +      write_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpServerSendStatus> +      finish_ops_; +}; + +/// Server-side interface for asynchronous bi-directional streaming. +template <class W, class R> +class ServerAsyncReaderWriterInterface +    : public ::grpc::internal::ServerAsyncStreamingInterface, +      public internal::AsyncWriterInterface<W>, +      public internal::AsyncReaderInterface<R> { + public: +  /// Indicate that the stream is to be finished with a certain status code. +  /// Request notification for when the server has sent the appropriate +  /// signals to the client to end the call. +  /// Should not be used concurrently with other operations. +  /// +  /// It is appropriate to call this method when either: +  ///   * all messages from the client have been received (either known +  ///     implictly, or explicitly because a previous \a +  ///     AsyncReaderInterface::Read operation +  ///     with a non-ok result (e.g., cq->Next(&read_tag, &ok) filled in 'ok' +  ///     with 'false'. +  ///   * it is desired to end the call early with some non-OK status code. +  /// +  /// This operation will end when the server has finished sending out initial +  /// metadata (if not sent already), response message, and status, or if some +  /// failure occurred when trying to do so. +  /// +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once Finish returns. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of this call. +  virtual void Finish(const ::grpc::Status& status, void* tag) = 0; + +  /// Request the writing of \a msg and coalesce it with trailing metadata which +  /// contains \a status, using WriteOptions options with +  /// identifying tag \a tag. +  /// +  /// WriteAndFinish is equivalent of performing WriteLast and Finish in a +  /// single step. +  /// +  /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it +  /// is safe to deallocate once WriteAndFinish returns. +  /// +  /// \param[in] msg The message to be written. +  /// \param[in] options The WriteOptions to be used to write this message. +  /// \param[in] status The Status that server returns to client. +  /// \param[in] tag The tag identifying the operation. 
+  virtual void WriteAndFinish(const W& msg, ::grpc::WriteOptions options, +                              const ::grpc::Status& status, void* tag) = 0; +}; + +/// Async server-side API for doing bidirectional streaming RPCs, +/// where the incoming message stream coming from the client has messages of +/// type \a R, and the outgoing message stream coming from the server has +/// messages of type \a W. +template <class W, class R> +class ServerAsyncReaderWriter final +    : public ServerAsyncReaderWriterInterface<W, R> { + public: +  explicit ServerAsyncReaderWriter(::grpc::ServerContext* ctx) +      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} + +  /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics. +  /// +  /// Implicit input parameter: +  ///   - The initial metadata that will be sent to the client from this op will +  ///     be taken from the \a ServerContext associated with the call. +  /// +  /// \param[in] tag Tag identifying this request. +  void SendInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); + +    meta_ops_.set_output_tag(tag); +    meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                  ctx_->initial_metadata_flags()); +    if (ctx_->compression_level_set()) { +      meta_ops_.set_compression_level(ctx_->compression_level()); +    } +    ctx_->sent_initial_metadata_ = true; +    call_.PerformOps(&meta_ops_); +  } + +  void Read(R* msg, void* tag) override { +    read_ops_.set_output_tag(tag); +    read_ops_.RecvMessage(msg); +    call_.PerformOps(&read_ops_); +  } + +  void Write(const W& msg, void* tag) override { +    write_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&write_ops_); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); +    call_.PerformOps(&write_ops_); +  } + +  void Write(const W& msg, ::grpc::WriteOptions options, void* tag) override { +    write_ops_.set_output_tag(tag); +    if (options.is_last_message()) { +      options.set_buffer_hint(); +    } +    EnsureInitialMetadataSent(&write_ops_); +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ServerAsyncReaderWriterInterface.WriteAndFinish +  /// method for semantics. +  /// +  /// Implicit input parameter: +  ///   - the \a ServerContext associated with this call is used +  ///     for sending trailing (and initial) metadata to the client. +  /// +  /// Note: \a status must have an OK code. +  // +  /// gRPC doesn't take ownership or a reference to \a msg and \a status, so it +  /// is safe to deallocate once WriteAndFinish returns. +  void WriteAndFinish(const W& msg, ::grpc::WriteOptions options, +                      const ::grpc::Status& status, void* tag) override { +    write_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&write_ops_); +    options.set_buffer_hint(); +    GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg, options).ok()); +    write_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&write_ops_); +  } + +  /// See the \a ServerAsyncReaderWriterInterface.Finish method for semantics. +  /// +  /// Implicit input parameter: +  ///   - the \a ServerContext associated with this call is used for sending +  ///     trailing (and initial if not already sent) metadata to the client. 
+  /// +  /// Note: there are no restrictions are the code of \a status, +  /// it may be non-OK +  // +  /// gRPC doesn't take ownership or a reference to \a status, so it is safe to +  /// to deallocate once Finish returns. +  void Finish(const ::grpc::Status& status, void* tag) override { +    finish_ops_.set_output_tag(tag); +    EnsureInitialMetadataSent(&finish_ops_); + +    finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&finish_ops_); +  } + + private: +  friend class ::grpc::Server; + +  void BindCall(::grpc::internal::Call* call) override { call_ = *call; } + +  template <class T> +  void EnsureInitialMetadataSent(T* ops) { +    if (!ctx_->sent_initial_metadata_) { +      ops->SendInitialMetadata(&ctx_->initial_metadata_, +                               ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        ops->set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +  } + +  ::grpc::internal::Call call_; +  ::grpc::ServerContext* ctx_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +      meta_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> read_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpServerSendStatus> +      write_ops_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpServerSendStatus> +      finish_ops_; +}; + +}  // namespace grpc +#endif  // GRPCPP_IMPL_CODEGEN_ASYNC_STREAM_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h new file mode 100644 index 00000000000..3deeda8c7fc --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/async_unary_call.h @@ -0,0 +1,314 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H +#define GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/client_context.h> +#include <grpcpp/impl/codegen/server_context.h> +#include <grpcpp/impl/codegen/service_type.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +/// An interface relevant for async client side unary RPCs (which send +/// one request message to a server and receive one response message). 
+template <class R> +class ClientAsyncResponseReaderInterface { + public: +  virtual ~ClientAsyncResponseReaderInterface() {} + +  /// Start the call that was set up by the constructor, but only if the +  /// constructor was invoked through the "Prepare" API which doesn't actually +  /// start the call +  virtual void StartCall() = 0; + +  /// Request notification of the reading of initial metadata. Completion +  /// will be notified by \a tag on the associated completion queue. +  /// This call is optional, but if it is used, it cannot be used concurrently +  /// with or after the \a Finish method. +  /// +  /// \param[in] tag Tag identifying this request. +  virtual void ReadInitialMetadata(void* tag) = 0; + +  /// Request to receive the server's response \a msg and final \a status for +  /// the call, and to notify \a tag on this call's completion queue when +  /// finished. +  /// +  /// This function will return when either: +  /// - when the server's response message and status have been received. +  /// - when the server has returned a non-OK status (no message expected in +  ///   this case). +  /// - when the call failed for some reason and the library generated a +  ///   non-OK status. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[out] status To be updated with the operation status. +  /// \param[out] msg To be filled in with the server's response message. +  virtual void Finish(R* msg, ::grpc::Status* status, void* tag) = 0; +}; + +namespace internal { +template <class R> +class ClientAsyncResponseReaderFactory { + public: +  /// Start a call and write the request out if \a start is set. +  /// \a tag will be notified on \a cq when the call has been started (i.e. +  /// intitial metadata sent) and \a request has been written out. +  /// If \a start is not set, the actual call must be initiated by StartCall +  /// Note that \a context will be used to fill in custom initial metadata +  /// used to send to the server when starting the call. +  template <class W> +  static ClientAsyncResponseReader<R>* Create( +      ::grpc::ChannelInterface* channel, ::grpc::CompletionQueue* cq, +      const ::grpc::internal::RpcMethod& method, ::grpc::ClientContext* context, +      const W& request, bool start) { +    ::grpc::internal::Call call = channel->CreateCall(method, context, cq); +    return new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientAsyncResponseReader<R>))) +        ClientAsyncResponseReader<R>(call, context, request, start); +  } +}; +}  // namespace internal + +/// Async API for client-side unary RPCs, where the message response +/// received from the server is of type \a R. +template <class R> +class ClientAsyncResponseReader final +    : public ClientAsyncResponseReaderInterface<R> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientAsyncResponseReader)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. 
+  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall() override { +    GPR_CODEGEN_ASSERT(!started_); +    started_ = true; +    StartCallInternal(); +  } + +  /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for +  /// semantics. +  /// +  /// Side effect: +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible initial and trailing metadata sent from the server. +  void ReadInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    single_buf.set_output_tag(tag); +    single_buf.RecvInitialMetadata(context_); +    call_.PerformOps(&single_buf); +    initial_metadata_read_ = true; +  } + +  /// See \a ClientAysncResponseReaderInterface::Finish for semantics. +  /// +  /// Side effect: +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible initial and trailing metadata sent from the server. +  void Finish(R* msg, ::grpc::Status* status, void* tag) override { +    GPR_CODEGEN_ASSERT(started_); +    if (initial_metadata_read_) { +      finish_buf.set_output_tag(tag); +      finish_buf.RecvMessage(msg); +      finish_buf.AllowNoMessage(); +      finish_buf.ClientRecvStatus(context_, status); +      call_.PerformOps(&finish_buf); +    } else { +      single_buf.set_output_tag(tag); +      single_buf.RecvInitialMetadata(context_); +      single_buf.RecvMessage(msg); +      single_buf.AllowNoMessage(); +      single_buf.ClientRecvStatus(context_, status); +      call_.PerformOps(&single_buf); +    } +  } + + private: +  friend class internal::ClientAsyncResponseReaderFactory<R>; +  ::grpc::ClientContext* const context_; +  ::grpc::internal::Call call_; +  bool started_; +  bool initial_metadata_read_ = false; + +  template <class W> +  ClientAsyncResponseReader(::grpc::internal::Call call, +                            ::grpc::ClientContext* context, const W& request, +                            bool start) +      : context_(context), call_(call), started_(start) { +    // Bind the metadata at time of StartCallInternal but set up the rest here +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(single_buf.SendMessage(request).ok()); +    single_buf.ClientSendClose(); +    if (start) StartCallInternal(); +  } + +  void StartCallInternal() { +    single_buf.SendInitialMetadata(&context_->send_initial_metadata_, +                                   context_->initial_metadata_flags()); +  } + +  // disable operator new +  static void* operator new(std::size_t size); +  static void* operator new(std::size_t /*size*/, void* p) { return p; } + +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpClientSendClose, +                              ::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpRecvMessage<R>, +                              ::grpc::internal::CallOpClientRecvStatus> +      single_buf; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>, +                              ::grpc::internal::CallOpClientRecvStatus> +      finish_buf; +}; + +/// Async server-side API for handling unary calls, where the single +/// response message sent to the client is of type \a W. 
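Before the class definition below, a hedged sketch of how async service code usually drives a ServerAsyncResponseWriter. `Greeter::AsyncService` and the message types are hypothetical generated names, not part of this header.

```cpp
#include <grpcpp/grpcpp.h>

// Hypothetical generated service/messages; the grpc:: types are the ones
// declared in these headers.
void HandleOneSayHello(Greeter::AsyncService* service,
                       grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  HelloRequest request;
  grpc::ServerAsyncResponseWriter<HelloReply> responder(&ctx);

  // Ask for the next SayHello call to be delivered into request/responder.
  service->RequestSayHello(&ctx, &request, &responder, cq, cq, (void*)1);

  void* tag = nullptr;
  bool ok = false;
  cq->Next(&tag, &ok);  // wakes up once a call has arrived (tag == (void*)1)

  HelloReply reply;  // ...populate reply from request...
  responder.Finish(reply, grpc::Status::OK, (void*)2);
  cq->Next(&tag, &ok);  // wakes up again when the response has been sent
}
```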
+template <class W> +class ServerAsyncResponseWriter final +    : public ::grpc::internal::ServerAsyncStreamingInterface { + public: +  explicit ServerAsyncResponseWriter(::grpc::ServerContext* ctx) +      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} + +  /// See \a ServerAsyncStreamingInterface::SendInitialMetadata for semantics. +  /// +  /// Side effect: +  ///   The initial metadata that will be sent to the client from this op will +  ///   be taken from the \a ServerContext associated with the call. +  /// +  /// \param[in] tag Tag identifying this request. +  void SendInitialMetadata(void* tag) override { +    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); + +    meta_buf_.set_output_tag(tag); +    meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_, +                                  ctx_->initial_metadata_flags()); +    if (ctx_->compression_level_set()) { +      meta_buf_.set_compression_level(ctx_->compression_level()); +    } +    ctx_->sent_initial_metadata_ = true; +    call_.PerformOps(&meta_buf_); +  } + +  /// Indicate that the stream is to be finished and request notification +  /// when the server has sent the appropriate signals to the client to +  /// end the call. Should not be used concurrently with other operations. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of the call. +  /// \param[in] msg Message to be sent to the client. +  /// +  /// Side effect: +  ///   - also sends initial metadata if not already sent (using the +  ///     \a ServerContext associated with this call). +  /// +  /// Note: if \a status has a non-OK code, then \a msg will not be sent, +  /// and the client will receive only the status with possible trailing +  /// metadata. +  void Finish(const W& msg, const ::grpc::Status& status, void* tag) { +    finish_buf_.set_output_tag(tag); +    finish_buf_.set_core_cq_tag(&finish_buf_); +    if (!ctx_->sent_initial_metadata_) { +      finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_, +                                      ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        finish_buf_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    // The response is dropped if the status is not OK. +    if (status.ok()) { +      finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, +                                   finish_buf_.SendMessage(msg)); +    } else { +      finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    } +    call_.PerformOps(&finish_buf_); +  } + +  /// Indicate that the stream is to be finished with a non-OK status, +  /// and request notification for when the server has finished sending the +  /// appropriate signals to the client to end the call. +  /// Should not be used concurrently with other operations. +  /// +  /// \param[in] tag Tag identifying this request. +  /// \param[in] status To be sent to the client as the result of the call. +  ///   - Note: \a status must have a non-OK code. +  /// +  /// Side effect: +  ///   - also sends initial metadata if not already sent (using the +  ///     \a ServerContext associated with this call). 
+  void FinishWithError(const ::grpc::Status& status, void* tag) { +    GPR_CODEGEN_ASSERT(!status.ok()); +    finish_buf_.set_output_tag(tag); +    if (!ctx_->sent_initial_metadata_) { +      finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_, +                                      ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        finish_buf_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, status); +    call_.PerformOps(&finish_buf_); +  } + + private: +  void BindCall(::grpc::internal::Call* call) override { call_ = *call; } + +  ::grpc::internal::Call call_; +  ::grpc::ServerContext* ctx_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +      meta_buf_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage, +                              ::grpc::internal::CallOpServerSendStatus> +      finish_buf_; +}; + +}  // namespace grpc + +namespace std { +template <class R> +class default_delete<::grpc::ClientAsyncResponseReader<R>> { + public: +  void operator()(void* /*p*/) {} +}; +template <class R> +class default_delete<::grpc::ClientAsyncResponseReaderInterface<R>> { + public: +  void operator()(void* /*p*/) {} +}; +}  // namespace std + +#endif  // GRPCPP_IMPL_CODEGEN_ASYNC_UNARY_CALL_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h new file mode 100644 index 00000000000..6e64ec9981e --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/byte_buffer.h @@ -0,0 +1,226 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_BYTE_BUFFER_H +#define GRPCPP_IMPL_CODEGEN_BYTE_BUFFER_H + +#include <grpc/impl/codegen/byte_buffer.h> + +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/status.h> + +#include <vector> + +namespace grpc { + +class ServerInterface; +class ByteBuffer; +class ServerInterface; + +namespace internal { +template <class RequestType, class ResponseType> +class CallbackUnaryHandler; +template <class RequestType, class ResponseType> +class CallbackServerStreamingHandler; +template <class ServiceType, class RequestType, class ResponseType> +class RpcMethodHandler; +template <class ServiceType, class RequestType, class ResponseType> +class ServerStreamingHandler; +template <::grpc::StatusCode code> +class ErrorMethodHandler; +class CallOpSendMessage; +template <class R> +class CallOpRecvMessage; +class CallOpGenericRecvMessage; +class ExternalConnectionAcceptorImpl; +template <class R> +class DeserializeFuncType; +class GrpcByteBufferPeer; + +}  // namespace internal +/// A sequence of bytes. +class ByteBuffer final { + public: +  /// Constuct an empty buffer. +  ByteBuffer() : buffer_(nullptr) {} + +  /// Construct buffer from \a slices, of which there are \a nslices. +  ByteBuffer(const Slice* slices, size_t nslices) { +    // The following assertions check that the representation of a grpc::Slice +    // is identical to that of a grpc_slice:  it has a grpc_slice field, and +    // nothing else. +    static_assert(std::is_same<decltype(slices[0].slice_), grpc_slice>::value, +                  "Slice must have same representation as grpc_slice"); +    static_assert(sizeof(Slice) == sizeof(grpc_slice), +                  "Slice must have same representation as grpc_slice"); +    // The following assertions check that the representation of a ByteBuffer is +    // identical to grpc_byte_buffer*:  it has a grpc_byte_buffer* field, +    // and nothing else. +    static_assert(std::is_same<decltype(buffer_), grpc_byte_buffer*>::value, +                  "ByteBuffer must have same representation as " +                  "grpc_byte_buffer*"); +    static_assert(sizeof(ByteBuffer) == sizeof(grpc_byte_buffer*), +                  "ByteBuffer must have same representation as " +                  "grpc_byte_buffer*"); +    // The const_cast is legal if grpc_raw_byte_buffer_create() does no more +    // than its advertised side effect of increasing the reference count of the +    // slices it processes, and such an increase does not affect the semantics +    // seen by the caller of this constructor. +    buffer_ = g_core_codegen_interface->grpc_raw_byte_buffer_create( +        reinterpret_cast<grpc_slice*>(const_cast<Slice*>(slices)), nslices); +  } + +  /// Constuct a byte buffer by referencing elements of existing buffer +  /// \a buf. Wrapper of core function grpc_byte_buffer_copy . This is not +  /// a deep copy; it is just a referencing. As a result, its performance is +  /// size-independent. +  ByteBuffer(const ByteBuffer& buf) : buffer_(nullptr) { operator=(buf); } + +  ~ByteBuffer() { +    if (buffer_) { +      g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_); +    } +  } + +  /// Wrapper of core function grpc_byte_buffer_copy . This is not +  /// a deep copy; it is just a referencing. As a result, its performance is +  /// size-independent. 
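An aside on the value semantics described above (the assignment operator itself follows below): copying a ByteBuffer only bumps a reference count on the underlying grpc_byte_buffer; the payload is never deep-copied. A small sketch, assuming the two-argument `Slice(const void* buf, size_t len)` constructor from `slice.h`:

```cpp
#include <vector>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/slice.h>

void ByteBufferRoundTrip() {
  grpc::Slice slice("hello", 5);       // copies the 5 payload bytes into a slice
  grpc::ByteBuffer buffer(&slice, 1);  // references the slice, no payload copy
  grpc::ByteBuffer copy = buffer;      // refcount bump, still no payload copy

  std::vector<grpc::Slice> out;
  grpc::Status s = copy.Dump(&out);    // read the contents back out
  (void)s;  // ignore the status in this sketch
  // On success: buffer.Length() == copy.Length() == 5.
}
```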
+  ByteBuffer& operator=(const ByteBuffer& buf) { +    if (this != &buf) { +      Clear();  // first remove existing data +    } +    if (buf.buffer_) { +      // then copy +      buffer_ = g_core_codegen_interface->grpc_byte_buffer_copy(buf.buffer_); +    } +    return *this; +  } + +  /// Dump (read) the buffer contents into \a slices. +  Status Dump(std::vector<Slice>* slices) const; + +  /// Remove all data. +  void Clear() { +    if (buffer_) { +      g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_); +      buffer_ = nullptr; +    } +  } + +  /// Make a duplicate copy of the internals of this byte +  /// buffer so that we have our own owned version of it. +  /// bbuf.Duplicate(); is equivalent to bbuf=bbuf; but is actually readable. +  /// This is not a deep copy; it is a referencing and its performance +  /// is size-independent. +  void Duplicate() { +    buffer_ = g_core_codegen_interface->grpc_byte_buffer_copy(buffer_); +  } + +  /// Forget underlying byte buffer without destroying +  /// Use this only for un-owned byte buffers +  void Release() { buffer_ = nullptr; } + +  /// Buffer size in bytes. +  size_t Length() const { +    return buffer_ == nullptr +               ? 0 +               : g_core_codegen_interface->grpc_byte_buffer_length(buffer_); +  } + +  /// Swap the state of *this and *other. +  void Swap(ByteBuffer* other) { +    grpc_byte_buffer* tmp = other->buffer_; +    other->buffer_ = buffer_; +    buffer_ = tmp; +  } + +  /// Is this ByteBuffer valid? +  bool Valid() const { return (buffer_ != nullptr); } + + private: +  friend class SerializationTraits<ByteBuffer, void>; +  friend class ServerInterface; +  friend class internal::CallOpSendMessage; +  template <class R> +  friend class internal::CallOpRecvMessage; +  friend class internal::CallOpGenericRecvMessage; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class internal::RpcMethodHandler; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class internal::ServerStreamingHandler; +  template <class RequestType, class ResponseType> +  friend class internal::CallbackUnaryHandler; +  template <class RequestType, class ResponseType> +  friend class internal::CallbackServerStreamingHandler; +  template <StatusCode code> +  friend class internal::ErrorMethodHandler; +  template <class R> +  friend class internal::DeserializeFuncType; +  friend class ProtoBufferReader; +  friend class ProtoBufferWriter; +  friend class internal::GrpcByteBufferPeer; +  friend class internal::ExternalConnectionAcceptorImpl; + +  grpc_byte_buffer* buffer_; + +  // takes ownership +  void set_buffer(grpc_byte_buffer* buf) { +    if (buffer_) { +      Clear(); +    } +    buffer_ = buf; +  } + +  grpc_byte_buffer* c_buffer() { return buffer_; } +  grpc_byte_buffer** c_buffer_ptr() { return &buffer_; } + +  class ByteBufferPointer { +   public: +    ByteBufferPointer(const ByteBuffer* b) +        : bbuf_(const_cast<ByteBuffer*>(b)) {} +    operator ByteBuffer*() { return bbuf_; } +    operator grpc_byte_buffer*() { return bbuf_->buffer_; } +    operator grpc_byte_buffer**() { return &bbuf_->buffer_; } + +   private: +    ByteBuffer* bbuf_; +  }; +  ByteBufferPointer bbuf_ptr() const { return ByteBufferPointer(this); } +}; + +template <> +class SerializationTraits<ByteBuffer, void> { + public: +  static Status Deserialize(ByteBuffer* byte_buffer, ByteBuffer* dest) { +    dest->set_buffer(byte_buffer->buffer_); +    return Status::OK; +  } +  static Status 
Serialize(const ByteBuffer& source, ByteBuffer* buffer, +                          bool* own_buffer) { +    *buffer = source; +    *own_buffer = true; +    return g_core_codegen_interface->ok(); +  } +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_BYTE_BUFFER_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h new file mode 100644 index 00000000000..b2292862157 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call.h @@ -0,0 +1,93 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#ifndef GRPCPP_IMPL_CODEGEN_CALL_H +#define GRPCPP_IMPL_CODEGEN_CALL_H + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/call_hook.h> + +namespace grpc { +class CompletionQueue; +namespace experimental { +class ClientRpcInfo; +class ServerRpcInfo; +}  // namespace experimental +namespace internal { +class CallHook; +class CallOpSetInterface; + +/// Straightforward wrapping of the C call object +class Call final { + public: +  Call() +      : call_hook_(nullptr), +        cq_(nullptr), +        call_(nullptr), +        max_receive_message_size_(-1) {} +  /** call is owned by the caller */ +  Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq) +      : call_hook_(call_hook), +        cq_(cq), +        call_(call), +        max_receive_message_size_(-1) {} + +  Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq, +       experimental::ClientRpcInfo* rpc_info) +      : call_hook_(call_hook), +        cq_(cq), +        call_(call), +        max_receive_message_size_(-1), +        client_rpc_info_(rpc_info) {} + +  Call(grpc_call* call, CallHook* call_hook, ::grpc::CompletionQueue* cq, +       int max_receive_message_size, experimental::ServerRpcInfo* rpc_info) +      : call_hook_(call_hook), +        cq_(cq), +        call_(call), +        max_receive_message_size_(max_receive_message_size), +        server_rpc_info_(rpc_info) {} + +  void PerformOps(CallOpSetInterface* ops) { +    call_hook_->PerformOpsOnCall(ops, this); +  } + +  grpc_call* call() const { return call_; } +  ::grpc::CompletionQueue* cq() const { return cq_; } + +  int max_receive_message_size() const { return max_receive_message_size_; } + +  experimental::ClientRpcInfo* client_rpc_info() const { +    return client_rpc_info_; +  } + +  experimental::ServerRpcInfo* server_rpc_info() const { +    return server_rpc_info_; +  } + + private: +  CallHook* call_hook_; +  ::grpc::CompletionQueue* cq_; +  grpc_call* call_; +  int max_receive_message_size_; +  experimental::ClientRpcInfo* client_rpc_info_ = nullptr; +  experimental::ServerRpcInfo* server_rpc_info_ = nullptr; +}; +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CALL_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_hook.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_hook.h new file mode 100644 index 
00000000000..4f7d370c4f7 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_hook.h @@ -0,0 +1,39 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CALL_HOOK_H +#define GRPCPP_IMPL_CODEGEN_CALL_HOOK_H + +namespace grpc { + +namespace internal { +class CallOpSetInterface; +class Call; + +/// This is an interface that Channel and Server implement to allow them to hook +/// performing ops. +class CallHook { + public: +  virtual ~CallHook() {} +  virtual void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) = 0; +}; +}  // namespace internal + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CALL_HOOK_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h new file mode 100644 index 00000000000..379333164a6 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set.h @@ -0,0 +1,1037 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#if defined(__GNUC__) +#pragma GCC system_header +#endif + +#ifndef GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H +#define GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H + +#include <cstring> +#include <map> +#include <memory> + +#include <grpc/impl/codegen/compression_types.h> +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_hook.h> +#include <grpcpp/impl/codegen/call_op_set_interface.h> +#include <grpcpp/impl/codegen/client_context.h> +#include <grpcpp/impl/codegen/completion_queue.h> +#include <grpcpp/impl/codegen/completion_queue_tag.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/intercepted_channel.h> +#include <grpcpp/impl/codegen/interceptor_common.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/string_ref.h> + +namespace grpc { + +extern CoreCodegenInterface* g_core_codegen_interface; + +namespace internal { +class Call; +class CallHook; + +// TODO(yangg) if the map is changed before we send, the pointers will be a +// mess. Make sure it does not happen. 
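The FillMetadataArray helper defined just below flattens a `std::multimap<TString, TString>` of metadata into the core `grpc_metadata` array. As a rough illustration of where those multimaps come from on the application side (header names and values here are made up):

```cpp
#include <grpcpp/grpcpp.h>

// Metadata added through the public context APIs ends up in the multimaps
// that FillMetadataArray converts for the core library.
void AttachExampleMetadata(grpc::ClientContext* client_ctx,
                           grpc::ServerContext* server_ctx) {
  // Client side: sent as initial metadata with the request.
  client_ctx->AddMetadata("x-example-trace-id", "abc123");

  // Server side: sent with the initial metadata / trailing status respectively.
  server_ctx->AddInitialMetadata("x-example-region", "eu");
  server_ctx->AddTrailingMetadata("x-example-cost", "42");
}
```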
+inline grpc_metadata* FillMetadataArray( +    const std::multimap<TString, TString>& metadata, +    size_t* metadata_count, const TString& optional_error_details) { +  *metadata_count = metadata.size() + (optional_error_details.empty() ? 0 : 1); +  if (*metadata_count == 0) { +    return nullptr; +  } +  grpc_metadata* metadata_array = +      (grpc_metadata*)(g_core_codegen_interface->gpr_malloc( +          (*metadata_count) * sizeof(grpc_metadata))); +  size_t i = 0; +  for (auto iter = metadata.cbegin(); iter != metadata.cend(); ++iter, ++i) { +    metadata_array[i].key = SliceReferencingString(iter->first); +    metadata_array[i].value = SliceReferencingString(iter->second); +  } +  if (!optional_error_details.empty()) { +    metadata_array[i].key = +        g_core_codegen_interface->grpc_slice_from_static_buffer( +            kBinaryErrorDetailsKey, sizeof(kBinaryErrorDetailsKey) - 1); +    metadata_array[i].value = SliceReferencingString(optional_error_details); +  } +  return metadata_array; +} +}  // namespace internal + +/// Per-message write options. +class WriteOptions { + public: +  WriteOptions() : flags_(0), last_message_(false) {} +  WriteOptions(const WriteOptions& other) +      : flags_(other.flags_), last_message_(other.last_message_) {} + +  /// Default assignment operator +  WriteOptions& operator=(const WriteOptions& other) = default; + +  /// Clear all flags. +  inline void Clear() { flags_ = 0; } + +  /// Returns raw flags bitset. +  inline uint32_t flags() const { return flags_; } + +  /// Sets flag for the disabling of compression for the next message write. +  /// +  /// \sa GRPC_WRITE_NO_COMPRESS +  inline WriteOptions& set_no_compression() { +    SetBit(GRPC_WRITE_NO_COMPRESS); +    return *this; +  } + +  /// Clears flag for the disabling of compression for the next message write. +  /// +  /// \sa GRPC_WRITE_NO_COMPRESS +  inline WriteOptions& clear_no_compression() { +    ClearBit(GRPC_WRITE_NO_COMPRESS); +    return *this; +  } + +  /// Get value for the flag indicating whether compression for the next +  /// message write is forcefully disabled. +  /// +  /// \sa GRPC_WRITE_NO_COMPRESS +  inline bool get_no_compression() const { +    return GetBit(GRPC_WRITE_NO_COMPRESS); +  } + +  /// Sets flag indicating that the write may be buffered and need not go out on +  /// the wire immediately. +  /// +  /// \sa GRPC_WRITE_BUFFER_HINT +  inline WriteOptions& set_buffer_hint() { +    SetBit(GRPC_WRITE_BUFFER_HINT); +    return *this; +  } + +  /// Clears flag indicating that the write may be buffered and need not go out +  /// on the wire immediately. +  /// +  /// \sa GRPC_WRITE_BUFFER_HINT +  inline WriteOptions& clear_buffer_hint() { +    ClearBit(GRPC_WRITE_BUFFER_HINT); +    return *this; +  } + +  /// Get value for the flag indicating that the write may be buffered and need +  /// not go out on the wire immediately. 
+  /// +  /// \sa GRPC_WRITE_BUFFER_HINT +  inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); } + +  /// corked bit: aliases set_buffer_hint currently, with the intent that +  /// set_buffer_hint will be removed in the future +  inline WriteOptions& set_corked() { +    SetBit(GRPC_WRITE_BUFFER_HINT); +    return *this; +  } + +  inline WriteOptions& clear_corked() { +    ClearBit(GRPC_WRITE_BUFFER_HINT); +    return *this; +  } + +  inline bool is_corked() const { return GetBit(GRPC_WRITE_BUFFER_HINT); } + +  /// last-message bit: indicates this is the last message in a stream +  /// client-side:  makes Write the equivalent of performing Write, WritesDone +  /// in a single step +  /// server-side:  hold the Write until the service handler returns (sync api) +  /// or until Finish is called (async api) +  inline WriteOptions& set_last_message() { +    last_message_ = true; +    return *this; +  } + +  /// Clears flag indicating that this is the last message in a stream, +  /// disabling coalescing. +  inline WriteOptions& clear_last_message() { +    last_message_ = false; +    return *this; +  } + +  /// Guarantee that all bytes have been written to the socket before completing +  /// this write (usually writes are completed when they pass flow control). +  inline WriteOptions& set_write_through() { +    SetBit(GRPC_WRITE_THROUGH); +    return *this; +  } + +  inline bool is_write_through() const { return GetBit(GRPC_WRITE_THROUGH); } + +  /// Get value for the flag indicating that this is the last message, and +  /// should be coalesced with trailing metadata. +  /// +  /// \sa GRPC_WRITE_LAST_MESSAGE +  bool is_last_message() const { return last_message_; } + + private: +  void SetBit(const uint32_t mask) { flags_ |= mask; } + +  void ClearBit(const uint32_t mask) { flags_ &= ~mask; } + +  bool GetBit(const uint32_t mask) const { return (flags_ & mask) != 0; } + +  uint32_t flags_; +  bool last_message_; +}; + +namespace internal { + +/// Default argument for CallOpSet. The Unused parameter is unused by +/// the class, but can be used for generating multiple names for the +/// same thing. 
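Stepping back to the WriteOptions class completed above (before the internal namespace): a hedged sketch of how per-message write options are typically applied through a writer's `Write(msg, options)` / `WriteLast(msg, options)` overloads. `Writer` and `Msg` are placeholders for a concrete stream writer and message type.

```cpp
#include <grpcpp/grpcpp.h>

// Placeholder Writer/Msg; only grpc::WriteOptions and the Write/WriteLast
// overloads named above are assumed.
template <class Writer, class Msg>
void WriteWithOptions(Writer* stream, const Msg& msg) {
  grpc::WriteOptions options;
  options.set_no_compression()  // sets GRPC_WRITE_NO_COMPRESS for this message
      .set_buffer_hint();       // allows the transport to buffer the write
  stream->Write(msg, options);  // flags are per message and cleared after use

  // Coalesce the final message with the end-of-stream signal.
  stream->WriteLast(msg, grpc::WriteOptions().set_last_message());
}
```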
+template <int Unused> +class CallNoOp { + protected: +  void AddOp(grpc_op* /*ops*/, size_t* /*nops*/) {} +  void FinishOp(bool* /*status*/) {} +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {} +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {} +  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) { +  } +}; + +class CallOpSendInitialMetadata { + public: +  CallOpSendInitialMetadata() : send_(false) { +    maybe_compression_level_.is_set = false; +  } + +  void SendInitialMetadata(std::multimap<TString, TString>* metadata, +                           uint32_t flags) { +    maybe_compression_level_.is_set = false; +    send_ = true; +    flags_ = flags; +    metadata_map_ = metadata; +  } + +  void set_compression_level(grpc_compression_level level) { +    maybe_compression_level_.is_set = true; +    maybe_compression_level_.level = level; +  } + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (!send_ || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_SEND_INITIAL_METADATA; +    op->flags = flags_; +    op->reserved = NULL; +    initial_metadata_ = +        FillMetadataArray(*metadata_map_, &initial_metadata_count_, ""); +    op->data.send_initial_metadata.count = initial_metadata_count_; +    op->data.send_initial_metadata.metadata = initial_metadata_; +    op->data.send_initial_metadata.maybe_compression_level.is_set = +        maybe_compression_level_.is_set; +    if (maybe_compression_level_.is_set) { +      op->data.send_initial_metadata.maybe_compression_level.level = +          maybe_compression_level_.level; +    } +  } +  void FinishOp(bool* /*status*/) { +    if (!send_ || hijacked_) return; +    g_core_codegen_interface->gpr_free(initial_metadata_); +    send_ = false; +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (!send_) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA); +    interceptor_methods->SetSendInitialMetadata(metadata_map_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {} + +  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) { +    hijacked_ = true; +  } + +  bool hijacked_ = false; +  bool send_; +  uint32_t flags_; +  size_t initial_metadata_count_; +  std::multimap<TString, TString>* metadata_map_; +  grpc_metadata* initial_metadata_; +  struct { +    bool is_set; +    grpc_compression_level level; +  } maybe_compression_level_; +}; + +class CallOpSendMessage { + public: +  CallOpSendMessage() : send_buf_() {} + +  /// Send \a message using \a options for the write. The \a options are cleared +  /// after use. +  template <class M> +  Status SendMessage(const M& message, +                     WriteOptions options) GRPC_MUST_USE_RESULT; + +  template <class M> +  Status SendMessage(const M& message) GRPC_MUST_USE_RESULT; + +  /// Send \a message using \a options for the write. The \a options are cleared +  /// after use. This form of SendMessage allows gRPC to reference \a message +  /// beyond the lifetime of SendMessage. 
+  template <class M> +  Status SendMessagePtr(const M* message, +                        WriteOptions options) GRPC_MUST_USE_RESULT; + +  /// This form of SendMessage allows gRPC to reference \a message beyond the +  /// lifetime of SendMessage. +  template <class M> +  Status SendMessagePtr(const M* message) GRPC_MUST_USE_RESULT; + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (msg_ == nullptr && !send_buf_.Valid()) return; +    if (hijacked_) { +      serializer_ = nullptr; +      return; +    } +    if (msg_ != nullptr) { +      GPR_CODEGEN_ASSERT(serializer_(msg_).ok()); +    } +    serializer_ = nullptr; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_SEND_MESSAGE; +    op->flags = write_options_.flags(); +    op->reserved = NULL; +    op->data.send_message.send_message = send_buf_.c_buffer(); +    // Flags are per-message: clear them after use. +    write_options_.Clear(); +  } +  void FinishOp(bool* status) { +    if (msg_ == nullptr && !send_buf_.Valid()) return; +    if (hijacked_ && failed_send_) { +      // Hijacking interceptor failed this Op +      *status = false; +    } else if (!*status) { +      // This Op was passed down to core and the Op failed +      failed_send_ = true; +    } +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (msg_ == nullptr && !send_buf_.Valid()) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_SEND_MESSAGE); +    interceptor_methods->SetSendMessage(&send_buf_, &msg_, &failed_send_, +                                        serializer_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (msg_ != nullptr || send_buf_.Valid()) { +      interceptor_methods->AddInterceptionHookPoint( +          experimental::InterceptionHookPoints::POST_SEND_MESSAGE); +    } +    send_buf_.Clear(); +    msg_ = nullptr; +    // The contents of the SendMessage value that was previously set +    // has had its references stolen by core's operations +    interceptor_methods->SetSendMessage(nullptr, nullptr, &failed_send_, +                                        nullptr); +  } + +  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) { +    hijacked_ = true; +  } + + private: +  const void* msg_ = nullptr;  // The original non-serialized message +  bool hijacked_ = false; +  bool failed_send_ = false; +  ByteBuffer send_buf_; +  WriteOptions write_options_; +  std::function<Status(const void*)> serializer_; +}; + +template <class M> +Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) { +  write_options_ = options; +  serializer_ = [this](const void* message) { +    bool own_buf; +    send_buf_.Clear(); +    // TODO(vjpai): Remove the void below when possible +    // The void in the template parameter below should not be needed +    // (since it should be implicit) but is needed due to an observed +    // difference in behavior between clang and gcc for certain internal users +    Status result = SerializationTraits<M, void>::Serialize( +        *static_cast<const M*>(message), send_buf_.bbuf_ptr(), &own_buf); +    if (!own_buf) { +      send_buf_.Duplicate(); +    } +    return result; +  }; +  // Serialize immediately only if we do not have access to the message pointer +  if (msg_ == nullptr) { +    Status result = serializer_(&message); +    serializer_ = nullptr; +    return result; +  } +  return Status(); +} 
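SendMessage above delegates all wire-format knowledge to SerializationTraits and only handles buffer ownership via `own_buf`/`Duplicate()`. A hedged sketch of a specialization for a hypothetical raw-string message type, following the same Serialize/Deserialize contract as the ByteBuffer specialization shown earlier in byte_buffer.h:

```cpp
#include <vector>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/serialization_traits.h>
#include <grpcpp/impl/codegen/slice.h>
#include <grpcpp/impl/codegen/status.h>

// Hypothetical message type, not part of gRPC; TString is the string type
// used throughout these vendored headers.
struct RawStringMessage {
  TString data;
};

namespace grpc {
template <>
class SerializationTraits<RawStringMessage, void> {
 public:
  static Status Serialize(const RawStringMessage& msg, ByteBuffer* bb,
                          bool* own_buffer) {
    Slice slice(msg.data);        // copy the payload into a slice
    *bb = ByteBuffer(&slice, 1);  // wrap it in a ByteBuffer
    *own_buffer = true;           // bb is caller-owned; no Duplicate() needed
    return Status::OK;
  }

  static Status Deserialize(ByteBuffer* buffer, RawStringMessage* msg) {
    std::vector<Slice> slices;
    Status s = buffer->Dump(&slices);
    if (!s.ok()) return s;
    msg->data.clear();
    for (const Slice& piece : slices) {
      msg->data.append(reinterpret_cast<const char*>(piece.begin()),
                       piece.size());
    }
    return Status::OK;
  }
};
}  // namespace grpc
```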
+ +template <class M> +Status CallOpSendMessage::SendMessage(const M& message) { +  return SendMessage(message, WriteOptions()); +} + +template <class M> +Status CallOpSendMessage::SendMessagePtr(const M* message, +                                         WriteOptions options) { +  msg_ = message; +  return SendMessage(*message, options); +} + +template <class M> +Status CallOpSendMessage::SendMessagePtr(const M* message) { +  msg_ = message; +  return SendMessage(*message, WriteOptions()); +} + +template <class R> +class CallOpRecvMessage { + public: +  void RecvMessage(R* message) { message_ = message; } + +  // Do not change status if no message is received. +  void AllowNoMessage() { allow_not_getting_message_ = true; } + +  bool got_message = false; + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (message_ == nullptr || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_RECV_MESSAGE; +    op->flags = 0; +    op->reserved = NULL; +    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr(); +  } + +  void FinishOp(bool* status) { +    if (message_ == nullptr) return; +    if (recv_buf_.Valid()) { +      if (*status) { +        got_message = *status = +            SerializationTraits<R>::Deserialize(recv_buf_.bbuf_ptr(), message_) +                .ok(); +        recv_buf_.Release(); +      } else { +        got_message = false; +        recv_buf_.Clear(); +      } +    } else if (hijacked_) { +      if (hijacked_recv_message_failed_) { +        FinishOpRecvMessageFailureHandler(status); +      } else { +        // The op was hijacked and it was successful. There is no further action +        // to be performed since the message is already in its non-serialized +        // form. +      } +    } else { +      FinishOpRecvMessageFailureHandler(status); +    } +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (message_ == nullptr) return; +    interceptor_methods->SetRecvMessage(message_, +                                        &hijacked_recv_message_failed_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (message_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::POST_RECV_MESSAGE); +    if (!got_message) interceptor_methods->SetRecvMessage(nullptr, nullptr); +  } +  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) { +    hijacked_ = true; +    if (message_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE); +    got_message = true; +  } + + private: +  // Sets got_message and \a status for a failed recv message op +  void FinishOpRecvMessageFailureHandler(bool* status) { +    got_message = false; +    if (!allow_not_getting_message_) { +      *status = false; +    } +  } + +  R* message_ = nullptr; +  ByteBuffer recv_buf_; +  bool allow_not_getting_message_ = false; +  bool hijacked_ = false; +  bool hijacked_recv_message_failed_ = false; +}; + +class DeserializeFunc { + public: +  virtual Status Deserialize(ByteBuffer* buf) = 0; +  virtual ~DeserializeFunc() {} +}; + +template <class R> +class DeserializeFuncType final : public DeserializeFunc { + public: +  DeserializeFuncType(R* message) : message_(message) {} +  Status Deserialize(ByteBuffer* buf) override { +    return SerializationTraits<R>::Deserialize(buf->bbuf_ptr(), message_); 
+  } + +  ~DeserializeFuncType() override {} + + private: +  R* message_;  // Not a managed pointer because management is external to this +}; + +class CallOpGenericRecvMessage { + public: +  template <class R> +  void RecvMessage(R* message) { +    // Use an explicit base class pointer to avoid resolution error in the +    // following unique_ptr::reset for some old implementations. +    DeserializeFunc* func = new DeserializeFuncType<R>(message); +    deserialize_.reset(func); +    message_ = message; +  } + +  // Do not change status if no message is received. +  void AllowNoMessage() { allow_not_getting_message_ = true; } + +  bool got_message = false; + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (!deserialize_ || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_RECV_MESSAGE; +    op->flags = 0; +    op->reserved = NULL; +    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr(); +  } + +  void FinishOp(bool* status) { +    if (!deserialize_) return; +    if (recv_buf_.Valid()) { +      if (*status) { +        got_message = true; +        *status = deserialize_->Deserialize(&recv_buf_).ok(); +        recv_buf_.Release(); +      } else { +        got_message = false; +        recv_buf_.Clear(); +      } +    } else if (hijacked_) { +      if (hijacked_recv_message_failed_) { +        FinishOpRecvMessageFailureHandler(status); +      } else { +        // The op was hijacked and it was successful. There is no further action +        // to be performed since the message is already in its non-serialized +        // form. +      } +    } else { +      got_message = false; +      if (!allow_not_getting_message_) { +        *status = false; +      } +    } +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (!deserialize_) return; +    interceptor_methods->SetRecvMessage(message_, +                                        &hijacked_recv_message_failed_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (!deserialize_) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::POST_RECV_MESSAGE); +    if (!got_message) interceptor_methods->SetRecvMessage(nullptr, nullptr); +    deserialize_.reset(); +  } +  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) { +    hijacked_ = true; +    if (!deserialize_) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE); +    got_message = true; +  } + + private: +  // Sets got_message and \a status for a failed recv message op +  void FinishOpRecvMessageFailureHandler(bool* status) { +    got_message = false; +    if (!allow_not_getting_message_) { +      *status = false; +    } +  } + +  void* message_ = nullptr; +  std::unique_ptr<DeserializeFunc> deserialize_; +  ByteBuffer recv_buf_; +  bool allow_not_getting_message_ = false; +  bool hijacked_ = false; +  bool hijacked_recv_message_failed_ = false; +}; + +class CallOpClientSendClose { + public: +  CallOpClientSendClose() : send_(false) {} + +  void ClientSendClose() { send_ = true; } + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (!send_ || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; +    op->flags = 0; +    op->reserved = NULL; +  } +  void FinishOp(bool* /*status*/) { send_ = false; } + +  void 
SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (!send_) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_SEND_CLOSE); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {} + +  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) { +    hijacked_ = true; +  } + + private: +  bool hijacked_ = false; +  bool send_; +}; + +class CallOpServerSendStatus { + public: +  CallOpServerSendStatus() : send_status_available_(false) {} + +  void ServerSendStatus( +      std::multimap<TString, TString>* trailing_metadata, +      const Status& status) { +    send_error_details_ = status.error_details(); +    metadata_map_ = trailing_metadata; +    send_status_available_ = true; +    send_status_code_ = static_cast<grpc_status_code>(status.error_code()); +    send_error_message_ = status.error_message(); +  } + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (!send_status_available_ || hijacked_) return; +    trailing_metadata_ = FillMetadataArray( +        *metadata_map_, &trailing_metadata_count_, send_error_details_); +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; +    op->data.send_status_from_server.trailing_metadata_count = +        trailing_metadata_count_; +    op->data.send_status_from_server.trailing_metadata = trailing_metadata_; +    op->data.send_status_from_server.status = send_status_code_; +    error_message_slice_ = SliceReferencingString(send_error_message_); +    op->data.send_status_from_server.status_details = +        send_error_message_.empty() ? nullptr : &error_message_slice_; +    op->flags = 0; +    op->reserved = NULL; +  } + +  void FinishOp(bool* /*status*/) { +    if (!send_status_available_ || hijacked_) return; +    g_core_codegen_interface->gpr_free(trailing_metadata_); +    send_status_available_ = false; +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (!send_status_available_) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_SEND_STATUS); +    interceptor_methods->SetSendTrailingMetadata(metadata_map_); +    interceptor_methods->SetSendStatus(&send_status_code_, &send_error_details_, +                                       &send_error_message_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {} + +  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) { +    hijacked_ = true; +  } + + private: +  bool hijacked_ = false; +  bool send_status_available_; +  grpc_status_code send_status_code_; +  TString send_error_details_; +  TString send_error_message_; +  size_t trailing_metadata_count_; +  std::multimap<TString, TString>* metadata_map_; +  grpc_metadata* trailing_metadata_; +  grpc_slice error_message_slice_; +}; + +class CallOpRecvInitialMetadata { + public: +  CallOpRecvInitialMetadata() : metadata_map_(nullptr) {} + +  void RecvInitialMetadata(::grpc::ClientContext* context) { +    context->initial_metadata_received_ = true; +    metadata_map_ = &context->recv_initial_metadata_; +  } + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (metadata_map_ == nullptr || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_RECV_INITIAL_METADATA; +    
op->data.recv_initial_metadata.recv_initial_metadata = metadata_map_->arr(); +    op->flags = 0; +    op->reserved = NULL; +  } + +  void FinishOp(bool* /*status*/) { +    if (metadata_map_ == nullptr || hijacked_) return; +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    interceptor_methods->SetRecvInitialMetadata(metadata_map_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (metadata_map_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA); +    metadata_map_ = nullptr; +  } + +  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) { +    hijacked_ = true; +    if (metadata_map_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA); +  } + + private: +  bool hijacked_ = false; +  MetadataMap* metadata_map_; +}; + +class CallOpClientRecvStatus { + public: +  CallOpClientRecvStatus() +      : recv_status_(nullptr), debug_error_string_(nullptr) {} + +  void ClientRecvStatus(::grpc::ClientContext* context, Status* status) { +    client_context_ = context; +    metadata_map_ = &client_context_->trailing_metadata_; +    recv_status_ = status; +    error_message_ = g_core_codegen_interface->grpc_empty_slice(); +  } + + protected: +  void AddOp(grpc_op* ops, size_t* nops) { +    if (recv_status_ == nullptr || hijacked_) return; +    grpc_op* op = &ops[(*nops)++]; +    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; +    op->data.recv_status_on_client.trailing_metadata = metadata_map_->arr(); +    op->data.recv_status_on_client.status = &status_code_; +    op->data.recv_status_on_client.status_details = &error_message_; +    op->data.recv_status_on_client.error_string = &debug_error_string_; +    op->flags = 0; +    op->reserved = NULL; +  } + +  void FinishOp(bool* /*status*/) { +    if (recv_status_ == nullptr || hijacked_) return; +    if (static_cast<StatusCode>(status_code_) == StatusCode::OK) { +      *recv_status_ = Status(); +      GPR_CODEGEN_DEBUG_ASSERT(debug_error_string_ == nullptr); +    } else { +      *recv_status_ = +          Status(static_cast<StatusCode>(status_code_), +                 GRPC_SLICE_IS_EMPTY(error_message_) +                     ? TString() +                     : TString(reinterpret_cast<const char*>GRPC_SLICE_START_PTR(error_message_), +                                   reinterpret_cast<const char*>GRPC_SLICE_END_PTR(error_message_)), +                 metadata_map_->GetBinaryErrorDetails()); +      if (debug_error_string_ != nullptr) { +        client_context_->set_debug_error_string(debug_error_string_); +        g_core_codegen_interface->gpr_free((void*)debug_error_string_); +      } +    } +    // TODO(soheil): Find callers that set debug string even for status OK, +    //               and fix them. 
+    g_core_codegen_interface->grpc_slice_unref(error_message_); +  } + +  void SetInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    interceptor_methods->SetRecvStatus(recv_status_); +    interceptor_methods->SetRecvTrailingMetadata(metadata_map_); +  } + +  void SetFinishInterceptionHookPoint( +      InterceptorBatchMethodsImpl* interceptor_methods) { +    if (recv_status_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::POST_RECV_STATUS); +    recv_status_ = nullptr; +  } + +  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) { +    hijacked_ = true; +    if (recv_status_ == nullptr) return; +    interceptor_methods->AddInterceptionHookPoint( +        experimental::InterceptionHookPoints::PRE_RECV_STATUS); +  } + + private: +  bool hijacked_ = false; +  ::grpc::ClientContext* client_context_; +  MetadataMap* metadata_map_; +  Status* recv_status_; +  const char* debug_error_string_; +  grpc_status_code status_code_; +  grpc_slice error_message_; +}; + +template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>, +          class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>, +          class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>> +class CallOpSet; + +/// Primary implementation of CallOpSetInterface. +/// Since we cannot use variadic templates, we declare slots up to +/// the maximum count of ops we'll need in a set. We leverage the +/// empty base class optimization to slim this class (especially +/// when there are many unused slots used). To avoid duplicate base classes, +/// the template parameter for CallNoOp is varied by argument position. +template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6> +class CallOpSet : public CallOpSetInterface, +                  public Op1, +                  public Op2, +                  public Op3, +                  public Op4, +                  public Op5, +                  public Op6 { + public: +  CallOpSet() : core_cq_tag_(this), return_tag_(this) {} +  // The copy constructor and assignment operator reset the value of +  // core_cq_tag_, return_tag_, done_intercepting_ and interceptor_methods_ +  // since those are only meaningful on a specific object, not across objects. +  CallOpSet(const CallOpSet& other) +      : core_cq_tag_(this), +        return_tag_(this), +        call_(other.call_), +        done_intercepting_(false), +        interceptor_methods_(InterceptorBatchMethodsImpl()) {} + +  CallOpSet& operator=(const CallOpSet& other) { +    core_cq_tag_ = this; +    return_tag_ = this; +    call_ = other.call_; +    done_intercepting_ = false; +    interceptor_methods_ = InterceptorBatchMethodsImpl(); +    return *this; +  } + +  void FillOps(Call* call) override { +    done_intercepting_ = false; +    g_core_codegen_interface->grpc_call_ref(call->call()); +    call_ = +        *call;  // It's fine to create a copy of call since it's just pointers + +    if (RunInterceptors()) { +      ContinueFillOpsAfterInterception(); +    } else { +      // After the interceptors are run, ContinueFillOpsAfterInterception will +      // be run +    } +  } + +  bool FinalizeResult(void** tag, bool* status) override { +    if (done_intercepting_) { +      // Complete the avalanching since we are done with this batch of ops +      call_.cq()->CompleteAvalanching(); +      // We have already finished intercepting and filling in the results. 
This +      // round trip from the core needed to be made because interceptors were +      // run +      *tag = return_tag_; +      *status = saved_status_; +      g_core_codegen_interface->grpc_call_unref(call_.call()); +      return true; +    } + +    this->Op1::FinishOp(status); +    this->Op2::FinishOp(status); +    this->Op3::FinishOp(status); +    this->Op4::FinishOp(status); +    this->Op5::FinishOp(status); +    this->Op6::FinishOp(status); +    saved_status_ = *status; +    if (RunInterceptorsPostRecv()) { +      *tag = return_tag_; +      g_core_codegen_interface->grpc_call_unref(call_.call()); +      return true; +    } +    // Interceptors are going to be run, so we can't return the tag just yet. +    // After the interceptors are run, ContinueFinalizeResultAfterInterception +    return false; +  } + +  void set_output_tag(void* return_tag) { return_tag_ = return_tag; } + +  void* core_cq_tag() override { return core_cq_tag_; } + +  /// set_core_cq_tag is used to provide a different core CQ tag than "this". +  /// This is used for callback-based tags, where the core tag is the core +  /// callback function. It does not change the use or behavior of any other +  /// function (such as FinalizeResult) +  void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; } + +  // This will be called while interceptors are run if the RPC is a hijacked +  // RPC. This should set hijacking state for each of the ops. +  void SetHijackingState() override { +    this->Op1::SetHijackingState(&interceptor_methods_); +    this->Op2::SetHijackingState(&interceptor_methods_); +    this->Op3::SetHijackingState(&interceptor_methods_); +    this->Op4::SetHijackingState(&interceptor_methods_); +    this->Op5::SetHijackingState(&interceptor_methods_); +    this->Op6::SetHijackingState(&interceptor_methods_); +  } + +  // Should be called after interceptors are done running +  void ContinueFillOpsAfterInterception() override { +    static const size_t MAX_OPS = 6; +    grpc_op ops[MAX_OPS]; +    size_t nops = 0; +    this->Op1::AddOp(ops, &nops); +    this->Op2::AddOp(ops, &nops); +    this->Op3::AddOp(ops, &nops); +    this->Op4::AddOp(ops, &nops); +    this->Op5::AddOp(ops, &nops); +    this->Op6::AddOp(ops, &nops); + +    grpc_call_error err = g_core_codegen_interface->grpc_call_start_batch( +        call_.call(), ops, nops, core_cq_tag(), nullptr); + +    if (err != GRPC_CALL_OK) { +      // A failure here indicates an API misuse; for example, doing a Write +      // while another Write is already pending on the same RPC or invoking +      // WritesDone multiple times +      // gpr_log(GPR_ERROR, "API misuse of type %s observed", +      //        g_core_codegen_interface->grpc_call_error_to_string(err)); +      GPR_CODEGEN_ASSERT(false); +    } +  } + +  // Should be called after interceptors are done running on the finalize result +  // path +  void ContinueFinalizeResultAfterInterception() override { +    done_intercepting_ = true; +    // The following call_start_batch is internally-generated so no need for an +    // explanatory log on failure. 
+    GPR_CODEGEN_ASSERT(g_core_codegen_interface->grpc_call_start_batch( +                           call_.call(), nullptr, 0, core_cq_tag(), nullptr) == +                       GRPC_CALL_OK); +  } + + private: +  // Returns true if no interceptors need to be run +  bool RunInterceptors() { +    interceptor_methods_.ClearState(); +    interceptor_methods_.SetCallOpSetInterface(this); +    interceptor_methods_.SetCall(&call_); +    this->Op1::SetInterceptionHookPoint(&interceptor_methods_); +    this->Op2::SetInterceptionHookPoint(&interceptor_methods_); +    this->Op3::SetInterceptionHookPoint(&interceptor_methods_); +    this->Op4::SetInterceptionHookPoint(&interceptor_methods_); +    this->Op5::SetInterceptionHookPoint(&interceptor_methods_); +    this->Op6::SetInterceptionHookPoint(&interceptor_methods_); +    if (interceptor_methods_.InterceptorsListEmpty()) { +      return true; +    } +    // This call will go through interceptors and would need to +    // schedule new batches, so delay completion queue shutdown +    call_.cq()->RegisterAvalanching(); +    return interceptor_methods_.RunInterceptors(); +  } +  // Returns true if no interceptors need to be run +  bool RunInterceptorsPostRecv() { +    // Call and OpSet had already been set on the set state. +    // SetReverse also clears previously set hook points +    interceptor_methods_.SetReverse(); +    this->Op1::SetFinishInterceptionHookPoint(&interceptor_methods_); +    this->Op2::SetFinishInterceptionHookPoint(&interceptor_methods_); +    this->Op3::SetFinishInterceptionHookPoint(&interceptor_methods_); +    this->Op4::SetFinishInterceptionHookPoint(&interceptor_methods_); +    this->Op5::SetFinishInterceptionHookPoint(&interceptor_methods_); +    this->Op6::SetFinishInterceptionHookPoint(&interceptor_methods_); +    return interceptor_methods_.RunInterceptors(); +  } + +  void* core_cq_tag_; +  void* return_tag_; +  Call call_; +  bool done_intercepting_ = false; +  InterceptorBatchMethodsImpl interceptor_methods_; +  bool saved_status_; +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set_interface.h new file mode 100644 index 00000000000..3b74566a6d3 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/call_op_set_interface.h @@ -0,0 +1,59 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H +#define GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H + +#include <grpcpp/impl/codegen/completion_queue_tag.h> + +namespace grpc { +namespace internal { + +class Call; + +/// An abstract collection of call ops, used to generate the +/// grpc_call_op structure to pass down to the lower layers, +/// and as it is-a CompletionQueueTag, also massages the final +/// completion into the correct form for consumption in the C++ +/// API. +class CallOpSetInterface : public CompletionQueueTag { + public: +  /// Fills in grpc_op, starting from ops[*nops] and moving +  /// upwards. +  virtual void FillOps(internal::Call* call) = 0; + +  /// Get the tag to be used at the core completion queue. Generally, the +  /// value of core_cq_tag will be "this". However, it can be overridden if we +  /// want core to process the tag differently (e.g., as a core callback) +  virtual void* core_cq_tag() = 0; + +  // This will be called while interceptors are run if the RPC is a hijacked +  // RPC. This should set hijacking state for each of the ops. +  virtual void SetHijackingState() = 0; + +  // Should be called after interceptors are done running +  virtual void ContinueFillOpsAfterInterception() = 0; + +  // Should be called after interceptors are done running on the finalize result +  // path +  virtual void ContinueFinalizeResultAfterInterception() = 0; +}; +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/callback_common.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/callback_common.h new file mode 100644 index 00000000000..3c3bfd7e762 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/callback_common.h @@ -0,0 +1,226 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#if defined(__GNUC__) +#pragma GCC system_header +#endif + +#ifndef GRPCPP_IMPL_CODEGEN_CALLBACK_COMMON_H +#define GRPCPP_IMPL_CODEGEN_CALLBACK_COMMON_H + +#include <functional> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { +namespace internal { + +/// An exception-safe way of invoking a user-specified callback function +// TODO(vjpai): decide whether it is better for this to take a const lvalue +//              parameter or an rvalue parameter, or if it even matters +template <class Func, class... Args> +void CatchingCallback(Func&& func, Args&&... args) { +#if GRPC_ALLOW_EXCEPTIONS +  try { +    func(std::forward<Args>(args)...); +  } catch (...) 
{ +    // nothing to return or change here, just don't crash the library +  } +#else   // GRPC_ALLOW_EXCEPTIONS +  func(std::forward<Args>(args)...); +#endif  // GRPC_ALLOW_EXCEPTIONS +} + +template <class Reactor, class Func, class... Args> +Reactor* CatchingReactorGetter(Func&& func, Args&&... args) { +#if GRPC_ALLOW_EXCEPTIONS +  try { +    return func(std::forward<Args>(args)...); +  } catch (...) { +    // fail the RPC, don't crash the library +    return nullptr; +  } +#else   // GRPC_ALLOW_EXCEPTIONS +  return func(std::forward<Args>(args)...); +#endif  // GRPC_ALLOW_EXCEPTIONS +} + +// The contract on these tags is that they are single-shot. They must be +// constructed and then fired at exactly one point. There is no expectation +// that they can be reused without reconstruction. + +class CallbackWithStatusTag +    : public grpc_experimental_completion_queue_functor { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(CallbackWithStatusTag)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  CallbackWithStatusTag(grpc_call* call, std::function<void(Status)> f, +                        CompletionQueueTag* ops) +      : call_(call), func_(std::move(f)), ops_(ops) { +    g_core_codegen_interface->grpc_call_ref(call); +    functor_run = &CallbackWithStatusTag::StaticRun; +    // A client-side callback should never be run inline since they will always +    // have work to do from the user application. So, set the parent's +    // inlineable field to false +    inlineable = false; +  } +  ~CallbackWithStatusTag() {} +  Status* status_ptr() { return &status_; } + +  // force_run can not be performed on a tag if operations using this tag +  // have been sent to PerformOpsOnCall. It is intended for error conditions +  // that are detected before the operations are internally processed. +  void force_run(Status s) { +    status_ = std::move(s); +    Run(true); +  } + + private: +  grpc_call* call_; +  std::function<void(Status)> func_; +  CompletionQueueTag* ops_; +  Status status_; + +  static void StaticRun(grpc_experimental_completion_queue_functor* cb, +                        int ok) { +    static_cast<CallbackWithStatusTag*>(cb)->Run(static_cast<bool>(ok)); +  } +  void Run(bool ok) { +    void* ignored = ops_; + +    if (!ops_->FinalizeResult(&ignored, &ok)) { +      // The tag was swallowed +      return; +    } +    GPR_CODEGEN_ASSERT(ignored == ops_); + +    // Last use of func_ or status_, so ok to move them out +    auto func = std::move(func_); +    auto status = std::move(status_); +    func_ = nullptr;     // reset to clear this out for sure +    status_ = Status();  // reset to clear this out for sure +    CatchingCallback(std::move(func), std::move(status)); +    g_core_codegen_interface->grpc_call_unref(call_); +  } +}; + +/// CallbackWithSuccessTag can be reused multiple times, and will be used in +/// this fashion for streaming operations. 
As a result, it shouldn't clear +/// anything up until its destructor +class CallbackWithSuccessTag +    : public grpc_experimental_completion_queue_functor { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(CallbackWithSuccessTag)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  CallbackWithSuccessTag() : call_(nullptr) {} + +  CallbackWithSuccessTag(const CallbackWithSuccessTag&) = delete; +  CallbackWithSuccessTag& operator=(const CallbackWithSuccessTag&) = delete; + +  ~CallbackWithSuccessTag() { Clear(); } + +  // Set can only be called on a default-constructed or Clear'ed tag. +  // It should never be called on a tag that was constructed with arguments +  // or on a tag that has been Set before unless the tag has been cleared. +  // can_inline indicates that this particular callback can be executed inline +  // (without needing a thread hop) and is only used for library-provided server +  // callbacks. +  void Set(grpc_call* call, std::function<void(bool)> f, +           CompletionQueueTag* ops, bool can_inline) { +    GPR_CODEGEN_ASSERT(call_ == nullptr); +    g_core_codegen_interface->grpc_call_ref(call); +    call_ = call; +    func_ = std::move(f); +    ops_ = ops; +    functor_run = &CallbackWithSuccessTag::StaticRun; +    inlineable = can_inline; +  } + +  void Clear() { +    if (call_ != nullptr) { +      grpc_call* call = call_; +      call_ = nullptr; +      func_ = nullptr; +      g_core_codegen_interface->grpc_call_unref(call); +    } +  } + +  CompletionQueueTag* ops() { return ops_; } + +  // force_run can not be performed on a tag if operations using this tag +  // have been sent to PerformOpsOnCall. It is intended for error conditions +  // that are detected before the operations are internally processed. 
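+  // Unlike CallbackWithStatusTag::force_run, there is no Status to carry
+  // here; the caller only supplies the success value passed through to Run.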
+  void force_run(bool ok) { Run(ok); } + +  /// check if this tag is currently set +  operator bool() const { return call_ != nullptr; } + + private: +  grpc_call* call_; +  std::function<void(bool)> func_; +  CompletionQueueTag* ops_; + +  static void StaticRun(grpc_experimental_completion_queue_functor* cb, +                        int ok) { +    static_cast<CallbackWithSuccessTag*>(cb)->Run(static_cast<bool>(ok)); +  } +  void Run(bool ok) { +    void* ignored = ops_; +    // Allow a "false" return value from FinalizeResult to silence the +    // callback, just as it silences a CQ tag in the async cases +#ifndef NDEBUG +    auto* ops = ops_; +#endif +    bool do_callback = ops_->FinalizeResult(&ignored, &ok); +    GPR_CODEGEN_DEBUG_ASSERT(ignored == ops); + +    if (do_callback) { +      CatchingCallback(func_, ok); +    } +  } +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CALLBACK_COMMON_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h new file mode 100644 index 00000000000..ea0752d90ee --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/channel_interface.h @@ -0,0 +1,177 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#if defined(__GNUC__) +#pragma GCC system_header +#endif + +#ifndef GRPCPP_IMPL_CODEGEN_CHANNEL_INTERFACE_H +#define GRPCPP_IMPL_CODEGEN_CHANNEL_INTERFACE_H + +#include <grpc/impl/codegen/connectivity_state.h> +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/status.h> +#include <grpcpp/impl/codegen/time.h> + +namespace grpc { +template <class R> +class ClientReader; +template <class W> +class ClientWriter; +template <class W, class R> +class ClientReaderWriter; +namespace internal { +template <class InputMessage, class OutputMessage> +class CallbackUnaryCallImpl; +template <class R> +class ClientAsyncReaderFactory; +template <class W> +class ClientAsyncWriterFactory; +template <class W, class R> +class ClientAsyncReaderWriterFactory; +template <class R> +class ClientAsyncResponseReaderFactory; +template <class W, class R> +class ClientCallbackReaderWriterFactory; +template <class R> +class ClientCallbackReaderFactory; +template <class W> +class ClientCallbackWriterFactory; +class ClientCallbackUnaryFactory; +}  // namespace internal + +class ChannelInterface; +class ClientContext; +class CompletionQueue; + +namespace experimental { +class DelegatingChannel; +} + +namespace internal { +class Call; +class CallOpSetInterface; +class RpcMethod; +class InterceptedChannel; +template <class InputMessage, class OutputMessage> +class BlockingUnaryCallImpl; +}  // namespace internal + +/// Codegen interface for \a grpc::Channel. +class ChannelInterface { + public: +  virtual ~ChannelInterface() {} +  /// Get the current channel state. If the channel is in IDLE and +  /// \a try_to_connect is set to true, try to connect. 
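+  ///
+  /// A minimal polling sketch (here `channel` stands for any concrete
+  /// ChannelInterface implementation, e.g. grpc::Channel, and the deadline
+  /// choice is only illustrative):
+  ///
+  ///   grpc_connectivity_state state = channel->GetState(true);
+  ///   while (state != GRPC_CHANNEL_READY &&
+  ///          channel->WaitForStateChange(
+  ///              state, std::chrono::system_clock::now() +
+  ///                         std::chrono::seconds(5))) {
+  ///     state = channel->GetState(true);
+  ///   }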
+  virtual grpc_connectivity_state GetState(bool try_to_connect) = 0; + +  /// Return the \a tag on \a cq when the channel state is changed or \a +  /// deadline expires. \a GetState needs to called to get the current state. +  template <typename T> +  void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline, +                           ::grpc::CompletionQueue* cq, void* tag) { +    TimePoint<T> deadline_tp(deadline); +    NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag); +  } + +  /// Blocking wait for channel state change or \a deadline expiration. +  /// \a GetState needs to called to get the current state. +  template <typename T> +  bool WaitForStateChange(grpc_connectivity_state last_observed, T deadline) { +    TimePoint<T> deadline_tp(deadline); +    return WaitForStateChangeImpl(last_observed, deadline_tp.raw_time()); +  } + +  /// Wait for this channel to be connected +  template <typename T> +  bool WaitForConnected(T deadline) { +    grpc_connectivity_state state; +    while ((state = GetState(true)) != GRPC_CHANNEL_READY) { +      if (!WaitForStateChange(state, deadline)) return false; +    } +    return true; +  } + + private: +  template <class R> +  friend class ::grpc::ClientReader; +  template <class W> +  friend class ::grpc::ClientWriter; +  template <class W, class R> +  friend class ::grpc::ClientReaderWriter; +  template <class R> +  friend class ::grpc::internal::ClientAsyncReaderFactory; +  template <class W> +  friend class ::grpc::internal::ClientAsyncWriterFactory; +  template <class W, class R> +  friend class ::grpc::internal::ClientAsyncReaderWriterFactory; +  template <class R> +  friend class ::grpc::internal::ClientAsyncResponseReaderFactory; +  template <class W, class R> +  friend class ::grpc::internal::ClientCallbackReaderWriterFactory; +  template <class R> +  friend class ::grpc::internal::ClientCallbackReaderFactory; +  template <class W> +  friend class ::grpc::internal::ClientCallbackWriterFactory; +  friend class ::grpc::internal::ClientCallbackUnaryFactory; +  template <class InputMessage, class OutputMessage> +  friend class ::grpc::internal::BlockingUnaryCallImpl; +  template <class InputMessage, class OutputMessage> +  friend class ::grpc::internal::CallbackUnaryCallImpl; +  friend class ::grpc::internal::RpcMethod; +  friend class ::grpc::experimental::DelegatingChannel; +  friend class ::grpc::internal::InterceptedChannel; +  virtual internal::Call CreateCall(const internal::RpcMethod& method, +                                    ::grpc::ClientContext* context, +                                    ::grpc::CompletionQueue* cq) = 0; +  virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops, +                                internal::Call* call) = 0; +  virtual void* RegisterMethod(const char* method) = 0; +  virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, +                                       gpr_timespec deadline, +                                       ::grpc::CompletionQueue* cq, +                                       void* tag) = 0; +  virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, +                                      gpr_timespec deadline) = 0; + +  // EXPERIMENTAL +  // This is needed to keep codegen_test_minimal happy. InterceptedChannel needs +  // to make use of this but can't directly call Channel's implementation +  // because of the test. 
+  // Returns an empty Call object (rather than being pure) since this is a new +  // method and adding a new pure method to an interface would be a breaking +  // change (even though this is private and non-API) +  virtual internal::Call CreateCallInternal( +      const internal::RpcMethod& /*method*/, ::grpc::ClientContext* /*context*/, +      ::grpc::CompletionQueue* /*cq*/, size_t /*interceptor_pos*/) { +    return internal::Call(); +  } + +  // EXPERIMENTAL +  // A method to get the callbackable completion queue associated with this +  // channel. If the return value is nullptr, this channel doesn't support +  // callback operations. +  // TODO(vjpai): Consider a better default like using a global CQ +  // Returns nullptr (rather than being pure) since this is a post-1.0 method +  // and adding a new pure method to an interface would be a breaking change +  // (even though this is private and non-API) +  virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; } +}; +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CHANNEL_INTERFACE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h new file mode 100644 index 00000000000..90c817ceaa7 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_callback.h @@ -0,0 +1,1219 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H +#define GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H +#include <atomic> +#include <functional> + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_op_set.h> +#include <grpcpp/impl/codegen/callback_common.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { +class Channel; +class ClientContext; + +namespace internal { +class RpcMethod; + +/// Perform a callback-based unary call +/// TODO(vjpai): Combine as much as possible with the blocking unary call code +template <class InputMessage, class OutputMessage> +void CallbackUnaryCall(::grpc::ChannelInterface* channel, +                       const ::grpc::internal::RpcMethod& method, +                       ::grpc::ClientContext* context, +                       const InputMessage* request, OutputMessage* result, +                       std::function<void(::grpc::Status)> on_completion) { +  CallbackUnaryCallImpl<InputMessage, OutputMessage> x( +      channel, method, context, request, result, on_completion); +} + +template <class InputMessage, class OutputMessage> +class CallbackUnaryCallImpl { + public: +  CallbackUnaryCallImpl(::grpc::ChannelInterface* channel, +                        const ::grpc::internal::RpcMethod& method, +                        ::grpc::ClientContext* context, +                        const InputMessage* request, OutputMessage* result, +                        std::function<void(::grpc::Status)> on_completion) { +    ::grpc::CompletionQueue* cq = channel->CallbackCQ(); +    GPR_CODEGEN_ASSERT(cq != nullptr); +    grpc::internal::Call call(channel->CreateCall(method, context, cq)); + +    using FullCallOpSet = grpc::internal::CallOpSet< +        ::grpc::internal::CallOpSendInitialMetadata, +        grpc::internal::CallOpSendMessage, +        grpc::internal::CallOpRecvInitialMetadata, +        grpc::internal::CallOpRecvMessage<OutputMessage>, +        grpc::internal::CallOpClientSendClose, +        grpc::internal::CallOpClientRecvStatus>; + +    struct OpSetAndTag { +      FullCallOpSet opset; +      grpc::internal::CallbackWithStatusTag tag; +    }; +    const size_t alloc_sz = sizeof(OpSetAndTag); +    auto* const alloced = static_cast<OpSetAndTag*>( +        ::grpc::g_core_codegen_interface->grpc_call_arena_alloc(call.call(), +                                                                alloc_sz)); +    auto* ops = new (&alloced->opset) FullCallOpSet; +    auto* tag = new (&alloced->tag) +        grpc::internal::CallbackWithStatusTag(call.call(), on_completion, ops); + +    // TODO(vjpai): Unify code with sync API as much as possible +    ::grpc::Status s = ops->SendMessagePtr(request); +    if (!s.ok()) { +      tag->force_run(s); +      return; +    } +    ops->SendInitialMetadata(&context->send_initial_metadata_, +                             context->initial_metadata_flags()); +    ops->RecvInitialMetadata(context); +    ops->RecvMessage(result); +    ops->AllowNoMessage(); +    ops->ClientSendClose(); +    ops->ClientRecvStatus(context, tag->status_ptr()); +    ops->set_core_cq_tag(tag); +    call.PerformOps(ops); +  } +}; + +// Base class for public API classes. +class ClientReactor { + public: +  /// Called by the library when all operations associated with this RPC have +  /// completed and all Holds have been removed. 
OnDone provides the RPC status +  /// outcome for both successful and failed RPCs. If it is never called on an +  /// RPC, it indicates an application-level problem (like failure to remove a +  /// hold). +  /// +  /// \param[in] s The status outcome of this RPC +  virtual void OnDone(const ::grpc::Status& /*s*/) = 0; + +  /// InternalScheduleOnDone is not part of the API and is not meant to be +  /// overridden. It is virtual to allow successful builds for certain bazel +  /// build users that only want to depend on gRPC codegen headers and not the +  /// full library (although this is not a generally-supported option). Although +  /// the virtual call is slower than a direct call, this function is +  /// heavyweight and the cost of the virtual call is not much in comparison. +  /// This function may be removed or devirtualized in the future. +  virtual void InternalScheduleOnDone(::grpc::Status s); +}; + +}  // namespace internal + +// Forward declarations +template <class Request, class Response> +class ClientBidiReactor; +template <class Response> +class ClientReadReactor; +template <class Request> +class ClientWriteReactor; +class ClientUnaryReactor; + +// NOTE: The streaming objects are not actually implemented in the public API. +//       These interfaces are provided for mocking only. Typical applications +//       will interact exclusively with the reactors that they define. +template <class Request, class Response> +class ClientCallbackReaderWriter { + public: +  virtual ~ClientCallbackReaderWriter() {} +  virtual void StartCall() = 0; +  virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0; +  virtual void WritesDone() = 0; +  virtual void Read(Response* resp) = 0; +  virtual void AddHold(int holds) = 0; +  virtual void RemoveHold() = 0; + + protected: +  void BindReactor(ClientBidiReactor<Request, Response>* reactor) { +    reactor->BindStream(this); +  } +}; + +template <class Response> +class ClientCallbackReader { + public: +  virtual ~ClientCallbackReader() {} +  virtual void StartCall() = 0; +  virtual void Read(Response* resp) = 0; +  virtual void AddHold(int holds) = 0; +  virtual void RemoveHold() = 0; + + protected: +  void BindReactor(ClientReadReactor<Response>* reactor) { +    reactor->BindReader(this); +  } +}; + +template <class Request> +class ClientCallbackWriter { + public: +  virtual ~ClientCallbackWriter() {} +  virtual void StartCall() = 0; +  void Write(const Request* req) { Write(req, ::grpc::WriteOptions()); } +  virtual void Write(const Request* req, ::grpc::WriteOptions options) = 0; +  void WriteLast(const Request* req, ::grpc::WriteOptions options) { +    Write(req, options.set_last_message()); +  } +  virtual void WritesDone() = 0; + +  virtual void AddHold(int holds) = 0; +  virtual void RemoveHold() = 0; + + protected: +  void BindReactor(ClientWriteReactor<Request>* reactor) { +    reactor->BindWriter(this); +  } +}; + +class ClientCallbackUnary { + public: +  virtual ~ClientCallbackUnary() {} +  virtual void StartCall() = 0; + + protected: +  void BindReactor(ClientUnaryReactor* reactor); +}; + +// The following classes are the reactor interfaces that are to be implemented +// by the user. They are passed in to the library as an argument to a call on a +// stub (either a codegen-ed call or a generic call). The streaming RPC is +// activated by calling StartCall, possibly after initiating StartRead, +// StartWrite, or AddHold operations on the streaming object. 
Note that none of +// the classes are pure; all reactions have a default empty reaction so that the +// user class only needs to override those classes that it cares about. +// The reactor must be passed to the stub invocation before any of the below +// operations can be called. + +/// \a ClientBidiReactor is the interface for a bidirectional streaming RPC. +template <class Request, class Response> +class ClientBidiReactor : public internal::ClientReactor { + public: +  virtual ~ClientBidiReactor() {} + +  /// Activate the RPC and initiate any reads or writes that have been Start'ed +  /// before this call. All streaming RPCs issued by the client MUST have +  /// StartCall invoked on them (even if they are canceled) as this call is the +  /// activation of their lifecycle. +  void StartCall() { stream_->StartCall(); } + +  /// Initiate a read operation (or post it for later initiation if StartCall +  /// has not yet been invoked). +  /// +  /// \param[out] resp Where to eventually store the read message. Valid when +  ///                  the library calls OnReadDone +  void StartRead(Response* resp) { stream_->Read(resp); } + +  /// Initiate a write operation (or post it for later initiation if StartCall +  /// has not yet been invoked). +  /// +  /// \param[in] req The message to be written. The library does not take +  ///                ownership but the caller must ensure that the message is +  ///                not deleted or modified until OnWriteDone is called. +  void StartWrite(const Request* req) { +    StartWrite(req, ::grpc::WriteOptions()); +  } + +  /// Initiate/post a write operation with specified options. +  /// +  /// \param[in] req The message to be written. The library does not take +  ///                ownership but the caller must ensure that the message is +  ///                not deleted or modified until OnWriteDone is called. +  /// \param[in] options The WriteOptions to use for writing this message +  void StartWrite(const Request* req, ::grpc::WriteOptions options) { +    stream_->Write(req, std::move(options)); +  } + +  /// Initiate/post a write operation with specified options and an indication +  /// that this is the last write (like StartWrite and StartWritesDone, merged). +  /// Note that calling this means that no more calls to StartWrite, +  /// StartWriteLast, or StartWritesDone are allowed. +  /// +  /// \param[in] req The message to be written. The library does not take +  ///                ownership but the caller must ensure that the message is +  ///                not deleted or modified until OnWriteDone is called. +  /// \param[in] options The WriteOptions to use for writing this message +  void StartWriteLast(const Request* req, ::grpc::WriteOptions options) { +    StartWrite(req, std::move(options.set_last_message())); +  } + +  /// Indicate that the RPC will have no more write operations. This can only be +  /// issued once for a given RPC. This is not required or allowed if +  /// StartWriteLast is used since that already has the same implication. +  /// Note that calling this means that no more calls to StartWrite, +  /// StartWriteLast, or StartWritesDone are allowed. +  void StartWritesDone() { stream_->WritesDone(); } + +  /// Holds are needed if (and only if) this stream has operations that take +  /// place on it after StartCall but from outside one of the reactions +  /// (OnReadDone, etc). This is _not_ a common use of the streaming API. +  /// +  /// Holds must be added before calling StartCall. 
If a stream still has a hold +  /// in place, its resources will not be destroyed even if the status has +  /// already come in from the wire and there are currently no active callbacks +  /// outstanding. Similarly, the stream will not call OnDone if there are still +  /// holds on it. +  /// +  /// For example, if a StartRead or StartWrite operation is going to be +  /// initiated from elsewhere in the application, the application should call +  /// AddHold or AddMultipleHolds before StartCall.  If there is going to be, +  /// for example, a read-flow and a write-flow taking place outside the +  /// reactions, then call AddMultipleHolds(2) before StartCall. When the +  /// application knows that it won't issue any more read operations (such as +  /// when a read comes back as not ok), it should issue a RemoveHold(). It +  /// should also call RemoveHold() again after it does StartWriteLast or +  /// StartWritesDone that indicates that there will be no more write ops. +  /// The number of RemoveHold calls must match the total number of AddHold +  /// calls plus the number of holds added by AddMultipleHolds. +  /// The argument to AddMultipleHolds must be positive. +  void AddHold() { AddMultipleHolds(1); } +  void AddMultipleHolds(int holds) { +    GPR_CODEGEN_DEBUG_ASSERT(holds > 0); +    stream_->AddHold(holds); +  } +  void RemoveHold() { stream_->RemoveHold(); } + +  /// Notifies the application that all operations associated with this RPC +  /// have completed and all Holds have been removed. OnDone provides the RPC +  /// status outcome for both successful and failed RPCs and will be called in +  /// all cases. If it is not called, it indicates an application-level problem +  /// (like failure to remove a hold). +  /// +  /// \param[in] s The status outcome of this RPC +  void OnDone(const ::grpc::Status& /*s*/) override {} + +  /// Notifies the application that a read of initial metadata from the +  /// server is done. If the application chooses not to implement this method, +  /// it can assume that the initial metadata has been read before the first +  /// call of OnReadDone or OnDone. +  /// +  /// \param[in] ok Was the initial metadata read successfully? If false, no +  ///               new read/write operation will succeed, and any further +  ///               Start* operations should not be called. +  virtual void OnReadInitialMetadataDone(bool /*ok*/) {} + +  /// Notifies the application that a StartRead operation completed. +  /// +  /// \param[in] ok Was it successful? If false, no new read/write operation +  ///               will succeed, and any further Start* should not be called. +  virtual void OnReadDone(bool /*ok*/) {} + +  /// Notifies the application that a StartWrite or StartWriteLast operation +  /// completed. +  /// +  /// \param[in] ok Was it successful? If false, no new read/write operation +  ///               will succeed, and any further Start* should not be called. +  virtual void OnWriteDone(bool /*ok*/) {} + +  /// Notifies the application that a StartWritesDone operation completed. Note +  /// that this is only used on explicit StartWritesDone operations and not for +  /// those that are implicitly invoked as part of a StartWriteLast. +  /// +  /// \param[in] ok Was it successful? If false, the application will later see +  ///               the failure reflected as a bad status in OnDone and no +  ///               further Start* should be called. 
+  virtual void OnWritesDoneDone(bool /*ok*/) {} + + private: +  friend class ClientCallbackReaderWriter<Request, Response>; +  void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) { +    stream_ = stream; +  } +  ClientCallbackReaderWriter<Request, Response>* stream_; +}; + +/// \a ClientReadReactor is the interface for a server-streaming RPC. +/// All public methods behave as in ClientBidiReactor. +template <class Response> +class ClientReadReactor : public internal::ClientReactor { + public: +  virtual ~ClientReadReactor() {} + +  void StartCall() { reader_->StartCall(); } +  void StartRead(Response* resp) { reader_->Read(resp); } + +  void AddHold() { AddMultipleHolds(1); } +  void AddMultipleHolds(int holds) { +    GPR_CODEGEN_DEBUG_ASSERT(holds > 0); +    reader_->AddHold(holds); +  } +  void RemoveHold() { reader_->RemoveHold(); } + +  void OnDone(const ::grpc::Status& /*s*/) override {} +  virtual void OnReadInitialMetadataDone(bool /*ok*/) {} +  virtual void OnReadDone(bool /*ok*/) {} + + private: +  friend class ClientCallbackReader<Response>; +  void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; } +  ClientCallbackReader<Response>* reader_; +}; + +/// \a ClientWriteReactor is the interface for a client-streaming RPC. +/// All public methods behave as in ClientBidiReactor. +template <class Request> +class ClientWriteReactor : public internal::ClientReactor { + public: +  virtual ~ClientWriteReactor() {} + +  void StartCall() { writer_->StartCall(); } +  void StartWrite(const Request* req) { +    StartWrite(req, ::grpc::WriteOptions()); +  } +  void StartWrite(const Request* req, ::grpc::WriteOptions options) { +    writer_->Write(req, std::move(options)); +  } +  void StartWriteLast(const Request* req, ::grpc::WriteOptions options) { +    StartWrite(req, std::move(options.set_last_message())); +  } +  void StartWritesDone() { writer_->WritesDone(); } + +  void AddHold() { AddMultipleHolds(1); } +  void AddMultipleHolds(int holds) { +    GPR_CODEGEN_DEBUG_ASSERT(holds > 0); +    writer_->AddHold(holds); +  } +  void RemoveHold() { writer_->RemoveHold(); } + +  void OnDone(const ::grpc::Status& /*s*/) override {} +  virtual void OnReadInitialMetadataDone(bool /*ok*/) {} +  virtual void OnWriteDone(bool /*ok*/) {} +  virtual void OnWritesDoneDone(bool /*ok*/) {} + + private: +  friend class ClientCallbackWriter<Request>; +  void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; } + +  ClientCallbackWriter<Request>* writer_; +}; + +/// \a ClientUnaryReactor is a reactor-style interface for a unary RPC. +/// This is _not_ a common way of invoking a unary RPC. In practice, this +/// option should be used only if the unary RPC wants to receive initial +/// metadata without waiting for the response to complete. Most deployments of +/// RPC systems do not use this option, but it is needed for generality. +/// All public methods behave as in ClientBidiReactor. +/// StartCall is included for consistency with the other reactor flavors: even +/// though there are no StartRead or StartWrite operations to queue before the +/// call (that is part of the unary call itself) and there is no reactor object +/// being created as a result of this call, we keep a consistent 2-phase +/// initiation API among all the reactor flavors. 
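+///
+/// A minimal sketch of a user-defined unary reactor; the name MyUnaryReactor
+/// and the done-signalling mechanism are illustrative only, not part of this
+/// header:
+///
+///   class MyUnaryReactor : public grpc::ClientUnaryReactor {
+///    public:
+///     void OnReadInitialMetadataDone(bool /*ok*/) override {
+///       // initial metadata is now available on the ClientContext
+///     }
+///     void OnDone(const grpc::Status& s) override {
+///       status_ = s;
+///       done_.set_value();  // a std::promise<void> the caller waits on
+///     }
+///     std::future<void> Done() { return done_.get_future(); }
+///    private:
+///     grpc::Status status_;
+///     std::promise<void> done_;
+///   };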
+class ClientUnaryReactor : public internal::ClientReactor { + public: +  virtual ~ClientUnaryReactor() {} + +  void StartCall() { call_->StartCall(); } +  void OnDone(const ::grpc::Status& /*s*/) override {} +  virtual void OnReadInitialMetadataDone(bool /*ok*/) {} + + private: +  friend class ClientCallbackUnary; +  void BindCall(ClientCallbackUnary* call) { call_ = call; } +  ClientCallbackUnary* call_; +}; + +// Define function out-of-line from class to avoid forward declaration issue +inline void ClientCallbackUnary::BindReactor(ClientUnaryReactor* reactor) { +  reactor->BindCall(this); +} + +namespace internal { + +// Forward declare factory classes for friendship +template <class Request, class Response> +class ClientCallbackReaderWriterFactory; +template <class Response> +class ClientCallbackReaderFactory; +template <class Request> +class ClientCallbackWriterFactory; + +template <class Request, class Response> +class ClientCallbackReaderWriterImpl +    : public ClientCallbackReaderWriter<Request, Response> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderWriterImpl)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall() override { +    // This call initiates two batches, plus any backlog, each with a callback +    // 1. Send initial metadata (unless corked) + recv initial metadata +    // 2. Any read backlog +    // 3. Any write backlog +    // 4. Recv trailing metadata (unless corked) +    if (!start_corked_) { +      start_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                     context_->initial_metadata_flags()); +    } + +    call_.PerformOps(&start_ops_); + +    { +      grpc::internal::MutexLock lock(&start_mu_); + +      if (backlog_.read_ops) { +        call_.PerformOps(&read_ops_); +      } +      if (backlog_.write_ops) { +        call_.PerformOps(&write_ops_); +      } +      if (backlog_.writes_done_ops) { +        call_.PerformOps(&writes_done_ops_); +      } +      call_.PerformOps(&finish_ops_); +      // The last thing in this critical section is to set started_ so that it +      // can be used lock-free as well. 
+      started_.store(true, std::memory_order_release); +    } +    // MaybeFinish outside the lock to make sure that destruction of this object +    // doesn't take place while holding the lock (which would cause the lock to +    // be released after destruction) +    this->MaybeFinish(/*from_reaction=*/false); +  } + +  void Read(Response* msg) override { +    read_ops_.RecvMessage(msg); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.read_ops = true; +        return; +      } +    } +    call_.PerformOps(&read_ops_); +  } + +  void Write(const Request* msg, ::grpc::WriteOptions options) override { +    if (options.is_last_message()) { +      options.set_buffer_hint(); +      write_ops_.ClientSendClose(); +    } +    // TODO(vjpai): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok()); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); +    if (GPR_UNLIKELY(corked_write_needed_)) { +      write_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                     context_->initial_metadata_flags()); +      corked_write_needed_ = false; +    } + +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.write_ops = true; +        return; +      } +    } +    call_.PerformOps(&write_ops_); +  } +  void WritesDone() override { +    writes_done_ops_.ClientSendClose(); +    writes_done_tag_.Set(call_.call(), +                         [this](bool ok) { +                           reactor_->OnWritesDoneDone(ok); +                           MaybeFinish(/*from_reaction=*/true); +                         }, +                         &writes_done_ops_, /*can_inline=*/false); +    writes_done_ops_.set_core_cq_tag(&writes_done_tag_); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); +    if (GPR_UNLIKELY(corked_write_needed_)) { +      writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                           context_->initial_metadata_flags()); +      corked_write_needed_ = false; +    } +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.writes_done_ops = true; +        return; +      } +    } +    call_.PerformOps(&writes_done_ops_); +  } + +  void AddHold(int holds) override { +    callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed); +  } +  void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); } + + private: +  friend class ClientCallbackReaderWriterFactory<Request, Response>; + +  ClientCallbackReaderWriterImpl(grpc::internal::Call call, +                                 ::grpc::ClientContext* context, +                                 ClientBidiReactor<Request, Response>* reactor) +      : context_(context), +        call_(call), +        reactor_(reactor), +        start_corked_(context_->initial_metadata_corked_), +        corked_write_needed_(start_corked_) { +    this->BindReactor(reactor); + +    // Set up the unchanging parts of the start, read, and write tags and ops. 
+    start_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnReadInitialMetadataDone(ok); +                     MaybeFinish(/*from_reaction=*/true); +                   }, +                   &start_ops_, /*can_inline=*/false); +    start_ops_.RecvInitialMetadata(context_); +    start_ops_.set_core_cq_tag(&start_tag_); + +    write_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnWriteDone(ok); +                     MaybeFinish(/*from_reaction=*/true); +                   }, +                   &write_ops_, /*can_inline=*/false); +    write_ops_.set_core_cq_tag(&write_tag_); + +    read_tag_.Set(call_.call(), +                  [this](bool ok) { +                    reactor_->OnReadDone(ok); +                    MaybeFinish(/*from_reaction=*/true); +                  }, +                  &read_ops_, /*can_inline=*/false); +    read_ops_.set_core_cq_tag(&read_tag_); + +    // Also set up the Finish tag and op set. +    finish_tag_.Set( +        call_.call(), +        [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); }, +        &finish_ops_, +        /*can_inline=*/false); +    finish_ops_.ClientRecvStatus(context_, &finish_status_); +    finish_ops_.set_core_cq_tag(&finish_tag_); +  } + +  // MaybeFinish can be called from reactions or from user-initiated operations +  // like StartCall or RemoveHold. If this is the last operation or hold on this +  // object, it will invoke the OnDone reaction. If MaybeFinish was called from +  // a reaction, it can call OnDone directly. If not, it would need to schedule +  // OnDone onto an executor thread to avoid the possibility of deadlocking with +  // any locks in the user code that invoked it. +  void MaybeFinish(bool from_reaction) { +    if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub( +                         1, std::memory_order_acq_rel) == 1)) { +      ::grpc::Status s = std::move(finish_status_); +      auto* reactor = reactor_; +      auto* call = call_.call(); +      this->~ClientCallbackReaderWriterImpl(); +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      if (GPR_LIKELY(from_reaction)) { +        reactor->OnDone(s); +      } else { +        reactor->InternalScheduleOnDone(std::move(s)); +      } +    } +  } + +  ::grpc::ClientContext* const context_; +  grpc::internal::Call call_; +  ClientBidiReactor<Request, Response>* const reactor_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpRecvInitialMetadata> +      start_ops_; +  grpc::internal::CallbackWithSuccessTag start_tag_; +  const bool start_corked_; +  bool corked_write_needed_;  // no lock needed since only accessed in +                              // Write/WritesDone which cannot be concurrent + +  grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_; +  grpc::internal::CallbackWithSuccessTag finish_tag_; +  ::grpc::Status finish_status_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpSendMessage, +                            grpc::internal::CallOpClientSendClose> +      write_ops_; +  grpc::internal::CallbackWithSuccessTag write_tag_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpClientSendClose> +      writes_done_ops_; +  grpc::internal::CallbackWithSuccessTag writes_done_tag_; + +  
grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>> +      read_ops_; +  grpc::internal::CallbackWithSuccessTag read_tag_; + +  struct StartCallBacklog { +    bool write_ops = false; +    bool writes_done_ops = false; +    bool read_ops = false; +  }; +  StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */; + +  // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish +  std::atomic<intptr_t> callbacks_outstanding_{3}; +  std::atomic_bool started_{false}; +  grpc::internal::Mutex start_mu_; +}; + +template <class Request, class Response> +class ClientCallbackReaderWriterFactory { + public: +  static void Create(::grpc::ChannelInterface* channel, +                     const ::grpc::internal::RpcMethod& method, +                     ::grpc::ClientContext* context, +                     ClientBidiReactor<Request, Response>* reactor) { +    grpc::internal::Call call = +        channel->CreateCall(method, context, channel->CallbackCQ()); + +    ::grpc::g_core_codegen_interface->grpc_call_ref(call.call()); +    new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientCallbackReaderWriterImpl<Request, Response>))) +        ClientCallbackReaderWriterImpl<Request, Response>(call, context, +                                                          reactor); +  } +}; + +template <class Response> +class ClientCallbackReaderImpl : public ClientCallbackReader<Response> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackReaderImpl)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall() override { +    // This call initiates two batches, plus any backlog, each with a callback +    // 1. Send initial metadata (unless corked) + recv initial metadata +    // 2. Any backlog +    // 3. 
Recv trailing metadata + +    start_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnReadInitialMetadataDone(ok); +                     MaybeFinish(/*from_reaction=*/true); +                   }, +                   &start_ops_, /*can_inline=*/false); +    start_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                   context_->initial_metadata_flags()); +    start_ops_.RecvInitialMetadata(context_); +    start_ops_.set_core_cq_tag(&start_tag_); +    call_.PerformOps(&start_ops_); + +    // Also set up the read tag so it doesn't have to be set up each time +    read_tag_.Set(call_.call(), +                  [this](bool ok) { +                    reactor_->OnReadDone(ok); +                    MaybeFinish(/*from_reaction=*/true); +                  }, +                  &read_ops_, /*can_inline=*/false); +    read_ops_.set_core_cq_tag(&read_tag_); + +    { +      grpc::internal::MutexLock lock(&start_mu_); +      if (backlog_.read_ops) { +        call_.PerformOps(&read_ops_); +      } +      started_.store(true, std::memory_order_release); +    } + +    finish_tag_.Set( +        call_.call(), +        [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); }, +        &finish_ops_, /*can_inline=*/false); +    finish_ops_.ClientRecvStatus(context_, &finish_status_); +    finish_ops_.set_core_cq_tag(&finish_tag_); +    call_.PerformOps(&finish_ops_); +  } + +  void Read(Response* msg) override { +    read_ops_.RecvMessage(msg); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.read_ops = true; +        return; +      } +    } +    call_.PerformOps(&read_ops_); +  } + +  void AddHold(int holds) override { +    callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed); +  } +  void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); } + + private: +  friend class ClientCallbackReaderFactory<Response>; + +  template <class Request> +  ClientCallbackReaderImpl(::grpc::internal::Call call, +                           ::grpc::ClientContext* context, Request* request, +                           ClientReadReactor<Response>* reactor) +      : context_(context), call_(call), reactor_(reactor) { +    this->BindReactor(reactor); +    // TODO(vjpai): don't assert +    GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok()); +    start_ops_.ClientSendClose(); +  } + +  // MaybeFinish behaves as in ClientCallbackReaderWriterImpl. 
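+  // That is, the last outstanding callback or hold destroys this object and
+  // then delivers OnDone: directly when invoked from a reaction, or through
+  // the executor when invoked from RemoveHold.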
+  void MaybeFinish(bool from_reaction) { +    if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub( +                         1, std::memory_order_acq_rel) == 1)) { +      ::grpc::Status s = std::move(finish_status_); +      auto* reactor = reactor_; +      auto* call = call_.call(); +      this->~ClientCallbackReaderImpl(); +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      if (GPR_LIKELY(from_reaction)) { +        reactor->OnDone(s); +      } else { +        reactor->InternalScheduleOnDone(std::move(s)); +      } +    } +  } + +  ::grpc::ClientContext* const context_; +  grpc::internal::Call call_; +  ClientReadReactor<Response>* const reactor_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpSendMessage, +                            grpc::internal::CallOpClientSendClose, +                            grpc::internal::CallOpRecvInitialMetadata> +      start_ops_; +  grpc::internal::CallbackWithSuccessTag start_tag_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpClientRecvStatus> finish_ops_; +  grpc::internal::CallbackWithSuccessTag finish_tag_; +  ::grpc::Status finish_status_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<Response>> +      read_ops_; +  grpc::internal::CallbackWithSuccessTag read_tag_; + +  struct StartCallBacklog { +    bool read_ops = false; +  }; +  StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */; + +  // Minimum of 2 callbacks to pre-register for start and finish +  std::atomic<intptr_t> callbacks_outstanding_{2}; +  std::atomic_bool started_{false}; +  grpc::internal::Mutex start_mu_; +}; + +template <class Response> +class ClientCallbackReaderFactory { + public: +  template <class Request> +  static void Create(::grpc::ChannelInterface* channel, +                     const ::grpc::internal::RpcMethod& method, +                     ::grpc::ClientContext* context, const Request* request, +                     ClientReadReactor<Response>* reactor) { +    grpc::internal::Call call = +        channel->CreateCall(method, context, channel->CallbackCQ()); + +    ::grpc::g_core_codegen_interface->grpc_call_ref(call.call()); +    new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientCallbackReaderImpl<Response>))) +        ClientCallbackReaderImpl<Response>(call, context, request, reactor); +  } +}; + +template <class Request> +class ClientCallbackWriterImpl : public ClientCallbackWriter<Request> { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackWriterImpl)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall() override { +    // This call initiates two batches, plus any backlog, each with a callback +    // 1. Send initial metadata (unless corked) + recv initial metadata +    // 2. Any backlog +    // 3. 
Recv trailing metadata + +    if (!start_corked_) { +      start_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                     context_->initial_metadata_flags()); +    } +    call_.PerformOps(&start_ops_); + +    { +      grpc::internal::MutexLock lock(&start_mu_); + +      if (backlog_.write_ops) { +        call_.PerformOps(&write_ops_); +      } +      if (backlog_.writes_done_ops) { +        call_.PerformOps(&writes_done_ops_); +      } +      call_.PerformOps(&finish_ops_); +      // The last thing in this critical section is to set started_ so that it +      // can be used lock-free as well. +      started_.store(true, std::memory_order_release); +    } +    // MaybeFinish outside the lock to make sure that destruction of this object +    // doesn't take place while holding the lock (which would cause the lock to +    // be released after destruction) +    this->MaybeFinish(/*from_reaction=*/false); +  } + +  void Write(const Request* msg, ::grpc::WriteOptions options) override { +    if (GPR_UNLIKELY(options.is_last_message())) { +      options.set_buffer_hint(); +      write_ops_.ClientSendClose(); +    } +    // TODO(vjpai): don't assert +    GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(msg, options).ok()); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); + +    if (GPR_UNLIKELY(corked_write_needed_)) { +      write_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                     context_->initial_metadata_flags()); +      corked_write_needed_ = false; +    } + +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.write_ops = true; +        return; +      } +    } +    call_.PerformOps(&write_ops_); +  } + +  void WritesDone() override { +    writes_done_ops_.ClientSendClose(); +    writes_done_tag_.Set(call_.call(), +                         [this](bool ok) { +                           reactor_->OnWritesDoneDone(ok); +                           MaybeFinish(/*from_reaction=*/true); +                         }, +                         &writes_done_ops_, /*can_inline=*/false); +    writes_done_ops_.set_core_cq_tag(&writes_done_tag_); +    callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); + +    if (GPR_UNLIKELY(corked_write_needed_)) { +      writes_done_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                           context_->initial_metadata_flags()); +      corked_write_needed_ = false; +    } + +    if (GPR_UNLIKELY(!started_.load(std::memory_order_acquire))) { +      grpc::internal::MutexLock lock(&start_mu_); +      if (GPR_LIKELY(!started_.load(std::memory_order_relaxed))) { +        backlog_.writes_done_ops = true; +        return; +      } +    } +    call_.PerformOps(&writes_done_ops_); +  } + +  void AddHold(int holds) override { +    callbacks_outstanding_.fetch_add(holds, std::memory_order_relaxed); +  } +  void RemoveHold() override { MaybeFinish(/*from_reaction=*/false); } + + private: +  friend class ClientCallbackWriterFactory<Request>; + +  template <class Response> +  ClientCallbackWriterImpl(::grpc::internal::Call call, +                           ::grpc::ClientContext* context, Response* response, +                           ClientWriteReactor<Request>* reactor) +      : context_(context), +        call_(call), +        reactor_(reactor), +        
start_corked_(context_->initial_metadata_corked_), +        corked_write_needed_(start_corked_) { +    this->BindReactor(reactor); + +    // Set up the unchanging parts of the start and write tags and ops. +    start_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnReadInitialMetadataDone(ok); +                     MaybeFinish(/*from_reaction=*/true); +                   }, +                   &start_ops_, /*can_inline=*/false); +    start_ops_.RecvInitialMetadata(context_); +    start_ops_.set_core_cq_tag(&start_tag_); + +    write_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnWriteDone(ok); +                     MaybeFinish(/*from_reaction=*/true); +                   }, +                   &write_ops_, /*can_inline=*/false); +    write_ops_.set_core_cq_tag(&write_tag_); + +    // Also set up the Finish tag and op set. +    finish_ops_.RecvMessage(response); +    finish_ops_.AllowNoMessage(); +    finish_tag_.Set( +        call_.call(), +        [this](bool /*ok*/) { MaybeFinish(/*from_reaction=*/true); }, +        &finish_ops_, +        /*can_inline=*/false); +    finish_ops_.ClientRecvStatus(context_, &finish_status_); +    finish_ops_.set_core_cq_tag(&finish_tag_); +  } + +  // MaybeFinish behaves as in ClientCallbackReaderWriterImpl. +  void MaybeFinish(bool from_reaction) { +    if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub( +                         1, std::memory_order_acq_rel) == 1)) { +      ::grpc::Status s = std::move(finish_status_); +      auto* reactor = reactor_; +      auto* call = call_.call(); +      this->~ClientCallbackWriterImpl(); +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      if (GPR_LIKELY(from_reaction)) { +        reactor->OnDone(s); +      } else { +        reactor->InternalScheduleOnDone(std::move(s)); +      } +    } +  } + +  ::grpc::ClientContext* const context_; +  grpc::internal::Call call_; +  ClientWriteReactor<Request>* const reactor_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpRecvInitialMetadata> +      start_ops_; +  grpc::internal::CallbackWithSuccessTag start_tag_; +  const bool start_corked_; +  bool corked_write_needed_;  // no lock needed since only accessed in +                              // Write/WritesDone which cannot be concurrent + +  grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage, +                            grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +  grpc::internal::CallbackWithSuccessTag finish_tag_; +  ::grpc::Status finish_status_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpSendMessage, +                            grpc::internal::CallOpClientSendClose> +      write_ops_; +  grpc::internal::CallbackWithSuccessTag write_tag_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpClientSendClose> +      writes_done_ops_; +  grpc::internal::CallbackWithSuccessTag writes_done_tag_; + +  struct StartCallBacklog { +    bool write_ops = false; +    bool writes_done_ops = false; +  }; +  StartCallBacklog backlog_ /* GUARDED_BY(start_mu_) */; + +  // Minimum of 3 callbacks to pre-register for start ops, StartCall, and finish +  std::atomic<intptr_t> callbacks_outstanding_{3}; +  std::atomic_bool started_{false}; +  grpc::internal::Mutex start_mu_; 
+}; + +template <class Request> +class ClientCallbackWriterFactory { + public: +  template <class Response> +  static void Create(::grpc::ChannelInterface* channel, +                     const ::grpc::internal::RpcMethod& method, +                     ::grpc::ClientContext* context, Response* response, +                     ClientWriteReactor<Request>* reactor) { +    grpc::internal::Call call = +        channel->CreateCall(method, context, channel->CallbackCQ()); + +    ::grpc::g_core_codegen_interface->grpc_call_ref(call.call()); +    new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientCallbackWriterImpl<Request>))) +        ClientCallbackWriterImpl<Request>(call, context, response, reactor); +  } +}; + +class ClientCallbackUnaryImpl final : public ClientCallbackUnary { + public: +  // always allocated against a call arena, no memory free required +  static void operator delete(void* /*ptr*/, std::size_t size) { +    GPR_CODEGEN_ASSERT(size == sizeof(ClientCallbackUnaryImpl)); +  } + +  // This operator should never be called as the memory should be freed as part +  // of the arena destruction. It only exists to provide a matching operator +  // delete to the operator new so that some compilers will not complain (see +  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this +  // there are no tests catching the compiler warning. +  static void operator delete(void*, void*) { GPR_CODEGEN_ASSERT(false); } + +  void StartCall() override { +    // This call initiates two batches, each with a callback +    // 1. Send initial metadata + write + writes done + recv initial metadata +    // 2. Read message, recv trailing metadata + +    start_tag_.Set(call_.call(), +                   [this](bool ok) { +                     reactor_->OnReadInitialMetadataDone(ok); +                     MaybeFinish(); +                   }, +                   &start_ops_, /*can_inline=*/false); +    start_ops_.SendInitialMetadata(&context_->send_initial_metadata_, +                                   context_->initial_metadata_flags()); +    start_ops_.RecvInitialMetadata(context_); +    start_ops_.set_core_cq_tag(&start_tag_); +    call_.PerformOps(&start_ops_); + +    finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); }, +                    &finish_ops_, +                    /*can_inline=*/false); +    finish_ops_.ClientRecvStatus(context_, &finish_status_); +    finish_ops_.set_core_cq_tag(&finish_tag_); +    call_.PerformOps(&finish_ops_); +  } + + private: +  friend class ClientCallbackUnaryFactory; + +  template <class Request, class Response> +  ClientCallbackUnaryImpl(::grpc::internal::Call call, +                          ::grpc::ClientContext* context, Request* request, +                          Response* response, ClientUnaryReactor* reactor) +      : context_(context), call_(call), reactor_(reactor) { +    this->BindReactor(reactor); +    // TODO(vjpai): don't assert +    GPR_CODEGEN_ASSERT(start_ops_.SendMessagePtr(request).ok()); +    start_ops_.ClientSendClose(); +    finish_ops_.RecvMessage(response); +    finish_ops_.AllowNoMessage(); +  } + +  // In the unary case, MaybeFinish is only ever invoked from a +  // library-initiated reaction, so it will just directly call OnDone if this is +  // the last reaction for this RPC. 
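+  //
+  // For orientation, a caller-side sketch of the flow this class implements
+  // (illustrative only, not part of the original header; "stub" and "Echo"
+  // are placeholder names, and the generated accessor may be async() or
+  // experimental_async() depending on the gRPC version):
+  //
+  //   class EchoReactor : public grpc::ClientUnaryReactor {
+  //    public:
+  //     void OnReadInitialMetadataDone(bool ok) override { /* optional */ }
+  //     void OnDone(const grpc::Status& s) override { /* RPC finished */ }
+  //   };
+  //
+  //   EchoReactor reactor;
+  //   stub->async()->Echo(&context, &request, &response, &reactor);
+  //   reactor.StartCall();  // eventually drives StartCall() above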
+  void MaybeFinish() { +    if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub( +                         1, std::memory_order_acq_rel) == 1)) { +      ::grpc::Status s = std::move(finish_status_); +      auto* reactor = reactor_; +      auto* call = call_.call(); +      this->~ClientCallbackUnaryImpl(); +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      reactor->OnDone(s); +    } +  } + +  ::grpc::ClientContext* const context_; +  grpc::internal::Call call_; +  ClientUnaryReactor* const reactor_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, +                            grpc::internal::CallOpSendMessage, +                            grpc::internal::CallOpClientSendClose, +                            grpc::internal::CallOpRecvInitialMetadata> +      start_ops_; +  grpc::internal::CallbackWithSuccessTag start_tag_; + +  grpc::internal::CallOpSet<grpc::internal::CallOpGenericRecvMessage, +                            grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +  grpc::internal::CallbackWithSuccessTag finish_tag_; +  ::grpc::Status finish_status_; + +  // This call will have 2 callbacks: start and finish +  std::atomic<intptr_t> callbacks_outstanding_{2}; +}; + +class ClientCallbackUnaryFactory { + public: +  template <class Request, class Response> +  static void Create(::grpc::ChannelInterface* channel, +                     const ::grpc::internal::RpcMethod& method, +                     ::grpc::ClientContext* context, const Request* request, +                     Response* response, ClientUnaryReactor* reactor) { +    grpc::internal::Call call = +        channel->CreateCall(method, context, channel->CallbackCQ()); + +    ::grpc::g_core_codegen_interface->grpc_call_ref(call.call()); + +    new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        call.call(), sizeof(ClientCallbackUnaryImpl))) +        ClientCallbackUnaryImpl(call, context, request, response, reactor); +  } +}; + +}  // namespace internal + +// TODO(vjpai): Remove namespace experimental when de-experimentalized fully. +namespace experimental { + +template <class Response> +using ClientCallbackReader = ::grpc::ClientCallbackReader<Response>; + +template <class Request> +using ClientCallbackWriter = ::grpc::ClientCallbackWriter<Request>; + +template <class Request, class Response> +using ClientCallbackReaderWriter = +    ::grpc::ClientCallbackReaderWriter<Request, Response>; + +template <class Response> +using ClientReadReactor = ::grpc::ClientReadReactor<Response>; + +template <class Request> +using ClientWriteReactor = ::grpc::ClientWriteReactor<Request>; + +template <class Request, class Response> +using ClientBidiReactor = ::grpc::ClientBidiReactor<Request, Response>; + +typedef ::grpc::ClientUnaryReactor ClientUnaryReactor; + +}  // namespace experimental + +}  // namespace grpc +#endif  // GRPCPP_IMPL_CODEGEN_CLIENT_CALLBACK_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h new file mode 100644 index 00000000000..a4e58f34c5e --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_context.h @@ -0,0 +1,524 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/// A ClientContext allows the person implementing a service client to: +/// +/// - Add custom metadata key-value pairs that will propagated to the server +/// side. +/// - Control call settings such as compression and authentication. +/// - Initial and trailing metadata coming from the server. +/// - Get performance metrics (ie, census). +/// +/// Context settings are only relevant to the call they are invoked with, that +/// is to say, they aren't sticky. Some of these settings, such as the +/// compression options, can be made persistent at channel construction time +/// (see \a grpc::CreateCustomChannel). +/// +/// \warning ClientContext instances should \em not be reused across rpcs. + +#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H +#define GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H + +#include <map> +#include <memory> +#include <util/generic/string.h> + +#include <grpc/impl/codegen/compression_types.h> +#include <grpc/impl/codegen/propagation_bits.h> +#include <grpcpp/impl/codegen/client_interceptor.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/create_auth_context.h> +#include <grpcpp/impl/codegen/metadata_map.h> +#include <grpcpp/impl/codegen/rpc_method.h> +#include <grpcpp/impl/codegen/security/auth_context.h> +#include <grpcpp/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/status.h> +#include <grpcpp/impl/codegen/string_ref.h> +#include <grpcpp/impl/codegen/sync.h> +#include <grpcpp/impl/codegen/time.h> + +struct census_context; +struct grpc_call; + +namespace grpc { +class ServerContext; +class ServerContextBase; +class CallbackServerContext; + +namespace internal { +template <class InputMessage, class OutputMessage> +class CallbackUnaryCallImpl; +template <class Request, class Response> +class ClientCallbackReaderWriterImpl; +template <class Response> +class ClientCallbackReaderImpl; +template <class Request> +class ClientCallbackWriterImpl; +class ClientCallbackUnaryImpl; +class ClientContextAccessor; +}  // namespace internal + +template <class R> +class ClientReader; +template <class W> +class ClientWriter; +template <class W, class R> +class ClientReaderWriter; +template <class R> +class ClientAsyncReader; +template <class W> +class ClientAsyncWriter; +template <class W, class R> +class ClientAsyncReaderWriter; +template <class R> +class ClientAsyncResponseReader; + +namespace testing { +class InteropClientContextInspector; +}  // namespace testing + +namespace internal { +class RpcMethod; +template <class InputMessage, class OutputMessage> +class BlockingUnaryCallImpl; +class CallOpClientRecvStatus; +class CallOpRecvInitialMetadata; +class ServerContextImpl; +template <class InputMessage, class OutputMessage> +class CallbackUnaryCallImpl; +template <class Request, class Response> +class ClientCallbackReaderWriterImpl; +template <class Response> +class ClientCallbackReaderImpl; +template <class Request> +class ClientCallbackWriterImpl; +class ClientCallbackUnaryImpl; +class ClientContextAccessor; +}  // namespace internal + +class CallCredentials; +class 
Channel; +class ChannelInterface; +class CompletionQueue; + +/// Options for \a ClientContext::FromServerContext specifying which traits from +/// the \a ServerContext to propagate (copy) from it into a new \a +/// ClientContext. +/// +/// \see ClientContext::FromServerContext +class PropagationOptions { + public: +  PropagationOptions() : propagate_(GRPC_PROPAGATE_DEFAULTS) {} + +  PropagationOptions& enable_deadline_propagation() { +    propagate_ |= GRPC_PROPAGATE_DEADLINE; +    return *this; +  } + +  PropagationOptions& disable_deadline_propagation() { +    propagate_ &= ~GRPC_PROPAGATE_DEADLINE; +    return *this; +  } + +  PropagationOptions& enable_census_stats_propagation() { +    propagate_ |= GRPC_PROPAGATE_CENSUS_STATS_CONTEXT; +    return *this; +  } + +  PropagationOptions& disable_census_stats_propagation() { +    propagate_ &= ~GRPC_PROPAGATE_CENSUS_STATS_CONTEXT; +    return *this; +  } + +  PropagationOptions& enable_census_tracing_propagation() { +    propagate_ |= GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT; +    return *this; +  } + +  PropagationOptions& disable_census_tracing_propagation() { +    propagate_ &= ~GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT; +    return *this; +  } + +  PropagationOptions& enable_cancellation_propagation() { +    propagate_ |= GRPC_PROPAGATE_CANCELLATION; +    return *this; +  } + +  PropagationOptions& disable_cancellation_propagation() { +    propagate_ &= ~GRPC_PROPAGATE_CANCELLATION; +    return *this; +  } + +  uint32_t c_bitmask() const { return propagate_; } + + private: +  uint32_t propagate_; +}; + +/// A ClientContext allows the person implementing a service client to: +/// +/// - Add custom metadata key-value pairs that will propagated to the server +///   side. +/// - Control call settings such as compression and authentication. +/// - Initial and trailing metadata coming from the server. +/// - Get performance metrics (ie, census). +/// +/// Context settings are only relevant to the call they are invoked with, that +/// is to say, they aren't sticky. Some of these settings, such as the +/// compression options, can be made persistent at channel construction time +/// (see \a grpc::CreateCustomChannel). +/// +/// \warning ClientContext instances should \em not be reused across rpcs. +/// \warning The ClientContext instance used for creating an rpc must remain +///          alive and valid for the lifetime of the rpc. +class ClientContext { + public: +  ClientContext(); +  ~ClientContext(); + +  /// Create a new \a ClientContext as a child of an incoming server call, +  /// according to \a options (\see PropagationOptions). +  /// +  /// \param server_context The source server context to use as the basis for +  /// constructing the client context. +  /// \param options The options controlling what to copy from the \a +  /// server_context. +  /// +  /// \return A newly constructed \a ClientContext instance based on \a +  /// server_context, with traits propagated (copied) according to \a options. +  static std::unique_ptr<ClientContext> FromServerContext( +      const grpc::ServerContext& server_context, +      PropagationOptions options = PropagationOptions()); +  static std::unique_ptr<ClientContext> FromCallbackServerContext( +      const grpc::CallbackServerContext& server_context, +      PropagationOptions options = PropagationOptions()); + +  /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with +  /// a client call. 
These are made available at the server side by the \a +  /// grpc::ServerContext::client_metadata() method. +  /// +  /// \warning This method should only be called before invoking the rpc. +  /// +  /// \param meta_key The metadata key. If \a meta_value is binary data, it must +  /// end in "-bin". +  /// \param meta_value The metadata value. If its value is binary, the key name +  /// must end in "-bin". +  /// +  /// Metadata must conform to the following format: +  /// Custom-Metadata -> Binary-Header / ASCII-Header +  /// Binary-Header -> {Header-Name "-bin" } {binary value} +  /// ASCII-Header -> Header-Name ASCII-Value +  /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - . +  /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII +  void AddMetadata(const TString& meta_key, const TString& meta_value); + +  /// Return a collection of initial metadata key-value pairs. Note that keys +  /// may happen more than once (ie, a \a std::multimap is returned). +  /// +  /// \warning This method should only be called after initial metadata has been +  /// received. For streaming calls, see \a +  /// ClientReaderInterface::WaitForInitialMetadata(). +  /// +  /// \return A multimap of initial metadata key-value pairs from the server. +  const std::multimap<grpc::string_ref, grpc::string_ref>& +  GetServerInitialMetadata() const { +    GPR_CODEGEN_ASSERT(initial_metadata_received_); +    return *recv_initial_metadata_.map(); +  } + +  /// Return a collection of trailing metadata key-value pairs. Note that keys +  /// may happen more than once (ie, a \a std::multimap is returned). +  /// +  /// \warning This method is only callable once the stream has finished. +  /// +  /// \return A multimap of metadata trailing key-value pairs from the server. +  const std::multimap<grpc::string_ref, grpc::string_ref>& +  GetServerTrailingMetadata() const { +    // TODO(yangg) check finished +    return *trailing_metadata_.map(); +  } + +  /// Set the deadline for the client call. +  /// +  /// \warning This method should only be called before invoking the rpc. +  /// +  /// \param deadline the deadline for the client call. Units are determined by +  /// the type used. The deadline is an absolute (not relative) time. +  template <typename T> +  void set_deadline(const T& deadline) { +    grpc::TimePoint<T> deadline_tp(deadline); +    deadline_ = deadline_tp.raw_time(); +  } + +  /// EXPERIMENTAL: Indicate that this request is idempotent. +  /// By default, RPCs are assumed to <i>not</i> be idempotent. +  /// +  /// If true, the gRPC library assumes that it's safe to initiate +  /// this RPC multiple times. +  void set_idempotent(bool idempotent) { idempotent_ = idempotent; } + +  /// EXPERIMENTAL: Set this request to be cacheable. +  /// If set, grpc is free to use the HTTP GET verb for sending the request, +  /// with the possibility of receiving a cached response. +  void set_cacheable(bool cacheable) { cacheable_ = cacheable; } + +  /// EXPERIMENTAL: Trigger wait-for-ready or not on this request. +  /// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +  /// If set, if an RPC is made when a channel's connectivity state is +  /// TRANSIENT_FAILURE or CONNECTING, the call will not "fail fast", +  /// and the channel will wait until the channel is READY before making the +  /// call. 
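+  ///
+  /// Illustrative sketch only (the stub, request, and response names are
+  /// placeholders, not part of this header):
+  /// \code
+  ///   grpc::ClientContext ctx;
+  ///   ctx.set_wait_for_ready(true);
+  ///   ctx.set_deadline(std::chrono::system_clock::now() +
+  ///                    std::chrono::seconds(5));
+  ///   grpc::Status status = stub->Echo(&ctx, request, &response);
+  /// \endcode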
+  void set_wait_for_ready(bool wait_for_ready) { +    wait_for_ready_ = wait_for_ready; +    wait_for_ready_explicitly_set_ = true; +  } + +  /// DEPRECATED: Use set_wait_for_ready() instead. +  void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); } + +  /// Return the deadline for the client call. +  std::chrono::system_clock::time_point deadline() const { +    return grpc::Timespec2Timepoint(deadline_); +  } + +  /// Return a \a gpr_timespec representation of the client call's deadline. +  gpr_timespec raw_deadline() const { return deadline_; } + +  /// Set the per call authority header (see +  /// https://tools.ietf.org/html/rfc7540#section-8.1.2.3). +  void set_authority(const TString& authority) { authority_ = authority; } + +  /// Return the authentication context for the associated client call. +  /// It is only valid to call this during the lifetime of the client call. +  /// +  /// \see grpc::AuthContext. +  std::shared_ptr<const grpc::AuthContext> auth_context() const { +    if (auth_context_.get() == nullptr) { +      auth_context_ = grpc::CreateAuthContext(call_); +    } +    return auth_context_; +  } + +  /// Set credentials for the client call. +  /// +  /// A credentials object encapsulates all the state needed by a client to +  /// authenticate with a server and make various assertions, e.g., about the +  /// client’s identity, role, or whether it is authorized to make a particular +  /// call. +  /// +  /// It is legal to call this only before initial metadata is sent. +  /// +  /// \see  https://grpc.io/docs/guides/auth.html +  void set_credentials(const std::shared_ptr<grpc::CallCredentials>& creds); + +  /// EXPERIMENTAL debugging API +  /// +  /// Returns the credentials for the client call. This should be used only in +  /// tests and for diagnostic purposes, and should not be used by application +  /// logic. +  std::shared_ptr<grpc::CallCredentials> credentials() { return creds_; } + +  /// Return the compression algorithm the client call will request be used. +  /// Note that the gRPC runtime may decide to ignore this request, for example, +  /// due to resource constraints. +  grpc_compression_algorithm compression_algorithm() const { +    return compression_algorithm_; +  } + +  /// Set \a algorithm to be the compression algorithm used for the client call. +  /// +  /// \param algorithm The compression algorithm used for the client call. +  void set_compression_algorithm(grpc_compression_algorithm algorithm); + +  /// Flag whether the initial metadata should be \a corked +  /// +  /// If \a corked is true, then the initial metadata will be coalesced with the +  /// write of first message in the stream. As a result, any tag set for the +  /// initial metadata operation (starting a client-streaming or bidi-streaming +  /// RPC) will not actually be sent to the completion queue or delivered +  /// via Next. +  /// +  /// \param corked The flag indicating whether the initial metadata is to be +  /// corked or not. +  void set_initial_metadata_corked(bool corked) { +    initial_metadata_corked_ = corked; +  } + +  /// Return the peer uri in a string. +  /// It is only valid to call this during the lifetime of the client call. +  /// +  /// \warning This value is never authenticated or subject to any security +  /// related code. It must not be used for any authentication related +  /// functionality. Instead, use auth_context. +  /// +  /// \return The call's peer URI. +  TString peer() const; + +  /// Sets the census context. 
+  /// It is only valid to call this before the client call is created. A common +  /// place of setting census context is from within the DefaultConstructor +  /// method of GlobalCallbacks. +  void set_census_context(struct census_context* ccp) { census_context_ = ccp; } + +  /// Returns the census context that has been set, or nullptr if not set. +  struct census_context* census_context() const { +    return census_context_; +  } + +  /// Send a best-effort out-of-band cancel on the call associated with +  /// this client context.  The call could be in any stage; e.g., if it is +  /// already finished, it may still return success. +  /// +  /// There is no guarantee the call will be cancelled. +  /// +  /// Note that TryCancel() does not change any of the tags that are pending +  /// on the completion queue. All pending tags will still be delivered +  /// (though their ok result may reflect the effect of cancellation). +  void TryCancel(); + +  /// Global Callbacks +  /// +  /// Can be set exactly once per application to install hooks whenever +  /// a client context is constructed and destructed. +  class GlobalCallbacks { +   public: +    virtual ~GlobalCallbacks() {} +    virtual void DefaultConstructor(ClientContext* context) = 0; +    virtual void Destructor(ClientContext* context) = 0; +  }; +  static void SetGlobalCallbacks(GlobalCallbacks* callbacks); + +  /// Should be used for framework-level extensions only. +  /// Applications never need to call this method. +  grpc_call* c_call() { return call_; } + +  /// EXPERIMENTAL debugging API +  /// +  /// if status is not ok() for an RPC, this will return a detailed string +  /// of the gRPC Core error that led to the failure. It should not be relied +  /// upon for anything other than gaining more debug data in failure cases. +  TString debug_error_string() const { return debug_error_string_; } + + private: +  // Disallow copy and assign. 
+  ClientContext(const ClientContext&); +  ClientContext& operator=(const ClientContext&); + +  friend class ::grpc::testing::InteropClientContextInspector; +  friend class ::grpc::internal::CallOpClientRecvStatus; +  friend class ::grpc::internal::CallOpRecvInitialMetadata; +  friend class ::grpc::Channel; +  template <class R> +  friend class ::grpc::ClientReader; +  template <class W> +  friend class ::grpc::ClientWriter; +  template <class W, class R> +  friend class ::grpc::ClientReaderWriter; +  template <class R> +  friend class ::grpc::ClientAsyncReader; +  template <class W> +  friend class ::grpc::ClientAsyncWriter; +  template <class W, class R> +  friend class ::grpc::ClientAsyncReaderWriter; +  template <class R> +  friend class ::grpc::ClientAsyncResponseReader; +  template <class InputMessage, class OutputMessage> +  friend class ::grpc::internal::BlockingUnaryCallImpl; +  template <class InputMessage, class OutputMessage> +  friend class ::grpc::internal::CallbackUnaryCallImpl; +  template <class Request, class Response> +  friend class ::grpc::internal::ClientCallbackReaderWriterImpl; +  template <class Response> +  friend class ::grpc::internal::ClientCallbackReaderImpl; +  template <class Request> +  friend class ::grpc::internal::ClientCallbackWriterImpl; +  friend class ::grpc::internal::ClientCallbackUnaryImpl; +  friend class ::grpc::internal::ClientContextAccessor; + +  // Used by friend class CallOpClientRecvStatus +  void set_debug_error_string(const TString& debug_error_string) { +    debug_error_string_ = debug_error_string; +  } + +  grpc_call* call() const { return call_; } +  void set_call(grpc_call* call, +                const std::shared_ptr<::grpc::Channel>& channel); + +  grpc::experimental::ClientRpcInfo* set_client_rpc_info( +      const char* method, grpc::internal::RpcMethod::RpcType type, +      grpc::ChannelInterface* channel, +      const std::vector<std::unique_ptr< +          grpc::experimental::ClientInterceptorFactoryInterface>>& creators, +      size_t interceptor_pos) { +    rpc_info_ = grpc::experimental::ClientRpcInfo(this, type, method, channel); +    rpc_info_.RegisterInterceptors(creators, interceptor_pos); +    return &rpc_info_; +  } + +  uint32_t initial_metadata_flags() const { +    return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) | +           (wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) | +           (cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) | +           (wait_for_ready_explicitly_set_ +                ? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET +                : 0) | +           (initial_metadata_corked_ ? 
GRPC_INITIAL_METADATA_CORKED : 0); +  } + +  TString authority() { return authority_; } + +  void SendCancelToInterceptors(); + +  static std::unique_ptr<ClientContext> FromInternalServerContext( +      const grpc::ServerContextBase& server_context, +      PropagationOptions options); + +  bool initial_metadata_received_; +  bool wait_for_ready_; +  bool wait_for_ready_explicitly_set_; +  bool idempotent_; +  bool cacheable_; +  std::shared_ptr<::grpc::Channel> channel_; +  grpc::internal::Mutex mu_; +  grpc_call* call_; +  bool call_canceled_; +  gpr_timespec deadline_; +  grpc::string authority_; +  std::shared_ptr<grpc::CallCredentials> creds_; +  mutable std::shared_ptr<const grpc::AuthContext> auth_context_; +  struct census_context* census_context_; +  std::multimap<TString, TString> send_initial_metadata_; +  mutable grpc::internal::MetadataMap recv_initial_metadata_; +  mutable grpc::internal::MetadataMap trailing_metadata_; + +  grpc_call* propagate_from_call_; +  PropagationOptions propagation_options_; + +  grpc_compression_algorithm compression_algorithm_; +  bool initial_metadata_corked_; + +  TString debug_error_string_; + +  grpc::experimental::ClientRpcInfo rpc_info_; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CLIENT_CONTEXT_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h new file mode 100644 index 00000000000..78be1f7597e --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_interceptor.h @@ -0,0 +1,190 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_INTERCEPTOR_H +#define GRPCPP_IMPL_CODEGEN_CLIENT_INTERCEPTOR_H + +#include <memory> +#include <vector> + +#include <grpcpp/impl/codegen/interceptor.h> +#include <grpcpp/impl/codegen/rpc_method.h> +#include <grpcpp/impl/codegen/string_ref.h> + +namespace grpc { + +class Channel; +class ClientContext; + +namespace internal { +class InterceptorBatchMethodsImpl; +} + +namespace experimental { +class ClientRpcInfo; + +// A factory interface for creation of client interceptors. A vector of +// factories can be provided at channel creation which will be used to create a +// new vector of client interceptors per RPC. Client interceptor authors should +// create a subclass of ClientInterceptorFactorInterface which creates objects +// of their interceptors. +class ClientInterceptorFactoryInterface { + public: +  virtual ~ClientInterceptorFactoryInterface() {} +  // Returns a pointer to an Interceptor object on successful creation, nullptr +  // otherwise. If nullptr is returned, this server interceptor factory is +  // ignored for the purposes of that RPC. 
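+  //
+  // A minimal factory sketch (illustrative only; LoggingInterceptor is a
+  // placeholder class implementing experimental::Interceptor):
+  //
+  //   class LoggingInterceptorFactory
+  //       : public grpc::experimental::ClientInterceptorFactoryInterface {
+  //     grpc::experimental::Interceptor* CreateClientInterceptor(
+  //         grpc::experimental::ClientRpcInfo* info) override {
+  //       return new LoggingInterceptor(info);  // ownership passes to gRPC
+  //     }
+  //   };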
+  virtual Interceptor* CreateClientInterceptor(ClientRpcInfo* info) = 0; +}; +}  // namespace experimental + +namespace internal { +extern experimental::ClientInterceptorFactoryInterface* +    g_global_client_interceptor_factory; +} + +/// ClientRpcInfo represents the state of a particular RPC as it +/// appears to an interceptor. It is created and owned by the library and +/// passed to the CreateClientInterceptor method of the application's +/// ClientInterceptorFactoryInterface implementation +namespace experimental { +class ClientRpcInfo { + public: +  // TODO(yashykt): Stop default-constructing ClientRpcInfo and remove UNKNOWN +  //                from the list of possible Types. +  /// Type categorizes RPCs by unary or streaming type +  enum class Type { +    UNARY, +    CLIENT_STREAMING, +    SERVER_STREAMING, +    BIDI_STREAMING, +    UNKNOWN  // UNKNOWN is not API and will be removed later +  }; + +  ~ClientRpcInfo() {} + +  // Delete copy constructor but allow default move constructor +  ClientRpcInfo(const ClientRpcInfo&) = delete; +  ClientRpcInfo(ClientRpcInfo&&) = default; + +  // Getter methods + +  /// Return the fully-specified method name +  const char* method() const { return method_; } + +  /// Return a pointer to the channel on which the RPC is being sent +  ChannelInterface* channel() { return channel_; } + +  /// Return a pointer to the underlying ClientContext structure associated +  /// with the RPC to support features that apply to it +  grpc::ClientContext* client_context() { return ctx_; } + +  /// Return the type of the RPC (unary or a streaming flavor) +  Type type() const { return type_; } + + private: +  static_assert(Type::UNARY == +                    static_cast<Type>(internal::RpcMethod::NORMAL_RPC), +                "violated expectation about Type enum"); +  static_assert(Type::CLIENT_STREAMING == +                    static_cast<Type>(internal::RpcMethod::CLIENT_STREAMING), +                "violated expectation about Type enum"); +  static_assert(Type::SERVER_STREAMING == +                    static_cast<Type>(internal::RpcMethod::SERVER_STREAMING), +                "violated expectation about Type enum"); +  static_assert(Type::BIDI_STREAMING == +                    static_cast<Type>(internal::RpcMethod::BIDI_STREAMING), +                "violated expectation about Type enum"); + +  // Default constructor should only be used by ClientContext +  ClientRpcInfo() = default; + +  // Constructor will only be called from ClientContext +  ClientRpcInfo(grpc::ClientContext* ctx, internal::RpcMethod::RpcType type, +                const char* method, grpc::ChannelInterface* channel) +      : ctx_(ctx), +        type_(static_cast<Type>(type)), +        method_(method), +        channel_(channel) {} + +  // Move assignment should only be used by ClientContext +  // TODO(yashykt): Delete move assignment +  ClientRpcInfo& operator=(ClientRpcInfo&&) = default; + +  // Runs interceptor at pos \a pos. 
+  void RunInterceptor( +      experimental::InterceptorBatchMethods* interceptor_methods, size_t pos) { +    GPR_CODEGEN_ASSERT(pos < interceptors_.size()); +    interceptors_[pos]->Intercept(interceptor_methods); +  } + +  void RegisterInterceptors( +      const std::vector<std::unique_ptr< +          experimental::ClientInterceptorFactoryInterface>>& creators, +      size_t interceptor_pos) { +    if (interceptor_pos > creators.size()) { +      // No interceptors to register +      return; +    } +    // NOTE: The following is not a range-based for loop because it will only +    //       iterate over a portion of the creators vector. +    for (auto it = creators.begin() + interceptor_pos; it != creators.end(); +         ++it) { +      auto* interceptor = (*it)->CreateClientInterceptor(this); +      if (interceptor != nullptr) { +        interceptors_.push_back( +            std::unique_ptr<experimental::Interceptor>(interceptor)); +      } +    } +    if (internal::g_global_client_interceptor_factory != nullptr) { +      interceptors_.push_back(std::unique_ptr<experimental::Interceptor>( +          internal::g_global_client_interceptor_factory +              ->CreateClientInterceptor(this))); +    } +  } + +  grpc::ClientContext* ctx_ = nullptr; +  // TODO(yashykt): make type_ const once move-assignment is deleted +  Type type_{Type::UNKNOWN}; +  const char* method_ = nullptr; +  grpc::ChannelInterface* channel_ = nullptr; +  std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_; +  bool hijacked_ = false; +  size_t hijacked_interceptor_ = 0; + +  friend class internal::InterceptorBatchMethodsImpl; +  friend class grpc::ClientContext; +}; + +// PLEASE DO NOT USE THIS. ALWAYS PREFER PER CHANNEL INTERCEPTORS OVER A GLOBAL +// INTERCEPTOR. IF USAGE IS ABSOLUTELY NECESSARY, PLEASE READ THE SAFETY NOTES. +// Registers a global client interceptor factory object, which is used for all +// RPCs made in this process. The application is responsible for maintaining the +// life of the object while gRPC operations are in progress. The global +// interceptor factory should only be registered once at the start of the +// process before any gRPC operations have begun. +void RegisterGlobalClientInterceptorFactory( +    ClientInterceptorFactoryInterface* factory); + +// For testing purposes only +void TestOnlyResetGlobalClientInterceptorFactory(); + +}  // namespace experimental +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CLIENT_INTERCEPTOR_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h new file mode 100644 index 00000000000..098bb50ee2c --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/client_unary_call.h @@ -0,0 +1,91 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CLIENT_UNARY_CALL_H +#define GRPCPP_IMPL_CODEGEN_CLIENT_UNARY_CALL_H + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +class ClientContext; +namespace internal { +class RpcMethod; +/// Wrapper that performs a blocking unary call +template <class InputMessage, class OutputMessage> +Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method, +                         grpc::ClientContext* context, +                         const InputMessage& request, OutputMessage* result) { +  return BlockingUnaryCallImpl<InputMessage, OutputMessage>( +             channel, method, context, request, result) +      .status(); +} + +template <class InputMessage, class OutputMessage> +class BlockingUnaryCallImpl { + public: +  BlockingUnaryCallImpl(ChannelInterface* channel, const RpcMethod& method, +                        grpc::ClientContext* context, +                        const InputMessage& request, OutputMessage* result) { +    ::grpc::CompletionQueue cq(grpc_completion_queue_attributes{ +        GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, +        nullptr});  // Pluckable completion queue +    ::grpc::internal::Call call(channel->CreateCall(method, context, &cq)); +    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, +              CallOpRecvInitialMetadata, CallOpRecvMessage<OutputMessage>, +              CallOpClientSendClose, CallOpClientRecvStatus> +        ops; +    status_ = ops.SendMessagePtr(&request); +    if (!status_.ok()) { +      return; +    } +    ops.SendInitialMetadata(&context->send_initial_metadata_, +                            context->initial_metadata_flags()); +    ops.RecvInitialMetadata(context); +    ops.RecvMessage(result); +    ops.AllowNoMessage(); +    ops.ClientSendClose(); +    ops.ClientRecvStatus(context, &status_); +    call.PerformOps(&ops); +    cq.Pluck(&ops); +    // Some of the ops might fail. If the ops fail in the core layer, status +    // would reflect the error. But, if the ops fail in the C++ layer, the +    // status would still be the same as the one returned by gRPC Core. This can +    // happen if deserialization of the message fails. +    // TODO(yashykt): If deserialization fails, but the status received is OK, +    // then it might be a good idea to change the status to something better +    // than StatusCode::UNIMPLEMENTED to reflect this. +    if (!ops.got_message && status_.ok()) { +      status_ = Status(StatusCode::UNIMPLEMENTED, +                       "No message returned for unary request"); +    } +  } +  Status status() { return status_; } + + private: +  Status status_; +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CLIENT_UNARY_CALL_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h new file mode 100644 index 00000000000..ca0c77276a8 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue.h @@ -0,0 +1,448 @@ +/* + * + * Copyright 2015-2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/// A completion queue implements a concurrent producer-consumer queue, with +/// two main API-exposed methods: \a Next and \a AsyncNext. These +/// methods are the essential component of the gRPC C++ asynchronous API. +/// There is also a \a Shutdown method to indicate that a given completion queue +/// will no longer have regular events. This must be called before the +/// completion queue is destroyed. +/// All completion queue APIs are thread-safe and may be used concurrently with +/// any other completion queue API invocation; it is acceptable to have +/// multiple threads calling \a Next or \a AsyncNext on the same or different +/// completion queues, or to call these methods concurrently with a \a Shutdown +/// elsewhere. +/// \remark{All other API calls on completion queue should be completed before +/// a completion queue destructor is called.} +#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H +#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H + +#include <list> + +#include <grpc/impl/codegen/atm.h> +#include <grpcpp/impl/codegen/completion_queue_tag.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/grpc_library.h> +#include <grpcpp/impl/codegen/status.h> +#include <grpcpp/impl/codegen/sync.h> +#include <grpcpp/impl/codegen/time.h> + +struct grpc_completion_queue; + +namespace grpc { +template <class R> +class ClientReader; +template <class W> +class ClientWriter; +template <class W, class R> +class ClientReaderWriter; +template <class R> +class ServerReader; +template <class W> +class ServerWriter; +namespace internal { +template <class W, class R> +class ServerReaderWriterBody; + +template <class ServiceType, class RequestType, class ResponseType> +class RpcMethodHandler; +template <class ServiceType, class RequestType, class ResponseType> +class ClientStreamingHandler; +template <class ServiceType, class RequestType, class ResponseType> +class ServerStreamingHandler; +template <class Streamer, bool WriteNeeded> +class TemplatedBidiStreamingHandler; +template <::grpc::StatusCode code> +class ErrorMethodHandler; +}  // namespace internal + +class Channel; +class ChannelInterface; +class Server; +class ServerBuilder; +class ServerContextBase; +class ServerInterface; + +namespace internal { +class CompletionQueueTag; +class RpcMethod; +template <class InputMessage, class OutputMessage> +class BlockingUnaryCallImpl; +template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6> +class CallOpSet; +}  // namespace internal + +extern CoreCodegenInterface* g_core_codegen_interface; + +/// A thin wrapper around \ref grpc_completion_queue (see \ref +/// src/core/lib/surface/completion_queue.h). +/// See \ref doc/cpp/perf_notes.md for notes on best practices for high +/// performance servers. +class CompletionQueue : private ::grpc::GrpcLibraryCodegen { + public: +  /// Default constructor. Implicitly creates a \a grpc_completion_queue +  /// instance. 
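+  ///
+  /// For orientation only, a sketch of reading with a deadline via AsyncNext
+  /// (not part of the original header; the deadline value is a placeholder):
+  /// \code
+  ///   void* tag;
+  ///   bool ok;
+  ///   switch (cq.AsyncNext(&tag, &ok,
+  ///                        std::chrono::system_clock::now() +
+  ///                            std::chrono::milliseconds(100))) {
+  ///     case grpc::CompletionQueue::GOT_EVENT: /* handle tag, check ok */ break;
+  ///     case grpc::CompletionQueue::TIMEOUT:   /* deadline expired */ break;
+  ///     case grpc::CompletionQueue::SHUTDOWN:  /* fully drained */ break;
+  ///   }
+  /// \endcode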
+  CompletionQueue() +      : CompletionQueue(grpc_completion_queue_attributes{ +            GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING, +            nullptr}) {} + +  /// Wrap \a take, taking ownership of the instance. +  /// +  /// \param take The completion queue instance to wrap. Ownership is taken. +  explicit CompletionQueue(grpc_completion_queue* take); + +  /// Destructor. Destroys the owned wrapped completion queue / instance. +  ~CompletionQueue() { +    ::grpc::g_core_codegen_interface->grpc_completion_queue_destroy(cq_); +  } + +  /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT. +  enum NextStatus { +    SHUTDOWN,   ///< The completion queue has been shutdown and fully-drained +    GOT_EVENT,  ///< Got a new event; \a tag will be filled in with its +                ///< associated value; \a ok indicating its success. +    TIMEOUT     ///< deadline was reached. +  }; + +  /// Read from the queue, blocking until an event is available or the queue is +  /// shutting down. +  /// +  /// \param tag [out] Updated to point to the read event's tag. +  /// \param ok [out] true if read a successful event, false otherwise. +  /// +  /// Note that each tag sent to the completion queue (through RPC operations +  /// or alarms) will be delivered out of the completion queue by a call to +  /// Next (or a related method), regardless of whether the operation succeeded +  /// or not. Success here means that this operation completed in the normal +  /// valid manner. +  /// +  /// Server-side RPC request: \a ok indicates that the RPC has indeed +  /// been started. If it is false, the server has been Shutdown +  /// before this particular call got matched to an incoming RPC. +  /// +  /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is +  /// going to go to the wire. If it is false, it not going to the wire. This +  /// would happen if the channel is either permanently broken or +  /// transiently broken but with the fail-fast option. (Note that async unary +  /// RPCs don't post a CQ tag at this point, nor do client-streaming +  /// or bidi-streaming RPCs that have the initial metadata corked option set.) +  /// +  /// Client-side Write, Client-side WritesDone, Server-side Write, +  /// Server-side Finish, Server-side SendInitialMetadata (which is +  /// typically included in Write or Finish when not done explicitly): +  /// \a ok means that the data/metadata/status/etc is going to go to the +  /// wire. If it is false, it not going to the wire because the call +  /// is already dead (i.e., canceled, deadline expired, other side +  /// dropped the channel, etc). +  /// +  /// Client-side Read, Server-side Read, Client-side +  /// RecvInitialMetadata (which is typically included in Read if not +  /// done explicitly): \a ok indicates whether there is a valid message +  /// that got read. If not, you know that there are certainly no more +  /// messages that can ever be read from this stream. For the client-side +  /// operations, this only happens because the call is dead. For the +  /// server-sider operation, though, this could happen because the client +  /// has done a WritesDone already. +  /// +  /// Client-side Finish: \a ok should always be true +  /// +  /// Server-side AsyncNotifyWhenDone: \a ok should always be true +  /// +  /// Alarm: \a ok is true if it expired, false if it was canceled +  /// +  /// \return true if got an event, false if the queue is fully drained and +  ///         shut down. 
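+  ///
+  /// A typical dispatch loop, shown only as an illustrative sketch
+  /// (MyCallData::Proceed is an application-defined placeholder):
+  /// \code
+  ///   void* tag;
+  ///   bool ok;
+  ///   while (cq.Next(&tag, &ok)) {
+  ///     static_cast<MyCallData*>(tag)->Proceed(ok);
+  ///   }
+  ///   // false return: Shutdown() was called and the queue is fully drained.
+  /// \endcode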
+  bool Next(void** tag, bool* ok) { +    return (AsyncNextInternal(tag, ok, +                              ::grpc::g_core_codegen_interface->gpr_inf_future( +                                  GPR_CLOCK_REALTIME)) != SHUTDOWN); +  } + +  /// Read from the queue, blocking up to \a deadline (or the queue's shutdown). +  /// Both \a tag and \a ok are updated upon success (if an event is available +  /// within the \a deadline).  A \a tag points to an arbitrary location usually +  /// employed to uniquely identify an event. +  /// +  /// \param tag [out] Upon success, updated to point to the event's tag. +  /// \param ok [out] Upon success, true if a successful event, false otherwise +  ///        See documentation for CompletionQueue::Next for explanation of ok +  /// \param deadline [in] How long to block in wait for an event. +  /// +  /// \return The type of event read. +  template <typename T> +  NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) { +    ::grpc::TimePoint<T> deadline_tp(deadline); +    return AsyncNextInternal(tag, ok, deadline_tp.raw_time()); +  } + +  /// EXPERIMENTAL +  /// First executes \a F, then reads from the queue, blocking up to +  /// \a deadline (or the queue's shutdown). +  /// Both \a tag and \a ok are updated upon success (if an event is available +  /// within the \a deadline).  A \a tag points to an arbitrary location usually +  /// employed to uniquely identify an event. +  /// +  /// \param f [in] Function to execute before calling AsyncNext on this queue. +  /// \param tag [out] Upon success, updated to point to the event's tag. +  /// \param ok [out] Upon success, true if read a regular event, false +  /// otherwise. +  /// \param deadline [in] How long to block in wait for an event. +  /// +  /// \return The type of event read. +  template <typename T, typename F> +  NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) { +    CompletionQueueTLSCache cache = CompletionQueueTLSCache(this); +    f(); +    if (cache.Flush(tag, ok)) { +      return GOT_EVENT; +    } else { +      return AsyncNext(tag, ok, deadline); +    } +  } + +  /// Request the shutdown of the queue. +  /// +  /// \warning This method must be called at some point if this completion queue +  /// is accessed with Next or AsyncNext. \a Next will not return false +  /// until this method has been called and all pending tags have been drained. +  /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .) +  /// Only once either one of these methods does that (that is, once the queue +  /// has been \em drained) can an instance of this class be destroyed. +  /// Also note that applications must ensure that no work is enqueued on this +  /// completion queue after this method is called. +  void Shutdown(); + +  /// Returns a \em raw pointer to the underlying \a grpc_completion_queue +  /// instance. +  /// +  /// \warning Remember that the returned instance is owned. No transfer of +  /// owership is performed. 
+  grpc_completion_queue* cq() { return cq_; } + + protected: +  /// Private constructor of CompletionQueue only visible to friend classes +  CompletionQueue(const grpc_completion_queue_attributes& attributes) { +    cq_ = ::grpc::g_core_codegen_interface->grpc_completion_queue_create( +        ::grpc::g_core_codegen_interface->grpc_completion_queue_factory_lookup( +            &attributes), +        &attributes, NULL); +    InitialAvalanching();  // reserve this for the future shutdown +  } + + private: +  // Friends for access to server registration lists that enable checking and +  // logging on shutdown +  friend class ::grpc::ServerBuilder; +  friend class ::grpc::Server; + +  // Friend synchronous wrappers so that they can access Pluck(), which is +  // a semi-private API geared towards the synchronous implementation. +  template <class R> +  friend class ::grpc::ClientReader; +  template <class W> +  friend class ::grpc::ClientWriter; +  template <class W, class R> +  friend class ::grpc::ClientReaderWriter; +  template <class R> +  friend class ::grpc::ServerReader; +  template <class W> +  friend class ::grpc::ServerWriter; +  template <class W, class R> +  friend class ::grpc::internal::ServerReaderWriterBody; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::RpcMethodHandler; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::ClientStreamingHandler; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::ServerStreamingHandler; +  template <class Streamer, bool WriteNeeded> +  friend class ::grpc::internal::TemplatedBidiStreamingHandler; +  template <::grpc::StatusCode code> +  friend class ::grpc::internal::ErrorMethodHandler; +  friend class ::grpc::ServerContextBase; +  friend class ::grpc::ServerInterface; +  template <class InputMessage, class OutputMessage> +  friend class ::grpc::internal::BlockingUnaryCallImpl; + +  // Friends that need access to constructor for callback CQ +  friend class ::grpc::Channel; + +  // For access to Register/CompleteAvalanching +  template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6> +  friend class ::grpc::internal::CallOpSet; + +  /// EXPERIMENTAL +  /// Creates a Thread Local cache to store the first event +  /// On this completion queue queued from this thread.  Once +  /// initialized, it must be flushed on the same thread. +  class CompletionQueueTLSCache { +   public: +    CompletionQueueTLSCache(CompletionQueue* cq); +    ~CompletionQueueTLSCache(); +    bool Flush(void** tag, bool* ok); + +   private: +    CompletionQueue* cq_; +    bool flushed_; +  }; + +  NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline); + +  /// Wraps \a grpc_completion_queue_pluck. +  /// \warning Must not be mixed with calls to \a Next. +  bool Pluck(::grpc::internal::CompletionQueueTag* tag) { +    auto deadline = +        ::grpc::g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME); +    while (true) { +      auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck( +          cq_, tag, deadline, nullptr); +      bool ok = ev.success != 0; +      void* ignored = tag; +      if (tag->FinalizeResult(&ignored, &ok)) { +        GPR_CODEGEN_ASSERT(ignored == tag); +        return ok; +      } +    } +  } + +  /// Performs a single polling pluck on \a tag. +  /// \warning Must not be mixed with calls to \a Next. 
+  /// +  /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already +  /// shutdown. This is most likely a bug and if it is a bug, then change this +  /// implementation to simple call the other TryPluck function with a zero +  /// timeout. i.e: +  ///      TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME)) +  void TryPluck(::grpc::internal::CompletionQueueTag* tag) { +    auto deadline = +        ::grpc::g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME); +    auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck( +        cq_, tag, deadline, nullptr); +    if (ev.type == GRPC_QUEUE_TIMEOUT) return; +    bool ok = ev.success != 0; +    void* ignored = tag; +    // the tag must be swallowed if using TryPluck +    GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok)); +  } + +  /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if +  /// the pluck() was successful and returned the tag. +  /// +  /// This exects tag->FinalizeResult (if called) to return 'false' i.e expects +  /// that the tag is internal not something that is returned to the user. +  void TryPluck(::grpc::internal::CompletionQueueTag* tag, +                gpr_timespec deadline) { +    auto ev = ::grpc::g_core_codegen_interface->grpc_completion_queue_pluck( +        cq_, tag, deadline, nullptr); +    if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) { +      return; +    } + +    bool ok = ev.success != 0; +    void* ignored = tag; +    GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok)); +  } + +  /// Manage state of avalanching operations : completion queue tags that +  /// trigger other completion queue operations. The underlying core completion +  /// queue should not really shutdown until all avalanching operations have +  /// been finalized. Note that we maintain the requirement that an avalanche +  /// registration must take place before CQ shutdown (which must be maintained +  /// elsehwere) +  void InitialAvalanching() { +    gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1)); +  } +  void RegisterAvalanching() { +    gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_, +                                 static_cast<gpr_atm>(1)); +  } +  void CompleteAvalanching() { +    if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_, +                                     static_cast<gpr_atm>(-1)) == 1) { +      ::grpc::g_core_codegen_interface->grpc_completion_queue_shutdown(cq_); +    } +  } + +  void RegisterServer(const ::grpc::Server* server) { +    (void)server; +#ifndef NDEBUG +    grpc::internal::MutexLock l(&server_list_mutex_); +    server_list_.push_back(server); +#endif +  } +  void UnregisterServer(const ::grpc::Server* server) { +    (void)server; +#ifndef NDEBUG +    grpc::internal::MutexLock l(&server_list_mutex_); +    server_list_.remove(server); +#endif +  } +  bool ServerListEmpty() const { +#ifndef NDEBUG +    grpc::internal::MutexLock l(&server_list_mutex_); +    return server_list_.empty(); +#endif +    return true; +  } + +  grpc_completion_queue* cq_;  // owned + +  gpr_atm avalanches_in_flight_; + +  // List of servers associated with this CQ. Even though this is only used with +  // NDEBUG, instantiate it in all cases since otherwise the size will be +  // inconsistent. 
+  mutable grpc::internal::Mutex server_list_mutex_; +  std::list<const ::grpc::Server*> +      server_list_ /* GUARDED_BY(server_list_mutex_) */; +}; + +/// A specific type of completion queue used by the processing of notifications +/// by servers. Instantiated by \a ServerBuilder or Server (for health checker). +class ServerCompletionQueue : public CompletionQueue { + public: +  bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; } + + protected: +  /// Default constructor +  ServerCompletionQueue() : polling_type_(GRPC_CQ_DEFAULT_POLLING) {} + + private: +  /// \param completion_type indicates whether this is a NEXT or CALLBACK +  /// completion queue. +  /// \param polling_type Informs the GRPC library about the type of polling +  /// allowed on this completion queue. See grpc_cq_polling_type's description +  /// in grpc_types.h for more details. +  /// \param shutdown_cb is the shutdown callback used for CALLBACK api queues +  ServerCompletionQueue(grpc_cq_completion_type completion_type, +                        grpc_cq_polling_type polling_type, +                        grpc_experimental_completion_queue_functor* shutdown_cb) +      : CompletionQueue(grpc_completion_queue_attributes{ +            GRPC_CQ_CURRENT_VERSION, completion_type, polling_type, +            shutdown_cb}), +        polling_type_(polling_type) {} + +  grpc_cq_polling_type polling_type_; +  friend class ::grpc::ServerBuilder; +  friend class ::grpc::Server; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue_tag.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue_tag.h new file mode 100644 index 00000000000..304386a9ecc --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/completion_queue_tag.h @@ -0,0 +1,54 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_TAG_H +#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_TAG_H + +namespace grpc { + +namespace internal { +/// An interface allowing implementors to process and filter event tags. +class CompletionQueueTag { + public: +  virtual ~CompletionQueueTag() {} + +  /// FinalizeResult must be called before informing user code that the +  /// operation bound to the underlying core completion queue tag has +  /// completed. In practice, this means: +  /// +  ///   1. For the sync API - before returning from Pluck +  ///   2. For the CQ-based async API - before returning from Next +  ///   3. For the callback-based API - before invoking the user callback +  /// +  /// This is the method that translates from core-side tag/status to +  /// C++ API-observable tag/status. +  /// +  /// The return value is the status of the operation (returning status is the +  /// general behavior of this function). 
If this function returns false, the
+  /// tag is dropped and not returned from the completion queue: this concept is
+  /// for events that are observed at core but not requested by the user
+  /// application (e.g., server shutdown, server responses to unimplemented
+  /// methods, or cases where a server-side RPC doesn't have a completion
+  /// notification registered using AsyncNotifyWhenDone).
+  virtual bool FinalizeResult(void** tag, bool* status) = 0;
+};
+}  // namespace internal
+
+}  // namespace grpc
+
+#endif  // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_TAG_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
new file mode 100644
index 00000000000..87f9914273d
--- /dev/null
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/config.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCPP_IMPL_CODEGEN_CONFIG_H
+#define GRPCPP_IMPL_CODEGEN_CONFIG_H
+
+#include <util/generic/string.h>
+
+/// The following macros are deprecated and appear only for users
+/// with PB files generated using gRPC 1.0.x plugins. They should
+/// not be used in new code.
+#define GRPC_OVERRIDE override  // deprecated
+#define GRPC_FINAL final        // deprecated
+
+#ifdef GRPC_CUSTOM_STRING
+#warning GRPC_CUSTOM_STRING is no longer supported. Please use TString.
+#endif
+
+namespace grpc {
+
+// Using grpc::string and grpc::to_string is discouraged in favor of
+// TString and ToString. This is only for legacy code using
+// them explicitly.
+typedef TString string;     // deprecated
+
+}  // namespace grpc
+
+#endif  // GRPCPP_IMPL_CODEGEN_CONFIG_H
diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/config_protobuf.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/config_protobuf.h
new file mode 100644
index 00000000000..c4012fb00c9
--- /dev/null
+++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/config_protobuf.h
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CONFIG_PROTOBUF_H +#define GRPCPP_IMPL_CODEGEN_CONFIG_PROTOBUF_H + +#define GRPC_OPEN_SOURCE_PROTO + +#ifndef GRPC_CUSTOM_MESSAGE +#ifdef GRPC_USE_PROTO_LITE +#include <google/protobuf/message_lite.h> +#define GRPC_CUSTOM_MESSAGE ::google::protobuf::MessageLite +#define GRPC_CUSTOM_MESSAGELITE ::google::protobuf::MessageLite +#else +#include <google/protobuf/message.h> +#define GRPC_CUSTOM_MESSAGE ::google::protobuf::Message +#define GRPC_CUSTOM_MESSAGELITE ::google::protobuf::MessageLite +#endif +#endif + +#ifndef GRPC_CUSTOM_DESCRIPTOR +#include <google/protobuf/descriptor.h> +#include <google/protobuf/descriptor.pb.h> +#define GRPC_CUSTOM_DESCRIPTOR ::google::protobuf::Descriptor +#define GRPC_CUSTOM_DESCRIPTORPOOL ::google::protobuf::DescriptorPool +#define GRPC_CUSTOM_FIELDDESCRIPTOR ::google::protobuf::FieldDescriptor +#define GRPC_CUSTOM_FILEDESCRIPTOR ::google::protobuf::FileDescriptor +#define GRPC_CUSTOM_FILEDESCRIPTORPROTO ::google::protobuf::FileDescriptorProto +#define GRPC_CUSTOM_METHODDESCRIPTOR ::google::protobuf::MethodDescriptor +#define GRPC_CUSTOM_SERVICEDESCRIPTOR ::google::protobuf::ServiceDescriptor +#define GRPC_CUSTOM_SOURCELOCATION ::google::protobuf::SourceLocation +#endif + +#ifndef GRPC_CUSTOM_DESCRIPTORDATABASE +#include <google/protobuf/descriptor_database.h> +#define GRPC_CUSTOM_DESCRIPTORDATABASE ::google::protobuf::DescriptorDatabase +#define GRPC_CUSTOM_SIMPLEDESCRIPTORDATABASE \ +  ::google::protobuf::SimpleDescriptorDatabase +#endif + +#ifndef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM +#include <google/protobuf/io/coded_stream.h> +#include <google/protobuf/io/zero_copy_stream.h> +#define GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM \ +  ::google::protobuf::io::ZeroCopyOutputStream +#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \ +  ::google::protobuf::io::ZeroCopyInputStream +#define GRPC_CUSTOM_CODEDINPUTSTREAM ::google::protobuf::io::CodedInputStream +#endif + +#ifndef GRPC_CUSTOM_JSONUTIL +#include <google/protobuf/util/json_util.h> +#define GRPC_CUSTOM_JSONUTIL ::google::protobuf::util +#define GRPC_CUSTOM_UTIL_STATUS ::google::protobuf::util::Status +#endif + +namespace grpc { +namespace protobuf { + +typedef GRPC_CUSTOM_MESSAGE Message; +typedef GRPC_CUSTOM_MESSAGELITE MessageLite; + +typedef GRPC_CUSTOM_DESCRIPTOR Descriptor; +typedef GRPC_CUSTOM_DESCRIPTORPOOL DescriptorPool; +typedef GRPC_CUSTOM_DESCRIPTORDATABASE DescriptorDatabase; +typedef GRPC_CUSTOM_FIELDDESCRIPTOR FieldDescriptor; +typedef GRPC_CUSTOM_FILEDESCRIPTOR FileDescriptor; +typedef GRPC_CUSTOM_FILEDESCRIPTORPROTO FileDescriptorProto; +typedef GRPC_CUSTOM_METHODDESCRIPTOR MethodDescriptor; +typedef GRPC_CUSTOM_SERVICEDESCRIPTOR ServiceDescriptor; +typedef GRPC_CUSTOM_SIMPLEDESCRIPTORDATABASE SimpleDescriptorDatabase; +typedef GRPC_CUSTOM_SOURCELOCATION SourceLocation; + +namespace util { +typedef GRPC_CUSTOM_UTIL_STATUS Status; +}  // namespace util + +namespace json = GRPC_CUSTOM_JSONUTIL; + +namespace io { +typedef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM ZeroCopyOutputStream; +typedef GRPC_CUSTOM_ZEROCOPYINPUTSTREAM ZeroCopyInputStream; +typedef GRPC_CUSTOM_CODEDINPUTSTREAM CodedInputStream; +}  // namespace io + +}  // namespace protobuf +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CONFIG_PROTOBUF_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen.h new file mode 100644 index 00000000000..50c8da4ffe7 --- /dev/null +++ 
b/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen.h @@ -0,0 +1,127 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_H +#define GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_H + +// This file should be compiled as part of grpcpp. + +#include <grpc/byte_buffer.h> +#include <grpc/grpc.h> +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> + +namespace grpc { + +/// Implementation of the core codegen interface. +class CoreCodegen final : public CoreCodegenInterface { + private: +  virtual const grpc_completion_queue_factory* +  grpc_completion_queue_factory_lookup( +      const grpc_completion_queue_attributes* attributes) override; +  virtual grpc_completion_queue* grpc_completion_queue_create( +      const grpc_completion_queue_factory* factory, +      const grpc_completion_queue_attributes* attributes, +      void* reserved) override; +  grpc_completion_queue* grpc_completion_queue_create_for_next( +      void* reserved) override; +  grpc_completion_queue* grpc_completion_queue_create_for_pluck( +      void* reserved) override; +  void grpc_completion_queue_shutdown(grpc_completion_queue* cq) override; +  void grpc_completion_queue_destroy(grpc_completion_queue* cq) override; +  grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag, +                                         gpr_timespec deadline, +                                         void* reserved) override; + +  void* gpr_malloc(size_t size) override; +  void gpr_free(void* p) override; + +  void grpc_init() override; +  void grpc_shutdown() override; + +  void gpr_mu_init(gpr_mu* mu) override; +  void gpr_mu_destroy(gpr_mu* mu) override; +  void gpr_mu_lock(gpr_mu* mu) override; +  void gpr_mu_unlock(gpr_mu* mu) override; +  void gpr_cv_init(gpr_cv* cv) override; +  void gpr_cv_destroy(gpr_cv* cv) override; +  int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) override; +  void gpr_cv_signal(gpr_cv* cv) override; +  void gpr_cv_broadcast(gpr_cv* cv) override; + +  grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops, +                                        size_t nops, void* tag, +                                        void* reserved) override; +  grpc_call_error grpc_call_cancel_with_status(grpc_call* call, +                                               grpc_status_code status, +                                               const char* description, +                                               void* reserved) override; +  void grpc_call_ref(grpc_call* call) override; +  void grpc_call_unref(grpc_call* call) override; +  void* grpc_call_arena_alloc(grpc_call* call, size_t length) override; +  const char* grpc_call_error_to_string(grpc_call_error error) override; + +  grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) override; +  void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override; +  size_t 
grpc_byte_buffer_length(grpc_byte_buffer* bb) override; + +  int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, +                                   grpc_byte_buffer* buffer) override; +  void grpc_byte_buffer_reader_destroy( +      grpc_byte_buffer_reader* reader) override; +  int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, +                                   grpc_slice* slice) override; +  int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader, +                                   grpc_slice** slice) override; + +  grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice, +                                                size_t nslices) override; +  grpc_slice grpc_slice_new_with_user_data(void* p, size_t len, +                                           void (*destroy)(void*), +                                           void* user_data) override; +  grpc_slice grpc_slice_new_with_len(void* p, size_t len, +                                     void (*destroy)(void*, size_t)) override; +  grpc_slice grpc_empty_slice() override; +  grpc_slice grpc_slice_malloc(size_t length) override; +  void grpc_slice_unref(grpc_slice slice) override; +  grpc_slice grpc_slice_ref(grpc_slice slice) override; +  grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) override; +  grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) override; +  grpc_slice grpc_slice_sub(grpc_slice s, size_t begin, size_t end) override; +  void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice slice) override; +  void grpc_slice_buffer_pop(grpc_slice_buffer* sb) override; +  grpc_slice grpc_slice_from_static_buffer(const void* buffer, +                                           size_t length) override; +  grpc_slice grpc_slice_from_copied_buffer(const void* buffer, +                                           size_t length) override; +  void grpc_metadata_array_init(grpc_metadata_array* array) override; +  void grpc_metadata_array_destroy(grpc_metadata_array* array) override; + +  gpr_timespec gpr_inf_future(gpr_clock_type type) override; +  gpr_timespec gpr_time_0(gpr_clock_type type) override; + +  virtual const Status& ok() override; +  virtual const Status& cancelled() override; + +  void assert_fail(const char* failed_assertion, const char* file, +                   int line) override; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen_interface.h new file mode 100644 index 00000000000..c08cf6c683d --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/core_codegen_interface.h @@ -0,0 +1,164 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_INTERFACE_H +#define GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_INTERFACE_H + +#include <grpc/impl/codegen/byte_buffer.h> +#include <grpc/impl/codegen/byte_buffer_reader.h> +#include <grpc/impl/codegen/grpc_types.h> +#include <grpc/impl/codegen/sync.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +/// Interface between the codegen library and the minimal subset of core +/// features required by the generated code. +/// +/// All undocumented methods are simply forwarding the call to their namesakes. +/// Please refer to their corresponding documentation for details. +/// +/// \warning This interface should be considered internal and private. +class CoreCodegenInterface { + public: +  virtual ~CoreCodegenInterface() = default; + +  /// Upon a failed assertion, log the error. +  virtual void assert_fail(const char* failed_assertion, const char* file, +                           int line) = 0; + +  virtual const grpc_completion_queue_factory* +  grpc_completion_queue_factory_lookup( +      const grpc_completion_queue_attributes* attributes) = 0; +  virtual grpc_completion_queue* grpc_completion_queue_create( +      const grpc_completion_queue_factory* factory, +      const grpc_completion_queue_attributes* attributes, void* reserved) = 0; +  virtual grpc_completion_queue* grpc_completion_queue_create_for_next( +      void* reserved) = 0; +  virtual grpc_completion_queue* grpc_completion_queue_create_for_pluck( +      void* reserved) = 0; +  virtual void grpc_completion_queue_shutdown(grpc_completion_queue* cq) = 0; +  virtual void grpc_completion_queue_destroy(grpc_completion_queue* cq) = 0; +  virtual grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, +                                                 void* tag, +                                                 gpr_timespec deadline, +                                                 void* reserved) = 0; + +  virtual void* gpr_malloc(size_t size) = 0; +  virtual void gpr_free(void* p) = 0; + +  // These are only to be used to fix edge cases involving grpc_init and +  // grpc_shutdown. Calling grpc_init from the codegen interface before +  // the real grpc_init is called will cause a crash, so if you use this +  // function, ensure that it is not the first call to grpc_init. 
+  virtual void grpc_init() = 0; +  virtual void grpc_shutdown() = 0; + +  virtual void gpr_mu_init(gpr_mu* mu) = 0; +  virtual void gpr_mu_destroy(gpr_mu* mu) = 0; +  virtual void gpr_mu_lock(gpr_mu* mu) = 0; +  virtual void gpr_mu_unlock(gpr_mu* mu) = 0; +  virtual void gpr_cv_init(gpr_cv* cv) = 0; +  virtual void gpr_cv_destroy(gpr_cv* cv) = 0; +  virtual int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, +                          gpr_timespec abs_deadline) = 0; +  virtual void gpr_cv_signal(gpr_cv* cv) = 0; +  virtual void gpr_cv_broadcast(gpr_cv* cv) = 0; + +  virtual grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) = 0; +  virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0; +  virtual size_t grpc_byte_buffer_length(grpc_byte_buffer* bb) +      GRPC_MUST_USE_RESULT = 0; + +  virtual int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, +                                           grpc_byte_buffer* buffer) +      GRPC_MUST_USE_RESULT = 0; +  virtual void grpc_byte_buffer_reader_destroy( +      grpc_byte_buffer_reader* reader) = 0; +  virtual int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, +                                           grpc_slice* slice) = 0; +  virtual int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader, +                                           grpc_slice** slice) = 0; + +  virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice, +                                                        size_t nslices) = 0; +  virtual grpc_slice grpc_slice_new_with_user_data(void* p, size_t len, +                                                   void (*destroy)(void*), +                                                   void* user_data) = 0; +  virtual grpc_slice grpc_slice_new_with_len(void* p, size_t len, +                                             void (*destroy)(void*, +                                                             size_t)) = 0; +  virtual grpc_call_error grpc_call_start_batch(grpc_call* call, +                                                const grpc_op* ops, size_t nops, +                                                void* tag, void* reserved) = 0; +  virtual grpc_call_error grpc_call_cancel_with_status(grpc_call* call, +                                                       grpc_status_code status, +                                                       const char* description, +                                                       void* reserved) = 0; +  virtual void grpc_call_ref(grpc_call* call) = 0; +  virtual void grpc_call_unref(grpc_call* call) = 0; +  virtual void* grpc_call_arena_alloc(grpc_call* call, size_t length) = 0; +  virtual const char* grpc_call_error_to_string(grpc_call_error error) = 0; +  virtual grpc_slice grpc_empty_slice() = 0; +  virtual grpc_slice grpc_slice_malloc(size_t length) = 0; +  virtual void grpc_slice_unref(grpc_slice slice) = 0; +  virtual grpc_slice grpc_slice_ref(grpc_slice slice) = 0; +  virtual grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split) = 0; +  virtual grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split) = 0; +  virtual grpc_slice grpc_slice_sub(grpc_slice s, size_t begin, size_t end) = 0; +  virtual void grpc_slice_buffer_add(grpc_slice_buffer* sb, +                                     grpc_slice slice) = 0; +  virtual void grpc_slice_buffer_pop(grpc_slice_buffer* sb) = 0; +  virtual grpc_slice grpc_slice_from_static_buffer(const void* buffer, +                                                   size_t length) = 0; +  
virtual grpc_slice grpc_slice_from_copied_buffer(const void* buffer, +                                                   size_t length) = 0; + +  virtual void grpc_metadata_array_init(grpc_metadata_array* array) = 0; +  virtual void grpc_metadata_array_destroy(grpc_metadata_array* array) = 0; + +  virtual const Status& ok() = 0; +  virtual const Status& cancelled() = 0; + +  virtual gpr_timespec gpr_inf_future(gpr_clock_type type) = 0; +  virtual gpr_timespec gpr_time_0(gpr_clock_type type) = 0; +}; + +extern CoreCodegenInterface* g_core_codegen_interface; + +/// Codegen specific version of \a GPR_ASSERT. +#define GPR_CODEGEN_ASSERT(x)                                              \ +  do {                                                                     \ +    if (GPR_UNLIKELY(!(x))) {                                              \ +      grpc::g_core_codegen_interface->assert_fail(#x, __FILE__, __LINE__); \ +    }                                                                      \ +  } while (0) + +/// Codegen specific version of \a GPR_DEBUG_ASSERT. +#ifndef NDEBUG +#define GPR_CODEGEN_DEBUG_ASSERT(x) GPR_CODEGEN_ASSERT(x) +#else +#define GPR_CODEGEN_DEBUG_ASSERT(x) \ +  do {                              \ +  } while (0) +#endif + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CORE_CODEGEN_INTERFACE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/create_auth_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/create_auth_context.h new file mode 100644 index 00000000000..cb6095c3a5a --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/create_auth_context.h @@ -0,0 +1,33 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_CREATE_AUTH_CONTEXT_H +#define GRPCPP_IMPL_CODEGEN_CREATE_AUTH_CONTEXT_H + +#include <memory> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/security/auth_context.h> + +namespace grpc { + +std::shared_ptr<const AuthContext> CreateAuthContext(grpc_call* call); + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_CREATE_AUTH_CONTEXT_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h new file mode 100644 index 00000000000..1a3bbd3349a --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/delegating_channel.h @@ -0,0 +1,87 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_DELEGATING_CHANNEL_H +#define GRPCPP_IMPL_CODEGEN_DELEGATING_CHANNEL_H + +namespace grpc { +namespace experimental { + +class DelegatingChannel : public ::grpc::ChannelInterface { + public: +  virtual ~DelegatingChannel() {} + +  DelegatingChannel(std::shared_ptr<::grpc::ChannelInterface> delegate_channel) +      : delegate_channel_(delegate_channel) {} + +  grpc_connectivity_state GetState(bool try_to_connect) override { +    return delegate_channel()->GetState(try_to_connect); +  } + +  std::shared_ptr<::grpc::ChannelInterface> delegate_channel() { +    return delegate_channel_; +  } + + private: +  internal::Call CreateCall(const internal::RpcMethod& method, +                            ClientContext* context, +                            ::grpc::CompletionQueue* cq) final { +    return delegate_channel()->CreateCall(method, context, cq); +  } + +  void PerformOpsOnCall(internal::CallOpSetInterface* ops, +                        internal::Call* call) final { +    delegate_channel()->PerformOpsOnCall(ops, call); +  } + +  void* RegisterMethod(const char* method) final { +    return delegate_channel()->RegisterMethod(method); +  } + +  void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, +                               gpr_timespec deadline, +                               ::grpc::CompletionQueue* cq, +                               void* tag) override { +    delegate_channel()->NotifyOnStateChangeImpl(last_observed, deadline, cq, +                                                tag); +  } + +  bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, +                              gpr_timespec deadline) override { +    return delegate_channel()->WaitForStateChangeImpl(last_observed, deadline); +  } + +  internal::Call CreateCallInternal(const internal::RpcMethod& method, +                                    ClientContext* context, +                                    ::grpc::CompletionQueue* cq, +                                    size_t interceptor_pos) final { +    return delegate_channel()->CreateCallInternal(method, context, cq, +                                                  interceptor_pos); +  } + +  ::grpc::CompletionQueue* CallbackCQ() final { +    return delegate_channel()->CallbackCQ(); +  } + +  std::shared_ptr<::grpc::ChannelInterface> delegate_channel_; +}; + +}  // namespace experimental +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_DELEGATING_CHANNEL_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/grpc_library.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/grpc_library.h new file mode 100644 index 00000000000..17c904d71a1 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/grpc_library.h @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H +#define GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H + +#include <grpcpp/impl/codegen/core_codegen_interface.h> + +namespace grpc { + +class GrpcLibraryInterface { + public: +  virtual ~GrpcLibraryInterface() = default; +  virtual void init() = 0; +  virtual void shutdown() = 0; +}; + +/// Initialized by \a grpc::GrpcLibraryInitializer from +/// <grpcpp/impl/grpc_library.h> +extern GrpcLibraryInterface* g_glip; + +/// Classes that require gRPC to be initialized should inherit from this class. +class GrpcLibraryCodegen { + public: +  GrpcLibraryCodegen(bool call_grpc_init = true) : grpc_init_called_(false) { +    if (call_grpc_init) { +      GPR_CODEGEN_ASSERT(g_glip && +                         "gRPC library not initialized. See " +                         "grpc::internal::GrpcLibraryInitializer."); +      g_glip->init(); +      grpc_init_called_ = true; +    } +  } +  virtual ~GrpcLibraryCodegen() { +    if (grpc_init_called_) { +      GPR_CODEGEN_ASSERT(g_glip && +                         "gRPC library not initialized. See " +                         "grpc::internal::GrpcLibraryInitializer."); +      g_glip->shutdown(); +    } +  } + + private: +  bool grpc_init_called_; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h new file mode 100644 index 00000000000..c729970ca88 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/intercepted_channel.h @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H +#define GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H + +#include <grpcpp/impl/codegen/channel_interface.h> + +namespace grpc { +class CompletionQueue; + +namespace internal { + +class InterceptorBatchMethodsImpl; + +/// An InterceptedChannel is available to client Interceptors. An +/// InterceptedChannel is unique to an interceptor, and when an RPC is started +/// on this channel, only those interceptors that come after this interceptor +/// see the RPC. +class InterceptedChannel : public ChannelInterface { + public: +  virtual ~InterceptedChannel() { channel_ = nullptr; } + +  /// Get the current channel state. If the channel is in IDLE and +  /// \a try_to_connect is set to true, try to connect. 
+  grpc_connectivity_state GetState(bool try_to_connect) override { +    return channel_->GetState(try_to_connect); +  } + + private: +  InterceptedChannel(ChannelInterface* channel, size_t pos) +      : channel_(channel), interceptor_pos_(pos) {} + +  Call CreateCall(const RpcMethod& method, ::grpc::ClientContext* context, +                  ::grpc::CompletionQueue* cq) override { +    return channel_->CreateCallInternal(method, context, cq, interceptor_pos_); +  } + +  void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override { +    return channel_->PerformOpsOnCall(ops, call); +  } +  void* RegisterMethod(const char* method) override { +    return channel_->RegisterMethod(method); +  } + +  void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, +                               gpr_timespec deadline, +                               ::grpc::CompletionQueue* cq, +                               void* tag) override { +    return channel_->NotifyOnStateChangeImpl(last_observed, deadline, cq, tag); +  } +  bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, +                              gpr_timespec deadline) override { +    return channel_->WaitForStateChangeImpl(last_observed, deadline); +  } + +  ::grpc::CompletionQueue* CallbackCQ() override { +    return channel_->CallbackCQ(); +  } + +  ChannelInterface* channel_; +  size_t interceptor_pos_; + +  friend class InterceptorBatchMethodsImpl; +}; +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h new file mode 100644 index 00000000000..d0afa03a178 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor.h @@ -0,0 +1,228 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H +#define GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H + +#include <memory> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/metadata_map.h> + +namespace grpc { + +class ChannelInterface; +class Status; + +namespace experimental { + +/// An enumeration of different possible points at which the \a Intercept +/// method of the \a Interceptor interface may be called. Any given call +/// to \a Intercept will include one or more of these hook points, and +/// each hook point makes certain types of information available to the +/// interceptor. +/// In these enumeration names, PRE_SEND means that an interception has taken +/// place between the time the application provided a certain type of data +/// (e.g., initial metadata, status) and the time that that data goes to the +/// other side. 
POST_SEND means that the data has been committed for going to +/// the other side (even if it has not yet been received at the other side). +/// PRE_RECV means an interception between the time that a certain +/// operation has been requested and it is available. POST_RECV means that a +/// result is available but has not yet been passed back to the application. +/// A batch of interception points will only contain either PRE or POST hooks +/// but not both types. For example, a batch with PRE_SEND hook points will not +/// contain POST_RECV or POST_SEND ops. Likewise, a batch with POST_* ops can +/// not contain PRE_* ops. +enum class InterceptionHookPoints { +  /// The first three in this list are for clients and servers +  PRE_SEND_INITIAL_METADATA, +  PRE_SEND_MESSAGE, +  POST_SEND_MESSAGE, +  PRE_SEND_STATUS,  // server only +  PRE_SEND_CLOSE,   // client only: WritesDone for stream; after write in unary +  /// The following three are for hijacked clients only. A batch with PRE_RECV_* +  /// hook points will never contain hook points of other types. +  PRE_RECV_INITIAL_METADATA, +  PRE_RECV_MESSAGE, +  PRE_RECV_STATUS, +  /// The following two are for all clients and servers +  POST_RECV_INITIAL_METADATA, +  POST_RECV_MESSAGE, +  POST_RECV_STATUS,  // client only +  POST_RECV_CLOSE,   // server only +  /// This is a special hook point available to both clients and servers when +  /// TryCancel() is performed. +  ///  - No other hook points will be present along with this. +  ///  - It is illegal for an interceptor to block/delay this operation. +  ///  - ALL interceptors see this hook point irrespective of whether the +  ///    RPC was hijacked or not. +  PRE_SEND_CANCEL, +  NUM_INTERCEPTION_HOOKS +}; + +/// Class that is passed as an argument to the \a Intercept method +/// of the application's \a Interceptor interface implementation. It has five +/// purposes: +///   1. Indicate which hook points are present at a specific interception +///   2. Allow an interceptor to inform the library that an RPC should +///      continue to the next stage of its processing (which may be another +///      interceptor or the main path of the library) +///   3. Allow an interceptor to hijack the processing of the RPC (only for +///      client-side RPCs with PRE_SEND_INITIAL_METADATA) so that it does not +///      proceed with normal processing beyond that stage +///   4. Access the relevant fields of an RPC at each interception point +///   5. Set some fields of an RPC at each interception point, when possible +class InterceptorBatchMethods { + public: +  virtual ~InterceptorBatchMethods() {} +  /// Determine whether the current batch has an interception hook point +  /// of type \a type +  virtual bool QueryInterceptionHookPoint(InterceptionHookPoints type) = 0; +  /// Signal that the interceptor is done intercepting the current batch of the +  /// RPC. Every interceptor must either call Proceed or Hijack on each +  /// interception. In most cases, only Proceed will be used. Explicit use of +  /// Proceed is what enables interceptors to delay the processing of RPCs +  /// while they perform other work. +  /// Proceed is a no-op if the batch contains PRE_SEND_CANCEL. Simply returning +  /// from the Intercept method does the job of continuing the RPC in this case. +  /// This is because PRE_SEND_CANCEL is always in a separate batch and is not +  /// allowed to be delayed. 
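The Proceed/Hijack contract described above is easiest to see in a small sketch. The block below is illustrative only and is not part of this header: the class name LoggingInterceptor is hypothetical, only interfaces declared in this file are used, and registering the interceptor with a channel through a matching factory (reached via the public grpcpp headers in user code) is assumed but not shown.

    // Minimal client-side interceptor sketch (hypothetical, for illustration).
    class LoggingInterceptor : public grpc::experimental::Interceptor {
     public:
      void Intercept(
          grpc::experimental::InterceptorBatchMethods* methods) override {
        if (methods->QueryInterceptionHookPoint(
                grpc::experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
          // View (but do not own) the outgoing payload in non-serialized form.
          const void* request = methods->GetSendMessage();
          (void)request;
        }
        if (methods->QueryInterceptionHookPoint(
                grpc::experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
          // The received status can be inspected (or modified) here.
          grpc::Status* status = methods->GetRecvStatus();
          (void)status;
        }
        // Every interception ends by continuing the RPC; for PRE_SEND_CANCEL
        // batches, Proceed() is a no-op and simply returning also suffices.
        methods->Proceed();
      }
    };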
+  virtual void Proceed() = 0;
+  /// Indicate that the interceptor has hijacked the RPC (only valid if the
+  /// batch contains send_initial_metadata on the client side). Later
+  /// interceptors in the interceptor list will not be called. Later batches
+  /// on the same RPC will go through interception, but only up to the point
+  /// of the hijacking interceptor.
+  virtual void Hijack() = 0;
+
+  /// Send Message Methods
+  /// GetSerializedSendMessage and GetSendMessage/ModifySendMessage are the
+  /// available methods to view and modify the request payload. An interceptor
+  /// can access the payload in either serialized form or non-serialized form
+  /// but not both at the same time.
+  /// gRPC performs serialization in a lazy manner, which means
+  /// that a call to GetSerializedSendMessage will result in a serialization
+  /// operation if the payload stored is not in the serialized form already; the
+  /// non-serialized form will be lost and GetSendMessage will no longer return
+  /// a valid pointer, and this will remain true for later interceptors too.
+  /// This can change, however, if ModifySendMessage is used to replace the
+  /// current payload. Note that ModifySendMessage requires a new payload
+  /// message in the non-serialized form. This will overwrite the existing
+  /// payload irrespective of whether it had been serialized earlier. Also note
+  /// that the gRPC Async API requires early serialization of the payload, which
+  /// means that the payload would be available in the serialized form only,
+  /// unless an interceptor replaces the payload with ModifySendMessage.
+
+  /// Returns a modifiable ByteBuffer holding the serialized form of the message
+  /// that is going to be sent. Valid for PRE_SEND_MESSAGE interceptions.
+  /// A return value of nullptr indicates that this ByteBuffer is not valid.
+  virtual ByteBuffer* GetSerializedSendMessage() = 0;
+
+  /// Returns a non-modifiable pointer to the non-serialized form of the message
+  /// to be sent. Valid for PRE_SEND_MESSAGE interceptions. A return value of
+  /// nullptr indicates that this field is not valid.
+  virtual const void* GetSendMessage() = 0;
+
+  /// Overwrites the message to be sent with \a message. \a message should be in
+  /// the non-serialized form expected by the method. Valid for PRE_SEND_MESSAGE
+  /// interceptions. Note that the interceptor is responsible for maintaining
+  /// the lifetime of the message until it is serialized or it receives the
+  /// POST_SEND_MESSAGE interception point, whichever happens earlier. The
+  /// modifying interceptor may itself force early serialization by calling
+  /// GetSerializedSendMessage.
+  virtual void ModifySendMessage(const void* message) = 0;
+
+  /// Checks whether the SEND MESSAGE op succeeded. Valid for POST_SEND_MESSAGE
+  /// interceptions.
+  virtual bool GetSendMessageStatus() = 0;
+
+  /// Returns a modifiable multimap of the initial metadata to be sent. Valid
+  /// for PRE_SEND_INITIAL_METADATA interceptions. A value of nullptr indicates
+  /// that this field is not valid.
+  virtual std::multimap<TString, TString>* GetSendInitialMetadata() = 0;
+
+  /// Returns the status to be sent. Valid for PRE_SEND_STATUS interceptions.
+  virtual Status GetSendStatus() = 0;
+
+  /// Overwrites the status with \a status. Valid for PRE_SEND_STATUS
+  /// interceptions.
+  virtual void ModifySendStatus(const Status& status) = 0;
+
+  /// Returns a modifiable multimap of the trailing metadata to be sent.
Valid +  /// for PRE_SEND_STATUS interceptions. A value of nullptr indicates +  /// that this field is not valid. +  virtual std::multimap<TString, TString>* +  GetSendTrailingMetadata() = 0; + +  /// Returns a pointer to the modifiable received message. Note that the +  /// message is already deserialized but the type is not set; the interceptor +  /// should static_cast to the appropriate type before using it. This is valid +  /// for PRE_RECV_MESSAGE and POST_RECV_MESSAGE interceptions; nullptr for not +  /// valid +  virtual void* GetRecvMessage() = 0; + +  /// Returns a modifiable multimap of the received initial metadata. +  /// Valid for PRE_RECV_INITIAL_METADATA and POST_RECV_INITIAL_METADATA +  /// interceptions; nullptr if not valid +  virtual std::multimap<grpc::string_ref, grpc::string_ref>* +  GetRecvInitialMetadata() = 0; + +  /// Returns a modifiable view of the received status on PRE_RECV_STATUS and +  /// POST_RECV_STATUS interceptions; nullptr if not valid. +  virtual Status* GetRecvStatus() = 0; + +  /// Returns a modifiable multimap of the received trailing metadata on +  /// PRE_RECV_STATUS and POST_RECV_STATUS interceptions; nullptr if not valid +  virtual std::multimap<grpc::string_ref, grpc::string_ref>* +  GetRecvTrailingMetadata() = 0; + +  /// Gets an intercepted channel. When a call is started on this interceptor, +  /// only interceptors after the current interceptor are created from the +  /// factory objects registered with the channel. This allows calls to be +  /// started from interceptors without infinite regress through the interceptor +  /// list. +  virtual std::unique_ptr<ChannelInterface> GetInterceptedChannel() = 0; + +  /// On a hijacked RPC, an interceptor can decide to fail a PRE_RECV_MESSAGE +  /// op. This would be a signal to the reader that there will be no more +  /// messages, or the stream has failed or been cancelled. +  virtual void FailHijackedRecvMessage() = 0; + +  /// On a hijacked RPC/ to-be hijacked RPC, this can be called to fail a SEND +  /// MESSAGE op +  virtual void FailHijackedSendMessage() = 0; +}; + +/// Interface for an interceptor. Interceptor authors must create a class +/// that derives from this parent class. +class Interceptor { + public: +  virtual ~Interceptor() {} + +  /// The one public method of an Interceptor interface. Override this to +  /// trigger the desired actions at the hook points described above. +  virtual void Intercept(InterceptorBatchMethods* methods) = 0; +}; + +}  // namespace experimental +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_INTERCEPTOR_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h new file mode 100644 index 00000000000..714351f5432 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/interceptor_common.h @@ -0,0 +1,558 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#if defined(__GNUC__) +#pragma GCC system_header +#endif + +#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H +#define GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H + +#include <array> +#include <functional> + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_op_set_interface.h> +#include <grpcpp/impl/codegen/client_interceptor.h> +#include <grpcpp/impl/codegen/intercepted_channel.h> +#include <grpcpp/impl/codegen/server_interceptor.h> + +#include <grpc/impl/codegen/grpc_types.h> + +namespace grpc { +namespace internal { + +class InterceptorBatchMethodsImpl +    : public experimental::InterceptorBatchMethods { + public: +  InterceptorBatchMethodsImpl() { +    for (auto i = static_cast<experimental::InterceptionHookPoints>(0); +         i < experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS; +         i = static_cast<experimental::InterceptionHookPoints>( +             static_cast<size_t>(i) + 1)) { +      hooks_[static_cast<size_t>(i)] = false; +    } +  } + +  ~InterceptorBatchMethodsImpl() {} + +  bool QueryInterceptionHookPoint( +      experimental::InterceptionHookPoints type) override { +    return hooks_[static_cast<size_t>(type)]; +  } + +  void Proceed() override { +    if (call_->client_rpc_info() != nullptr) { +      return ProceedClient(); +    } +    GPR_CODEGEN_ASSERT(call_->server_rpc_info() != nullptr); +    ProceedServer(); +  } + +  void Hijack() override { +    // Only the client can hijack when sending down initial metadata +    GPR_CODEGEN_ASSERT(!reverse_ && ops_ != nullptr && +                       call_->client_rpc_info() != nullptr); +    // It is illegal to call Hijack twice +    GPR_CODEGEN_ASSERT(!ran_hijacking_interceptor_); +    auto* rpc_info = call_->client_rpc_info(); +    rpc_info->hijacked_ = true; +    rpc_info->hijacked_interceptor_ = current_interceptor_index_; +    ClearHookPoints(); +    ops_->SetHijackingState(); +    ran_hijacking_interceptor_ = true; +    rpc_info->RunInterceptor(this, current_interceptor_index_); +  } + +  void AddInterceptionHookPoint(experimental::InterceptionHookPoints type) { +    hooks_[static_cast<size_t>(type)] = true; +  } + +  ByteBuffer* GetSerializedSendMessage() override { +    GPR_CODEGEN_ASSERT(orig_send_message_ != nullptr); +    if (*orig_send_message_ != nullptr) { +      GPR_CODEGEN_ASSERT(serializer_(*orig_send_message_).ok()); +      *orig_send_message_ = nullptr; +    } +    return send_message_; +  } + +  const void* GetSendMessage() override { +    GPR_CODEGEN_ASSERT(orig_send_message_ != nullptr); +    return *orig_send_message_; +  } + +  void ModifySendMessage(const void* message) override { +    GPR_CODEGEN_ASSERT(orig_send_message_ != nullptr); +    *orig_send_message_ = message; +  } + +  bool GetSendMessageStatus() override { return !*fail_send_message_; } + +  std::multimap<TString, TString>* GetSendInitialMetadata() override { +    return send_initial_metadata_; +  } + +  Status GetSendStatus() override { +    return Status(static_cast<StatusCode>(*code_), *error_message_, +                  *error_details_); +  } + +  void ModifySendStatus(const Status& status) override { +    *code_ = static_cast<grpc_status_code>(status.error_code()); +    *error_details_ = status.error_details(); +    *error_message_ = status.error_message(); +  } + +  std::multimap<TString, TString>* GetSendTrailingMetadata() override { +    return send_trailing_metadata_; +  } + +  void* GetRecvMessage() override { return recv_message_; } + +  std::multimap<grpc::string_ref, 
grpc::string_ref>* GetRecvInitialMetadata() +      override { +    return recv_initial_metadata_->map(); +  } + +  Status* GetRecvStatus() override { return recv_status_; } + +  void FailHijackedSendMessage() override { +    GPR_CODEGEN_ASSERT(hooks_[static_cast<size_t>( +        experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)]); +    *fail_send_message_ = true; +  } + +  std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvTrailingMetadata() +      override { +    return recv_trailing_metadata_->map(); +  } + +  void SetSendMessage(ByteBuffer* buf, const void** msg, +                      bool* fail_send_message, +                      std::function<Status(const void*)> serializer) { +    send_message_ = buf; +    orig_send_message_ = msg; +    fail_send_message_ = fail_send_message; +    serializer_ = serializer; +  } + +  void SetSendInitialMetadata( +      std::multimap<TString, TString>* metadata) { +    send_initial_metadata_ = metadata; +  } + +  void SetSendStatus(grpc_status_code* code, TString* error_details, +                     TString* error_message) { +    code_ = code; +    error_details_ = error_details; +    error_message_ = error_message; +  } + +  void SetSendTrailingMetadata( +      std::multimap<TString, TString>* metadata) { +    send_trailing_metadata_ = metadata; +  } + +  void SetRecvMessage(void* message, bool* hijacked_recv_message_failed) { +    recv_message_ = message; +    hijacked_recv_message_failed_ = hijacked_recv_message_failed; +  } + +  void SetRecvInitialMetadata(MetadataMap* map) { +    recv_initial_metadata_ = map; +  } + +  void SetRecvStatus(Status* status) { recv_status_ = status; } + +  void SetRecvTrailingMetadata(MetadataMap* map) { +    recv_trailing_metadata_ = map; +  } + +  std::unique_ptr<ChannelInterface> GetInterceptedChannel() override { +    auto* info = call_->client_rpc_info(); +    if (info == nullptr) { +      return std::unique_ptr<ChannelInterface>(nullptr); +    } +    // The intercepted channel starts from the interceptor just after the +    // current interceptor +    return std::unique_ptr<ChannelInterface>(new InterceptedChannel( +        info->channel(), current_interceptor_index_ + 1)); +  } + +  void FailHijackedRecvMessage() override { +    GPR_CODEGEN_ASSERT(hooks_[static_cast<size_t>( +        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)]); +    *hijacked_recv_message_failed_ = true; +  } + +  // Clears all state +  void ClearState() { +    reverse_ = false; +    ran_hijacking_interceptor_ = false; +    ClearHookPoints(); +  } + +  // Prepares for Post_recv operations +  void SetReverse() { +    reverse_ = true; +    ran_hijacking_interceptor_ = false; +    ClearHookPoints(); +  } + +  // This needs to be set before interceptors are run +  void SetCall(Call* call) { call_ = call; } + +  // This needs to be set before interceptors are run using RunInterceptors(). +  // Alternatively, RunInterceptors(std::function<void(void)> f) can be used. +  void SetCallOpSetInterface(CallOpSetInterface* ops) { ops_ = ops; } + +  // SetCall should have been called before this. 
+  // Returns true if the interceptors list is empty +  bool InterceptorsListEmpty() { +    auto* client_rpc_info = call_->client_rpc_info(); +    if (client_rpc_info != nullptr) { +      if (client_rpc_info->interceptors_.size() == 0) { +        return true; +      } else { +        return false; +      } +    } + +    auto* server_rpc_info = call_->server_rpc_info(); +    if (server_rpc_info == nullptr || +        server_rpc_info->interceptors_.size() == 0) { +      return true; +    } +    return false; +  } + +  // This should be used only by subclasses of CallOpSetInterface. SetCall and +  // SetCallOpSetInterface should have been called before this. After all the +  // interceptors are done running, either ContinueFillOpsAfterInterception or +  // ContinueFinalizeOpsAfterInterception will be called. Note that neither of +  // them is invoked if there were no interceptors registered. +  bool RunInterceptors() { +    GPR_CODEGEN_ASSERT(ops_); +    auto* client_rpc_info = call_->client_rpc_info(); +    if (client_rpc_info != nullptr) { +      if (client_rpc_info->interceptors_.size() == 0) { +        return true; +      } else { +        RunClientInterceptors(); +        return false; +      } +    } + +    auto* server_rpc_info = call_->server_rpc_info(); +    if (server_rpc_info == nullptr || +        server_rpc_info->interceptors_.size() == 0) { +      return true; +    } +    RunServerInterceptors(); +    return false; +  } + +  // Returns true if no interceptors are run. Returns false otherwise if there +  // are interceptors registered. After the interceptors are done running \a f +  // will be invoked. This is to be used only by BaseAsyncRequest and +  // SyncRequest. +  bool RunInterceptors(std::function<void(void)> f) { +    // This is used only by the server for initial call request +    GPR_CODEGEN_ASSERT(reverse_ == true); +    GPR_CODEGEN_ASSERT(call_->client_rpc_info() == nullptr); +    auto* server_rpc_info = call_->server_rpc_info(); +    if (server_rpc_info == nullptr || +        server_rpc_info->interceptors_.size() == 0) { +      return true; +    } +    callback_ = std::move(f); +    RunServerInterceptors(); +    return false; +  } + + private: +  void RunClientInterceptors() { +    auto* rpc_info = call_->client_rpc_info(); +    if (!reverse_) { +      current_interceptor_index_ = 0; +    } else { +      if (rpc_info->hijacked_) { +        current_interceptor_index_ = rpc_info->hijacked_interceptor_; +      } else { +        current_interceptor_index_ = rpc_info->interceptors_.size() - 1; +      } +    } +    rpc_info->RunInterceptor(this, current_interceptor_index_); +  } + +  void RunServerInterceptors() { +    auto* rpc_info = call_->server_rpc_info(); +    if (!reverse_) { +      current_interceptor_index_ = 0; +    } else { +      current_interceptor_index_ = rpc_info->interceptors_.size() - 1; +    } +    rpc_info->RunInterceptor(this, current_interceptor_index_); +  } + +  void ProceedClient() { +    auto* rpc_info = call_->client_rpc_info(); +    if (rpc_info->hijacked_ && !reverse_ && +        current_interceptor_index_ == rpc_info->hijacked_interceptor_ && +        !ran_hijacking_interceptor_) { +      // We now need to provide hijacked recv ops to this interceptor +      ClearHookPoints(); +      ops_->SetHijackingState(); +      ran_hijacking_interceptor_ = true; +      rpc_info->RunInterceptor(this, current_interceptor_index_); +      return; +    } +    if (!reverse_) { +      current_interceptor_index_++; +      // We are going down the stack of 
interceptors +      if (current_interceptor_index_ < rpc_info->interceptors_.size()) { +        if (rpc_info->hijacked_ && +            current_interceptor_index_ > rpc_info->hijacked_interceptor_) { +          // This is a hijacked RPC and we are done with hijacking +          ops_->ContinueFillOpsAfterInterception(); +        } else { +          rpc_info->RunInterceptor(this, current_interceptor_index_); +        } +      } else { +        // we are done running all the interceptors without any hijacking +        ops_->ContinueFillOpsAfterInterception(); +      } +    } else { +      // We are going up the stack of interceptors +      if (current_interceptor_index_ > 0) { +        // Continue running interceptors +        current_interceptor_index_--; +        rpc_info->RunInterceptor(this, current_interceptor_index_); +      } else { +        // we are done running all the interceptors without any hijacking +        ops_->ContinueFinalizeResultAfterInterception(); +      } +    } +  } + +  void ProceedServer() { +    auto* rpc_info = call_->server_rpc_info(); +    if (!reverse_) { +      current_interceptor_index_++; +      if (current_interceptor_index_ < rpc_info->interceptors_.size()) { +        return rpc_info->RunInterceptor(this, current_interceptor_index_); +      } else if (ops_) { +        return ops_->ContinueFillOpsAfterInterception(); +      } +    } else { +      // We are going up the stack of interceptors +      if (current_interceptor_index_ > 0) { +        // Continue running interceptors +        current_interceptor_index_--; +        return rpc_info->RunInterceptor(this, current_interceptor_index_); +      } else if (ops_) { +        return ops_->ContinueFinalizeResultAfterInterception(); +      } +    } +    GPR_CODEGEN_ASSERT(callback_); +    callback_(); +  } + +  void ClearHookPoints() { +    for (auto i = static_cast<experimental::InterceptionHookPoints>(0); +         i < experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS; +         i = static_cast<experimental::InterceptionHookPoints>( +             static_cast<size_t>(i) + 1)) { +      hooks_[static_cast<size_t>(i)] = false; +    } +  } + +  std::array<bool, +             static_cast<size_t>( +                 experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS)> +      hooks_; + +  size_t current_interceptor_index_ = 0;  // Current iterator +  bool reverse_ = false; +  bool ran_hijacking_interceptor_ = false; +  Call* call_ = nullptr;  // The Call object is present along with CallOpSet +                          // object/callback +  CallOpSetInterface* ops_ = nullptr; +  std::function<void(void)> callback_; + +  ByteBuffer* send_message_ = nullptr; +  bool* fail_send_message_ = nullptr; +  const void** orig_send_message_ = nullptr; +  std::function<Status(const void*)> serializer_; + +  std::multimap<TString, TString>* send_initial_metadata_; + +  grpc_status_code* code_ = nullptr; +  TString* error_details_ = nullptr; +  TString* error_message_ = nullptr; + +  std::multimap<TString, TString>* send_trailing_metadata_ = nullptr; + +  void* recv_message_ = nullptr; +  bool* hijacked_recv_message_failed_ = nullptr; + +  MetadataMap* recv_initial_metadata_ = nullptr; + +  Status* recv_status_ = nullptr; + +  MetadataMap* recv_trailing_metadata_ = nullptr; +}; + +// A special implementation of InterceptorBatchMethods to send a Cancel +// notification down the interceptor stack +class CancelInterceptorBatchMethods +    : public experimental::InterceptorBatchMethods { + public: +  bool 
QueryInterceptionHookPoint( +      experimental::InterceptionHookPoints type) override { +    if (type == experimental::InterceptionHookPoints::PRE_SEND_CANCEL) { +      return true; +    } else { +      return false; +    } +  } + +  void Proceed() override { +    // This is a no-op. For actual continuation of the RPC simply needs to +    // return from the Intercept method +  } + +  void Hijack() override { +    // Only the client can hijack when sending down initial metadata +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call Hijack on a method which has a " +                       "Cancel notification"); +  } + +  ByteBuffer* GetSerializedSendMessage() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetSendMessage on a method which " +                       "has a Cancel notification"); +    return nullptr; +  } + +  bool GetSendMessageStatus() override { +    GPR_CODEGEN_ASSERT( +        false && +        "It is illegal to call GetSendMessageStatus on a method which " +        "has a Cancel notification"); +    return false; +  } + +  const void* GetSendMessage() override { +    GPR_CODEGEN_ASSERT( +        false && +        "It is illegal to call GetOriginalSendMessage on a method which " +        "has a Cancel notification"); +    return nullptr; +  } + +  void ModifySendMessage(const void* /*message*/) override { +    GPR_CODEGEN_ASSERT( +        false && +        "It is illegal to call ModifySendMessage on a method which " +        "has a Cancel notification"); +  } + +  std::multimap<TString, TString>* GetSendInitialMetadata() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetSendInitialMetadata on a " +                       "method which has a Cancel notification"); +    return nullptr; +  } + +  Status GetSendStatus() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetSendStatus on a method which " +                       "has a Cancel notification"); +    return Status(); +  } + +  void ModifySendStatus(const Status& /*status*/) override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call ModifySendStatus on a method " +                       "which has a Cancel notification"); +    return; +  } + +  std::multimap<TString, TString>* GetSendTrailingMetadata() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetSendTrailingMetadata on a " +                       "method which has a Cancel notification"); +    return nullptr; +  } + +  void* GetRecvMessage() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetRecvMessage on a method which " +                       "has a Cancel notification"); +    return nullptr; +  } + +  std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvInitialMetadata() +      override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetRecvInitialMetadata on a " +                       "method which has a Cancel notification"); +    return nullptr; +  } + +  Status* GetRecvStatus() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetRecvStatus on a method which " +                       "has a Cancel notification"); +    return nullptr; +  } + +  std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvTrailingMetadata() +      override { +    GPR_CODEGEN_ASSERT(false && +                       "It is 
illegal to call GetRecvTrailingMetadata on a " +                       "method which has a Cancel notification"); +    return nullptr; +  } + +  std::unique_ptr<ChannelInterface> GetInterceptedChannel() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call GetInterceptedChannel on a " +                       "method which has a Cancel notification"); +    return std::unique_ptr<ChannelInterface>(nullptr); +  } + +  void FailHijackedRecvMessage() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call FailHijackedRecvMessage on a " +                       "method which has a Cancel notification"); +  } + +  void FailHijackedSendMessage() override { +    GPR_CODEGEN_ASSERT(false && +                       "It is illegal to call FailHijackedSendMessage on a " +                       "method which has a Cancel notification"); +  } +}; +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h new file mode 100644 index 00000000000..4048ea11974 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/message_allocator.h @@ -0,0 +1,93 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H +#define GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H + +namespace grpc { +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +namespace experimental { +#endif + +// NOTE: This is an API for advanced users who need custom allocators. +// Per-RPC struct for the allocator. This is the interface returned to the user. +class RpcAllocatorState { + public: +  virtual ~RpcAllocatorState() = default; +  // Optionally deallocate request early to reduce the size of working set. +  // A custom MessageAllocator needs to be registered to make use of this. +  // This is not abstract because implementing it is optional. +  virtual void FreeRequest() {} +}; + +// This is the interface returned by the allocator. +// The gRPC library will call the methods to get request/response pointers and to +// release the object when it is done. +template <typename RequestT, typename ResponseT> +class MessageHolder : public RpcAllocatorState { + public: +  // Release this object. For example, if the custom allocator's +  // AllocateMessages creates an instance of a subclass with new, the Release() +  // should do a "delete this;". +  virtual void Release() = 0; +  RequestT* request() { return request_; } +  ResponseT* response() { return response_; } + + protected: +  void set_request(RequestT* request) { request_ = request; } +  void set_response(ResponseT* response) { response_ = response; } + + private: +  // NOTE: subclasses should set these pointers.
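To make the note above concrete, here is a minimal sketch of a matching MessageHolder/MessageAllocator pair that backs both messages with plain new/delete; SimpleAllocator is a made-up name, and a real allocator would more likely hand out arena-backed messages. The MessageAllocator interface and the generated SetMessageAllocatorFor_* registration hook it plugs into are shown just below; grpc::experimental:: works in either build mode thanks to the aliases at the end of this header.

template <class RequestT, class ResponseT>
class SimpleAllocator
    : public grpc::experimental::MessageAllocator<RequestT, ResponseT> {
 public:
  class Holder
      : public grpc::experimental::MessageHolder<RequestT, ResponseT> {
   public:
    Holder() {
      // Per the note above, the subclass is responsible for both pointers.
      this->set_request(new RequestT);
      this->set_response(new ResponseT);
    }
    void FreeRequest() override {
      // Optional early release of the request to shrink the working set.
      delete this->request();
      this->set_request(nullptr);
    }
    void Release() override {
      delete this->request();
      delete this->response();
      delete this;  // matches the "delete this;" guidance in Release()'s comment
    }
  };

  grpc::experimental::MessageHolder<RequestT, ResponseT>* AllocateMessages()
      override {
    return new Holder;
  }
};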
+  RequestT* request_; +  ResponseT* response_; +}; + +// A custom allocator can be set via the generated code to a callback unary +// method, such as SetMessageAllocatorFor_Echo(custom_allocator). The allocator +// needs to be alive for the lifetime of the server. +// Implementations need to be thread-safe. +template <typename RequestT, typename ResponseT> +class MessageAllocator { + public: +  virtual ~MessageAllocator() = default; +  virtual MessageHolder<RequestT, ResponseT>* AllocateMessages() = 0; +}; + +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +}  // namespace experimental +#endif + +// TODO(vjpai): Remove namespace experimental when de-experimentalized fully. +#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL +namespace experimental { + +using ::grpc::RpcAllocatorState; + +template <typename RequestT, typename ResponseT> +using MessageHolder = ::grpc::MessageHolder<RequestT, ResponseT>; + +template <typename RequestT, typename ResponseT> +using MessageAllocator = ::grpc::MessageAllocator<RequestT, ResponseT>; + +}  // namespace experimental +#endif + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_MESSAGE_ALLOCATOR_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h new file mode 100644 index 00000000000..03afc0781a2 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/metadata_map.h @@ -0,0 +1,105 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_METADATA_MAP_H +#define GRPCPP_IMPL_CODEGEN_METADATA_MAP_H + +#include <map> + +#include <grpc/impl/codegen/log.h> +#include <grpcpp/impl/codegen/slice.h> + +namespace grpc { + +namespace internal { + +const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin"; + +class MetadataMap { + public: +  MetadataMap() { Setup(); } + +  ~MetadataMap() { Destroy(); } + +  TString GetBinaryErrorDetails() { +    // if filled_, extract from the multimap for O(log(n)) +    if (filled_) { +      auto iter = map_.find(kBinaryErrorDetailsKey); +      if (iter != map_.end()) { +        return TString(iter->second.begin(), iter->second.length()); +      } +    } +    // if not yet filled, take the O(n) lookup to avoid allocating the +    // multimap until it is requested. +    // TODO(ncteisen): plumb this through core as a first class object, just +    // like code and message. 
+    else { +      for (size_t i = 0; i < arr_.count; i++) { +        if (strncmp(reinterpret_cast<const char*>( +                        GRPC_SLICE_START_PTR(arr_.metadata[i].key)), +                    kBinaryErrorDetailsKey, +                    GRPC_SLICE_LENGTH(arr_.metadata[i].key)) == 0) { +          return TString(reinterpret_cast<const char*>( +                                 GRPC_SLICE_START_PTR(arr_.metadata[i].value)), +                             GRPC_SLICE_LENGTH(arr_.metadata[i].value)); +        } +      } +    } +    return TString(); +  } + +  std::multimap<grpc::string_ref, grpc::string_ref>* map() { +    FillMap(); +    return &map_; +  } +  grpc_metadata_array* arr() { return &arr_; } + +  void Reset() { +    filled_ = false; +    map_.clear(); +    Destroy(); +    Setup(); +  } + + private: +  bool filled_ = false; +  grpc_metadata_array arr_; +  std::multimap<grpc::string_ref, grpc::string_ref> map_; + +  void Destroy() { +    g_core_codegen_interface->grpc_metadata_array_destroy(&arr_); +  } + +  void Setup() { memset(&arr_, 0, sizeof(arr_)); } + +  void FillMap() { +    if (filled_) return; +    filled_ = true; +    for (size_t i = 0; i < arr_.count; i++) { +      // TODO(yangg) handle duplicates? +      map_.insert(std::pair<grpc::string_ref, grpc::string_ref>( +          StringRefFromSlice(&arr_.metadata[i].key), +          StringRefFromSlice(&arr_.metadata[i].value))); +    } +  } +}; +}  // namespace internal + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_METADATA_MAP_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h new file mode 100644 index 00000000000..0033936b04b --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/method_handler.h @@ -0,0 +1,381 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H +#define GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H + +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/rpc_service_method.h> +#include <grpcpp/impl/codegen/sync_stream.h> + +namespace grpc { + +namespace internal { + +// Invoke the method handler, fill in the status, and +// return whether or not we finished safely (without an exception). +// Note that exception handling is 0-cost in most compiler/library +// implementations (except when an exception is actually thrown), +// so this process doesn't require additional overhead in the common case. +// Additionally, we don't need to return if we caught an exception or not; +// the handling is the same in either case. +template <class Callable> +::grpc::Status CatchingFunctionHandler(Callable&& handler) { +#if GRPC_ALLOW_EXCEPTIONS +  try { +    return handler(); +  } catch (...) 
{ +    return ::grpc::Status(::grpc::StatusCode::UNKNOWN, +                          "Unexpected error in RPC handling"); +  } +#else   // GRPC_ALLOW_EXCEPTIONS +  return handler(); +#endif  // GRPC_ALLOW_EXCEPTIONS +} + +/// A wrapper class of an application provided rpc method handler. +template <class ServiceType, class RequestType, class ResponseType> +class RpcMethodHandler : public ::grpc::internal::MethodHandler { + public: +  RpcMethodHandler( +      std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*, +                                   const RequestType*, ResponseType*)> +          func, +      ServiceType* service) +      : func_(func), service_(service) {} + +  void RunHandler(const HandlerParameter& param) final { +    ResponseType rsp; +    ::grpc::Status status = param.status; +    if (status.ok()) { +      status = CatchingFunctionHandler([this, ¶m, &rsp] { +        return func_(service_, +                     static_cast<::grpc::ServerContext*>(param.server_context), +                     static_cast<RequestType*>(param.request), &rsp); +      }); +      static_cast<RequestType*>(param.request)->~RequestType(); +    } + +    GPR_CODEGEN_ASSERT(!param.server_context->sent_initial_metadata_); +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        ops; +    ops.SendInitialMetadata(¶m.server_context->initial_metadata_, +                            param.server_context->initial_metadata_flags()); +    if (param.server_context->compression_level_set()) { +      ops.set_compression_level(param.server_context->compression_level()); +    } +    if (status.ok()) { +      status = ops.SendMessagePtr(&rsp); +    } +    ops.ServerSendStatus(¶m.server_context->trailing_metadata_, status); +    param.call->PerformOps(&ops); +    param.call->cq()->Pluck(&ops); +  } + +  void* Deserialize(grpc_call* call, grpc_byte_buffer* req, +                    ::grpc::Status* status, void** /*handler_data*/) final { +    ::grpc::ByteBuffer buf; +    buf.set_buffer(req); +    auto* request = +        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +            call, sizeof(RequestType))) RequestType(); +    *status = +        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request); +    buf.Release(); +    if (status->ok()) { +      return request; +    } +    request->~RequestType(); +    return nullptr; +  } + + private: +  /// Application provided rpc handler function. +  std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*, +                               const RequestType*, ResponseType*)> +      func_; +  // The class the above handler function lives in. +  ServiceType* service_; +}; + +/// A wrapper class of an application provided client streaming handler. 
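The func_ wrapped by RpcMethodHandler above is ultimately a user-implemented method on a protoc-generated sync service. A hypothetical Greeter service with HelloRequest/HelloReply messages (none of which are part of this header, and whose generated header name is assumed) would be implemented as below; the generated code wires that method into an RpcMethodHandler, and the client-streaming wrapper that follows is analogous but hands the user a ServerReader<RequestType>*.

#include <grpcpp/grpcpp.h>
// Assumed protoc output for a hypothetical Greeter service.
#include "helloworld.grpc.pb.h"

class GreeterImpl final : public helloworld::Greeter::Service {
  grpc::Status SayHello(grpc::ServerContext* /*ctx*/,
                        const helloworld::HelloRequest* request,
                        helloworld::HelloReply* reply) override {
    // This body is what CatchingFunctionHandler ends up invoking via func_.
    reply->set_message("Hello " + request->name());
    return grpc::Status::OK;
  }
};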
+template <class ServiceType, class RequestType, class ResponseType> +class ClientStreamingHandler : public ::grpc::internal::MethodHandler { + public: +  ClientStreamingHandler( +      std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*, +                                   ServerReader<RequestType>*, ResponseType*)> +          func, +      ServiceType* service) +      : func_(func), service_(service) {} + +  void RunHandler(const HandlerParameter& param) final { +    ServerReader<RequestType> reader( +        param.call, static_cast<::grpc::ServerContext*>(param.server_context)); +    ResponseType rsp; +    ::grpc::Status status = CatchingFunctionHandler([this, ¶m, &reader, +                                                     &rsp] { +      return func_(service_, +                   static_cast<::grpc::ServerContext*>(param.server_context), +                   &reader, &rsp); +    }); + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        ops; +    if (!param.server_context->sent_initial_metadata_) { +      ops.SendInitialMetadata(¶m.server_context->initial_metadata_, +                              param.server_context->initial_metadata_flags()); +      if (param.server_context->compression_level_set()) { +        ops.set_compression_level(param.server_context->compression_level()); +      } +    } +    if (status.ok()) { +      status = ops.SendMessagePtr(&rsp); +    } +    ops.ServerSendStatus(¶m.server_context->trailing_metadata_, status); +    param.call->PerformOps(&ops); +    param.call->cq()->Pluck(&ops); +  } + + private: +  std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*, +                               ServerReader<RequestType>*, ResponseType*)> +      func_; +  ServiceType* service_; +}; + +/// A wrapper class of an application provided server streaming handler. 
+template <class ServiceType, class RequestType, class ResponseType> +class ServerStreamingHandler : public ::grpc::internal::MethodHandler { + public: +  ServerStreamingHandler(std::function<::grpc::Status( +                             ServiceType*, ::grpc::ServerContext*, +                             const RequestType*, ServerWriter<ResponseType>*)> +                             func, +                         ServiceType* service) +      : func_(func), service_(service) {} + +  void RunHandler(const HandlerParameter& param) final { +    ::grpc::Status status = param.status; +    if (status.ok()) { +      ServerWriter<ResponseType> writer( +          param.call, +          static_cast<::grpc::ServerContext*>(param.server_context)); +      status = CatchingFunctionHandler([this, ¶m, &writer] { +        return func_(service_, +                     static_cast<::grpc::ServerContext*>(param.server_context), +                     static_cast<RequestType*>(param.request), &writer); +      }); +      static_cast<RequestType*>(param.request)->~RequestType(); +    } + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpServerSendStatus> +        ops; +    if (!param.server_context->sent_initial_metadata_) { +      ops.SendInitialMetadata(¶m.server_context->initial_metadata_, +                              param.server_context->initial_metadata_flags()); +      if (param.server_context->compression_level_set()) { +        ops.set_compression_level(param.server_context->compression_level()); +      } +    } +    ops.ServerSendStatus(¶m.server_context->trailing_metadata_, status); +    param.call->PerformOps(&ops); +    if (param.server_context->has_pending_ops_) { +      param.call->cq()->Pluck(¶m.server_context->pending_ops_); +    } +    param.call->cq()->Pluck(&ops); +  } + +  void* Deserialize(grpc_call* call, grpc_byte_buffer* req, +                    ::grpc::Status* status, void** /*handler_data*/) final { +    ::grpc::ByteBuffer buf; +    buf.set_buffer(req); +    auto* request = +        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +            call, sizeof(RequestType))) RequestType(); +    *status = +        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request); +    buf.Release(); +    if (status->ok()) { +      return request; +    } +    request->~RequestType(); +    return nullptr; +  } + + private: +  std::function<::grpc::Status(ServiceType*, ::grpc::ServerContext*, +                               const RequestType*, ServerWriter<ResponseType>*)> +      func_; +  ServiceType* service_; +}; + +/// A wrapper class of an application provided bidi-streaming handler. +/// This also applies to server-streamed implementation of a unary method +/// with the additional requirement that such methods must have done a +/// write for status to be ok +/// Since this is used by more than 1 class, the service is not passed in. 
+/// Instead, it is expected to be an implicitly-captured argument of func +/// (through bind or something along those lines) +template <class Streamer, bool WriteNeeded> +class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler { + public: +  TemplatedBidiStreamingHandler( +      std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func) +      : func_(func), write_needed_(WriteNeeded) {} + +  void RunHandler(const HandlerParameter& param) final { +    Streamer stream(param.call, +                    static_cast<::grpc::ServerContext*>(param.server_context)); +    ::grpc::Status status = CatchingFunctionHandler([this, ¶m, &stream] { +      return func_(static_cast<::grpc::ServerContext*>(param.server_context), +                   &stream); +    }); + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpServerSendStatus> +        ops; +    if (!param.server_context->sent_initial_metadata_) { +      ops.SendInitialMetadata(¶m.server_context->initial_metadata_, +                              param.server_context->initial_metadata_flags()); +      if (param.server_context->compression_level_set()) { +        ops.set_compression_level(param.server_context->compression_level()); +      } +      if (write_needed_ && status.ok()) { +        // If we needed a write but never did one, we need to mark the +        // status as a fail +        status = ::grpc::Status(::grpc::StatusCode::INTERNAL, +                                "Service did not provide response message"); +      } +    } +    ops.ServerSendStatus(¶m.server_context->trailing_metadata_, status); +    param.call->PerformOps(&ops); +    if (param.server_context->has_pending_ops_) { +      param.call->cq()->Pluck(¶m.server_context->pending_ops_); +    } +    param.call->cq()->Pluck(&ops); +  } + + private: +  std::function<::grpc::Status(::grpc::ServerContext*, Streamer*)> func_; +  const bool write_needed_; +}; + +template <class ServiceType, class RequestType, class ResponseType> +class BidiStreamingHandler +    : public TemplatedBidiStreamingHandler< +          ServerReaderWriter<ResponseType, RequestType>, false> { + public: +  BidiStreamingHandler(std::function<::grpc::Status( +                           ServiceType*, ::grpc::ServerContext*, +                           ServerReaderWriter<ResponseType, RequestType>*)> +                           func, +                       ServiceType* service) +      // TODO(vjpai): When gRPC supports C++14, move-capture func in the below +      : TemplatedBidiStreamingHandler< +            ServerReaderWriter<ResponseType, RequestType>, false>( +            [func, service]( +                ::grpc::ServerContext* ctx, +                ServerReaderWriter<ResponseType, RequestType>* streamer) { +              return func(service, ctx, streamer); +            }) {} +}; + +template <class RequestType, class ResponseType> +class StreamedUnaryHandler +    : public TemplatedBidiStreamingHandler< +          ServerUnaryStreamer<RequestType, ResponseType>, true> { + public: +  explicit StreamedUnaryHandler( +      std::function< +          ::grpc::Status(::grpc::ServerContext*, +                         ServerUnaryStreamer<RequestType, ResponseType>*)> +          func) +      : TemplatedBidiStreamingHandler< +            ServerUnaryStreamer<RequestType, ResponseType>, true>( +            std::move(func)) {} +}; + +template <class RequestType, class ResponseType> +class 
SplitServerStreamingHandler +    : public TemplatedBidiStreamingHandler< +          ServerSplitStreamer<RequestType, ResponseType>, false> { + public: +  explicit SplitServerStreamingHandler( +      std::function< +          ::grpc::Status(::grpc::ServerContext*, +                         ServerSplitStreamer<RequestType, ResponseType>*)> +          func) +      : TemplatedBidiStreamingHandler< +            ServerSplitStreamer<RequestType, ResponseType>, false>( +            std::move(func)) {} +}; + +/// General method handler class for errors that prevent real method use +/// e.g., handle unknown method by returning UNIMPLEMENTED error. +template <::grpc::StatusCode code> +class ErrorMethodHandler : public ::grpc::internal::MethodHandler { + public: +  template <class T> +  static void FillOps(::grpc::ServerContextBase* context, T* ops) { +    ::grpc::Status status(code, ""); +    if (!context->sent_initial_metadata_) { +      ops->SendInitialMetadata(&context->initial_metadata_, +                               context->initial_metadata_flags()); +      if (context->compression_level_set()) { +        ops->set_compression_level(context->compression_level()); +      } +      context->sent_initial_metadata_ = true; +    } +    ops->ServerSendStatus(&context->trailing_metadata_, status); +  } + +  void RunHandler(const HandlerParameter& param) final { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpServerSendStatus> +        ops; +    FillOps(param.server_context, &ops); +    param.call->PerformOps(&ops); +    param.call->cq()->Pluck(&ops); +  } + +  void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req, +                    ::grpc::Status* /*status*/, void** /*handler_data*/) final { +    // We have to destroy any request payload +    if (req != nullptr) { +      ::grpc::g_core_codegen_interface->grpc_byte_buffer_destroy(req); +    } +    return nullptr; +  } +}; + +typedef ErrorMethodHandler<::grpc::StatusCode::UNIMPLEMENTED> +    UnknownMethodHandler; +typedef ErrorMethodHandler<::grpc::StatusCode::RESOURCE_EXHAUSTED> +    ResourceExhaustedHandler; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_METHOD_HANDLER_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_reader.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_reader.h new file mode 100644 index 00000000000..487471290d9 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_reader.h @@ -0,0 +1,149 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_READER_H +#define GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_READER_H + +#include <type_traits> + +#include <grpc/impl/codegen/byte_buffer_reader.h> +#include <grpc/impl/codegen/grpc_types.h> +#include <grpc/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/config_protobuf.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/status.h> + +/// This header provides an object that reads bytes directly from a +/// grpc::ByteBuffer, via the ZeroCopyInputStream interface + +namespace grpc { + +extern CoreCodegenInterface* g_core_codegen_interface; + +/// This is a specialization of the protobuf class ZeroCopyInputStream +/// The principle is to get one chunk of data at a time from the proto layer, +/// with options to backup (re-see some bytes) or skip (forward past some bytes) +/// +/// Read more about ZeroCopyInputStream interface here: +/// https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.zero_copy_stream#ZeroCopyInputStream +class ProtoBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream { + public: +  /// Constructs buffer reader from \a buffer. Will set \a status() to non ok +  /// if \a buffer is invalid (the internal buffer has not been initialized). +  explicit ProtoBufferReader(ByteBuffer* buffer) +      : byte_count_(0), backup_count_(0), status_() { +    /// Implemented through a grpc_byte_buffer_reader which iterates +    /// over the slices that make up a byte buffer +    if (!buffer->Valid() || +        !g_core_codegen_interface->grpc_byte_buffer_reader_init( +            &reader_, buffer->c_buffer())) { +      status_ = Status(StatusCode::INTERNAL, +                       "Couldn't initialize byte buffer reader"); +    } +  } + +  ~ProtoBufferReader() { +    if (status_.ok()) { +      g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_); +    } +  } + +  /// Give the proto library a chunk of data from the stream. The caller +  /// may safely read from data[0, size - 1]. +  bool Next(const void** data, int* size) override { +    if (!status_.ok()) { +      return false; +    } +    /// If we have backed up previously, we need to return the backed-up slice +    if (backup_count_ > 0) { +      *data = GRPC_SLICE_START_PTR(*slice_) + GRPC_SLICE_LENGTH(*slice_) - +              backup_count_; +      GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX); +      *size = (int)backup_count_; +      backup_count_ = 0; +      return true; +    } +    /// Otherwise get the next slice from the byte buffer reader +    if (!g_core_codegen_interface->grpc_byte_buffer_reader_peek(&reader_, +                                                                &slice_)) { +      return false; +    } +    *data = GRPC_SLICE_START_PTR(*slice_); +    // On win x64, int is only 32bit +    GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(*slice_) <= INT_MAX); +    byte_count_ += * size = (int)GRPC_SLICE_LENGTH(*slice_); +    return true; +  } + +  /// Returns the status of the buffer reader. +  Status status() const { return status_; } + +  /// The proto library calls this to indicate that we should back up \a count +  /// bytes that have already been returned by the last call of Next. +  /// So do the backup and have that ready for a later Next. 
+  void BackUp(int count) override { +    GPR_CODEGEN_ASSERT(count <= static_cast<int>(GRPC_SLICE_LENGTH(*slice_))); +    backup_count_ = count; +  } + +  /// The proto library calls this to skip over \a count bytes. Implement this +  /// using Next and BackUp combined. +  bool Skip(int count) override { +    const void* data; +    int size; +    while (Next(&data, &size)) { +      if (size >= count) { +        BackUp(size - count); +        return true; +      } +      // size < count; +      count -= size; +    } +    // error or we have too large count; +    return false; +  } + +  /// Returns the total number of bytes read since this object was created. +  int64_t ByteCount() const override { return byte_count_ - backup_count_; } + +  // These protected members are needed to support internal optimizations. +  // they expose internal bits of grpc core that are NOT stable. If you have +  // a use case needs to use one of these functions, please send an email to +  // https://groups.google.com/forum/#!forum/grpc-io. + protected: +  void set_byte_count(int64_t byte_count) { byte_count_ = byte_count; } +  int64_t backup_count() { return backup_count_; } +  void set_backup_count(int64_t backup_count) { backup_count_ = backup_count; } +  grpc_byte_buffer_reader* reader() { return &reader_; } +  grpc_slice* slice() { return slice_; } +  grpc_slice** mutable_slice_ptr() { return &slice_; } + + private: +  int64_t byte_count_;              ///< total bytes read since object creation +  int64_t backup_count_;            ///< how far backed up in the stream we are +  grpc_byte_buffer_reader reader_;  ///< internal object to read \a grpc_slice +                                    ///< from the \a grpc_byte_buffer +  grpc_slice* slice_;               ///< current slice passed back to the caller +  Status status_;                   ///< status of the entire object +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_READER_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_writer.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_writer.h new file mode 100644 index 00000000000..0af4616e508 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_buffer_writer.h @@ -0,0 +1,167 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_WRITER_H +#define GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_WRITER_H + +#include <type_traits> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpc/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/config_protobuf.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/status.h> + +/// This header provides an object that writes bytes directly into a +/// grpc::ByteBuffer, via the ZeroCopyOutputStream interface + +namespace grpc { + +extern CoreCodegenInterface* g_core_codegen_interface; + +// Forward declaration for testing use only +namespace internal { +class ProtoBufferWriterPeer; +}  // namespace internal + +const int kProtoBufferWriterMaxBufferLength = 1024 * 1024; + +/// This is a specialization of the protobuf class ZeroCopyOutputStream. +/// The principle is to give the proto layer one buffer of bytes at a time +/// that it can use to serialize the next portion of the message, with the +/// option to "backup" if more buffer is given than required at the last buffer. +/// +/// Read more about ZeroCopyOutputStream interface here: +/// https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.io.zero_copy_stream#ZeroCopyOutputStream +class ProtoBufferWriter : public ::grpc::protobuf::io::ZeroCopyOutputStream { + public: +  /// Constructor for this derived class +  /// +  /// \param[out] byte_buffer A pointer to the grpc::ByteBuffer created +  /// \param block_size How big are the chunks to allocate at a time +  /// \param total_size How many total bytes are required for this proto +  ProtoBufferWriter(ByteBuffer* byte_buffer, int block_size, int total_size) +      : block_size_(block_size), +        total_size_(total_size), +        byte_count_(0), +        have_backup_(false) { +    GPR_CODEGEN_ASSERT(!byte_buffer->Valid()); +    /// Create an empty raw byte buffer and look at its underlying slice buffer +    grpc_byte_buffer* bp = +        g_core_codegen_interface->grpc_raw_byte_buffer_create(NULL, 0); +    byte_buffer->set_buffer(bp); +    slice_buffer_ = &bp->data.raw.slice_buffer; +  } + +  ~ProtoBufferWriter() { +    if (have_backup_) { +      g_core_codegen_interface->grpc_slice_unref(backup_slice_); +    } +  } + +  /// Give the proto library the next buffer of bytes and its size. It is +  /// safe for the caller to write from data[0, size - 1]. +  bool Next(void** data, int* size) override { +    // Protobuf should not ask for more memory than total_size_. +    GPR_CODEGEN_ASSERT(byte_count_ < total_size_); +    // 1. Use the remaining backup slice if we have one +    // 2. Otherwise allocate a slice, up to the remaining length needed +    //    or our maximum allocation size +    // 3. Provide the slice start and size available +    // 4. Add the slice being returned to the slice buffer +    size_t remain = static_cast<size_t>(total_size_ - byte_count_); +    if (have_backup_) { +      /// If we have a backup slice, we should use it first +      slice_ = backup_slice_; +      have_backup_ = false; +      if (GRPC_SLICE_LENGTH(slice_) > remain) { +        GRPC_SLICE_SET_LENGTH(slice_, remain); +      } +    } else { +      // When less than a whole block is needed, only allocate that much. +      // But make sure the allocated slice is not inlined. +      size_t allocate_length = +          remain > static_cast<size_t>(block_size_) ? 
block_size_ : remain; +      slice_ = g_core_codegen_interface->grpc_slice_malloc( +          allocate_length > GRPC_SLICE_INLINED_SIZE +              ? allocate_length +              : GRPC_SLICE_INLINED_SIZE + 1); +    } +    *data = GRPC_SLICE_START_PTR(slice_); +    // On win x64, int is only 32bit +    GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(slice_) <= INT_MAX); +    byte_count_ += * size = (int)GRPC_SLICE_LENGTH(slice_); +    g_core_codegen_interface->grpc_slice_buffer_add(slice_buffer_, slice_); +    return true; +  } + +  /// Backup by \a count bytes because Next returned more bytes than needed +  /// (only used in the last buffer). \a count must be less than or equal too +  /// the last buffer returned from next. +  void BackUp(int count) override { +    /// 1. Remove the partially-used last slice from the slice buffer +    /// 2. Split it into the needed (if any) and unneeded part +    /// 3. Add the needed part back to the slice buffer +    /// 4. Mark that we still have the remaining part (for later use/unref) +    GPR_CODEGEN_ASSERT(count <= static_cast<int>(GRPC_SLICE_LENGTH(slice_))); +    g_core_codegen_interface->grpc_slice_buffer_pop(slice_buffer_); +    if ((size_t)count == GRPC_SLICE_LENGTH(slice_)) { +      backup_slice_ = slice_; +    } else { +      backup_slice_ = g_core_codegen_interface->grpc_slice_split_tail( +          &slice_, GRPC_SLICE_LENGTH(slice_) - count); +      g_core_codegen_interface->grpc_slice_buffer_add(slice_buffer_, slice_); +    } +    // It's dangerous to keep an inlined grpc_slice as the backup slice, since +    // on a following Next() call, a reference will be returned to this slice +    // via GRPC_SLICE_START_PTR, which will not be an address held by +    // slice_buffer_. +    have_backup_ = backup_slice_.refcount != NULL; +    byte_count_ -= count; +  } + +  /// Returns the total number of bytes written since this object was created. +  int64_t ByteCount() const override { return byte_count_; } + +  // These protected members are needed to support internal optimizations. +  // they expose internal bits of grpc core that are NOT stable. If you have +  // a use case needs to use one of these functions, please send an email to +  // https://groups.google.com/forum/#!forum/grpc-io. + protected: +  grpc_slice_buffer* slice_buffer() { return slice_buffer_; } +  void set_byte_count(int64_t byte_count) { byte_count_ = byte_count; } + + private: +  // friend for testing purposes only +  friend class internal::ProtoBufferWriterPeer; +  const int block_size_;  ///< size to alloc for each new \a grpc_slice needed +  const int total_size_;  ///< byte size of proto being serialized +  int64_t byte_count_;    ///< bytes written since this object was created +  grpc_slice_buffer* +      slice_buffer_;  ///< internal buffer of slices holding the serialized data +  bool have_backup_;  ///< if we are holding a backup slice or not +  grpc_slice backup_slice_;  ///< holds space we can still write to, if the +                             ///< caller has called BackUp +  grpc_slice slice_;         ///< current slice passed back to the caller +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_PROTO_BUFFER_WRITER_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h new file mode 100644 index 00000000000..2e102135a36 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/proto_utils.h @@ -0,0 +1,119 @@ +/* + * + * Copyright 2015 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_PROTO_UTILS_H +#define GRPCPP_IMPL_CODEGEN_PROTO_UTILS_H + +#include <type_traits> + +#include <grpc/impl/codegen/byte_buffer_reader.h> +#include <grpc/impl/codegen/grpc_types.h> +#include <grpc/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/config_protobuf.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/proto_buffer_reader.h> +#include <grpcpp/impl/codegen/proto_buffer_writer.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/slice.h> +#include <grpcpp/impl/codegen/status.h> + +/// This header provides serialization and deserialization between gRPC +/// messages serialized using protobuf and the C++ objects they represent. + +namespace grpc { + +extern CoreCodegenInterface* g_core_codegen_interface; + +// ProtoBufferWriter must be a subclass of ::protobuf::io::ZeroCopyOutputStream. +template <class ProtoBufferWriter, class T> +Status GenericSerialize(const grpc::protobuf::MessageLite& msg, ByteBuffer* bb, +                        bool* own_buffer) { +  static_assert(std::is_base_of<protobuf::io::ZeroCopyOutputStream, +                                ProtoBufferWriter>::value, +                "ProtoBufferWriter must be a subclass of " +                "::protobuf::io::ZeroCopyOutputStream"); +  *own_buffer = true; +  int byte_size = msg.ByteSizeLong(); +  if ((size_t)byte_size <= GRPC_SLICE_INLINED_SIZE) { +    Slice slice(byte_size); +    // We serialize directly into the allocated slices memory +    GPR_CODEGEN_ASSERT(slice.end() == msg.SerializeWithCachedSizesToArray( +                                          const_cast<uint8_t*>(slice.begin()))); +    ByteBuffer tmp(&slice, 1); +    bb->Swap(&tmp); + +    return g_core_codegen_interface->ok(); +  } +  ProtoBufferWriter writer(bb, kProtoBufferWriterMaxBufferLength, byte_size); +  return msg.SerializeToZeroCopyStream(&writer) +             ? g_core_codegen_interface->ok() +             : Status(StatusCode::INTERNAL, "Failed to serialize message"); +} + +// BufferReader must be a subclass of ::protobuf::io::ZeroCopyInputStream. 
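GenericSerialize above and the GenericDeserialize template that follows are, in effect, the round trip sketched here. Application code rarely touches these streams directly, but spelling the flow out shows how ProtoBufferWriter and ProtoBufferReader fit together; MyProto stands in for any protobuf-generated message (an assumption), and the grpcpp/support include paths are the public counterparts of these codegen headers.

#include <grpcpp/support/byte_buffer.h>
#include <grpcpp/support/proto_buffer_reader.h>
#include <grpcpp/support/proto_buffer_writer.h>

// MyProto is any protobuf-generated message type (assumed, not part of gRPC).
grpc::Status RoundTrip(const MyProto& in, MyProto* out) {
  grpc::ByteBuffer buffer;  // starts out !Valid(), as ProtoBufferWriter requires
  grpc::ProtoBufferWriter writer(&buffer, grpc::kProtoBufferWriterMaxBufferLength,
                                 static_cast<int>(in.ByteSizeLong()));
  if (!in.SerializeToZeroCopyStream(&writer)) {
    return grpc::Status(grpc::StatusCode::INTERNAL, "serialization failed");
  }
  grpc::ProtoBufferReader reader(&buffer);
  if (!reader.status().ok()) {
    return reader.status();  // the byte buffer could not be read
  }
  if (!out->ParseFromZeroCopyStream(&reader)) {
    return grpc::Status(grpc::StatusCode::INTERNAL, "parse failed");
  }
  return grpc::Status::OK;
}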
+template <class ProtoBufferReader, class T> +Status GenericDeserialize(ByteBuffer* buffer, +                          grpc::protobuf::MessageLite* msg) { +  static_assert(std::is_base_of<protobuf::io::ZeroCopyInputStream, +                                ProtoBufferReader>::value, +                "ProtoBufferReader must be a subclass of " +                "::protobuf::io::ZeroCopyInputStream"); +  if (buffer == nullptr) { +    return Status(StatusCode::INTERNAL, "No payload"); +  } +  Status result = g_core_codegen_interface->ok(); +  { +    ProtoBufferReader reader(buffer); +    if (!reader.status().ok()) { +      return reader.status(); +    } +    if (!msg->ParseFromZeroCopyStream(&reader)) { +      result = Status(StatusCode::INTERNAL, msg->InitializationErrorString()); +    } +  } +  buffer->Clear(); +  return result; +} + +// this is needed so the following class does not conflict with protobuf +// serializers that utilize internal-only tools. +#ifdef GRPC_OPEN_SOURCE_PROTO +// This class provides a protobuf serializer. It translates between protobuf +// objects and grpc_byte_buffers. More information about SerializationTraits can +// be found in include/grpcpp/impl/codegen/serialization_traits.h. +template <class T> +class SerializationTraits< +    T, typename std::enable_if< +           std::is_base_of<grpc::protobuf::MessageLite, T>::value>::type> { + public: +  static Status Serialize(const grpc::protobuf::MessageLite& msg, +                          ByteBuffer* bb, bool* own_buffer) { +    return GenericSerialize<ProtoBufferWriter, T>(msg, bb, own_buffer); +  } + +  static Status Deserialize(ByteBuffer* buffer, +                            grpc::protobuf::MessageLite* msg) { +    return GenericDeserialize<ProtoBufferReader, T>(buffer, msg); +  } +}; +#endif + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_PROTO_UTILS_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_method.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_method.h new file mode 100644 index 00000000000..9dcde954f1d --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_method.h @@ -0,0 +1,61 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_RPC_METHOD_H +#define GRPCPP_IMPL_CODEGEN_RPC_METHOD_H + +#include <memory> + +#include <grpcpp/impl/codegen/channel_interface.h> + +namespace grpc { +namespace internal { +/// Descriptor of an RPC method +class RpcMethod { + public: +  enum RpcType { +    NORMAL_RPC = 0, +    CLIENT_STREAMING,  // request streaming +    SERVER_STREAMING,  // response streaming +    BIDI_STREAMING +  }; + +  RpcMethod(const char* name, RpcType type) +      : name_(name), method_type_(type), channel_tag_(NULL) {} + +  RpcMethod(const char* name, RpcType type, +            const std::shared_ptr<ChannelInterface>& channel) +      : name_(name), +        method_type_(type), +        channel_tag_(channel->RegisterMethod(name)) {} + +  const char* name() const { return name_; } +  RpcType method_type() const { return method_type_; } +  void SetMethodType(RpcType type) { method_type_ = type; } +  void* channel_tag() const { return channel_tag_; } + + private: +  const char* const name_; +  RpcType method_type_; +  void* const channel_tag_; +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_RPC_METHOD_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h new file mode 100644 index 00000000000..4fcc2112435 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/rpc_service_method.h @@ -0,0 +1,153 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_RPC_SERVICE_METHOD_H +#define GRPCPP_IMPL_CODEGEN_RPC_SERVICE_METHOD_H + +#include <climits> +#include <functional> +#include <map> +#include <memory> +#include <vector> + +#include <grpc/impl/codegen/log.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/rpc_method.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { +class ServerContextBase; +namespace internal { +/// Base class for running an RPC handler. +class MethodHandler { + public: +  virtual ~MethodHandler() {} +  struct HandlerParameter { +    /// Constructor for HandlerParameter +    /// +    /// \param c : the gRPC Call structure for this server call +    /// \param context : the ServerContext structure for this server call +    /// \param req : the request payload, if appropriate for this RPC +    /// \param req_status : the request status after any interceptors have run +    /// \param handler_data: internal data for the handler. +    /// \param requester : used only by the callback API. 
It is a function +    ///        called by the RPC Controller to request another RPC (and also +    ///        to set up the state required to make that request possible) +    HandlerParameter(Call* c, ::grpc::ServerContextBase* context, void* req, +                     Status req_status, void* handler_data, +                     std::function<void()> requester) +        : call(c), +          server_context(context), +          request(req), +          status(req_status), +          internal_data(handler_data), +          call_requester(std::move(requester)) {} +    ~HandlerParameter() {} +    Call* const call; +    ::grpc::ServerContextBase* const server_context; +    void* const request; +    const Status status; +    void* const internal_data; +    const std::function<void()> call_requester; +  }; +  virtual void RunHandler(const HandlerParameter& param) = 0; + +  /* Returns a pointer to the deserialized request. \a status reflects the +     result of deserialization. This pointer and the status should be filled in +     a HandlerParameter and passed to RunHandler. It is illegal to access the +     pointer after calling RunHandler. Ownership of the deserialized request is +     retained by the handler. Returns nullptr if deserialization failed. */ +  virtual void* Deserialize(grpc_call* /*call*/, grpc_byte_buffer* req, +                            Status* /*status*/, void** /*handler_data*/) { +    GPR_CODEGEN_ASSERT(req == nullptr); +    return nullptr; +  } +}; + +/// Server side rpc method class +class RpcServiceMethod : public RpcMethod { + public: +  /// Takes ownership of the handler +  RpcServiceMethod(const char* name, RpcMethod::RpcType type, +                   MethodHandler* handler) +      : RpcMethod(name, type), +        server_tag_(nullptr), +        api_type_(ApiType::SYNC), +        handler_(handler) {} + +  enum class ApiType { +    SYNC, +    ASYNC, +    RAW, +    CALL_BACK,  // not CALLBACK because that is reserved in Windows +    RAW_CALL_BACK, +  }; + +  void set_server_tag(void* tag) { server_tag_ = tag; } +  void* server_tag() const { return server_tag_; } +  /// if MethodHandler is nullptr, then this is an async method +  MethodHandler* handler() const { return handler_.get(); } +  ApiType api_type() const { return api_type_; } +  void SetHandler(MethodHandler* handler) { handler_.reset(handler); } +  void SetServerApiType(RpcServiceMethod::ApiType type) { +    if ((api_type_ == ApiType::SYNC) && +        (type == ApiType::ASYNC || type == ApiType::RAW)) { +      // this marks this method as async +      handler_.reset(); +    } else if (api_type_ != ApiType::SYNC) { +      // this is not an error condition, as it allows users to declare a server +      // like WithRawMethod_foo<AsyncService>. However since it +      // overwrites behavior, it should be logged. +      gpr_log( +          GPR_INFO, +          "You are marking method %s as '%s', even though it was " +          "previously marked '%s'. This behavior will overwrite the original " +          "behavior. 
If you expected this then ignore this message.", +          name(), TypeToString(api_type_), TypeToString(type)); +    } +    api_type_ = type; +  } + + private: +  void* server_tag_; +  ApiType api_type_; +  std::unique_ptr<MethodHandler> handler_; + +  const char* TypeToString(RpcServiceMethod::ApiType type) { +    switch (type) { +      case ApiType::SYNC: +        return "sync"; +      case ApiType::ASYNC: +        return "async"; +      case ApiType::RAW: +        return "raw"; +      case ApiType::CALL_BACK: +        return "callback"; +      case ApiType::RAW_CALL_BACK: +        return "raw_callback"; +      default: +        GPR_UNREACHABLE_CODE(return "unknown"); +    } +  } +}; +}  // namespace internal + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_RPC_SERVICE_METHOD_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h new file mode 100644 index 00000000000..220b78f2ebf --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/security/auth_context.h @@ -0,0 +1,94 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SECURITY_AUTH_CONTEXT_H +#define GRPCPP_IMPL_CODEGEN_SECURITY_AUTH_CONTEXT_H + +#include <iterator> +#include <vector> + +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/string_ref.h> + +struct grpc_auth_context; +struct grpc_auth_property; +struct grpc_auth_property_iterator; + +namespace grpc { +class SecureAuthContext; + +typedef std::pair<string_ref, string_ref> AuthProperty; + +class AuthPropertyIterator +    : public std::iterator<std::input_iterator_tag, const AuthProperty> { + public: +  ~AuthPropertyIterator(); +  AuthPropertyIterator& operator++(); +  AuthPropertyIterator operator++(int); +  bool operator==(const AuthPropertyIterator& rhs) const; +  bool operator!=(const AuthPropertyIterator& rhs) const; +  const AuthProperty operator*(); + + protected: +  AuthPropertyIterator(); +  AuthPropertyIterator(const grpc_auth_property* property, +                       const grpc_auth_property_iterator* iter); + + private: +  friend class SecureAuthContext; +  const grpc_auth_property* property_; +  // The following items form a grpc_auth_property_iterator. +  const grpc_auth_context* ctx_; +  size_t index_; +  const char* name_; +}; + +/// Class encapsulating the Authentication Information. +/// +/// It includes the secure identity of the peer, the type of secure transport +/// used as well as any other properties required by the authorization layer. +class AuthContext { + public: +  virtual ~AuthContext() {} + +  /// Returns true if the peer is authenticated. +  virtual bool IsPeerAuthenticated() const = 0; + +  /// A peer identity. +  /// +  /// It is, in general, comprised of one or more properties (in which case they +  /// have the same name). 
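As a usage sketch for the accessors declared next: a handler can pull the AuthContext off its ServerContext and gate the call on the peer identity. The auth_context() accessor lives on ServerContext rather than in this header, and the SPIFFE id below is purely illustrative.

#include <memory>
#include <vector>
#include <grpcpp/server_context.h>

// Returns true only for an authenticated peer with the expected identity.
bool PeerIsExpected(grpc::ServerContext* ctx) {
  std::shared_ptr<const grpc::AuthContext> auth = ctx->auth_context();
  if (auth == nullptr || !auth->IsPeerAuthenticated()) {
    return false;
  }
  std::vector<grpc::string_ref> identity = auth->GetPeerIdentity();
  return !identity.empty() &&
         identity.front() == "spiffe://example.org/expected-service";
}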
+  virtual std::vector<grpc::string_ref> GetPeerIdentity() const = 0; +  virtual TString GetPeerIdentityPropertyName() const = 0; + +  /// Returns all the property values with the given name. +  virtual std::vector<grpc::string_ref> FindPropertyValues( +      const TString& name) const = 0; + +  /// Iteration over all the properties. +  virtual AuthPropertyIterator begin() const = 0; +  virtual AuthPropertyIterator end() const = 0; + +  /// Mutation functions: should only be used by an AuthMetadataProcessor. +  virtual void AddProperty(const TString& key, const string_ref& value) = 0; +  virtual bool SetPeerIdentityPropertyName(const string& name) = 0; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SECURITY_AUTH_CONTEXT_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/serialization_traits.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/serialization_traits.h new file mode 100644 index 00000000000..8f792232909 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/serialization_traits.h @@ -0,0 +1,62 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERIALIZATION_TRAITS_H +#define GRPCPP_IMPL_CODEGEN_SERIALIZATION_TRAITS_H + +namespace grpc { + +/// Defines how to serialize and deserialize some type. +/// +/// Used for hooking different message serialization API's into GRPC. +/// Each SerializationTraits<Message> implementation must provide the +/// following functions: +/// 1.  static Status Serialize(const Message& msg, +///                             ByteBuffer* buffer, +///                             bool* own_buffer); +///     OR +///     static Status Serialize(const Message& msg, +///                             grpc_byte_buffer** buffer, +///                             bool* own_buffer); +///     The former is preferred; the latter is deprecated +/// +/// 2.  static Status Deserialize(ByteBuffer* buffer, +///                               Message* msg); +///     OR +///     static Status Deserialize(grpc_byte_buffer* buffer, +///                               Message* msg); +///     The former is preferred; the latter is deprecated +/// +/// Serialize is required to convert message to a ByteBuffer, and +/// return that byte buffer through *buffer. *own_buffer should +/// be set to true if the caller owns said byte buffer, or false if +/// ownership is retained elsewhere. +/// +/// Deserialize is required to convert buffer into the message stored at +/// msg. max_receive_message_size is passed in as a bound on the maximum +/// number of message bytes Deserialize should accept. +/// +/// Both functions return a Status, allowing them to explain what went +/// wrong if required. 
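+///
+/// A minimal sketch of a possible specialization (illustrative only; `MyMsg`
+/// and its `SerializeToTString`/`ParseFromTString` helpers are hypothetical,
+/// not part of gRPC):
+///
+/// \code
+///   template <>
+///   class SerializationTraits<MyMsg> {
+///    public:
+///     static grpc::Status Serialize(const MyMsg& msg, grpc::ByteBuffer* bb,
+///                                   bool* own_buffer) {
+///       *own_buffer = true;
+///       grpc::Slice slice(msg.SerializeToTString());
+///       *bb = grpc::ByteBuffer(&slice, 1);
+///       return grpc::Status::OK;
+///     }
+///     static grpc::Status Deserialize(grpc::ByteBuffer* bb, MyMsg* msg) {
+///       grpc::Slice slice;
+///       if (!bb->DumpToSingleSlice(&slice).ok()) {
+///         return grpc::Status(grpc::StatusCode::INTERNAL, "corrupt payload");
+///       }
+///       return msg->ParseFromTString(TString(
+///                  reinterpret_cast<const char*>(slice.begin()), slice.size()))
+///                  ? grpc::Status::OK
+///                  : grpc::Status(grpc::StatusCode::INTERNAL, "parse failure");
+///     }
+///   };
+/// \endcode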
+template <class Message, +          class UnusedButHereForPartialTemplateSpecialization = void> +class SerializationTraits; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERIALIZATION_TRAITS_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h new file mode 100644 index 00000000000..3794a9ffa79 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback.h @@ -0,0 +1,794 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H +#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H + +#include <atomic> +#include <functional> +#include <type_traits> + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_op_set.h> +#include <grpcpp/impl/codegen/callback_common.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/message_allocator.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +// Declare base class of all reactors as internal +namespace internal { + +// Forward declarations +template <class Request, class Response> +class CallbackUnaryHandler; +template <class Request, class Response> +class CallbackClientStreamingHandler; +template <class Request, class Response> +class CallbackServerStreamingHandler; +template <class Request, class Response> +class CallbackBidiHandler; + +class ServerReactor { + public: +  virtual ~ServerReactor() = default; +  virtual void OnDone() = 0; +  virtual void OnCancel() = 0; + +  // The following is not API. It is for internal use only and specifies whether +  // all reactions of this Reactor can be run without an extra executor +  // scheduling. This should only be used for internally-defined reactors with +  // trivial reactions. +  virtual bool InternalInlineable() { return false; } + + private: +  template <class Request, class Response> +  friend class CallbackUnaryHandler; +  template <class Request, class Response> +  friend class CallbackClientStreamingHandler; +  template <class Request, class Response> +  friend class CallbackServerStreamingHandler; +  template <class Request, class Response> +  friend class CallbackBidiHandler; +}; + +/// The base class of ServerCallbackUnary etc. +class ServerCallbackCall { + public: +  virtual ~ServerCallbackCall() {} + +  // This object is responsible for tracking when it is safe to call OnDone and +  // OnCancel. OnDone should not be called until the method handler is complete, +  // Finish has been called, the ServerContext CompletionOp (which tracks +  // cancellation or successful completion) has completed, and all outstanding +  // Read/Write actions have seen their reactions. OnCancel should not be called +  // until after the method handler is done and the RPC has completed with a +  // cancellation. 
This is tracked by counting how many of these conditions have +  // been met and calling OnCancel when none remain unmet. + +  // Public versions of MaybeDone: one where we don't know the reactor in +  // advance (used for the ServerContext CompletionOp), and one for where we +  // know the inlineability of the OnDone reaction. You should set the inline +  // flag to true if either the Reactor is InternalInlineable() or if this +  // callback is already being forced to run dispatched to an executor +  // (typically because it contains additional work than just the MaybeDone). + +  void MaybeDone() { +    if (GPR_UNLIKELY(Unref() == 1)) { +      ScheduleOnDone(reactor()->InternalInlineable()); +    } +  } + +  void MaybeDone(bool inline_ondone) { +    if (GPR_UNLIKELY(Unref() == 1)) { +      ScheduleOnDone(inline_ondone); +    } +  } + +  // Fast version called with known reactor passed in, used from derived +  // classes, typically in non-cancel case +  void MaybeCallOnCancel(ServerReactor* reactor) { +    if (GPR_UNLIKELY(UnblockCancellation())) { +      CallOnCancel(reactor); +    } +  } + +  // Slower version called from object that doesn't know the reactor a priori +  // (such as the ServerContext CompletionOp which is formed before the +  // reactor). This is used in cancel cases only, so it's ok to be slower and +  // invoke a virtual function. +  void MaybeCallOnCancel() { +    if (GPR_UNLIKELY(UnblockCancellation())) { +      CallOnCancel(reactor()); +    } +  } + + protected: +  /// Increases the reference count +  void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); } + + private: +  virtual ServerReactor* reactor() = 0; + +  // CallOnDone performs the work required at completion of the RPC: invoking +  // the OnDone function and doing all necessary cleanup. This function is only +  // ever invoked on a fully-Unref'fed ServerCallbackCall. +  virtual void CallOnDone() = 0; + +  // If the OnDone reaction is inlineable, execute it inline. Otherwise send it +  // to an executor. +  void ScheduleOnDone(bool inline_ondone); + +  // If the OnCancel reaction is inlineable, execute it inline. Otherwise send +  // it to an executor. +  void CallOnCancel(ServerReactor* reactor); + +  // Implement the cancellation constraint counter. Return true if OnCancel +  // should be called, false otherwise. +  bool UnblockCancellation() { +    return on_cancel_conditions_remaining_.fetch_sub( +               1, std::memory_order_acq_rel) == 1; +  } + +  /// Decreases the reference count and returns the previous value +  int Unref() { +    return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel); +  } + +  std::atomic_int on_cancel_conditions_remaining_{2}; +  std::atomic_int callbacks_outstanding_{ +      3};  // reserve for start, Finish, and CompletionOp +}; + +template <class Request, class Response> +class DefaultMessageHolder +    : public ::grpc::experimental::MessageHolder<Request, Response> { + public: +  DefaultMessageHolder() { +    this->set_request(&request_obj_); +    this->set_response(&response_obj_); +  } +  void Release() override { +    // the object is allocated in the call arena. 
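+    // Only the destructor needs to run here: the storage itself is reclaimed
+    // when the call's arena is destroyed, so no delete is required.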
+    this->~DefaultMessageHolder<Request, Response>(); +  } + + private: +  Request request_obj_; +  Response response_obj_; +}; + +}  // namespace internal + +// Forward declarations +class ServerUnaryReactor; +template <class Request> +class ServerReadReactor; +template <class Response> +class ServerWriteReactor; +template <class Request, class Response> +class ServerBidiReactor; + +// NOTE: The actual call/stream object classes are provided as API only to +// support mocking. There are no implementations of these class interfaces in +// the API. +class ServerCallbackUnary : public internal::ServerCallbackCall { + public: +  virtual ~ServerCallbackUnary() {} +  virtual void Finish(::grpc::Status s) = 0; +  virtual void SendInitialMetadata() = 0; + + protected: +  // Use a template rather than explicitly specifying ServerUnaryReactor to +  // delay binding and avoid a circular forward declaration issue +  template <class Reactor> +  void BindReactor(Reactor* reactor) { +    reactor->InternalBindCall(this); +  } +}; + +template <class Request> +class ServerCallbackReader : public internal::ServerCallbackCall { + public: +  virtual ~ServerCallbackReader() {} +  virtual void Finish(::grpc::Status s) = 0; +  virtual void SendInitialMetadata() = 0; +  virtual void Read(Request* msg) = 0; + + protected: +  void BindReactor(ServerReadReactor<Request>* reactor) { +    reactor->InternalBindReader(this); +  } +}; + +template <class Response> +class ServerCallbackWriter : public internal::ServerCallbackCall { + public: +  virtual ~ServerCallbackWriter() {} + +  virtual void Finish(::grpc::Status s) = 0; +  virtual void SendInitialMetadata() = 0; +  virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0; +  virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options, +                              ::grpc::Status s) = 0; + + protected: +  void BindReactor(ServerWriteReactor<Response>* reactor) { +    reactor->InternalBindWriter(this); +  } +}; + +template <class Request, class Response> +class ServerCallbackReaderWriter : public internal::ServerCallbackCall { + public: +  virtual ~ServerCallbackReaderWriter() {} + +  virtual void Finish(::grpc::Status s) = 0; +  virtual void SendInitialMetadata() = 0; +  virtual void Read(Request* msg) = 0; +  virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0; +  virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options, +                              ::grpc::Status s) = 0; + + protected: +  void BindReactor(ServerBidiReactor<Request, Response>* reactor) { +    reactor->InternalBindStream(this); +  } +}; + +// The following classes are the reactor interfaces that are to be implemented +// by the user, returned as the output parameter of the method handler for a +// callback method. Note that none of the classes are pure; all reactions have a +// default empty reaction so that the user class only needs to override those +// classes that it cares about. + +/// \a ServerBidiReactor is the interface for a bidirectional streaming RPC. +template <class Request, class Response> +class ServerBidiReactor : public internal::ServerReactor { + public: +  // NOTE: Initializing stream_ as a constructor initializer rather than a +  //       default initializer because gcc-4.x requires a copy constructor for +  //       default initializing a templated member, which isn't ok for atomic. 
+  // TODO(vjpai): Switch to default constructor and default initializer when +  //              gcc-4.x is no longer supported +  ServerBidiReactor() : stream_(nullptr) {} +  ~ServerBidiReactor() = default; + +  /// Send any initial metadata stored in the RPC context. If not invoked, +  /// any initial metadata will be passed along with the first Write or the +  /// Finish (if there are no writes). +  void StartSendInitialMetadata() { +    ServerCallbackReaderWriter<Request, Response>* stream = +        stream_.load(std::memory_order_acquire); +    if (stream == nullptr) { +      grpc::internal::MutexLock l(&stream_mu_); +      stream = stream_.load(std::memory_order_relaxed); +      if (stream == nullptr) { +        backlog_.send_initial_metadata_wanted = true; +        return; +      } +    } +    stream->SendInitialMetadata(); +  } + +  /// Initiate a read operation. +  /// +  /// \param[out] req Where to eventually store the read message. Valid when +  ///                 the library calls OnReadDone +  void StartRead(Request* req) { +    ServerCallbackReaderWriter<Request, Response>* stream = +        stream_.load(std::memory_order_acquire); +    if (stream == nullptr) { +      grpc::internal::MutexLock l(&stream_mu_); +      stream = stream_.load(std::memory_order_relaxed); +      if (stream == nullptr) { +        backlog_.read_wanted = req; +        return; +      } +    } +    stream->Read(req); +  } + +  /// Initiate a write operation. +  /// +  /// \param[in] resp The message to be written. The library does not take +  ///                 ownership but the caller must ensure that the message is +  ///                 not deleted or modified until OnWriteDone is called. +  void StartWrite(const Response* resp) { +    StartWrite(resp, ::grpc::WriteOptions()); +  } + +  /// Initiate a write operation with specified options. +  /// +  /// \param[in] resp The message to be written. The library does not take +  ///                 ownership but the caller must ensure that the message is +  ///                 not deleted or modified until OnWriteDone is called. +  /// \param[in] options The WriteOptions to use for writing this message +  void StartWrite(const Response* resp, ::grpc::WriteOptions options) { +    ServerCallbackReaderWriter<Request, Response>* stream = +        stream_.load(std::memory_order_acquire); +    if (stream == nullptr) { +      grpc::internal::MutexLock l(&stream_mu_); +      stream = stream_.load(std::memory_order_relaxed); +      if (stream == nullptr) { +        backlog_.write_wanted = resp; +        backlog_.write_options_wanted = std::move(options); +        return; +      } +    } +    stream->Write(resp, std::move(options)); +  } + +  /// Initiate a write operation with specified options and final RPC Status, +  /// which also causes any trailing metadata for this RPC to be sent out. +  /// StartWriteAndFinish is like merging StartWriteLast and Finish into a +  /// single step. A key difference, though, is that this operation doesn't have +  /// an OnWriteDone reaction - it is considered complete only when OnDone is +  /// available. An RPC can either have StartWriteAndFinish or Finish, but not +  /// both. +  /// +  /// \param[in] resp The message to be written. The library does not take +  ///                 ownership but the caller must ensure that the message is +  ///                 not deleted or modified until OnDone is called. 
+  /// \param[in] options The WriteOptions to use for writing this message +  /// \param[in] s The status outcome of this RPC +  void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options, +                           ::grpc::Status s) { +    ServerCallbackReaderWriter<Request, Response>* stream = +        stream_.load(std::memory_order_acquire); +    if (stream == nullptr) { +      grpc::internal::MutexLock l(&stream_mu_); +      stream = stream_.load(std::memory_order_relaxed); +      if (stream == nullptr) { +        backlog_.write_and_finish_wanted = true; +        backlog_.write_wanted = resp; +        backlog_.write_options_wanted = std::move(options); +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    stream->WriteAndFinish(resp, std::move(options), std::move(s)); +  } + +  /// Inform system of a planned write operation with specified options, but +  /// allow the library to schedule the actual write coalesced with the writing +  /// of trailing metadata (which takes place on a Finish call). +  /// +  /// \param[in] resp The message to be written. The library does not take +  ///                 ownership but the caller must ensure that the message is +  ///                 not deleted or modified until OnWriteDone is called. +  /// \param[in] options The WriteOptions to use for writing this message +  void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) { +    StartWrite(resp, std::move(options.set_last_message())); +  } + +  /// Indicate that the stream is to be finished and the trailing metadata and +  /// RPC status are to be sent. Every RPC MUST be finished using either Finish +  /// or StartWriteAndFinish (but not both), even if the RPC is already +  /// cancelled. +  /// +  /// \param[in] s The status outcome of this RPC +  void Finish(::grpc::Status s) { +    ServerCallbackReaderWriter<Request, Response>* stream = +        stream_.load(std::memory_order_acquire); +    if (stream == nullptr) { +      grpc::internal::MutexLock l(&stream_mu_); +      stream = stream_.load(std::memory_order_relaxed); +      if (stream == nullptr) { +        backlog_.finish_wanted = true; +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    stream->Finish(std::move(s)); +  } + +  /// Notifies the application that an explicit StartSendInitialMetadata +  /// operation completed. Not used when the sending of initial metadata +  /// piggybacks onto the first write. +  /// +  /// \param[in] ok Was it successful? If false, no further write-side operation +  ///               will succeed. +  virtual void OnSendInitialMetadataDone(bool /*ok*/) {} + +  /// Notifies the application that a StartRead operation completed. +  /// +  /// \param[in] ok Was it successful? If false, no further read-side operation +  ///               will succeed. +  virtual void OnReadDone(bool /*ok*/) {} + +  /// Notifies the application that a StartWrite (or StartWriteLast) operation +  /// completed. +  /// +  /// \param[in] ok Was it successful? If false, no further write-side operation +  ///               will succeed. +  virtual void OnWriteDone(bool /*ok*/) {} + +  /// Notifies the application that all operations associated with this RPC +  /// have completed. This is an override (from the internal base class) but +  /// still abstract, so derived classes MUST override it to be instantiated. +  void OnDone() override = 0; + +  /// Notifies the application that this RPC has been cancelled. 
This is an +  /// override (from the internal base class) but not final, so derived classes +  /// should override it if they want to take action. +  void OnCancel() override {} + + private: +  friend class ServerCallbackReaderWriter<Request, Response>; +  // May be overridden by internal implementation details. This is not a public +  // customization point. +  virtual void InternalBindStream( +      ServerCallbackReaderWriter<Request, Response>* stream) { +    grpc::internal::MutexLock l(&stream_mu_); + +    if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) { +      stream->SendInitialMetadata(); +    } +    if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) { +      stream->Read(backlog_.read_wanted); +    } +    if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) { +      stream->WriteAndFinish(backlog_.write_wanted, +                             std::move(backlog_.write_options_wanted), +                             std::move(backlog_.status_wanted)); +    } else { +      if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) { +        stream->Write(backlog_.write_wanted, +                      std::move(backlog_.write_options_wanted)); +      } +      if (GPR_UNLIKELY(backlog_.finish_wanted)) { +        stream->Finish(std::move(backlog_.status_wanted)); +      } +    } +    // Set stream_ last so that other functions can use it lock-free +    stream_.store(stream, std::memory_order_release); +  } + +  grpc::internal::Mutex stream_mu_; +  // TODO(vjpai): Make stream_or_backlog_ into a std::variant or y_absl::variant +  //              once C++17 or ABSL is supported since stream and backlog are +  //              mutually exclusive in this class. Do likewise with the +  //              remaining reactor classes and their backlogs as well. +  std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_{nullptr}; +  struct PreBindBacklog { +    bool send_initial_metadata_wanted = false; +    bool write_and_finish_wanted = false; +    bool finish_wanted = false; +    Request* read_wanted = nullptr; +    const Response* write_wanted = nullptr; +    ::grpc::WriteOptions write_options_wanted; +    ::grpc::Status status_wanted; +  }; +  PreBindBacklog backlog_ /* GUARDED_BY(stream_mu_) */; +}; + +/// \a ServerReadReactor is the interface for a client-streaming RPC. +template <class Request> +class ServerReadReactor : public internal::ServerReactor { + public: +  ServerReadReactor() : reader_(nullptr) {} +  ~ServerReadReactor() = default; + +  /// The following operation initiations are exactly like ServerBidiReactor. 
+  void StartSendInitialMetadata() { +    ServerCallbackReader<Request>* reader = +        reader_.load(std::memory_order_acquire); +    if (reader == nullptr) { +      grpc::internal::MutexLock l(&reader_mu_); +      reader = reader_.load(std::memory_order_relaxed); +      if (reader == nullptr) { +        backlog_.send_initial_metadata_wanted = true; +        return; +      } +    } +    reader->SendInitialMetadata(); +  } +  void StartRead(Request* req) { +    ServerCallbackReader<Request>* reader = +        reader_.load(std::memory_order_acquire); +    if (reader == nullptr) { +      grpc::internal::MutexLock l(&reader_mu_); +      reader = reader_.load(std::memory_order_relaxed); +      if (reader == nullptr) { +        backlog_.read_wanted = req; +        return; +      } +    } +    reader->Read(req); +  } +  void Finish(::grpc::Status s) { +    ServerCallbackReader<Request>* reader = +        reader_.load(std::memory_order_acquire); +    if (reader == nullptr) { +      grpc::internal::MutexLock l(&reader_mu_); +      reader = reader_.load(std::memory_order_relaxed); +      if (reader == nullptr) { +        backlog_.finish_wanted = true; +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    reader->Finish(std::move(s)); +  } + +  /// The following notifications are exactly like ServerBidiReactor. +  virtual void OnSendInitialMetadataDone(bool /*ok*/) {} +  virtual void OnReadDone(bool /*ok*/) {} +  void OnDone() override = 0; +  void OnCancel() override {} + + private: +  friend class ServerCallbackReader<Request>; + +  // May be overridden by internal implementation details. This is not a public +  // customization point. +  virtual void InternalBindReader(ServerCallbackReader<Request>* reader) { +    grpc::internal::MutexLock l(&reader_mu_); + +    if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) { +      reader->SendInitialMetadata(); +    } +    if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) { +      reader->Read(backlog_.read_wanted); +    } +    if (GPR_UNLIKELY(backlog_.finish_wanted)) { +      reader->Finish(std::move(backlog_.status_wanted)); +    } +    // Set reader_ last so that other functions can use it lock-free +    reader_.store(reader, std::memory_order_release); +  } + +  grpc::internal::Mutex reader_mu_; +  std::atomic<ServerCallbackReader<Request>*> reader_{nullptr}; +  struct PreBindBacklog { +    bool send_initial_metadata_wanted = false; +    bool finish_wanted = false; +    Request* read_wanted = nullptr; +    ::grpc::Status status_wanted; +  }; +  PreBindBacklog backlog_ /* GUARDED_BY(reader_mu_) */; +}; + +/// \a ServerWriteReactor is the interface for a server-streaming RPC. +template <class Response> +class ServerWriteReactor : public internal::ServerReactor { + public: +  ServerWriteReactor() : writer_(nullptr) {} +  ~ServerWriteReactor() = default; + +  /// The following operation initiations are exactly like ServerBidiReactor. 
+  void StartSendInitialMetadata() { +    ServerCallbackWriter<Response>* writer = +        writer_.load(std::memory_order_acquire); +    if (writer == nullptr) { +      grpc::internal::MutexLock l(&writer_mu_); +      writer = writer_.load(std::memory_order_relaxed); +      if (writer == nullptr) { +        backlog_.send_initial_metadata_wanted = true; +        return; +      } +    } +    writer->SendInitialMetadata(); +  } +  void StartWrite(const Response* resp) { +    StartWrite(resp, ::grpc::WriteOptions()); +  } +  void StartWrite(const Response* resp, ::grpc::WriteOptions options) { +    ServerCallbackWriter<Response>* writer = +        writer_.load(std::memory_order_acquire); +    if (writer == nullptr) { +      grpc::internal::MutexLock l(&writer_mu_); +      writer = writer_.load(std::memory_order_relaxed); +      if (writer == nullptr) { +        backlog_.write_wanted = resp; +        backlog_.write_options_wanted = std::move(options); +        return; +      } +    } +    writer->Write(resp, std::move(options)); +  } +  void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options, +                           ::grpc::Status s) { +    ServerCallbackWriter<Response>* writer = +        writer_.load(std::memory_order_acquire); +    if (writer == nullptr) { +      grpc::internal::MutexLock l(&writer_mu_); +      writer = writer_.load(std::memory_order_relaxed); +      if (writer == nullptr) { +        backlog_.write_and_finish_wanted = true; +        backlog_.write_wanted = resp; +        backlog_.write_options_wanted = std::move(options); +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    writer->WriteAndFinish(resp, std::move(options), std::move(s)); +  } +  void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) { +    StartWrite(resp, std::move(options.set_last_message())); +  } +  void Finish(::grpc::Status s) { +    ServerCallbackWriter<Response>* writer = +        writer_.load(std::memory_order_acquire); +    if (writer == nullptr) { +      grpc::internal::MutexLock l(&writer_mu_); +      writer = writer_.load(std::memory_order_relaxed); +      if (writer == nullptr) { +        backlog_.finish_wanted = true; +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    writer->Finish(std::move(s)); +  } + +  /// The following notifications are exactly like ServerBidiReactor. +  virtual void OnSendInitialMetadataDone(bool /*ok*/) {} +  virtual void OnWriteDone(bool /*ok*/) {} +  void OnDone() override = 0; +  void OnCancel() override {} + + private: +  friend class ServerCallbackWriter<Response>; +  // May be overridden by internal implementation details. This is not a public +  // customization point. 
+  virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) { +    grpc::internal::MutexLock l(&writer_mu_); + +    if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) { +      writer->SendInitialMetadata(); +    } +    if (GPR_UNLIKELY(backlog_.write_and_finish_wanted)) { +      writer->WriteAndFinish(backlog_.write_wanted, +                             std::move(backlog_.write_options_wanted), +                             std::move(backlog_.status_wanted)); +    } else { +      if (GPR_UNLIKELY(backlog_.write_wanted != nullptr)) { +        writer->Write(backlog_.write_wanted, +                      std::move(backlog_.write_options_wanted)); +      } +      if (GPR_UNLIKELY(backlog_.finish_wanted)) { +        writer->Finish(std::move(backlog_.status_wanted)); +      } +    } +    // Set writer_ last so that other functions can use it lock-free +    writer_.store(writer, std::memory_order_release); +  } + +  grpc::internal::Mutex writer_mu_; +  std::atomic<ServerCallbackWriter<Response>*> writer_{nullptr}; +  struct PreBindBacklog { +    bool send_initial_metadata_wanted = false; +    bool write_and_finish_wanted = false; +    bool finish_wanted = false; +    const Response* write_wanted = nullptr; +    ::grpc::WriteOptions write_options_wanted; +    ::grpc::Status status_wanted; +  }; +  PreBindBacklog backlog_ /* GUARDED_BY(writer_mu_) */; +}; + +class ServerUnaryReactor : public internal::ServerReactor { + public: +  ServerUnaryReactor() : call_(nullptr) {} +  ~ServerUnaryReactor() = default; + +  /// StartSendInitialMetadata is exactly like ServerBidiReactor. +  void StartSendInitialMetadata() { +    ServerCallbackUnary* call = call_.load(std::memory_order_acquire); +    if (call == nullptr) { +      grpc::internal::MutexLock l(&call_mu_); +      call = call_.load(std::memory_order_relaxed); +      if (call == nullptr) { +        backlog_.send_initial_metadata_wanted = true; +        return; +      } +    } +    call->SendInitialMetadata(); +  } +  /// Finish is similar to ServerBidiReactor except for one detail. +  /// If the status is non-OK, any message will not be sent. Instead, +  /// the client will only receive the status and any trailing metadata. +  void Finish(::grpc::Status s) { +    ServerCallbackUnary* call = call_.load(std::memory_order_acquire); +    if (call == nullptr) { +      grpc::internal::MutexLock l(&call_mu_); +      call = call_.load(std::memory_order_relaxed); +      if (call == nullptr) { +        backlog_.finish_wanted = true; +        backlog_.status_wanted = std::move(s); +        return; +      } +    } +    call->Finish(std::move(s)); +  } + +  /// The following notifications are exactly like ServerBidiReactor. +  virtual void OnSendInitialMetadataDone(bool /*ok*/) {} +  void OnDone() override = 0; +  void OnCancel() override {} + + private: +  friend class ServerCallbackUnary; +  // May be overridden by internal implementation details. This is not a public +  // customization point. 
+  virtual void InternalBindCall(ServerCallbackUnary* call) { +    grpc::internal::MutexLock l(&call_mu_); + +    if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) { +      call->SendInitialMetadata(); +    } +    if (GPR_UNLIKELY(backlog_.finish_wanted)) { +      call->Finish(std::move(backlog_.status_wanted)); +    } +    // Set call_ last so that other functions can use it lock-free +    call_.store(call, std::memory_order_release); +  } + +  grpc::internal::Mutex call_mu_; +  std::atomic<ServerCallbackUnary*> call_{nullptr}; +  struct PreBindBacklog { +    bool send_initial_metadata_wanted = false; +    bool finish_wanted = false; +    ::grpc::Status status_wanted; +  }; +  PreBindBacklog backlog_ /* GUARDED_BY(call_mu_) */; +}; + +namespace internal { + +template <class Base> +class FinishOnlyReactor : public Base { + public: +  explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); } +  void OnDone() override { this->~FinishOnlyReactor(); } +}; + +using UnimplementedUnaryReactor = FinishOnlyReactor<ServerUnaryReactor>; +template <class Request> +using UnimplementedReadReactor = FinishOnlyReactor<ServerReadReactor<Request>>; +template <class Response> +using UnimplementedWriteReactor = +    FinishOnlyReactor<ServerWriteReactor<Response>>; +template <class Request, class Response> +using UnimplementedBidiReactor = +    FinishOnlyReactor<ServerBidiReactor<Request, Response>>; + +}  // namespace internal + +// TODO(vjpai): Remove namespace experimental when de-experimentalized fully. +namespace experimental { + +template <class Request> +using ServerReadReactor = ::grpc::ServerReadReactor<Request>; + +template <class Response> +using ServerWriteReactor = ::grpc::ServerWriteReactor<Response>; + +template <class Request, class Response> +using ServerBidiReactor = ::grpc::ServerBidiReactor<Request, Response>; + +using ServerUnaryReactor = ::grpc::ServerUnaryReactor; + +}  // namespace experimental + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h new file mode 100644 index 00000000000..8120fcaf851 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h @@ -0,0 +1,867 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H +#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H + +#include <grpcpp/impl/codegen/message_allocator.h> +#include <grpcpp/impl/codegen/rpc_service_method.h> +#include <grpcpp/impl/codegen/server_callback.h> +#include <grpcpp/impl/codegen/server_context.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { +namespace internal { + +template <class RequestType, class ResponseType> +class CallbackUnaryHandler : public ::grpc::internal::MethodHandler { + public: +  explicit CallbackUnaryHandler( +      std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*, +                                        const RequestType*, ResponseType*)> +          get_reactor) +      : get_reactor_(std::move(get_reactor)) {} + +  void SetMessageAllocator( +      ::grpc::experimental::MessageAllocator<RequestType, ResponseType>* +          allocator) { +    allocator_ = allocator; +  } + +  void RunHandler(const HandlerParameter& param) final { +    // Arena allocate a controller structure (that includes request/response) +    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call()); +    auto* allocator_state = static_cast< +        ::grpc::experimental::MessageHolder<RequestType, ResponseType>*>( +        param.internal_data); + +    auto* call = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        param.call->call(), sizeof(ServerCallbackUnaryImpl))) +        ServerCallbackUnaryImpl( +            static_cast<::grpc::CallbackServerContext*>(param.server_context), +            param.call, allocator_state, std::move(param.call_requester)); +    param.server_context->BeginCompletionOp( +        param.call, [call](bool) { call->MaybeDone(); }, call); + +    ServerUnaryReactor* reactor = nullptr; +    if (param.status.ok()) { +      reactor = ::grpc::internal::CatchingReactorGetter<ServerUnaryReactor>( +          get_reactor_, +          static_cast<::grpc::CallbackServerContext*>(param.server_context), +          call->request(), call->response()); +    } + +    if (reactor == nullptr) { +      // if deserialization or reactor creator failed, we need to fail the call +      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +          param.call->call(), sizeof(UnimplementedUnaryReactor))) +          UnimplementedUnaryReactor( +              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); +    } + +    /// Invoke SetupReactor as the last part of the handler +    call->SetupReactor(reactor); +  } + +  void* Deserialize(grpc_call* call, grpc_byte_buffer* req, +                    ::grpc::Status* status, void** handler_data) final { +    ::grpc::ByteBuffer buf; +    buf.set_buffer(req); +    RequestType* request = nullptr; +    ::grpc::experimental::MessageHolder<RequestType, ResponseType>* +        allocator_state = nullptr; +    if (allocator_ != nullptr) { +      allocator_state = allocator_->AllocateMessages(); +    } else { +      allocator_state = +          new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +              call, sizeof(DefaultMessageHolder<RequestType, ResponseType>))) +              DefaultMessageHolder<RequestType, ResponseType>(); +    } +    *handler_data = allocator_state; +    request = allocator_state->request(); +    *status = +        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request); +    buf.Release(); +    if (status->ok()) { +      return request; +    } +    // Clean up on deserialization failure. 
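+    // For the default holder above this just runs its destructor in the call
+    // arena; a custom MessageAllocator's holder may release real allocations.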
+    allocator_state->Release(); +    return nullptr; +  } + + private: +  std::function<ServerUnaryReactor*(::grpc::CallbackServerContext*, +                                    const RequestType*, ResponseType*)> +      get_reactor_; +  ::grpc::experimental::MessageAllocator<RequestType, ResponseType>* +      allocator_ = nullptr; + +  class ServerCallbackUnaryImpl : public ServerCallbackUnary { +   public: +    void Finish(::grpc::Status s) override { +      // A callback that only contains a call to MaybeDone can be run as an +      // inline callback regardless of whether or not OnDone is inlineable +      // because if the actual OnDone callback needs to be scheduled, MaybeDone +      // is responsible for dispatching to an executor thread if needed. Thus, +      // when setting up the finish_tag_, we can set its own callback to +      // inlineable. +      finish_tag_.Set( +          call_.call(), +          [this](bool) { +            this->MaybeDone( +                reactor_.load(std::memory_order_relaxed)->InternalInlineable()); +          }, +          &finish_ops_, /*can_inline=*/true); +      finish_ops_.set_core_cq_tag(&finish_tag_); + +      if (!ctx_->sent_initial_metadata_) { +        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                        ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          finish_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      // The response is dropped if the status is not OK. +      if (s.ok()) { +        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, +                                     finish_ops_.SendMessagePtr(response())); +      } else { +        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s); +      } +      finish_ops_.set_core_cq_tag(&finish_tag_); +      call_.PerformOps(&finish_ops_); +    } + +    void SendInitialMetadata() override { +      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); +      this->Ref(); +      // The callback for this function should not be marked inline because it +      // is directly invoking a user-controlled reaction +      // (OnSendInitialMetadataDone). Thus it must be dispatched to an executor +      // thread. However, any OnDone needed after that can be inlined because it +      // is already running on an executor thread. 
+      meta_tag_.Set(call_.call(), +                    [this](bool ok) { +                      ServerUnaryReactor* reactor = +                          reactor_.load(std::memory_order_relaxed); +                      reactor->OnSendInitialMetadataDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &meta_ops_, /*can_inline=*/false); +      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                    ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        meta_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +      meta_ops_.set_core_cq_tag(&meta_tag_); +      call_.PerformOps(&meta_ops_); +    } + +   private: +    friend class CallbackUnaryHandler<RequestType, ResponseType>; + +    ServerCallbackUnaryImpl( +        ::grpc::CallbackServerContext* ctx, ::grpc::internal::Call* call, +        ::grpc::experimental::MessageHolder<RequestType, ResponseType>* +            allocator_state, +        std::function<void()> call_requester) +        : ctx_(ctx), +          call_(*call), +          allocator_state_(allocator_state), +          call_requester_(std::move(call_requester)) { +      ctx_->set_message_allocator_state(allocator_state); +    } + +    /// SetupReactor binds the reactor (which also releases any queued +    /// operations), maybe calls OnCancel if possible/needed, and maybe marks +    /// the completion of the RPC. This should be the last component of the +    /// handler. +    void SetupReactor(ServerUnaryReactor* reactor) { +      reactor_.store(reactor, std::memory_order_relaxed); +      this->BindReactor(reactor); +      this->MaybeCallOnCancel(reactor); +      this->MaybeDone(reactor->InternalInlineable()); +    } + +    const RequestType* request() { return allocator_state_->request(); } +    ResponseType* response() { return allocator_state_->response(); } + +    void CallOnDone() override { +      reactor_.load(std::memory_order_relaxed)->OnDone(); +      grpc_call* call = call_.call(); +      auto call_requester = std::move(call_requester_); +      allocator_state_->Release(); +      this->~ServerCallbackUnaryImpl();  // explicitly call destructor +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      call_requester(); +    } + +    ServerReactor* reactor() override { +      return reactor_.load(std::memory_order_relaxed); +    } + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +        meta_ops_; +    ::grpc::internal::CallbackWithSuccessTag meta_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        finish_ops_; +    ::grpc::internal::CallbackWithSuccessTag finish_tag_; + +    ::grpc::CallbackServerContext* const ctx_; +    ::grpc::internal::Call call_; +    ::grpc::experimental::MessageHolder<RequestType, ResponseType>* const +        allocator_state_; +    std::function<void()> call_requester_; +    // reactor_ can always be loaded/stored with relaxed memory ordering because +    // its value is only set once, independently of other data in the object, +    // and the loads that use it will always actually come provably later even +    // though they are from different threads since they are triggered by +    // actions initiated only by the setting up 
of the reactor_ variable. In +    // a sense, it's a delayed "const": it gets its value from the SetupReactor +    // method (not the constructor, so it's not a true const), but it doesn't +    // change after that and it only gets used by actions caused, directly or +    // indirectly, by that setup. This comment also applies to the reactor_ +    // variables of the other streaming objects in this file. +    std::atomic<ServerUnaryReactor*> reactor_; +    // callbacks_outstanding_ follows a refcount pattern +    std::atomic<intptr_t> callbacks_outstanding_{ +        3};  // reserve for start, Finish, and CompletionOp +  }; +}; + +template <class RequestType, class ResponseType> +class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler { + public: +  explicit CallbackClientStreamingHandler( +      std::function<ServerReadReactor<RequestType>*( +          ::grpc::CallbackServerContext*, ResponseType*)> +          get_reactor) +      : get_reactor_(std::move(get_reactor)) {} +  void RunHandler(const HandlerParameter& param) final { +    // Arena allocate a reader structure (that includes response) +    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call()); + +    auto* reader = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        param.call->call(), sizeof(ServerCallbackReaderImpl))) +        ServerCallbackReaderImpl( +            static_cast<::grpc::CallbackServerContext*>(param.server_context), +            param.call, std::move(param.call_requester)); +    // Inlineable OnDone can be false in the CompletionOp callback because there +    // is no read reactor that has an inlineable OnDone; this only applies to +    // the DefaultReactor (which is unary). +    param.server_context->BeginCompletionOp( +        param.call, +        [reader](bool) { reader->MaybeDone(/*inlineable_ondone=*/false); }, +        reader); + +    ServerReadReactor<RequestType>* reactor = nullptr; +    if (param.status.ok()) { +      reactor = ::grpc::internal::CatchingReactorGetter< +          ServerReadReactor<RequestType>>( +          get_reactor_, +          static_cast<::grpc::CallbackServerContext*>(param.server_context), +          reader->response()); +    } + +    if (reactor == nullptr) { +      // if deserialization or reactor creator failed, we need to fail the call +      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +          param.call->call(), sizeof(UnimplementedReadReactor<RequestType>))) +          UnimplementedReadReactor<RequestType>( +              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); +    } + +    reader->SetupReactor(reactor); +  } + + private: +  std::function<ServerReadReactor<RequestType>*(::grpc::CallbackServerContext*, +                                                ResponseType*)> +      get_reactor_; + +  class ServerCallbackReaderImpl : public ServerCallbackReader<RequestType> { +   public: +    void Finish(::grpc::Status s) override { +      // A finish tag with only MaybeDone can have its callback inlined +      // regardless even if OnDone is not inlineable because this callback just +      // checks a ref and then decides whether or not to dispatch OnDone. +      finish_tag_.Set(call_.call(), +                      [this](bool) { +                        // Inlineable OnDone can be false here because there is +                        // no read reactor that has an inlineable OnDone; this +                        // only applies to the DefaultReactor (which is unary). 
+                        this->MaybeDone(/*inlineable_ondone=*/false); +                      }, +                      &finish_ops_, /*can_inline=*/true); +      if (!ctx_->sent_initial_metadata_) { +        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                        ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          finish_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      // The response is dropped if the status is not OK. +      if (s.ok()) { +        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, +                                     finish_ops_.SendMessagePtr(&resp_)); +      } else { +        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s); +      } +      finish_ops_.set_core_cq_tag(&finish_tag_); +      call_.PerformOps(&finish_ops_); +    } + +    void SendInitialMetadata() override { +      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); +      this->Ref(); +      // The callback for this function should not be inlined because it invokes +      // a user-controlled reaction, but any resulting OnDone can be inlined in +      // the executor to which this callback is dispatched. +      meta_tag_.Set(call_.call(), +                    [this](bool ok) { +                      ServerReadReactor<RequestType>* reactor = +                          reactor_.load(std::memory_order_relaxed); +                      reactor->OnSendInitialMetadataDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &meta_ops_, /*can_inline=*/false); +      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                    ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        meta_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +      meta_ops_.set_core_cq_tag(&meta_tag_); +      call_.PerformOps(&meta_ops_); +    } + +    void Read(RequestType* req) override { +      this->Ref(); +      read_ops_.RecvMessage(req); +      call_.PerformOps(&read_ops_); +    } + +   private: +    friend class CallbackClientStreamingHandler<RequestType, ResponseType>; + +    ServerCallbackReaderImpl(::grpc::CallbackServerContext* ctx, +                             ::grpc::internal::Call* call, +                             std::function<void()> call_requester) +        : ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {} + +    void SetupReactor(ServerReadReactor<RequestType>* reactor) { +      reactor_.store(reactor, std::memory_order_relaxed); +      // The callback for this function should not be inlined because it invokes +      // a user-controlled reaction, but any resulting OnDone can be inlined in +      // the executor to which this callback is dispatched. +      read_tag_.Set(call_.call(), +                    [this, reactor](bool ok) { +                      reactor->OnReadDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &read_ops_, /*can_inline=*/false); +      read_ops_.set_core_cq_tag(&read_tag_); +      this->BindReactor(reactor); +      this->MaybeCallOnCancel(reactor); +      // Inlineable OnDone can be false here because there is no read +      // reactor that has an inlineable OnDone; this only applies to the +      // DefaultReactor (which is unary). 
+      this->MaybeDone(/*inlineable_ondone=*/false); +    } + +    ~ServerCallbackReaderImpl() {} + +    ResponseType* response() { return &resp_; } + +    void CallOnDone() override { +      reactor_.load(std::memory_order_relaxed)->OnDone(); +      grpc_call* call = call_.call(); +      auto call_requester = std::move(call_requester_); +      this->~ServerCallbackReaderImpl();  // explicitly call destructor +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      call_requester(); +    } + +    ServerReactor* reactor() override { +      return reactor_.load(std::memory_order_relaxed); +    } + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +        meta_ops_; +    ::grpc::internal::CallbackWithSuccessTag meta_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        finish_ops_; +    ::grpc::internal::CallbackWithSuccessTag finish_tag_; +    ::grpc::internal::CallOpSet< +        ::grpc::internal::CallOpRecvMessage<RequestType>> +        read_ops_; +    ::grpc::internal::CallbackWithSuccessTag read_tag_; + +    ::grpc::CallbackServerContext* const ctx_; +    ::grpc::internal::Call call_; +    ResponseType resp_; +    std::function<void()> call_requester_; +    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl. +    std::atomic<ServerReadReactor<RequestType>*> reactor_; +    // callbacks_outstanding_ follows a refcount pattern +    std::atomic<intptr_t> callbacks_outstanding_{ +        3};  // reserve for OnStarted, Finish, and CompletionOp +  }; +}; + +template <class RequestType, class ResponseType> +class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler { + public: +  explicit CallbackServerStreamingHandler( +      std::function<ServerWriteReactor<ResponseType>*( +          ::grpc::CallbackServerContext*, const RequestType*)> +          get_reactor) +      : get_reactor_(std::move(get_reactor)) {} +  void RunHandler(const HandlerParameter& param) final { +    // Arena allocate a writer structure +    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call()); + +    auto* writer = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        param.call->call(), sizeof(ServerCallbackWriterImpl))) +        ServerCallbackWriterImpl( +            static_cast<::grpc::CallbackServerContext*>(param.server_context), +            param.call, static_cast<RequestType*>(param.request), +            std::move(param.call_requester)); +    // Inlineable OnDone can be false in the CompletionOp callback because there +    // is no write reactor that has an inlineable OnDone; this only applies to +    // the DefaultReactor (which is unary). 
+    param.server_context->BeginCompletionOp( +        param.call, +        [writer](bool) { writer->MaybeDone(/*inlineable_ondone=*/false); }, +        writer); + +    ServerWriteReactor<ResponseType>* reactor = nullptr; +    if (param.status.ok()) { +      reactor = ::grpc::internal::CatchingReactorGetter< +          ServerWriteReactor<ResponseType>>( +          get_reactor_, +          static_cast<::grpc::CallbackServerContext*>(param.server_context), +          writer->request()); +    } +    if (reactor == nullptr) { +      // if deserialization or reactor creator failed, we need to fail the call +      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +          param.call->call(), sizeof(UnimplementedWriteReactor<ResponseType>))) +          UnimplementedWriteReactor<ResponseType>( +              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); +    } + +    writer->SetupReactor(reactor); +  } + +  void* Deserialize(grpc_call* call, grpc_byte_buffer* req, +                    ::grpc::Status* status, void** /*handler_data*/) final { +    ::grpc::ByteBuffer buf; +    buf.set_buffer(req); +    auto* request = +        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +            call, sizeof(RequestType))) RequestType(); +    *status = +        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request); +    buf.Release(); +    if (status->ok()) { +      return request; +    } +    request->~RequestType(); +    return nullptr; +  } + + private: +  std::function<ServerWriteReactor<ResponseType>*( +      ::grpc::CallbackServerContext*, const RequestType*)> +      get_reactor_; + +  class ServerCallbackWriterImpl : public ServerCallbackWriter<ResponseType> { +   public: +    void Finish(::grpc::Status s) override { +      // A finish tag with only MaybeDone can have its callback inlined +      // regardless even if OnDone is not inlineable because this callback just +      // checks a ref and then decides whether or not to dispatch OnDone. +      finish_tag_.Set(call_.call(), +                      [this](bool) { +                        // Inlineable OnDone can be false here because there is +                        // no write reactor that has an inlineable OnDone; this +                        // only applies to the DefaultReactor (which is unary). +                        this->MaybeDone(/*inlineable_ondone=*/false); +                      }, +                      &finish_ops_, /*can_inline=*/true); +      finish_ops_.set_core_cq_tag(&finish_tag_); + +      if (!ctx_->sent_initial_metadata_) { +        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                        ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          finish_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s); +      call_.PerformOps(&finish_ops_); +    } + +    void SendInitialMetadata() override { +      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); +      this->Ref(); +      // The callback for this function should not be inlined because it invokes +      // a user-controlled reaction, but any resulting OnDone can be inlined in +      // the executor to which this callback is dispatched. 
+      meta_tag_.Set(call_.call(), +                    [this](bool ok) { +                      ServerWriteReactor<ResponseType>* reactor = +                          reactor_.load(std::memory_order_relaxed); +                      reactor->OnSendInitialMetadataDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &meta_ops_, /*can_inline=*/false); +      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                    ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        meta_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +      meta_ops_.set_core_cq_tag(&meta_tag_); +      call_.PerformOps(&meta_ops_); +    } + +    void Write(const ResponseType* resp, +               ::grpc::WriteOptions options) override { +      this->Ref(); +      if (options.is_last_message()) { +        options.set_buffer_hint(); +      } +      if (!ctx_->sent_initial_metadata_) { +        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                       ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          write_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      // TODO(vjpai): don't assert +      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok()); +      call_.PerformOps(&write_ops_); +    } + +    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options, +                        ::grpc::Status s) override { +      // This combines the write into the finish callback +      // TODO(vjpai): don't assert +      GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok()); +      Finish(std::move(s)); +    } + +   private: +    friend class CallbackServerStreamingHandler<RequestType, ResponseType>; + +    ServerCallbackWriterImpl(::grpc::CallbackServerContext* ctx, +                             ::grpc::internal::Call* call, +                             const RequestType* req, +                             std::function<void()> call_requester) +        : ctx_(ctx), +          call_(*call), +          req_(req), +          call_requester_(std::move(call_requester)) {} + +    void SetupReactor(ServerWriteReactor<ResponseType>* reactor) { +      reactor_.store(reactor, std::memory_order_relaxed); +      // The callback for this function should not be inlined because it invokes +      // a user-controlled reaction, but any resulting OnDone can be inlined in +      // the executor to which this callback is dispatched. +      write_tag_.Set(call_.call(), +                     [this, reactor](bool ok) { +                       reactor->OnWriteDone(ok); +                       this->MaybeDone(/*inlineable_ondone=*/true); +                     }, +                     &write_ops_, /*can_inline=*/false); +      write_ops_.set_core_cq_tag(&write_tag_); +      this->BindReactor(reactor); +      this->MaybeCallOnCancel(reactor); +      // Inlineable OnDone can be false here because there is no write +      // reactor that has an inlineable OnDone; this only applies to the +      // DefaultReactor (which is unary). 
+      this->MaybeDone(/*inlineable_ondone=*/false); +    } +    ~ServerCallbackWriterImpl() { req_->~RequestType(); } + +    const RequestType* request() { return req_; } + +    void CallOnDone() override { +      reactor_.load(std::memory_order_relaxed)->OnDone(); +      grpc_call* call = call_.call(); +      auto call_requester = std::move(call_requester_); +      this->~ServerCallbackWriterImpl();  // explicitly call destructor +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      call_requester(); +    } + +    ServerReactor* reactor() override { +      return reactor_.load(std::memory_order_relaxed); +    } + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +        meta_ops_; +    ::grpc::internal::CallbackWithSuccessTag meta_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        finish_ops_; +    ::grpc::internal::CallbackWithSuccessTag finish_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage> +        write_ops_; +    ::grpc::internal::CallbackWithSuccessTag write_tag_; + +    ::grpc::CallbackServerContext* const ctx_; +    ::grpc::internal::Call call_; +    const RequestType* req_; +    std::function<void()> call_requester_; +    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl. +    std::atomic<ServerWriteReactor<ResponseType>*> reactor_; +    // callbacks_outstanding_ follows a refcount pattern +    std::atomic<intptr_t> callbacks_outstanding_{ +        3};  // reserve for OnStarted, Finish, and CompletionOp +  }; +}; + +template <class RequestType, class ResponseType> +class CallbackBidiHandler : public ::grpc::internal::MethodHandler { + public: +  explicit CallbackBidiHandler( +      std::function<ServerBidiReactor<RequestType, ResponseType>*( +          ::grpc::CallbackServerContext*)> +          get_reactor) +      : get_reactor_(std::move(get_reactor)) {} +  void RunHandler(const HandlerParameter& param) final { +    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call()); + +    auto* stream = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +        param.call->call(), sizeof(ServerCallbackReaderWriterImpl))) +        ServerCallbackReaderWriterImpl( +            static_cast<::grpc::CallbackServerContext*>(param.server_context), +            param.call, std::move(param.call_requester)); +    // Inlineable OnDone can be false in the CompletionOp callback because there +    // is no bidi reactor that has an inlineable OnDone; this only applies to +    // the DefaultReactor (which is unary). 
+    param.server_context->BeginCompletionOp( +        param.call, +        [stream](bool) { stream->MaybeDone(/*inlineable_ondone=*/false); }, +        stream); + +    ServerBidiReactor<RequestType, ResponseType>* reactor = nullptr; +    if (param.status.ok()) { +      reactor = ::grpc::internal::CatchingReactorGetter< +          ServerBidiReactor<RequestType, ResponseType>>( +          get_reactor_, +          static_cast<::grpc::CallbackServerContext*>(param.server_context)); +    } + +    if (reactor == nullptr) { +      // if deserialization or reactor creator failed, we need to fail the call +      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc( +          param.call->call(), +          sizeof(UnimplementedBidiReactor<RequestType, ResponseType>))) +          UnimplementedBidiReactor<RequestType, ResponseType>( +              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); +    } + +    stream->SetupReactor(reactor); +  } + + private: +  std::function<ServerBidiReactor<RequestType, ResponseType>*( +      ::grpc::CallbackServerContext*)> +      get_reactor_; + +  class ServerCallbackReaderWriterImpl +      : public ServerCallbackReaderWriter<RequestType, ResponseType> { +   public: +    void Finish(::grpc::Status s) override { +      // A finish tag with only MaybeDone can have its callback inlined +      // regardless even if OnDone is not inlineable because this callback just +      // checks a ref and then decides whether or not to dispatch OnDone. +      finish_tag_.Set(call_.call(), +                      [this](bool) { +                        // Inlineable OnDone can be false here because there is +                        // no bidi reactor that has an inlineable OnDone; this +                        // only applies to the DefaultReactor (which is unary). +                        this->MaybeDone(/*inlineable_ondone=*/false); +                      }, +                      &finish_ops_, /*can_inline=*/true); +      finish_ops_.set_core_cq_tag(&finish_tag_); + +      if (!ctx_->sent_initial_metadata_) { +        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                        ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          finish_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s); +      call_.PerformOps(&finish_ops_); +    } + +    void SendInitialMetadata() override { +      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); +      this->Ref(); +      // The callback for this function should not be inlined because it invokes +      // a user-controlled reaction, but any resulting OnDone can be inlined in +      // the executor to which this callback is dispatched. 
+      meta_tag_.Set(call_.call(), +                    [this](bool ok) { +                      ServerBidiReactor<RequestType, ResponseType>* reactor = +                          reactor_.load(std::memory_order_relaxed); +                      reactor->OnSendInitialMetadataDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &meta_ops_, /*can_inline=*/false); +      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                    ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        meta_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +      meta_ops_.set_core_cq_tag(&meta_tag_); +      call_.PerformOps(&meta_ops_); +    } + +    void Write(const ResponseType* resp, +               ::grpc::WriteOptions options) override { +      this->Ref(); +      if (options.is_last_message()) { +        options.set_buffer_hint(); +      } +      if (!ctx_->sent_initial_metadata_) { +        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                       ctx_->initial_metadata_flags()); +        if (ctx_->compression_level_set()) { +          write_ops_.set_compression_level(ctx_->compression_level()); +        } +        ctx_->sent_initial_metadata_ = true; +      } +      // TODO(vjpai): don't assert +      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok()); +      call_.PerformOps(&write_ops_); +    } + +    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options, +                        ::grpc::Status s) override { +      // TODO(vjpai): don't assert +      GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok()); +      Finish(std::move(s)); +    } + +    void Read(RequestType* req) override { +      this->Ref(); +      read_ops_.RecvMessage(req); +      call_.PerformOps(&read_ops_); +    } + +   private: +    friend class CallbackBidiHandler<RequestType, ResponseType>; + +    ServerCallbackReaderWriterImpl(::grpc::CallbackServerContext* ctx, +                                   ::grpc::internal::Call* call, +                                   std::function<void()> call_requester) +        : ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {} + +    void SetupReactor(ServerBidiReactor<RequestType, ResponseType>* reactor) { +      reactor_.store(reactor, std::memory_order_relaxed); +      // The callbacks for these functions should not be inlined because they +      // invoke user-controlled reactions, but any resulting OnDones can be +      // inlined in the executor to which a callback is dispatched. 
+      write_tag_.Set(call_.call(), +                     [this, reactor](bool ok) { +                       reactor->OnWriteDone(ok); +                       this->MaybeDone(/*inlineable_ondone=*/true); +                     }, +                     &write_ops_, /*can_inline=*/false); +      write_ops_.set_core_cq_tag(&write_tag_); +      read_tag_.Set(call_.call(), +                    [this, reactor](bool ok) { +                      reactor->OnReadDone(ok); +                      this->MaybeDone(/*inlineable_ondone=*/true); +                    }, +                    &read_ops_, /*can_inline=*/false); +      read_ops_.set_core_cq_tag(&read_tag_); +      this->BindReactor(reactor); +      this->MaybeCallOnCancel(reactor); +      // Inlineable OnDone can be false here because there is no bidi +      // reactor that has an inlineable OnDone; this only applies to the +      // DefaultReactor (which is unary). +      this->MaybeDone(/*inlineable_ondone=*/false); +    } + +    void CallOnDone() override { +      reactor_.load(std::memory_order_relaxed)->OnDone(); +      grpc_call* call = call_.call(); +      auto call_requester = std::move(call_requester_); +      this->~ServerCallbackReaderWriterImpl();  // explicitly call destructor +      ::grpc::g_core_codegen_interface->grpc_call_unref(call); +      call_requester(); +    } + +    ServerReactor* reactor() override { +      return reactor_.load(std::memory_order_relaxed); +    } + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +        meta_ops_; +    ::grpc::internal::CallbackWithSuccessTag meta_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpServerSendStatus> +        finish_ops_; +    ::grpc::internal::CallbackWithSuccessTag finish_tag_; +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage> +        write_ops_; +    ::grpc::internal::CallbackWithSuccessTag write_tag_; +    ::grpc::internal::CallOpSet< +        ::grpc::internal::CallOpRecvMessage<RequestType>> +        read_ops_; +    ::grpc::internal::CallbackWithSuccessTag read_tag_; + +    ::grpc::CallbackServerContext* const ctx_; +    ::grpc::internal::Call call_; +    std::function<void()> call_requester_; +    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl. +    std::atomic<ServerBidiReactor<RequestType, ResponseType>*> reactor_; +    // callbacks_outstanding_ follows a refcount pattern +    std::atomic<intptr_t> callbacks_outstanding_{ +        3};  // reserve for OnStarted, Finish, and CompletionOp +  }; +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h new file mode 100644 index 00000000000..685f006cdaa --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_context.h @@ -0,0 +1,619 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H +#define GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H + +#include <atomic> +#include <cassert> +#include <map> +#include <memory> +#include <type_traits> +#include <vector> + +#include <grpc/impl/codegen/port_platform.h> + +#include <grpc/impl/codegen/compression_types.h> +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_op_set.h> +#include <grpcpp/impl/codegen/callback_common.h> +#include <grpcpp/impl/codegen/completion_queue_tag.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/create_auth_context.h> +#include <grpcpp/impl/codegen/message_allocator.h> +#include <grpcpp/impl/codegen/metadata_map.h> +#include <grpcpp/impl/codegen/security/auth_context.h> +#include <grpcpp/impl/codegen/server_callback.h> +#include <grpcpp/impl/codegen/server_interceptor.h> +#include <grpcpp/impl/codegen/status.h> +#include <grpcpp/impl/codegen/string_ref.h> +#include <grpcpp/impl/codegen/time.h> + +struct grpc_metadata; +struct grpc_call; +struct census_context; + +namespace grpc { +template <class W, class R> +class ServerAsyncReader; +template <class W> +class ServerAsyncWriter; +template <class W> +class ServerAsyncResponseWriter; +template <class W, class R> +class ServerAsyncReaderWriter; +template <class R> +class ServerReader; +template <class W> +class ServerWriter; + +namespace internal { +template <class ServiceType, class RequestType, class ResponseType> +class BidiStreamingHandler; +template <class RequestType, class ResponseType> +class CallbackUnaryHandler; +template <class RequestType, class ResponseType> +class CallbackClientStreamingHandler; +template <class RequestType, class ResponseType> +class CallbackServerStreamingHandler; +template <class RequestType, class ResponseType> +class CallbackBidiHandler; +template <class ServiceType, class RequestType, class ResponseType> +class ClientStreamingHandler; +template <class ServiceType, class RequestType, class ResponseType> +class RpcMethodHandler; +template <class Base> +class FinishOnlyReactor; +template <class W, class R> +class ServerReaderWriterBody; +template <class ServiceType, class RequestType, class ResponseType> +class ServerStreamingHandler; +class ServerReactor; +template <class Streamer, bool WriteNeeded> +class TemplatedBidiStreamingHandler; +template <::grpc::StatusCode code> +class ErrorMethodHandler; +}  // namespace internal + +class ClientContext; +class CompletionQueue; +class GenericServerContext; +class Server; +class ServerInterface; + +// TODO(vjpai): Remove namespace experimental when de-experimentalized fully. 
+namespace experimental { + +typedef ::grpc::ServerContextBase ServerContextBase; +typedef ::grpc::CallbackServerContext CallbackServerContext; + +}  // namespace experimental + +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +namespace experimental { +#endif +class GenericCallbackServerContext; +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +}  // namespace experimental +#endif +namespace internal { +class Call; +}  // namespace internal + +namespace testing { +class InteropServerContextInspector; +class ServerContextTestSpouse; +class DefaultReactorTestPeer; +}  // namespace testing + +/// Base class of ServerContext. Experimental until callback API is final. +class ServerContextBase { + public: +  virtual ~ServerContextBase(); + +  /// Return the deadline for the server call. +  std::chrono::system_clock::time_point deadline() const { +    return ::grpc::Timespec2Timepoint(deadline_); +  } + +  /// Return a \a gpr_timespec representation of the server call's deadline. +  gpr_timespec raw_deadline() const { return deadline_; } + +  /// Add the (\a key, \a value) pair to the initial metadata +  /// associated with a server call. These are made available at the client side +  /// by the \a grpc::ClientContext::GetServerInitialMetadata() method. +  /// +  /// \warning This method should only be called before sending initial metadata +  /// to the client (which can happen explicitly, or implicitly when sending a +  /// a response message or status to the client). +  /// +  /// \param key The metadata key. If \a value is binary data, it must +  /// end in "-bin". +  /// \param value The metadata value. If its value is binary, the key name +  /// must end in "-bin". +  /// +  /// Metadata must conform to the following format: +  /// Custom-Metadata -> Binary-Header / ASCII-Header +  /// Binary-Header -> {Header-Name "-bin" } {binary value} +  /// ASCII-Header -> Header-Name ASCII-Value +  /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - . +  /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII +  void AddInitialMetadata(const TString& key, const TString& value); + +  /// Add the (\a key, \a value) pair to the initial metadata +  /// associated with a server call. These are made available at the client +  /// side by the \a grpc::ClientContext::GetServerTrailingMetadata() method. +  /// +  /// \warning This method should only be called before sending trailing +  /// metadata to the client (which happens when the call is finished and a +  /// status is sent to the client). +  /// +  /// \param key The metadata key. If \a value is binary data, +  /// it must end in "-bin". +  /// \param value The metadata value. If its value is binary, the key name +  /// must end in "-bin". +  /// +  /// Metadata must conform to the following format: +  /// Custom-Metadata -> Binary-Header / ASCII-Header +  /// Binary-Header -> {Header-Name "-bin" } {binary value} +  /// ASCII-Header -> Header-Name ASCII-Value +  /// Header-Name -> 1*( %x30-39 / %x61-7A / "_" / "-" / ".") ; 0-9 a-z _ - . +  /// ASCII-Value -> 1*( %x20-%x7E ) ; space and printable ASCII +  void AddTrailingMetadata(const TString& key, const TString& value); + +  /// Return whether this RPC failed before the server could provide its status +  /// back to the client. This could be because of explicit API cancellation +  /// from the client-side or server-side, because of deadline exceeded, network +  /// connection reset, HTTP/2 parameter configuration (e.g., max message size, +  /// max connection age), etc. 
It does NOT include failure due to a non-OK
+  /// status return from the server application's request handler, including
+  /// Status::CANCELLED.
+  ///
+  /// IsCancelled is always safe to call when using sync or callback API.
+  /// When using async API, it is only safe to call IsCancelled after
+  /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
+  bool IsCancelled() const;
+
+  /// Cancel the Call from the server. This is a best-effort API and
+  /// depending on when it is called, the RPC may still appear successful to
+  /// the client. For example, if TryCancel() is called on a separate thread, it
+  /// might race with the server handler which might return success to the
+  /// client before TryCancel() was even started by the thread.
+  ///
+  /// It is the caller's responsibility to prevent such races and ensure that if
+  /// TryCancel() is called, the server handler must return Status::CANCELLED.
+  /// The only exception is that if the server handler is already returning an
+  /// error status code, it is ok to not return Status::CANCELLED even if
+  /// TryCancel() was called.
+  ///
+  /// For reasons such as the above, it is generally preferred to explicitly
+  /// finish an RPC by returning Status::CANCELLED rather than using TryCancel.
+  ///
+  /// Note that TryCancel() does not change any of the tags that are pending
+  /// on the completion queue. All pending tags will still be delivered
+  /// (though their ok result may reflect the effect of cancellation).
+  void TryCancel() const;
+
+  /// Return a collection of initial metadata key-value pairs sent from the
+  /// client. Note that keys may occur more than
+  /// once (ie, a \a std::multimap is returned).
+  ///
+  /// It is safe to use this method after initial metadata has been received.
+  /// Calls always begin with the client sending initial metadata, so this is
+  /// safe to access as soon as the call has begun on the server side.
+  ///
+  /// \return A multimap of initial metadata key-value pairs from the client.
+  const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata()
+      const {
+    return *client_metadata_.map();
+  }
+
+  /// Return the compression level to be used by the server call.
+  grpc_compression_level compression_level() const {
+    return compression_level_;
+  }
+
+  /// Set \a level to be the compression level used for the server call.
+  ///
+  /// \param level The compression level used for the server call.
+  void set_compression_level(grpc_compression_level level) {
+    compression_level_set_ = true;
+    compression_level_ = level;
+  }
+
+  /// Return a bool indicating whether the compression level for this call
+  /// has been set (either implicitly or through a previous call to
+  /// \a set_compression_level).
+  bool compression_level_set() const { return compression_level_set_; }
+
+  /// Return the compression algorithm the server call will request be used.
+  /// Note that the gRPC runtime may decide to ignore this request, for example,
+  /// due to resource constraints, or if the server is aware the client doesn't
+  /// support the requested algorithm.
+  grpc_compression_algorithm compression_algorithm() const {
+    return compression_algorithm_;
+  }
+  /// Set \a algorithm to be the compression algorithm used for the server call.
+  ///
+  /// \param algorithm The compression algorithm used for the server call.
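// Editorial note: illustrative sketch only, not part of this header. It shows
// how a handler body might opt a call into compression via the setters
// documented above; "ctx" stands for the call's ServerContext (or
// CallbackServerContext), and the enum values come from
// grpc/impl/codegen/compression_types.h.
//
//   // Request gzip at medium level; the runtime may ignore the request
//   // (see compression_algorithm() above).
//   ctx->set_compression_algorithm(GRPC_COMPRESS_GZIP);
//   ctx->set_compression_level(GRPC_COMPRESS_LEVEL_MED);
//   // compression_level_set() now returns true for this call.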
+  void set_compression_algorithm(grpc_compression_algorithm algorithm); + +  /// Set the serialized load reporting costs in \a cost_data for the call. +  void SetLoadReportingCosts(const std::vector<TString>& cost_data); + +  /// Return the authentication context for this server call. +  /// +  /// \see grpc::AuthContext. +  std::shared_ptr<const ::grpc::AuthContext> auth_context() const { +    if (auth_context_.get() == nullptr) { +      auth_context_ = ::grpc::CreateAuthContext(call_.call); +    } +    return auth_context_; +  } + +  /// Return the peer uri in a string. +  /// WARNING: this value is never authenticated or subject to any security +  /// related code. It must not be used for any authentication related +  /// functionality. Instead, use auth_context. +  TString peer() const; + +  /// Get the census context associated with this server call. +  const struct census_context* census_context() const; + +  /// Should be used for framework-level extensions only. +  /// Applications never need to call this method. +  grpc_call* c_call() { return call_.call; } + + protected: +  /// Async only. Has to be called before the rpc starts. +  /// Returns the tag in completion queue when the rpc finishes. +  /// IsCancelled() can then be called to check whether the rpc was cancelled. +  /// TODO(vjpai): Fix this so that the tag is returned even if the call never +  /// starts (https://github.com/grpc/grpc/issues/10136). +  void AsyncNotifyWhenDone(void* tag) { +    has_notify_when_done_tag_ = true; +    async_notify_when_done_tag_ = tag; +  } + +  /// NOTE: This is an API for advanced users who need custom allocators. +  /// Get and maybe mutate the allocator state associated with the current RPC. +  /// Currently only applicable for callback unary RPC methods. +  /// WARNING: This is experimental API and could be changed or removed. +  ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() { +    return message_allocator_state_; +  } + +  /// Get a library-owned default unary reactor for use in minimal reaction +  /// cases. This supports typical unary RPC usage of providing a response and +  /// status. It supports immediate Finish (finish from within the method +  /// handler) or delayed Finish (finish called after the method handler +  /// invocation). It does not support reacting to cancellation or completion, +  /// or early sending of initial metadata. Since this is a library-owned +  /// reactor, it should not be delete'd or freed in any way. This is more +  /// efficient than creating a user-owned reactor both because of avoiding an +  /// allocation and because its minimal reactions are optimized using a core +  /// surface flag that allows their reactions to run inline without any +  /// thread-hop. +  /// +  /// This method should not be called more than once or called after return +  /// from the method handler. +  /// +  /// WARNING: This is experimental API and could be changed or removed. +  ::grpc::ServerUnaryReactor* DefaultReactor() { +    // Short-circuit the case where a default reactor was already set up by +    // the TestPeer. 
+    if (test_unary_ != nullptr) { +      return reinterpret_cast<Reactor*>(&default_reactor_); +    } +    new (&default_reactor_) Reactor; +#ifndef NDEBUG +    bool old = false; +    assert(default_reactor_used_.compare_exchange_strong( +        old, true, std::memory_order_relaxed)); +#else +    default_reactor_used_.store(true, std::memory_order_relaxed); +#endif +    return reinterpret_cast<Reactor*>(&default_reactor_); +  } + +  /// Constructors for use by derived classes +  ServerContextBase(); +  ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr); + + private: +  friend class ::grpc::testing::InteropServerContextInspector; +  friend class ::grpc::testing::ServerContextTestSpouse; +  friend class ::grpc::testing::DefaultReactorTestPeer; +  friend class ::grpc::ServerInterface; +  friend class ::grpc::Server; +  template <class W, class R> +  friend class ::grpc::ServerAsyncReader; +  template <class W> +  friend class ::grpc::ServerAsyncWriter; +  template <class W> +  friend class ::grpc::ServerAsyncResponseWriter; +  template <class W, class R> +  friend class ::grpc::ServerAsyncReaderWriter; +  template <class R> +  friend class ::grpc::ServerReader; +  template <class W> +  friend class ::grpc::ServerWriter; +  template <class W, class R> +  friend class ::grpc::internal::ServerReaderWriterBody; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::RpcMethodHandler; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::ClientStreamingHandler; +  template <class ServiceType, class RequestType, class ResponseType> +  friend class ::grpc::internal::ServerStreamingHandler; +  template <class Streamer, bool WriteNeeded> +  friend class ::grpc::internal::TemplatedBidiStreamingHandler; +  template <class RequestType, class ResponseType> +  friend class ::grpc::internal::CallbackUnaryHandler; +  template <class RequestType, class ResponseType> +  friend class ::grpc::internal::CallbackClientStreamingHandler; +  template <class RequestType, class ResponseType> +  friend class ::grpc::internal::CallbackServerStreamingHandler; +  template <class RequestType, class ResponseType> +  friend class ::grpc::internal::CallbackBidiHandler; +  template <::grpc::StatusCode code> +  friend class ::grpc::internal::ErrorMethodHandler; +  template <class Base> +  friend class ::grpc::internal::FinishOnlyReactor; +  friend class ::grpc::ClientContext; +  friend class ::grpc::GenericServerContext; +#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL +  friend class ::grpc::GenericCallbackServerContext; +#else +  friend class ::grpc::experimental::GenericCallbackServerContext; +#endif + +  /// Prevent copying. 
+  ServerContextBase(const ServerContextBase&); +  ServerContextBase& operator=(const ServerContextBase&); + +  class CompletionOp; + +  void BeginCompletionOp( +      ::grpc::internal::Call* call, std::function<void(bool)> callback, +      ::grpc::internal::ServerCallbackCall* callback_controller); +  /// Return the tag queued by BeginCompletionOp() +  ::grpc::internal::CompletionQueueTag* GetCompletionOpTag(); + +  void set_call(grpc_call* call) { call_.call = call; } + +  void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr); + +  uint32_t initial_metadata_flags() const { return 0; } + +  ::grpc::experimental::ServerRpcInfo* set_server_rpc_info( +      const char* method, ::grpc::internal::RpcMethod::RpcType type, +      const std::vector<std::unique_ptr< +          ::grpc::experimental::ServerInterceptorFactoryInterface>>& creators) { +    if (creators.size() != 0) { +      rpc_info_ = new ::grpc::experimental::ServerRpcInfo(this, method, type); +      rpc_info_->RegisterInterceptors(creators); +    } +    return rpc_info_; +  } + +  void set_message_allocator_state( +      ::grpc::experimental::RpcAllocatorState* allocator_state) { +    message_allocator_state_ = allocator_state; +  } + +  struct CallWrapper { +    ~CallWrapper(); + +    grpc_call* call = nullptr; +  }; + +  // NOTE: call_ must be the first data member of this object so that its +  //       destructor is the last to be called, since its destructor may unref +  //       the underlying core call which holds the arena that may be used to +  //       hold this object. +  CallWrapper call_; + +  CompletionOp* completion_op_ = nullptr; +  bool has_notify_when_done_tag_ = false; +  void* async_notify_when_done_tag_ = nullptr; +  ::grpc::internal::CallbackWithSuccessTag completion_tag_; + +  gpr_timespec deadline_; +  ::grpc::CompletionQueue* cq_ = nullptr; +  bool sent_initial_metadata_ = false; +  mutable std::shared_ptr<const ::grpc::AuthContext> auth_context_; +  mutable ::grpc::internal::MetadataMap client_metadata_; +  std::multimap<TString, TString> initial_metadata_; +  std::multimap<TString, TString> trailing_metadata_; + +  bool compression_level_set_ = false; +  grpc_compression_level compression_level_; +  grpc_compression_algorithm compression_algorithm_; + +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                              ::grpc::internal::CallOpSendMessage> +      pending_ops_; +  bool has_pending_ops_ = false; + +  ::grpc::experimental::ServerRpcInfo* rpc_info_ = nullptr; +  ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr; + +  class Reactor : public ::grpc::ServerUnaryReactor { +   public: +    void OnCancel() override {} +    void OnDone() override {} +    // Override InternalInlineable for this class since its reactions are +    // trivial and thus do not need to be run from the executor (triggering a +    // thread hop). This should only be used by internal reactors (thus the +    // name) and not by user application code. 
+    bool InternalInlineable() override { return true; }
+  };
+
+  void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
+    test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
+  }
+  bool test_status_set() const {
+    return (test_unary_ != nullptr) && test_unary_->status_set();
+  }
+  ::grpc::Status test_status() const { return test_unary_->status(); }
+
+  class TestServerCallbackUnary : public ::grpc::ServerCallbackUnary {
+   public:
+    TestServerCallbackUnary(ServerContextBase* ctx,
+                            std::function<void(::grpc::Status)> func)
+        : reactor_(ctx->DefaultReactor()), func_(std::move(func)) {
+      this->BindReactor(reactor_);
+    }
+    void Finish(::grpc::Status s) override {
+      status_ = s;
+      func_(std::move(s));
+      status_set_.store(true, std::memory_order_release);
+    }
+    void SendInitialMetadata() override {}
+
+    bool status_set() const {
+      return status_set_.load(std::memory_order_acquire);
+    }
+    ::grpc::Status status() const { return status_; }
+
+   private:
+    void CallOnDone() override {}
+    ::grpc::internal::ServerReactor* reactor() override { return reactor_; }
+
+    ::grpc::ServerUnaryReactor* const reactor_;
+    std::atomic_bool status_set_{false};
+    ::grpc::Status status_;
+    const std::function<void(::grpc::Status s)> func_;
+  };
+
+  typename std::aligned_storage<sizeof(Reactor), alignof(Reactor)>::type
+      default_reactor_;
+  std::atomic_bool default_reactor_used_{false};
+  std::unique_ptr<TestServerCallbackUnary> test_unary_;
+};
+
+/// A ServerContext or CallbackServerContext allows the code implementing a
+/// service handler to:
+///
+/// - Add custom initial and trailing metadata key-value pairs that will
+///   be propagated to the client side.
+/// - Control call settings such as compression and authentication.
+/// - Access metadata coming from the client.
+/// - Get performance metrics (ie, census).
+///
+/// Context settings are only relevant to the call handler they are supplied to,
+/// that is to say, they aren't sticky across multiple calls. Some of these
+/// settings, such as the compression options, can be made persistent at server
+/// construction time by specifying the appropriate \a ChannelArguments
+/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
+///
+/// \warning ServerContext instances should \em not be reused across rpcs.
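// Editorial note: illustrative sketch only, not part of this header. It shows a
// callback-API unary method exercising the context facilities listed above
// (client metadata access, initial/trailing metadata, and the library-owned
// DefaultReactor). "EchoService", "EchoRequest" and "EchoResponse" are
// hypothetical generated types; the generated callback base class may be named
// ExperimentalCallbackService depending on build flags.
//
//   class EchoServiceImpl final : public EchoService::CallbackService {
//     grpc::ServerUnaryReactor* Echo(grpc::CallbackServerContext* ctx,
//                                    const EchoRequest* req,
//                                    EchoResponse* resp) override {
//       // Client metadata is a multimap of string_ref views; keys may repeat.
//       for (const auto& kv : ctx->client_metadata()) {
//         // kv.first / kv.second are only valid for the lifetime of the call.
//       }
//       // Initial metadata travels with the first response bytes, trailing
//       // metadata with the final status.
//       ctx->AddInitialMetadata("x-server-region", "eu-west-1");
//       ctx->AddTrailingMetadata("x-items-processed", "1");
//       resp->set_text(req->text());
//       // The library-owned default reactor suffices when we only Finish().
//       grpc::ServerUnaryReactor* reactor = ctx->DefaultReactor();
//       reactor->Finish(grpc::Status::OK);
//       return reactor;
//     }
//   };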
+class ServerContext : public ServerContextBase { + public: +  ServerContext() {}  // for async calls + +  using ServerContextBase::AddInitialMetadata; +  using ServerContextBase::AddTrailingMetadata; +  using ServerContextBase::auth_context; +  using ServerContextBase::c_call; +  using ServerContextBase::census_context; +  using ServerContextBase::client_metadata; +  using ServerContextBase::compression_algorithm; +  using ServerContextBase::compression_level; +  using ServerContextBase::compression_level_set; +  using ServerContextBase::deadline; +  using ServerContextBase::IsCancelled; +  using ServerContextBase::peer; +  using ServerContextBase::raw_deadline; +  using ServerContextBase::set_compression_algorithm; +  using ServerContextBase::set_compression_level; +  using ServerContextBase::SetLoadReportingCosts; +  using ServerContextBase::TryCancel; + +  // Sync/CQ-based Async ServerContext only +  using ServerContextBase::AsyncNotifyWhenDone; + + private: +  // Constructor for internal use by server only +  friend class ::grpc::Server; +  ServerContext(gpr_timespec deadline, grpc_metadata_array* arr) +      : ServerContextBase(deadline, arr) {} + +  // CallbackServerContext only +  using ServerContextBase::DefaultReactor; +  using ServerContextBase::GetRpcAllocatorState; + +  /// Prevent copying. +  ServerContext(const ServerContext&) = delete; +  ServerContext& operator=(const ServerContext&) = delete; +}; + +class CallbackServerContext : public ServerContextBase { + public: +  /// Public constructors are for direct use only by mocking tests. In practice, +  /// these objects will be owned by the library. +  CallbackServerContext() {} + +  using ServerContextBase::AddInitialMetadata; +  using ServerContextBase::AddTrailingMetadata; +  using ServerContextBase::auth_context; +  using ServerContextBase::c_call; +  using ServerContextBase::census_context; +  using ServerContextBase::client_metadata; +  using ServerContextBase::compression_algorithm; +  using ServerContextBase::compression_level; +  using ServerContextBase::compression_level_set; +  using ServerContextBase::deadline; +  using ServerContextBase::IsCancelled; +  using ServerContextBase::peer; +  using ServerContextBase::raw_deadline; +  using ServerContextBase::set_compression_algorithm; +  using ServerContextBase::set_compression_level; +  using ServerContextBase::SetLoadReportingCosts; +  using ServerContextBase::TryCancel; + +  // CallbackServerContext only +  using ServerContextBase::DefaultReactor; +  using ServerContextBase::GetRpcAllocatorState; + + private: +  // Sync/CQ-based Async ServerContext only +  using ServerContextBase::AsyncNotifyWhenDone; + +  /// Prevent copying. 
+  CallbackServerContext(const CallbackServerContext&) = delete; +  CallbackServerContext& operator=(const CallbackServerContext&) = delete; +}; + +}  // namespace grpc + +static_assert( +    std::is_base_of<::grpc::ServerContextBase, ::grpc::ServerContext>::value, +    "improper base class"); +static_assert(std::is_base_of<::grpc::ServerContextBase, +                              ::grpc::CallbackServerContext>::value, +              "improper base class"); +static_assert(sizeof(::grpc::ServerContextBase) == +                  sizeof(::grpc::ServerContext), +              "wrong size"); +static_assert(sizeof(::grpc::ServerContextBase) == +                  sizeof(::grpc::CallbackServerContext), +              "wrong size"); + +#endif  // GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h new file mode 100644 index 00000000000..7598e72a40e --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interceptor.h @@ -0,0 +1,139 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H +#define GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H + +#include <atomic> +#include <vector> + +#include <grpcpp/impl/codegen/interceptor.h> +#include <grpcpp/impl/codegen/rpc_method.h> +#include <grpcpp/impl/codegen/string_ref.h> + +namespace grpc { +class ServerContextBase; +namespace internal { +class InterceptorBatchMethodsImpl; +} + +namespace experimental { +class ServerRpcInfo; + +// A factory interface for creation of server interceptors. A vector of +// factories can be provided to ServerBuilder which will be used to create a new +// vector of server interceptors per RPC. Server interceptor authors should +// create a subclass of ServerInterceptorFactorInterface which creates objects +// of their interceptors. +class ServerInterceptorFactoryInterface { + public: +  virtual ~ServerInterceptorFactoryInterface() {} +  // Returns a pointer to an Interceptor object on successful creation, nullptr +  // otherwise. If nullptr is returned, this server interceptor factory is +  // ignored for the purposes of that RPC. +  virtual Interceptor* CreateServerInterceptor(ServerRpcInfo* info) = 0; +}; + +/// ServerRpcInfo represents the state of a particular RPC as it +/// appears to an interceptor. 
It is created and owned by the library and +/// passed to the CreateServerInterceptor method of the application's +/// ServerInterceptorFactoryInterface implementation +class ServerRpcInfo { + public: +  /// Type categorizes RPCs by unary or streaming type +  enum class Type { UNARY, CLIENT_STREAMING, SERVER_STREAMING, BIDI_STREAMING }; + +  ~ServerRpcInfo() {} + +  // Delete all copy and move constructors and assignments +  ServerRpcInfo(const ServerRpcInfo&) = delete; +  ServerRpcInfo& operator=(const ServerRpcInfo&) = delete; +  ServerRpcInfo(ServerRpcInfo&&) = delete; +  ServerRpcInfo& operator=(ServerRpcInfo&&) = delete; + +  // Getter methods + +  /// Return the fully-specified method name +  const char* method() const { return method_; } + +  /// Return the type of the RPC (unary or a streaming flavor) +  Type type() const { return type_; } + +  /// Return a pointer to the underlying ServerContext structure associated +  /// with the RPC to support features that apply to it +  ServerContextBase* server_context() { return ctx_; } + + private: +  static_assert(Type::UNARY == +                    static_cast<Type>(internal::RpcMethod::NORMAL_RPC), +                "violated expectation about Type enum"); +  static_assert(Type::CLIENT_STREAMING == +                    static_cast<Type>(internal::RpcMethod::CLIENT_STREAMING), +                "violated expectation about Type enum"); +  static_assert(Type::SERVER_STREAMING == +                    static_cast<Type>(internal::RpcMethod::SERVER_STREAMING), +                "violated expectation about Type enum"); +  static_assert(Type::BIDI_STREAMING == +                    static_cast<Type>(internal::RpcMethod::BIDI_STREAMING), +                "violated expectation about Type enum"); + +  ServerRpcInfo(ServerContextBase* ctx, const char* method, +                internal::RpcMethod::RpcType type) +      : ctx_(ctx), method_(method), type_(static_cast<Type>(type)) {} + +  // Runs interceptor at pos \a pos. 
+  void RunInterceptor( +      experimental::InterceptorBatchMethods* interceptor_methods, size_t pos) { +    GPR_CODEGEN_ASSERT(pos < interceptors_.size()); +    interceptors_[pos]->Intercept(interceptor_methods); +  } + +  void RegisterInterceptors( +      const std::vector< +          std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>& +          creators) { +    for (const auto& creator : creators) { +      auto* interceptor = creator->CreateServerInterceptor(this); +      if (interceptor != nullptr) { +        interceptors_.push_back( +            std::unique_ptr<experimental::Interceptor>(interceptor)); +      } +    } +  } + +  void Ref() { ref_.fetch_add(1, std::memory_order_relaxed); } +  void Unref() { +    if (GPR_UNLIKELY(ref_.fetch_sub(1, std::memory_order_acq_rel) == 1)) { +      delete this; +    } +  } + +  ServerContextBase* ctx_ = nullptr; +  const char* method_ = nullptr; +  const Type type_; +  std::atomic<intptr_t> ref_{1}; +  std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_; + +  friend class internal::InterceptorBatchMethodsImpl; +  friend class grpc::ServerContextBase; +}; + +}  // namespace experimental +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h new file mode 100644 index 00000000000..d97b7250251 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/server_interface.h @@ -0,0 +1,397 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H +#define GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H + +#include <grpc/impl/codegen/port_platform.h> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/byte_buffer.h> +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/call_hook.h> +#include <grpcpp/impl/codegen/completion_queue_tag.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/interceptor_common.h> +#include <grpcpp/impl/codegen/rpc_service_method.h> +#include <grpcpp/impl/codegen/server_context.h> + +namespace grpc { + +class AsyncGenericService; +class Channel; +class CompletionQueue; +class GenericServerContext; +class ServerCompletionQueue; +class ServerCredentials; +class Service; + +extern CoreCodegenInterface* g_core_codegen_interface; + +/// Models a gRPC server. +/// +/// Servers are configured and started via \a grpc::ServerBuilder. 
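// Editorial note: illustrative sketch only, not part of this header. It shows a
// server assembled with grpc::ServerBuilder, including a pass-through server
// interceptor registered through the experimental factory interface declared in
// server_interceptor.h above. "MyService" and "my_service" are placeholders for
// any registered service implementation; the listening address is arbitrary.
//
//   #include <memory>
//   #include <vector>
//   #include <grpcpp/grpcpp.h>
//
//   class LoggingInterceptor : public grpc::experimental::Interceptor {
//    public:
//     void Intercept(
//         grpc::experimental::InterceptorBatchMethods* methods) override {
//       // A real interceptor would inspect hook points here before proceeding.
//       methods->Proceed();
//     }
//   };
//
//   class LoggingInterceptorFactory
//       : public grpc::experimental::ServerInterceptorFactoryInterface {
//    public:
//     grpc::experimental::Interceptor* CreateServerInterceptor(
//         grpc::experimental::ServerRpcInfo* /*info*/) override {
//       return new LoggingInterceptor;  // ownership passes to the library
//     }
//   };
//
//   void RunServer(MyService* my_service) {
//     grpc::ServerBuilder builder;
//     builder.AddListeningPort("0.0.0.0:50051",
//                              grpc::InsecureServerCredentials());
//     builder.RegisterService(my_service);
//     std::vector<std::unique_ptr<
//         grpc::experimental::ServerInterceptorFactoryInterface>> creators;
//     creators.push_back(std::make_unique<LoggingInterceptorFactory>());
//     builder.experimental().SetInterceptorCreators(std::move(creators));
//     std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
//     server->Wait();  // returns only after Shutdown() is called elsewhere
//   }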
+namespace internal {
+class ServerAsyncStreamingInterface;
+}  // namespace internal
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+namespace experimental {
+#endif
+class CallbackGenericService;
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+}  // namespace experimental
+#endif
+
+namespace experimental {
+class ServerInterceptorFactoryInterface;
+}  // namespace experimental
+
+class ServerInterface : public internal::CallHook {
+ public:
+  virtual ~ServerInterface() {}
+
+  /// \a Shutdown does the following things:
+  ///
+  /// 1. Shut down the server: deactivate all listening ports, mark it in
+  ///    "shutdown mode" so that further Request calls or incoming RPC matches
+  ///    are no longer allowed. Also return all Request'ed-but-not-yet-active
+  ///    calls as failed (!ok). This refers to calls that have been requested
+  ///    at the server by the server-side library or application code but that
+  ///    have not yet been matched to incoming RPCs from the client. Note that
+  ///    this would even include default calls added automatically by the gRPC
+  ///    C++ API without the user's input (e.g., "Unimplemented RPC method").
+  ///
+  /// 2. Block until all RPC method handlers invoked automatically by the sync
+  ///    API finish.
+  ///
+  /// 3. If all pending calls complete (and all their operations are
+  ///    retrieved by Next) before \a deadline expires, this finishes
+  ///    gracefully. Otherwise, forcefully cancel all pending calls associated
+  ///    with the server after \a deadline expires. In the case of the sync API,
+  ///    if the RPC function for a streaming call has already been started and
+  ///    takes a week to complete, the RPC function won't be forcefully
+  ///    terminated (since that would leave state corrupt and incomplete) and
+  ///    the method handler will just keep running (which will prevent the
+  ///    server from completing the "join" operation that it needs to do at
+  ///    shutdown time).
+  ///
+  /// All completion queues associated with the server (for example, for async
+  /// serving) must be shut down *after* this method has returned:
+  /// See \a ServerBuilder::AddCompletionQueue for details.
+  /// They must also be drained (by repeated Next) after being shut down.
+  ///
+  /// \param deadline How long to wait until pending RPCs are forcefully
+  /// terminated.
+  template <class T>
+  void Shutdown(const T& deadline) {
+    ShutdownInternal(TimePoint<T>(deadline).raw_time());
+  }
+
+  /// Shut down the server without a deadline and without forced cancellation.
+  ///
+  /// All completion queues associated with the server (for example, for async
+  /// serving) must be shut down *after* this method has returned:
+  /// See \a ServerBuilder::AddCompletionQueue for details.
+  void Shutdown() {
+    ShutdownInternal(
+        g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_MONOTONIC));
+  }
+
+  /// Block waiting for all work to complete.
+  ///
+  /// \warning The server must be either shutting down or some other thread must
+  /// call \a Shutdown for this function to ever return.
+  virtual void Wait() = 0;
+
+ protected:
+  friend class ::grpc::Service;
+
+  /// Register a service. This call does not take ownership of the service.
+  /// The service must exist for the lifetime of the Server instance.
+  virtual bool RegisterService(const TString* host, Service* service) = 0;
+
+  /// Register a generic service. This call does not take ownership of the
+  /// service. The service must exist for the lifetime of the Server instance.
+  virtual void RegisterAsyncGenericService(AsyncGenericService* service) = 0;
+
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+  /// Register a callback generic service. This call does not take ownership of
+  /// the service. The service must exist for the lifetime of the Server
+  /// instance. May not be abstract since this is a post-1.0 API addition.
+
+  virtual void RegisterCallbackGenericService(CallbackGenericService*
+                                              /*service*/) {}
+#else
+  /// NOTE: class experimental_registration_interface is not part of the public
+  /// API of this class
+  /// TODO(vjpai): Move these contents to public API when no longer experimental
+  class experimental_registration_interface {
+   public:
+    virtual ~experimental_registration_interface() {}
+    /// May not be abstract since this is a post-1.0 API addition
+    virtual void RegisterCallbackGenericService(
+        experimental::CallbackGenericService* /*service*/) {}
+  };
+
+  /// NOTE: The function experimental_registration() is not stable public API.
+  /// It is a view to the experimental components of this class. It may be
+  /// changed or removed at any time. May not be abstract since this is a
+  /// post-1.0 API addition
+  virtual experimental_registration_interface* experimental_registration() {
+    return nullptr;
+  }
+#endif
+
+  /// Tries to bind \a server to the given \a addr.
+  ///
+  /// It can be invoked multiple times.
+  ///
+  /// \param addr The address to try to bind to the server (e.g., localhost:1234,
+  /// 192.168.1.1:31416, [::1]:27182, etc.).
+  /// \param creds The credentials associated with the server.
+  ///
+  /// \return bound port number on success, 0 on failure.
+  ///
+  /// \warning It's an error to call this method on an already started server.
+  virtual int AddListeningPort(const TString& addr,
+                               ServerCredentials* creds) = 0;
+
+  /// Start the server.
+  ///
+  /// \param cqs Completion queues for handling asynchronous services. The
+  /// caller is required to keep all completion queues live until the server is
+  /// destroyed.
+  /// \param num_cqs How many completion queues does \a cqs hold.
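// Editorial note: illustrative sketch only, not part of this header. It shows
// the shutdown-then-drain ordering that the Shutdown() and Start()
// documentation above requires of async servers; "server" and "cq" are assumed
// to come from ServerBuilder::BuildAndStart() and
// ServerBuilder::AddCompletionQueue().
//
//   void StopAsyncServer(grpc::Server* server, grpc::ServerCompletionQueue* cq) {
//     // 1. Shut the server down first (optionally with a deadline for
//     //    forceful cancellation of still-pending calls).
//     server->Shutdown(std::chrono::system_clock::now() +
//                      std::chrono::seconds(5));
//     // 2. Only then shut down and fully drain every server completion queue.
//     cq->Shutdown();
//     void* tag;
//     bool ok;
//     while (cq->Next(&tag, &ok)) {
//       // Drop or clean up any outstanding tags.
//     }
//   }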
+  virtual void Start(::grpc::ServerCompletionQueue** cqs, size_t num_cqs) = 0; + +  virtual void ShutdownInternal(gpr_timespec deadline) = 0; + +  virtual int max_receive_message_size() const = 0; + +  virtual grpc_server* server() = 0; + +  virtual void PerformOpsOnCall(internal::CallOpSetInterface* ops, +                                internal::Call* call) = 0; + +  class BaseAsyncRequest : public internal::CompletionQueueTag { +   public: +    BaseAsyncRequest(ServerInterface* server, ::grpc::ServerContext* context, +                     internal::ServerAsyncStreamingInterface* stream, +                     ::grpc::CompletionQueue* call_cq, +                     ::grpc::ServerCompletionQueue* notification_cq, void* tag, +                     bool delete_on_finalize); +    virtual ~BaseAsyncRequest(); + +    bool FinalizeResult(void** tag, bool* status) override; + +   private: +    void ContinueFinalizeResultAfterInterception(); + +   protected: +    ServerInterface* const server_; +    ::grpc::ServerContext* const context_; +    internal::ServerAsyncStreamingInterface* const stream_; +    ::grpc::CompletionQueue* const call_cq_; +    ::grpc::ServerCompletionQueue* const notification_cq_; +    void* const tag_; +    const bool delete_on_finalize_; +    grpc_call* call_; +    internal::Call call_wrapper_; +    internal::InterceptorBatchMethodsImpl interceptor_methods_; +    bool done_intercepting_; +  }; + +  /// RegisteredAsyncRequest is not part of the C++ API +  class RegisteredAsyncRequest : public BaseAsyncRequest { +   public: +    RegisteredAsyncRequest(ServerInterface* server, +                           ::grpc::ServerContext* context, +                           internal::ServerAsyncStreamingInterface* stream, +                           ::grpc::CompletionQueue* call_cq, +                           ::grpc::ServerCompletionQueue* notification_cq, +                           void* tag, const char* name, +                           internal::RpcMethod::RpcType type); + +    virtual bool FinalizeResult(void** tag, bool* status) override { +      /* If we are done intercepting, then there is nothing more for us to do */ +      if (done_intercepting_) { +        return BaseAsyncRequest::FinalizeResult(tag, status); +      } +      call_wrapper_ = ::grpc::internal::Call( +          call_, server_, call_cq_, server_->max_receive_message_size(), +          context_->set_server_rpc_info(name_, type_, +                                        *server_->interceptor_creators())); +      return BaseAsyncRequest::FinalizeResult(tag, status); +    } + +   protected: +    void IssueRequest(void* registered_method, grpc_byte_buffer** payload, +                      ::grpc::ServerCompletionQueue* notification_cq); +    const char* name_; +    const internal::RpcMethod::RpcType type_; +  }; + +  class NoPayloadAsyncRequest final : public RegisteredAsyncRequest { +   public: +    NoPayloadAsyncRequest(internal::RpcServiceMethod* registered_method, +                          ServerInterface* server, +                          ::grpc::ServerContext* context, +                          internal::ServerAsyncStreamingInterface* stream, +                          ::grpc::CompletionQueue* call_cq, +                          ::grpc::ServerCompletionQueue* notification_cq, +                          void* tag) +        : RegisteredAsyncRequest( +              server, context, stream, call_cq, notification_cq, tag, +              registered_method->name(), registered_method->method_type()) { +      
IssueRequest(registered_method->server_tag(), nullptr, notification_cq); +    } + +    // uses RegisteredAsyncRequest::FinalizeResult +  }; + +  template <class Message> +  class PayloadAsyncRequest final : public RegisteredAsyncRequest { +   public: +    PayloadAsyncRequest(internal::RpcServiceMethod* registered_method, +                        ServerInterface* server, ::grpc::ServerContext* context, +                        internal::ServerAsyncStreamingInterface* stream, +                        ::grpc::CompletionQueue* call_cq, +                        ::grpc::ServerCompletionQueue* notification_cq, +                        void* tag, Message* request) +        : RegisteredAsyncRequest( +              server, context, stream, call_cq, notification_cq, tag, +              registered_method->name(), registered_method->method_type()), +          registered_method_(registered_method), +          request_(request) { +      IssueRequest(registered_method->server_tag(), payload_.bbuf_ptr(), +                   notification_cq); +    } + +    ~PayloadAsyncRequest() { +      payload_.Release();  // We do not own the payload_ +    } + +    bool FinalizeResult(void** tag, bool* status) override { +      /* If we are done intercepting, then there is nothing more for us to do */ +      if (done_intercepting_) { +        return RegisteredAsyncRequest::FinalizeResult(tag, status); +      } +      if (*status) { +        if (!payload_.Valid() || !SerializationTraits<Message>::Deserialize( +                                      payload_.bbuf_ptr(), request_) +                                      .ok()) { +          // If deserialization fails, we cancel the call and instantiate +          // a new instance of ourselves to request another call.  We then +          // return false, which prevents the call from being returned to +          // the application. 
+          g_core_codegen_interface->grpc_call_cancel_with_status( +              call_, GRPC_STATUS_INTERNAL, "Unable to parse request", nullptr); +          g_core_codegen_interface->grpc_call_unref(call_); +          new PayloadAsyncRequest(registered_method_, server_, context_, +                                  stream_, call_cq_, notification_cq_, tag_, +                                  request_); +          delete this; +          return false; +        } +      } +      /* Set interception point for recv message */ +      interceptor_methods_.AddInterceptionHookPoint( +          experimental::InterceptionHookPoints::POST_RECV_MESSAGE); +      interceptor_methods_.SetRecvMessage(request_, nullptr); +      return RegisteredAsyncRequest::FinalizeResult(tag, status); +    } + +   private: +    internal::RpcServiceMethod* const registered_method_; +    Message* const request_; +    ByteBuffer payload_; +  }; + +  class GenericAsyncRequest : public BaseAsyncRequest { +   public: +    GenericAsyncRequest(ServerInterface* server, GenericServerContext* context, +                        internal::ServerAsyncStreamingInterface* stream, +                        ::grpc::CompletionQueue* call_cq, +                        ::grpc::ServerCompletionQueue* notification_cq, +                        void* tag, bool delete_on_finalize); + +    bool FinalizeResult(void** tag, bool* status) override; + +   private: +    grpc_call_details call_details_; +  }; + +  template <class Message> +  void RequestAsyncCall(internal::RpcServiceMethod* method, +                        ::grpc::ServerContext* context, +                        internal::ServerAsyncStreamingInterface* stream, +                        ::grpc::CompletionQueue* call_cq, +                        ::grpc::ServerCompletionQueue* notification_cq, +                        void* tag, Message* message) { +    GPR_CODEGEN_ASSERT(method); +    new PayloadAsyncRequest<Message>(method, this, context, stream, call_cq, +                                     notification_cq, tag, message); +  } + +  void RequestAsyncCall(internal::RpcServiceMethod* method, +                        ::grpc::ServerContext* context, +                        internal::ServerAsyncStreamingInterface* stream, +                        ::grpc::CompletionQueue* call_cq, +                        ::grpc::ServerCompletionQueue* notification_cq, +                        void* tag) { +    GPR_CODEGEN_ASSERT(method); +    new NoPayloadAsyncRequest(method, this, context, stream, call_cq, +                              notification_cq, tag); +  } + +  void RequestAsyncGenericCall(GenericServerContext* context, +                               internal::ServerAsyncStreamingInterface* stream, +                               ::grpc::CompletionQueue* call_cq, +                               ::grpc::ServerCompletionQueue* notification_cq, +                               void* tag) { +    new GenericAsyncRequest(this, context, stream, call_cq, notification_cq, +                            tag, true); +  } + + private: +  // EXPERIMENTAL +  // Getter method for the vector of interceptor factory objects. 
+  // Returns a nullptr (rather than being pure) since this is a post-1.0 method +  // and adding a new pure method to an interface would be a breaking change +  // (even though this is private and non-API) +  virtual std::vector< +      std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>* +  interceptor_creators() { +    return nullptr; +  } + +  // EXPERIMENTAL +  // A method to get the callbackable completion queue associated with this +  // server. If the return value is nullptr, this server doesn't support +  // callback operations. +  // TODO(vjpai): Consider a better default like using a global CQ +  // Returns nullptr (rather than being pure) since this is a post-1.0 method +  // and adding a new pure method to an interface would be a breaking change +  // (even though this is private and non-API) +  virtual ::grpc::CompletionQueue* CallbackCQ() { return nullptr; } +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERVER_INTERFACE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h new file mode 100644 index 00000000000..30be904a3c3 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/service_type.h @@ -0,0 +1,275 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SERVICE_TYPE_H +#define GRPCPP_IMPL_CODEGEN_SERVICE_TYPE_H + +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/rpc_service_method.h> +#include <grpcpp/impl/codegen/serialization_traits.h> +#include <grpcpp/impl/codegen/server_interface.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +class CompletionQueue; +class ServerContext; +class ServerInterface; + +namespace internal { +class Call; +class ServerAsyncStreamingInterface { + public: +  virtual ~ServerAsyncStreamingInterface() {} + +  /// Request notification of the sending of initial metadata to the client. +  /// Completion will be notified by \a tag on the associated completion +  /// queue. This call is optional, but if it is used, it cannot be used +  /// concurrently with or after the \a Finish method. +  /// +  /// \param[in] tag Tag identifying this request. 
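The tag-based contract documented above (the `SendInitialMetadata(void* tag)` declaration follows immediately below) is the basis of the asynchronous server surface. A hedged sketch of how application code typically drives it through a generated async service; `Greeter::AsyncService`, `RequestSayHello`, `HelloRequest` and `HelloReply` are hypothetical generated names, not part of this header:

// Illustrative only: one request/response cycle driven off a completion queue.
void HandleOneCall(Greeter::AsyncService* service, grpc::ServerCompletionQueue* cq) {
  grpc::ServerContext ctx;
  HelloRequest request;
  HelloReply reply;
  grpc::ServerAsyncResponseWriter<HelloReply> responder(&ctx);
  int t1, t2, t3;  // any distinct addresses work as tags

  // Ask for the next SayHello call; &t1 pops out of cq when one arrives.
  service->RequestSayHello(&ctx, &request, &responder, cq, cq, &t1);

  void* tag; bool ok;
  cq->Next(&tag, &ok);                 // wait for the incoming call (tag == &t1)

  responder.SendInitialMetadata(&t2);  // optional, and never after Finish
  cq->Next(&tag, &ok);

  responder.Finish(reply, grpc::Status::OK, &t3);
  cq->Next(&tag, &ok);
}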
+  virtual void SendInitialMetadata(void* tag) = 0; + + private: +  friend class ::grpc::ServerInterface; +  virtual void BindCall(Call* call) = 0; +}; +}  // namespace internal + +/// Desriptor of an RPC service and its various RPC methods +class Service { + public: +  Service() : server_(nullptr) {} +  virtual ~Service() {} + +  bool has_async_methods() const { +    for (const auto& method : methods_) { +      if (method && method->handler() == nullptr) { +        return true; +      } +    } +    return false; +  } + +  bool has_synchronous_methods() const { +    for (const auto& method : methods_) { +      if (method && +          method->api_type() == internal::RpcServiceMethod::ApiType::SYNC) { +        return true; +      } +    } +    return false; +  } + +  bool has_callback_methods() const { +    for (const auto& method : methods_) { +      if (method && (method->api_type() == +                         internal::RpcServiceMethod::ApiType::CALL_BACK || +                     method->api_type() == +                         internal::RpcServiceMethod::ApiType::RAW_CALL_BACK)) { +        return true; +      } +    } +    return false; +  } + +  bool has_generic_methods() const { +    for (const auto& method : methods_) { +      if (method.get() == nullptr) { +        return true; +      } +    } +    return false; +  } + + protected: +  // TODO(vjpai): Promote experimental contents once callback API is accepted +  class experimental_type { +   public: +    explicit experimental_type(Service* service) : service_(service) {} + +    void MarkMethodCallback(int index, internal::MethodHandler* handler) { +      service_->MarkMethodCallbackInternal(index, handler); +    } + +    void MarkMethodRawCallback(int index, internal::MethodHandler* handler) { +      service_->MarkMethodRawCallbackInternal(index, handler); +    } + +    internal::MethodHandler* GetHandler(int index) { +      return service_->GetHandlerInternal(index); +    } + +   private: +    Service* service_; +  }; + +  experimental_type experimental() { return experimental_type(this); } + +  template <class Message> +  void RequestAsyncUnary(int index, ::grpc::ServerContext* context, +                         Message* request, +                         internal::ServerAsyncStreamingInterface* stream, +                         ::grpc::CompletionQueue* call_cq, +                         ::grpc::ServerCompletionQueue* notification_cq, +                         void* tag) { +    // Typecast the index to size_t for indexing into a vector +    // while preserving the API that existed before a compiler +    // warning was first seen (grpc/grpc#11664) +    size_t idx = static_cast<size_t>(index); +    server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq, +                              notification_cq, tag, request); +  } +  void RequestAsyncClientStreaming( +      int index, ::grpc::ServerContext* context, +      internal::ServerAsyncStreamingInterface* stream, +      ::grpc::CompletionQueue* call_cq, +      ::grpc::ServerCompletionQueue* notification_cq, void* tag) { +    size_t idx = static_cast<size_t>(index); +    server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq, +                              notification_cq, tag); +  } +  template <class Message> +  void RequestAsyncServerStreaming( +      int index, ::grpc::ServerContext* context, Message* request, +      internal::ServerAsyncStreamingInterface* stream, +      ::grpc::CompletionQueue* call_cq, +      ::grpc::ServerCompletionQueue* 
notification_cq, void* tag) { +    size_t idx = static_cast<size_t>(index); +    server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq, +                              notification_cq, tag, request); +  } +  void RequestAsyncBidiStreaming( +      int index, ::grpc::ServerContext* context, +      internal::ServerAsyncStreamingInterface* stream, +      ::grpc::CompletionQueue* call_cq, +      ::grpc::ServerCompletionQueue* notification_cq, void* tag) { +    size_t idx = static_cast<size_t>(index); +    server_->RequestAsyncCall(methods_[idx].get(), context, stream, call_cq, +                              notification_cq, tag); +  } + +  void AddMethod(internal::RpcServiceMethod* method) { +    methods_.emplace_back(method); +  } + +  void MarkMethodAsync(int index) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT( +        methods_[idx].get() != nullptr && +        "Cannot mark the method as 'async' because it has already been " +        "marked as 'generic'."); +    methods_[idx]->SetServerApiType(internal::RpcServiceMethod::ApiType::ASYNC); +  } + +  void MarkMethodRaw(int index) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT(methods_[idx].get() != nullptr && +                       "Cannot mark the method as 'raw' because it has already " +                       "been marked as 'generic'."); +    methods_[idx]->SetServerApiType(internal::RpcServiceMethod::ApiType::RAW); +  } + +  void MarkMethodGeneric(int index) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT( +        methods_[idx]->handler() != nullptr && +        "Cannot mark the method as 'generic' because it has already been " +        "marked as 'async' or 'raw'."); +    methods_[idx].reset(); +  } + +  void MarkMethodStreamed(int index, internal::MethodHandler* streamed_method) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT(methods_[idx] && methods_[idx]->handler() && +                       "Cannot mark an async or generic method Streamed"); +    methods_[idx]->SetHandler(streamed_method); + +    // From the server's point of view, streamed unary is a special +    // case of BIDI_STREAMING that has 1 read and 1 write, in that order, +    // and split server-side streaming is BIDI_STREAMING with 1 read and +    // any number of writes, in that order. 
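The streamed-unary shape described in the comment above (exactly one read, then one write) is what a handler registered through `MarkMethodStreamed` has to follow; the `SetMethodType(BIDI_STREAMING)` call that completes this function appears right after this sketch. A hedged illustration of such a handler body; `HelloRequest`, `HelloReply` and the `set_message` setter are hypothetical generated names:

// Illustrative only: a streamed-unary handler.
grpc::Status StreamedSayHello(
    grpc::ServerContext* /*ctx*/,
    grpc::ServerUnaryStreamer<HelloRequest, HelloReply>* streamer) {
  HelloRequest request;
  streamer->Read(&request);     // the single request message
  HelloReply reply;
  reply.set_message("hello");   // hypothetical proto setter
  streamer->Write(reply);       // the single response message
  return grpc::Status::OK;
}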
+    methods_[idx]->SetMethodType(internal::RpcMethod::BIDI_STREAMING); +  } + +#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL +  void MarkMethodCallback(int index, internal::MethodHandler* handler) { +    MarkMethodCallbackInternal(index, handler); +  } + +  void MarkMethodRawCallback(int index, internal::MethodHandler* handler) { +    MarkMethodRawCallbackInternal(index, handler); +  } + +  internal::MethodHandler* GetHandler(int index) { +    return GetHandlerInternal(index); +  } +#endif + private: +  // TODO(vjpai): migrate the Internal functions to mainline functions once +  //              callback API is fully de-experimental +  void MarkMethodCallbackInternal(int index, internal::MethodHandler* handler) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT( +        methods_[idx].get() != nullptr && +        "Cannot mark the method as 'callback' because it has already been " +        "marked as 'generic'."); +    methods_[idx]->SetHandler(handler); +    methods_[idx]->SetServerApiType( +        internal::RpcServiceMethod::ApiType::CALL_BACK); +  } + +  void MarkMethodRawCallbackInternal(int index, +                                     internal::MethodHandler* handler) { +    // This does not have to be a hard error, however no one has approached us +    // with a use case yet. Please file an issue if you believe you have one. +    size_t idx = static_cast<size_t>(index); +    GPR_CODEGEN_ASSERT( +        methods_[idx].get() != nullptr && +        "Cannot mark the method as 'raw callback' because it has already " +        "been marked as 'generic'."); +    methods_[idx]->SetHandler(handler); +    methods_[idx]->SetServerApiType( +        internal::RpcServiceMethod::ApiType::RAW_CALL_BACK); +  } + +  internal::MethodHandler* GetHandlerInternal(int index) { +    size_t idx = static_cast<size_t>(index); +    return methods_[idx]->handler(); +  } + +  friend class Server; +  friend class ServerInterface; +  ServerInterface* server_; +  std::vector<std::unique_ptr<internal::RpcServiceMethod>> methods_; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SERVICE_TYPE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h new file mode 100644 index 00000000000..b1a24dcef8b --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/slice.h @@ -0,0 +1,143 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SLICE_H +#define GRPCPP_IMPL_CODEGEN_SLICE_H + +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/string_ref.h> + +#include <grpc/impl/codegen/slice.h> + +namespace grpc { + +/// A wrapper around \a grpc_slice. 
+/// +/// A slice represents a contiguous reference counted array of bytes. +/// It is cheap to take references to a slice, and it is cheap to create a +/// slice pointing to a subset of another slice. +class Slice final { + public: +  /// Construct an empty slice. +  Slice() : slice_(g_core_codegen_interface->grpc_empty_slice()) {} +  /// Destructor - drops one reference. +  ~Slice() { g_core_codegen_interface->grpc_slice_unref(slice_); } + +  enum AddRef { ADD_REF }; +  /// Construct a slice from \a slice, adding a reference. +  Slice(grpc_slice slice, AddRef) +      : slice_(g_core_codegen_interface->grpc_slice_ref(slice)) {} + +  enum StealRef { STEAL_REF }; +  /// Construct a slice from \a slice, stealing a reference. +  Slice(grpc_slice slice, StealRef) : slice_(slice) {} + +  /// Allocate a slice of specified size +  Slice(size_t len) +      : slice_(g_core_codegen_interface->grpc_slice_malloc(len)) {} + +  /// Construct a slice from a copied buffer +  Slice(const void* buf, size_t len) +      : slice_(g_core_codegen_interface->grpc_slice_from_copied_buffer( +            reinterpret_cast<const char*>(buf), len)) {} + +  /// Construct a slice from a copied string +  Slice(const TString& str) +      : slice_(g_core_codegen_interface->grpc_slice_from_copied_buffer( +            str.c_str(), str.length())) {} + +  enum StaticSlice { STATIC_SLICE }; + +  /// Construct a slice from a static buffer +  Slice(const void* buf, size_t len, StaticSlice) +      : slice_(g_core_codegen_interface->grpc_slice_from_static_buffer( +            reinterpret_cast<const char*>(buf), len)) {} + +  /// Copy constructor, adds a reference. +  Slice(const Slice& other) +      : slice_(g_core_codegen_interface->grpc_slice_ref(other.slice_)) {} + +  /// Assignment, reference count is unchanged. +  Slice& operator=(Slice other) { +    std::swap(slice_, other.slice_); +    return *this; +  } + +  /// Create a slice pointing at some data. Calls malloc to allocate a refcount +  /// for the object, and arranges that destroy will be called with the +  /// user data pointer passed in at destruction. Can be the same as buf or +  /// different (e.g., if data is part of a larger structure that must be +  /// destroyed when the data is no longer needed) +  Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data) +      : slice_(g_core_codegen_interface->grpc_slice_new_with_user_data( +            buf, len, destroy, user_data)) {} + +  /// Specialization of above for common case where buf == user_data +  Slice(void* buf, size_t len, void (*destroy)(void*)) +      : Slice(buf, len, destroy, buf) {} + +  /// Similar to the above but has a destroy that also takes slice length +  Slice(void* buf, size_t len, void (*destroy)(void*, size_t)) +      : slice_(g_core_codegen_interface->grpc_slice_new_with_len(buf, len, +                                                                 destroy)) {} + +  /// Byte size. +  size_t size() const { return GRPC_SLICE_LENGTH(slice_); } + +  /// Raw pointer to the beginning (first element) of the slice. +  const uint8_t* begin() const { return GRPC_SLICE_START_PTR(slice_); } + +  /// Raw pointer to the end (one byte \em past the last element) of the slice. +  const uint8_t* end() const { return GRPC_SLICE_END_PTR(slice_); } + +  /// Raw C slice. Caller needs to call grpc_slice_unref when done. 
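`Slice` is a thin RAII wrapper over the ref-counted core `grpc_slice`; `c_slice()`, defined just below, hands out an extra reference that the caller must release. A minimal usage sketch (illustrative only, not part of this header; `<grpcpp/support/slice.h>` is the public include path for user code):

#include <grpcpp/support/slice.h>

void SliceDemo() {
  grpc::Slice copied("hello", 5);     // copies the five bytes
  grpc::Slice alias(copied);          // copy construction only adds a reference

  const char* data = reinterpret_cast<const char*>(copied.begin());
  size_t length = copied.size();      // 5; [begin(), end()) is the byte range
  (void)data; (void)length;

  grpc_slice raw = copied.c_slice();  // takes an extra reference on the slice
  grpc_slice_unref(raw);              // caller must drop that reference
}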
+  grpc_slice c_slice() const { +    return g_core_codegen_interface->grpc_slice_ref(slice_); +  } + + private: +  friend class ByteBuffer; + +  grpc_slice slice_; +}; + +inline grpc::string_ref StringRefFromSlice(const grpc_slice* slice) { +  return grpc::string_ref( +      reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(*slice)), +      GRPC_SLICE_LENGTH(*slice)); +} + +inline TString StringFromCopiedSlice(grpc_slice slice) { +  return TString(reinterpret_cast<char*>(GRPC_SLICE_START_PTR(slice)), +                     GRPC_SLICE_LENGTH(slice)); +} + +inline grpc_slice SliceReferencingString(const TString& str) { +  return g_core_codegen_interface->grpc_slice_from_static_buffer(str.data(), +                                                                 str.length()); +} + +inline grpc_slice SliceFromCopiedString(const TString& str) { +  return g_core_codegen_interface->grpc_slice_from_copied_buffer(str.data(), +                                                                 str.length()); +} + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SLICE_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h new file mode 100644 index 00000000000..a5ad6f32fef --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/status.h @@ -0,0 +1,133 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_STATUS_H +#define GRPCPP_IMPL_CODEGEN_STATUS_H + +#include <grpc/impl/codegen/status.h> +#include <grpcpp/impl/codegen/config.h> +#include <grpcpp/impl/codegen/status_code_enum.h> + +namespace grpc { + +/// Did it work? If it didn't, why? +/// +/// See \a grpc::StatusCode for details on the available code and their meaning. +class Status { + public: +  /// Construct an OK instance. 
+  Status() : code_(StatusCode::OK) { +    // Static assertions to make sure that the C++ API value correctly +    // maps to the core surface API value +    static_assert(StatusCode::OK == static_cast<StatusCode>(GRPC_STATUS_OK), +                  "Mismatched status code"); +    static_assert( +        StatusCode::CANCELLED == static_cast<StatusCode>(GRPC_STATUS_CANCELLED), +        "Mismatched status code"); +    static_assert( +        StatusCode::UNKNOWN == static_cast<StatusCode>(GRPC_STATUS_UNKNOWN), +        "Mismatched status code"); +    static_assert(StatusCode::INVALID_ARGUMENT == +                      static_cast<StatusCode>(GRPC_STATUS_INVALID_ARGUMENT), +                  "Mismatched status code"); +    static_assert(StatusCode::DEADLINE_EXCEEDED == +                      static_cast<StatusCode>(GRPC_STATUS_DEADLINE_EXCEEDED), +                  "Mismatched status code"); +    static_assert( +        StatusCode::NOT_FOUND == static_cast<StatusCode>(GRPC_STATUS_NOT_FOUND), +        "Mismatched status code"); +    static_assert(StatusCode::ALREADY_EXISTS == +                      static_cast<StatusCode>(GRPC_STATUS_ALREADY_EXISTS), +                  "Mismatched status code"); +    static_assert(StatusCode::PERMISSION_DENIED == +                      static_cast<StatusCode>(GRPC_STATUS_PERMISSION_DENIED), +                  "Mismatched status code"); +    static_assert(StatusCode::UNAUTHENTICATED == +                      static_cast<StatusCode>(GRPC_STATUS_UNAUTHENTICATED), +                  "Mismatched status code"); +    static_assert(StatusCode::RESOURCE_EXHAUSTED == +                      static_cast<StatusCode>(GRPC_STATUS_RESOURCE_EXHAUSTED), +                  "Mismatched status code"); +    static_assert(StatusCode::FAILED_PRECONDITION == +                      static_cast<StatusCode>(GRPC_STATUS_FAILED_PRECONDITION), +                  "Mismatched status code"); +    static_assert( +        StatusCode::ABORTED == static_cast<StatusCode>(GRPC_STATUS_ABORTED), +        "Mismatched status code"); +    static_assert(StatusCode::OUT_OF_RANGE == +                      static_cast<StatusCode>(GRPC_STATUS_OUT_OF_RANGE), +                  "Mismatched status code"); +    static_assert(StatusCode::UNIMPLEMENTED == +                      static_cast<StatusCode>(GRPC_STATUS_UNIMPLEMENTED), +                  "Mismatched status code"); +    static_assert( +        StatusCode::INTERNAL == static_cast<StatusCode>(GRPC_STATUS_INTERNAL), +        "Mismatched status code"); +    static_assert(StatusCode::UNAVAILABLE == +                      static_cast<StatusCode>(GRPC_STATUS_UNAVAILABLE), +                  "Mismatched status code"); +    static_assert( +        StatusCode::DATA_LOSS == static_cast<StatusCode>(GRPC_STATUS_DATA_LOSS), +        "Mismatched status code"); +  } + +  /// Construct an instance with associated \a code and \a error_message. +  /// It is an error to construct an OK status with non-empty \a error_message. +  Status(StatusCode code, const TString& error_message) +      : code_(code), error_message_(error_message) {} + +  /// Construct an instance with \a code,  \a error_message and +  /// \a error_details. It is an error to construct an OK status with non-empty +  /// \a error_message and/or \a error_details. +  Status(StatusCode code, const TString& error_message, +         const TString& error_details) +      : code_(code), +        error_message_(error_message), +        binary_error_details_(error_details) {} + +  // Pre-defined special status objects. 
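Before the pre-defined `OK` and `CANCELLED` instances declared just below, a short usage sketch of the class (illustrative only; `Validate` is a hypothetical helper):

grpc::Status Validate(const TString& name) {
  if (name.empty()) {
    return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
                        "name must not be empty");
  }
  return grpc::Status::OK;  // the pre-defined OK instance declared below
}

// At a call site:
//   grpc::Status s = Validate(name);
//   if (!s.ok()) { /* s.error_code() and s.error_message() describe the failure */ }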
+  /// An OK pre-defined instance. +  static const Status& OK; +  /// A CANCELLED pre-defined instance. +  static const Status& CANCELLED; + +  /// Return the instance's error code. +  StatusCode error_code() const { return code_; } +  /// Return the instance's error message. +  TString error_message() const { return error_message_; } +  /// Return the (binary) error details. +  // Usually it contains a serialized google.rpc.Status proto. +  TString error_details() const { return binary_error_details_; } + +  /// Is the status OK? +  bool ok() const { return code_ == StatusCode::OK; } + +  // Ignores any errors. This method does nothing except potentially suppress +  // complaints from any tools that are checking that errors are not dropped on +  // the floor. +  void IgnoreError() const {} + + private: +  StatusCode code_; +  TString error_message_; +  TString binary_error_details_; +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_STATUS_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/status_code_enum.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/status_code_enum.h new file mode 100644 index 00000000000..bdd7ead6add --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/status_code_enum.h @@ -0,0 +1,143 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_STATUS_CODE_ENUM_H +#define GRPCPP_IMPL_CODEGEN_STATUS_CODE_ENUM_H + +namespace grpc { + +enum StatusCode { +  /// Not an error; returned on success. +  OK = 0, + +  /// The operation was cancelled (typically by the caller). +  CANCELLED = 1, + +  /// Unknown error. An example of where this error may be returned is if a +  /// Status value received from another address space belongs to an error-space +  /// that is not known in this address space. Also errors raised by APIs that +  /// do not return enough error information may be converted to this error. +  UNKNOWN = 2, + +  /// Client specified an invalid argument. Note that this differs from +  /// FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are +  /// problematic regardless of the state of the system (e.g., a malformed file +  /// name). +  INVALID_ARGUMENT = 3, + +  /// Deadline expired before operation could complete. For operations that +  /// change the state of the system, this error may be returned even if the +  /// operation has completed successfully. For example, a successful response +  /// from a server could have been delayed long enough for the deadline to +  /// expire. +  DEADLINE_EXCEEDED = 4, + +  /// Some requested entity (e.g., file or directory) was not found. +  NOT_FOUND = 5, + +  /// Some entity that we attempted to create (e.g., file or directory) already +  /// exists. +  ALREADY_EXISTS = 6, + +  /// The caller does not have permission to execute the specified operation. 
+  /// PERMISSION_DENIED must not be used for rejections caused by exhausting +  /// some resource (use RESOURCE_EXHAUSTED instead for those errors). +  /// PERMISSION_DENIED must not be used if the caller can not be identified +  /// (use UNAUTHENTICATED instead for those errors). +  PERMISSION_DENIED = 7, + +  /// The request does not have valid authentication credentials for the +  /// operation. +  UNAUTHENTICATED = 16, + +  /// Some resource has been exhausted, perhaps a per-user quota, or perhaps the +  /// entire file system is out of space. +  RESOURCE_EXHAUSTED = 8, + +  /// Operation was rejected because the system is not in a state required for +  /// the operation's execution. For example, directory to be deleted may be +  /// non-empty, an rmdir operation is applied to a non-directory, etc. +  /// +  /// A litmus test that may help a service implementor in deciding +  /// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: +  ///  (a) Use UNAVAILABLE if the client can retry just the failing call. +  ///  (b) Use ABORTED if the client should retry at a higher-level +  ///      (e.g., restarting a read-modify-write sequence). +  ///  (c) Use FAILED_PRECONDITION if the client should not retry until +  ///      the system state has been explicitly fixed. E.g., if an "rmdir" +  ///      fails because the directory is non-empty, FAILED_PRECONDITION +  ///      should be returned since the client should not retry unless +  ///      they have first fixed up the directory by deleting files from it. +  ///  (d) Use FAILED_PRECONDITION if the client performs conditional +  ///      REST Get/Update/Delete on a resource and the resource on the +  ///      server does not match the condition. E.g., conflicting +  ///      read-modify-write on the same resource. +  FAILED_PRECONDITION = 9, + +  /// The operation was aborted, typically due to a concurrency issue like +  /// sequencer check failures, transaction aborts, etc. +  /// +  /// See litmus test above for deciding between FAILED_PRECONDITION, ABORTED, +  /// and UNAVAILABLE. +  ABORTED = 10, + +  /// Operation was attempted past the valid range. E.g., seeking or reading +  /// past end of file. +  /// +  /// Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed +  /// if the system state changes. For example, a 32-bit file system will +  /// generate INVALID_ARGUMENT if asked to read at an offset that is not in the +  /// range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from +  /// an offset past the current file size. +  /// +  /// There is a fair bit of overlap between FAILED_PRECONDITION and +  /// OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error) +  /// when it applies so that callers who are iterating through a space can +  /// easily look for an OUT_OF_RANGE error to detect when they are done. +  OUT_OF_RANGE = 11, + +  /// Operation is not implemented or not supported/enabled in this service. +  UNIMPLEMENTED = 12, + +  /// Internal errors. Means some invariants expected by underlying System has +  /// been broken. If you see one of these errors, Something is very broken. +  INTERNAL = 13, + +  /// The service is currently unavailable. This is a most likely a transient +  /// condition and may be corrected by retrying with a backoff. Note that it is +  /// not always safe to retry non-idempotent operations. 
+  /// +  /// \warning Although data MIGHT not have been transmitted when this +  /// status occurs, there is NOT A GUARANTEE that the server has not seen +  /// anything. So in general it is unsafe to retry on this status code +  /// if the call is non-idempotent. +  /// +  /// See litmus test above for deciding between FAILED_PRECONDITION, ABORTED, +  /// and UNAVAILABLE. +  UNAVAILABLE = 14, + +  /// Unrecoverable data loss or corruption. +  DATA_LOSS = 15, + +  /// Force users to include a default branch: +  DO_NOT_USE = -1 +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_STATUS_CODE_ENUM_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h new file mode 100644 index 00000000000..c5dcd31c1de --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/string_ref.h @@ -0,0 +1,149 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_STRING_REF_H +#define GRPCPP_IMPL_CODEGEN_STRING_REF_H + +#include <string.h> + +#include <algorithm> +#include <iosfwd> +#include <iostream> +#include <iterator> + +#include <grpcpp/impl/codegen/config.h> + +#include <util/stream/output.h> + +namespace grpc { + +/// This class is a non owning reference to a string. +/// +/// It should be a strict subset of the upcoming std::string_ref. +/// +/// \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3442.html +/// +/// The constexpr is dropped or replaced with const for legacy compiler +/// compatibility. +class string_ref { + public: +  /// types +  typedef const char* const_iterator; +  typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + +  /// constants +  const static size_t npos; + +  /// construct/copy. 
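The constructors announced by the `construct/copy` heading follow below. A hedged sketch of `string_ref` used as a non-owning view (illustrative only; the caller keeps the underlying bytes alive for as long as the view is used):

bool IsBinaryMetadataKey(grpc::string_ref key) {
  return key.ends_with("-bin");  // no copy is made; key just points at the bytes
}

// IsBinaryMetadataKey("grpc-status-details-bin") == true
// grpc::string_ref("hello world").substr(0, 5) views "hello" without allocating.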
+  string_ref() : data_(nullptr), length_(0) {} +  string_ref(const string_ref& other) +      : data_(other.data_), length_(other.length_) {} +  string_ref& operator=(const string_ref& rhs) { +    data_ = rhs.data_; +    length_ = rhs.length_; +    return *this; +  } + +  string_ref(const char* s) : data_(s), length_(strlen(s)) {} +  string_ref(const char* s, size_t l) : data_(s), length_(l) {} +  string_ref(const TString& s) : data_(s.data()), length_(s.length()) {} + +  /// iterators +  const_iterator begin() const { return data_; } +  const_iterator end() const { return data_ + length_; } +  const_iterator cbegin() const { return data_; } +  const_iterator cend() const { return data_ + length_; } +  const_reverse_iterator rbegin() const { +    return const_reverse_iterator(end()); +  } +  const_reverse_iterator rend() const { +    return const_reverse_iterator(begin()); +  } +  const_reverse_iterator crbegin() const { +    return const_reverse_iterator(end()); +  } +  const_reverse_iterator crend() const { +    return const_reverse_iterator(begin()); +  } + +  /// capacity +  size_t size() const { return length_; } +  size_t length() const { return length_; } +  size_t max_size() const { return length_; } +  bool empty() const { return length_ == 0; } + +  /// element access +  const char* data() const { return data_; } + +  /// string operations +  int compare(string_ref x) const { +    size_t min_size = length_ < x.length_ ? length_ : x.length_; +    int r = memcmp(data_, x.data_, min_size); +    if (r < 0) return -1; +    if (r > 0) return 1; +    if (length_ < x.length_) return -1; +    if (length_ > x.length_) return 1; +    return 0; +  } + +  bool starts_with(string_ref x) const { +    return length_ >= x.length_ && (memcmp(data_, x.data_, x.length_) == 0); +  } + +  bool ends_with(string_ref x) const { +    return length_ >= x.length_ && +           (memcmp(data_ + (length_ - x.length_), x.data_, x.length_) == 0); +  } + +  size_t find(string_ref s) const { +    auto it = std::search(cbegin(), cend(), s.cbegin(), s.cend()); +    return it == cend() ? npos : std::distance(cbegin(), it); +  } + +  size_t find(char c) const { +    auto it = std::find(cbegin(), cend(), c); +    return it == cend() ? 
npos : std::distance(cbegin(), it); +  } + +  string_ref substr(size_t pos, size_t n = npos) const { +    if (pos > length_) pos = length_; +    if (n > (length_ - pos)) n = length_ - pos; +    return string_ref(data_ + pos, n); +  } + + private: +  const char* data_; +  size_t length_; +}; + +/// Comparison operators +inline bool operator==(string_ref x, string_ref y) { return x.compare(y) == 0; } +inline bool operator!=(string_ref x, string_ref y) { return x.compare(y) != 0; } +inline bool operator<(string_ref x, string_ref y) { return x.compare(y) < 0; } +inline bool operator<=(string_ref x, string_ref y) { return x.compare(y) <= 0; } +inline bool operator>(string_ref x, string_ref y) { return x.compare(y) > 0; } +inline bool operator>=(string_ref x, string_ref y) { return x.compare(y) >= 0; } + +inline IOutputStream& operator<<(IOutputStream& out, const string_ref& string) { +  TString t(string.begin(), string.end()); +  return out << t; +} + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_STRING_REF_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/stub_options.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/stub_options.h new file mode 100644 index 00000000000..a56695a8f82 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/stub_options.h @@ -0,0 +1,29 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_STUB_OPTIONS_H +#define GRPCPP_IMPL_CODEGEN_STUB_OPTIONS_H + +namespace grpc { + +/// Useful interface for generated stubs +class StubOptions {}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_STUB_OPTIONS_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync.h new file mode 100644 index 00000000000..146f182e57b --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync.h @@ -0,0 +1,151 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SYNC_H +#define GRPCPP_IMPL_CODEGEN_SYNC_H + +#include <grpc/impl/codegen/port_platform.h> + +#ifdef GPR_HAS_PTHREAD_H +#include <pthread.h> +#endif + +#include <mutex> + +#include <grpc/impl/codegen/log.h> +#include <grpc/impl/codegen/sync.h> + +#include <grpcpp/impl/codegen/core_codegen_interface.h> + +// The core library is not accessible in C++ codegen headers, and vice versa. 
+// Thus, we need to have duplicate headers with similar functionality. +// Make sure any change to this file is also reflected in +// src/core/lib/gprpp/sync.h too. +// +// Whenever possible, prefer "src/core/lib/gprpp/sync.h" over this file, +// since in core we do not rely on g_core_codegen_interface and hence do not +// pay the costs of virtual function calls. + +namespace grpc { +namespace internal { + +class Mutex { + public: +  Mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); } +  ~Mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); } + +  Mutex(const Mutex&) = delete; +  Mutex& operator=(const Mutex&) = delete; + +  gpr_mu* get() { return &mu_; } +  const gpr_mu* get() const { return &mu_; } + + private: +  union { +    gpr_mu mu_; +    std::mutex do_not_use_sth_; +#ifdef GPR_HAS_PTHREAD_H +    pthread_mutex_t do_not_use_pth_; +#endif +  }; +}; + +// MutexLock is a std:: +class MutexLock { + public: +  explicit MutexLock(Mutex* mu) : mu_(mu->get()) { +    g_core_codegen_interface->gpr_mu_lock(mu_); +  } +  explicit MutexLock(gpr_mu* mu) : mu_(mu) { +    g_core_codegen_interface->gpr_mu_lock(mu_); +  } +  ~MutexLock() { g_core_codegen_interface->gpr_mu_unlock(mu_); } + +  MutexLock(const MutexLock&) = delete; +  MutexLock& operator=(const MutexLock&) = delete; + + private: +  gpr_mu* const mu_; +}; + +class ReleasableMutexLock { + public: +  explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) { +    g_core_codegen_interface->gpr_mu_lock(mu_); +  } +  explicit ReleasableMutexLock(gpr_mu* mu) : mu_(mu) { +    g_core_codegen_interface->gpr_mu_lock(mu_); +  } +  ~ReleasableMutexLock() { +    if (!released_) g_core_codegen_interface->gpr_mu_unlock(mu_); +  } + +  ReleasableMutexLock(const ReleasableMutexLock&) = delete; +  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete; + +  void Lock() { +    GPR_DEBUG_ASSERT(released_); +    g_core_codegen_interface->gpr_mu_lock(mu_); +    released_ = false; +  } + +  void Unlock() { +    GPR_DEBUG_ASSERT(!released_); +    released_ = true; +    g_core_codegen_interface->gpr_mu_unlock(mu_); +  } + + private: +  gpr_mu* const mu_; +  bool released_ = false; +}; + +class CondVar { + public: +  CondVar() { g_core_codegen_interface->gpr_cv_init(&cv_); } +  ~CondVar() { g_core_codegen_interface->gpr_cv_destroy(&cv_); } + +  CondVar(const CondVar&) = delete; +  CondVar& operator=(const CondVar&) = delete; + +  void Signal() { g_core_codegen_interface->gpr_cv_signal(&cv_); } +  void Broadcast() { g_core_codegen_interface->gpr_cv_broadcast(&cv_); } + +  int Wait(Mutex* mu) { +    return Wait(mu, +                g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME)); +  } +  int Wait(Mutex* mu, const gpr_timespec& deadline) { +    return g_core_codegen_interface->gpr_cv_wait(&cv_, mu->get(), deadline); +  } + +  template <typename Predicate> +  void WaitUntil(Mutex* mu, Predicate pred) { +    while (!pred()) { +      Wait(mu, g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME)); +    } +  } + + private: +  gpr_cv cv_; +}; + +}  // namespace internal +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SYNC_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h new file mode 100644 index 00000000000..408f42f280d --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/sync_stream.h @@ -0,0 +1,943 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H +#define GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H + +#include <grpcpp/impl/codegen/call.h> +#include <grpcpp/impl/codegen/channel_interface.h> +#include <grpcpp/impl/codegen/client_context.h> +#include <grpcpp/impl/codegen/completion_queue.h> +#include <grpcpp/impl/codegen/core_codegen_interface.h> +#include <grpcpp/impl/codegen/server_context.h> +#include <grpcpp/impl/codegen/service_type.h> +#include <grpcpp/impl/codegen/status.h> + +namespace grpc { + +namespace internal { +/// Common interface for all synchronous client side streaming. +class ClientStreamingInterface { + public: +  virtual ~ClientStreamingInterface() {} + +  /// Block waiting until the stream finishes and a final status of the call is +  /// available. +  /// +  /// It is appropriate to call this method exactly once when both: +  ///   * the calling code (client-side) has no more message to send +  ///     (this can be declared implicitly by calling this method, or +  ///     explicitly through an earlier call to <i>WritesDone</i> method of the +  ///     class in use, e.g. \a ClientWriterInterface::WritesDone or +  ///     \a ClientReaderWriterInterface::WritesDone). +  ///   * there are no more messages to be received from the server (which can +  ///     be known implicitly, or explicitly from an earlier call to \a +  ///     ReaderInterface::Read that returned "false"). +  /// +  /// This function will return either: +  /// - when all incoming messages have been read and the server has +  ///   returned status. +  /// - when the server has returned a non-OK status. +  /// - OR when the call failed for some reason and the library generated a +  ///   status. +  /// +  /// Return values: +  ///   - \a Status contains the status code, message and details for the call +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible trailing metadata sent from the server. +  virtual ::grpc::Status Finish() = 0; +}; + +/// Common interface for all synchronous server side streaming. +class ServerStreamingInterface { + public: +  virtual ~ServerStreamingInterface() {} + +  /// Block to send initial metadata to client. +  /// This call is optional, but if it is used, it cannot be used concurrently +  /// with or after the \a Finish method. +  /// +  /// The initial metadata that will be sent to the client will be +  /// taken from the \a ServerContext associated with the call. +  virtual void SendInitialMetadata() = 0; +}; + +/// An interface that yields a sequence of messages of type \a R. +template <class R> +class ReaderInterface { + public: +  virtual ~ReaderInterface() {} + +  /// Get an upper bound on the next message size available for reading on this +  /// stream. +  virtual bool NextMessageSize(uint32_t* sz) = 0; + +  /// Block to read a message and parse to \a msg. Returns \a true on success. 
+  /// This is thread-safe with respect to \a Write or \WritesDone methods on +  /// the same stream. It should not be called concurrently with another \a +  /// Read on the same stream as the order of delivery will not be defined. +  /// +  /// \param[out] msg The read message. +  /// +  /// \return \a false when there will be no more incoming messages, either +  /// because the other side has called \a WritesDone() or the stream has failed +  /// (or been cancelled). +  virtual bool Read(R* msg) = 0; +}; + +/// An interface that can be fed a sequence of messages of type \a W. +template <class W> +class WriterInterface { + public: +  virtual ~WriterInterface() {} + +  /// Block to write \a msg to the stream with WriteOptions \a options. +  /// This is thread-safe with respect to \a ReaderInterface::Read +  /// +  /// \param msg The message to be written to the stream. +  /// \param options The WriteOptions affecting the write operation. +  /// +  /// \return \a true on success, \a false when the stream has been closed. +  virtual bool Write(const W& msg, ::grpc::WriteOptions options) = 0; + +  /// Block to write \a msg to the stream with default write options. +  /// This is thread-safe with respect to \a ReaderInterface::Read +  /// +  /// \param msg The message to be written to the stream. +  /// +  /// \return \a true on success, \a false when the stream has been closed. +  inline bool Write(const W& msg) { return Write(msg, ::grpc::WriteOptions()); } + +  /// Write \a msg and coalesce it with the writing of trailing metadata, using +  /// WriteOptions \a options. +  /// +  /// For client, WriteLast is equivalent of performing Write and WritesDone in +  /// a single step. \a msg and trailing metadata are coalesced and sent on wire +  /// by calling this function. For server, WriteLast buffers the \a msg. +  /// The writing of \a msg is held until the service handler returns, +  /// where \a msg and trailing metadata are coalesced and sent on wire. +  /// Note that WriteLast can only buffer \a msg up to the flow control window +  /// size. If \a msg size is larger than the window size, it will be sent on +  /// wire without buffering. +  /// +  /// \param[in] msg The message to be written to the stream. +  /// \param[in] options The WriteOptions to be used to write this message. +  void WriteLast(const W& msg, ::grpc::WriteOptions options) { +    Write(msg, options.set_last_message()); +  } +}; + +}  // namespace internal + +/// Client-side interface for streaming reads of message of type \a R. +template <class R> +class ClientReaderInterface : public internal::ClientStreamingInterface, +                              public internal::ReaderInterface<R> { + public: +  /// Block to wait for initial metadata from server. The received metadata +  /// can only be accessed after this call returns. Should only be called before +  /// the first read. Calling this method is optional, and if it is not called +  /// the metadata will be available in ClientContext after the first read. 
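`WaitForInitialMetadata()`, declared immediately below, is the only member `ClientReaderInterface` adds on top of the reader and streaming interfaces above, and calling it is optional. A hedged sketch of the usual synchronous server-streaming client loop built on these interfaces; the stub method `ListFeatures` and the `Feature` message are hypothetical generated names:

grpc::ClientContext ctx;
std::unique_ptr<grpc::ClientReader<Feature>> reader(
    stub->ListFeatures(&ctx, request));   // hypothetical generated stub call

Feature feature;
while (reader->Read(&feature)) {
  // consume one streamed message
}
grpc::Status status = reader->Finish();   // call exactly once, after Read() returns false
if (!status.ok()) {
  // status.error_code() / status.error_message() describe what went wrong
}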
+  virtual void WaitForInitialMetadata() = 0; +}; + +namespace internal { +template <class R> +class ClientReaderFactory { + public: +  template <class W> +  static ClientReader<R>* Create(::grpc::ChannelInterface* channel, +                                 const ::grpc::internal::RpcMethod& method, +                                 ::grpc::ClientContext* context, +                                 const W& request) { +    return new ClientReader<R>(channel, method, context, request); +  } +}; +}  // namespace internal + +/// Synchronous (blocking) client-side API for doing server-streaming RPCs, +/// where the stream of messages coming from the server has messages +/// of type \a R. +template <class R> +class ClientReader final : public ClientReaderInterface<R> { + public: +  /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for +  /// semantics. +  /// +  //  Side effect: +  ///   Once complete, the initial metadata read from +  ///   the server will be accessible through the \a ClientContext used to +  ///   construct this object. +  void WaitForInitialMetadata() override { +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +        ops; +    ops.RecvInitialMetadata(context_); +    call_.PerformOps(&ops); +    cq_.Pluck(&ops);  /// status ignored +  } + +  bool NextMessageSize(uint32_t* sz) override { +    int result = call_.max_receive_message_size(); +    *sz = (result > 0) ? result : UINT32_MAX; +    return true; +  } + +  /// See the \a ReaderInterface.Read method for semantics. +  /// Side effect: +  ///   This also receives initial metadata from the server, if not +  ///   already received (if initial metadata is received, it can be then +  ///   accessed through the \a ClientContext associated with this call). +  bool Read(R* msg) override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                                ::grpc::internal::CallOpRecvMessage<R>> +        ops; +    if (!context_->initial_metadata_received_) { +      ops.RecvInitialMetadata(context_); +    } +    ops.RecvMessage(msg); +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops) && ops.got_message; +  } + +  /// See the \a ClientStreamingInterface.Finish method for semantics. +  /// +  /// Side effect: +  ///   The \a ClientContext associated with this call is updated with +  ///   possible metadata received from the server. +  ::grpc::Status Finish() override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientRecvStatus> ops; +    ::grpc::Status status; +    ops.ClientRecvStatus(context_, &status); +    call_.PerformOps(&ops); +    GPR_CODEGEN_ASSERT(cq_.Pluck(&ops)); +    return status; +  } + + private: +  friend class internal::ClientReaderFactory<R>; +  ::grpc::ClientContext* context_; +  ::grpc::CompletionQueue cq_; +  ::grpc::internal::Call call_; + +  /// Block to create a stream and write the initial metadata and \a request +  /// out. Note that \a context will be used to fill in custom initial +  /// metadata used to send to the server when starting the call. 
+  template <class W> +  ClientReader(::grpc::ChannelInterface* channel, +               const ::grpc::internal::RpcMethod& method, +               ::grpc::ClientContext* context, const W& request) +      : context_(context), +        cq_(grpc_completion_queue_attributes{ +            GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, +            nullptr}),  // Pluckable cq +        call_(channel->CreateCall(method, context, &cq_)) { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpClientSendClose> +        ops; +    ops.SendInitialMetadata(&context->send_initial_metadata_, +                            context->initial_metadata_flags()); +    // TODO(ctiller): don't assert +    GPR_CODEGEN_ASSERT(ops.SendMessagePtr(&request).ok()); +    ops.ClientSendClose(); +    call_.PerformOps(&ops); +    cq_.Pluck(&ops); +  } +}; + +/// Client-side interface for streaming writes of message type \a W. +template <class W> +class ClientWriterInterface : public internal::ClientStreamingInterface, +                              public internal::WriterInterface<W> { + public: +  /// Half close writing from the client. (signal that the stream of messages +  /// coming from the client is complete). +  /// Blocks until currently-pending writes are completed. +  /// Thread safe with respect to \a ReaderInterface::Read operations only +  /// +  /// \return Whether the writes were successful. +  virtual bool WritesDone() = 0; +}; + +namespace internal { +template <class W> +class ClientWriterFactory { + public: +  template <class R> +  static ClientWriter<W>* Create(::grpc::ChannelInterface* channel, +                                 const ::grpc::internal::RpcMethod& method, +                                 ::grpc::ClientContext* context, R* response) { +    return new ClientWriter<W>(channel, method, context, response); +  } +}; +}  // namespace internal + +/// Synchronous (blocking) client-side API for doing client-streaming RPCs, +/// where the outgoing message stream coming from the client has messages of +/// type \a W. +template <class W> +class ClientWriter : public ClientWriterInterface<W> { + public: +  /// See the \a ClientStreamingInterface.WaitForInitialMetadata method for +  /// semantics. +  /// +  //  Side effect: +  ///   Once complete, the initial metadata read from the server will be +  ///   accessible through the \a ClientContext used to construct this object. +  void WaitForInitialMetadata() { +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +        ops; +    ops.RecvInitialMetadata(context_); +    call_.PerformOps(&ops); +    cq_.Pluck(&ops);  // status ignored +  } + +  /// See the WriterInterface.Write(const W& msg, WriteOptions options) method +  /// for semantics. +  /// +  /// Side effect: +  ///   Also sends initial metadata if not already sent (using the +  ///   \a ClientContext associated with this call). 
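A hedged sketch of the client-streaming pattern `ClientWriter` implements (its `Write` override follows below); the stub method `RecordRoute`, the `Point` and `RouteSummary` messages and the `points` container are hypothetical:

grpc::ClientContext ctx;
RouteSummary summary;
std::unique_ptr<grpc::ClientWriter<Point>> writer(
    stub->RecordRoute(&ctx, &summary));  // hypothetical generated stub call

for (const Point& p : points) {
  if (!writer->Write(p)) break;          // false: the stream is already closed
}
writer->WritesDone();                    // half-close: no more client messages
grpc::Status status = writer->Finish();  // on success, summary has been filled in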
+  using internal::WriterInterface<W>::Write; +  bool Write(const W& msg, ::grpc::WriteOptions options) override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpClientSendClose> +        ops; + +    if (options.is_last_message()) { +      options.set_buffer_hint(); +      ops.ClientSendClose(); +    } +    if (context_->initial_metadata_corked_) { +      ops.SendInitialMetadata(&context_->send_initial_metadata_, +                              context_->initial_metadata_flags()); +      context_->set_initial_metadata_corked(false); +    } +    if (!ops.SendMessagePtr(&msg, options).ok()) { +      return false; +    } + +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops); +  } + +  bool WritesDone() override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops; +    ops.ClientSendClose(); +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops); +  } + +  /// See the ClientStreamingInterface.Finish method for semantics. +  /// Side effects: +  ///   - Also receives initial metadata if not already received. +  ///   - Attempts to fill in the \a response parameter passed +  ///     to the constructor of this instance with the response +  ///     message from the server. +  ::grpc::Status Finish() override { +    ::grpc::Status status; +    if (!context_->initial_metadata_received_) { +      finish_ops_.RecvInitialMetadata(context_); +    } +    finish_ops_.ClientRecvStatus(context_, &status); +    call_.PerformOps(&finish_ops_); +    GPR_CODEGEN_ASSERT(cq_.Pluck(&finish_ops_)); +    return status; +  } + + private: +  friend class internal::ClientWriterFactory<W>; + +  /// Block to create a stream (i.e. send request headers and other initial +  /// metadata to the server). Note that \a context will be used to fill +  /// in custom initial metadata. \a response will be filled in with the +  /// single expected response message from the server upon a successful +  /// call to the \a Finish method of this instance. +  template <class R> +  ClientWriter(::grpc::ChannelInterface* channel, +               const ::grpc::internal::RpcMethod& method, +               ::grpc::ClientContext* context, R* response) +      : context_(context), +        cq_(grpc_completion_queue_attributes{ +            GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, +            nullptr}),  // Pluckable cq +        call_(channel->CreateCall(method, context, &cq_)) { +    finish_ops_.RecvMessage(response); +    finish_ops_.AllowNoMessage(); + +    if (!context_->initial_metadata_corked_) { +      ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +          ops; +      ops.SendInitialMetadata(&context->send_initial_metadata_, +                              context->initial_metadata_flags()); +      call_.PerformOps(&ops); +      cq_.Pluck(&ops); +    } +  } + +  ::grpc::ClientContext* context_; +  ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                              ::grpc::internal::CallOpGenericRecvMessage, +                              ::grpc::internal::CallOpClientRecvStatus> +      finish_ops_; +  ::grpc::CompletionQueue cq_; +  ::grpc::internal::Call call_; +}; + +/// Client-side interface for bi-directional streaming with +/// client-to-server stream messages of type \a W and +/// server-to-client stream messages of type \a R. 
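Before `ClientReaderWriterInterface` is declared below, a hedged sketch of the bidirectional pattern it describes; `RouteChat`, `RouteNote` and the `outgoing` container are hypothetical, and real applications often read and write from separate threads instead of sequencing them as shown here:

grpc::ClientContext ctx;
std::shared_ptr<grpc::ClientReaderWriter<RouteNote, RouteNote>> stream(
    stub->RouteChat(&ctx));              // hypothetical generated stub call

for (const RouteNote& note : outgoing) {
  stream->Write(note);
}
stream->WritesDone();                    // tell the server we are done writing

RouteNote incoming;
while (stream->Read(&incoming)) {
  // handle a message from the server
}
grpc::Status status = stream->Finish();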
+template <class W, class R> +class ClientReaderWriterInterface : public internal::ClientStreamingInterface, +                                    public internal::WriterInterface<W>, +                                    public internal::ReaderInterface<R> { + public: +  /// Block to wait for initial metadata from server. The received metadata +  /// can only be accessed after this call returns. Should only be called before +  /// the first read. Calling this method is optional, and if it is not called +  /// the metadata will be available in ClientContext after the first read. +  virtual void WaitForInitialMetadata() = 0; + +  /// Half close writing from the client. (signal that the stream of messages +  /// coming from the client is complete). +  /// Blocks until currently-pending writes are completed. +  /// Thread-safe with respect to \a ReaderInterface::Read +  /// +  /// \return Whether the writes were successful. +  virtual bool WritesDone() = 0; +}; + +namespace internal { +template <class W, class R> +class ClientReaderWriterFactory { + public: +  static ClientReaderWriter<W, R>* Create( +      ::grpc::ChannelInterface* channel, +      const ::grpc::internal::RpcMethod& method, +      ::grpc::ClientContext* context) { +    return new ClientReaderWriter<W, R>(channel, method, context); +  } +}; +}  // namespace internal + +/// Synchronous (blocking) client-side API for bi-directional streaming RPCs, +/// where the outgoing message stream coming from the client has messages of +/// type \a W, and the incoming messages stream coming from the server has +/// messages of type \a R. +template <class W, class R> +class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> { + public: +  /// Block waiting to read initial metadata from the server. +  /// This call is optional, but if it is used, it cannot be used concurrently +  /// with or after the \a Finish method. +  /// +  /// Once complete, the initial metadata read from the server will be +  /// accessible through the \a ClientContext used to construct this object. +  void WaitForInitialMetadata() override { +    GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); + +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata> +        ops; +    ops.RecvInitialMetadata(context_); +    call_.PerformOps(&ops); +    cq_.Pluck(&ops);  // status ignored +  } + +  bool NextMessageSize(uint32_t* sz) override { +    int result = call_.max_receive_message_size(); +    *sz = (result > 0) ? result : UINT32_MAX; +    return true; +  } + +  /// See the \a ReaderInterface.Read method for semantics. +  /// Side effect: +  ///   Also receives initial metadata if not already received (updates the \a +  ///   ClientContext associated with this call in that case). +  bool Read(R* msg) override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                                ::grpc::internal::CallOpRecvMessage<R>> +        ops; +    if (!context_->initial_metadata_received_) { +      ops.RecvInitialMetadata(context_); +    } +    ops.RecvMessage(msg); +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops) && ops.got_message; +  } + +  /// See the \a WriterInterface.Write method for semantics. +  /// +  /// Side effect: +  ///   Also sends initial metadata if not already sent (using the +  ///   \a ClientContext associated with this call to fill in values). 
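+  ///
+  /// A minimal end-to-end sketch (illustrative only; \a RouteChat, \a RouteNote
+  /// and \a stub are hypothetical names from generated code, not part of this
+  /// header):
+  /// \code
+  ///   ::grpc::ClientContext ctx;
+  ///   std::unique_ptr<ClientReaderWriter<RouteNote, RouteNote>> stream(
+  ///       stub->RouteChat(&ctx));
+  ///   for (const RouteNote& note : notes) stream->Write(note);
+  ///   stream->WritesDone();  // half-close: no more client-to-server messages
+  ///   RouteNote reply;
+  ///   while (stream->Read(&reply)) { /* consume server-to-client messages */ }
+  ///   ::grpc::Status status = stream->Finish();
+  /// \endcode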
+  using internal::WriterInterface<W>::Write; +  bool Write(const W& msg, ::grpc::WriteOptions options) override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata, +                                ::grpc::internal::CallOpSendMessage, +                                ::grpc::internal::CallOpClientSendClose> +        ops; + +    if (options.is_last_message()) { +      options.set_buffer_hint(); +      ops.ClientSendClose(); +    } +    if (context_->initial_metadata_corked_) { +      ops.SendInitialMetadata(&context_->send_initial_metadata_, +                              context_->initial_metadata_flags()); +      context_->set_initial_metadata_corked(false); +    } +    if (!ops.SendMessagePtr(&msg, options).ok()) { +      return false; +    } + +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops); +  } + +  bool WritesDone() override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpClientSendClose> ops; +    ops.ClientSendClose(); +    call_.PerformOps(&ops); +    return cq_.Pluck(&ops); +  } + +  /// See the ClientStreamingInterface.Finish method for semantics. +  /// +  /// Side effect: +  ///   - the \a ClientContext associated with this call is updated with +  ///     possible trailing metadata sent from the server. +  ::grpc::Status Finish() override { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvInitialMetadata, +                                ::grpc::internal::CallOpClientRecvStatus> +        ops; +    if (!context_->initial_metadata_received_) { +      ops.RecvInitialMetadata(context_); +    } +    ::grpc::Status status; +    ops.ClientRecvStatus(context_, &status); +    call_.PerformOps(&ops); +    GPR_CODEGEN_ASSERT(cq_.Pluck(&ops)); +    return status; +  } + + private: +  friend class internal::ClientReaderWriterFactory<W, R>; + +  ::grpc::ClientContext* context_; +  ::grpc::CompletionQueue cq_; +  ::grpc::internal::Call call_; + +  /// Block to create a stream and write the initial metadata and \a request +  /// out. Note that \a context will be used to fill in custom initial metadata +  /// used to send to the server when starting the call. +  ClientReaderWriter(::grpc::ChannelInterface* channel, +                     const ::grpc::internal::RpcMethod& method, +                     ::grpc::ClientContext* context) +      : context_(context), +        cq_(grpc_completion_queue_attributes{ +            GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING, +            nullptr}),  // Pluckable cq +        call_(channel->CreateCall(method, context, &cq_)) { +    if (!context_->initial_metadata_corked_) { +      ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata> +          ops; +      ops.SendInitialMetadata(&context->send_initial_metadata_, +                              context->initial_metadata_flags()); +      call_.PerformOps(&ops); +      cq_.Pluck(&ops); +    } +  } +}; + +/// Server-side interface for streaming reads of message of type \a R. +template <class R> +class ServerReaderInterface : public internal::ServerStreamingInterface, +                              public internal::ReaderInterface<R> {}; + +/// Synchronous (blocking) server-side API for doing client-streaming RPCs, +/// where the incoming message stream coming from the client has messages of +/// type \a R. +template <class R> +class ServerReader final : public ServerReaderInterface<R> { + public: +  /// See the \a ServerStreamingInterface.SendInitialMetadata method +  /// for semantics. 
+  /// Note that initial metadata will be affected by the
+  /// \a ServerContext associated with this call.
+  void SendInitialMetadata() override {
+    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        ops;
+    ops.SendInitialMetadata(&ctx_->initial_metadata_,
+                            ctx_->initial_metadata_flags());
+    if (ctx_->compression_level_set()) {
+      ops.set_compression_level(ctx_->compression_level());
+    }
+    ctx_->sent_initial_metadata_ = true;
+    call_->PerformOps(&ops);
+    call_->cq()->Pluck(&ops);
+  }
+
+  bool NextMessageSize(uint32_t* sz) override {
+    int result = call_->max_receive_message_size();
+    *sz = (result > 0) ? result : UINT32_MAX;
+    return true;
+  }
+
+  bool Read(R* msg) override {
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops;
+    ops.RecvMessage(msg);
+    call_->PerformOps(&ops);
+    return call_->cq()->Pluck(&ops) && ops.got_message;
+  }
+
+ private:
+  ::grpc::internal::Call* const call_;
+  ServerContext* const ctx_;
+
+  template <class ServiceType, class RequestType, class ResponseType>
+  friend class internal::ClientStreamingHandler;
+
+  ServerReader(::grpc::internal::Call* call, ::grpc::ServerContext* ctx)
+      : call_(call), ctx_(ctx) {}
+};
+
+/// Server-side interface for streaming writes of messages of type \a W.
+template <class W>
+class ServerWriterInterface : public internal::ServerStreamingInterface,
+                              public internal::WriterInterface<W> {};
+
+/// Synchronous (blocking) server-side API for doing server-streaming RPCs,
+/// where the outgoing message stream coming from the server has messages of
+/// type \a W.
+template <class W>
+class ServerWriter final : public ServerWriterInterface<W> {
+ public:
+  /// See the \a ServerStreamingInterface.SendInitialMetadata method
+  /// for semantics.
+  /// Note that initial metadata will be affected by the
+  /// \a ServerContext associated with this call.
+  void SendInitialMetadata() override {
+    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        ops;
+    ops.SendInitialMetadata(&ctx_->initial_metadata_,
+                            ctx_->initial_metadata_flags());
+    if (ctx_->compression_level_set()) {
+      ops.set_compression_level(ctx_->compression_level());
+    }
+    ctx_->sent_initial_metadata_ = true;
+    call_->PerformOps(&ops);
+    call_->cq()->Pluck(&ops);
+  }
+
+  /// See the \a WriterInterface.Write method for semantics.
+  ///
+  /// Side effect:
+  ///   Also sends initial metadata if not already sent (using the
+  ///   \a ServerContext associated with this call to fill in values).
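+  ///
+  /// A minimal handler sketch (illustrative only; \a ListFeatures,
+  /// \a Rectangle, \a Feature and \a LookupFeatures are hypothetical names,
+  /// not part of this header):
+  /// \code
+  ///   ::grpc::Status ListFeatures(::grpc::ServerContext* ctx,
+  ///                               const Rectangle* rect,
+  ///                               ServerWriter<Feature>* writer) override {
+  ///     for (const Feature& f : LookupFeatures(*rect)) {
+  ///       if (!writer->Write(f)) break;  // client cancelled or stream broken
+  ///     }
+  ///     return ::grpc::Status::OK;
+  ///   }
+  /// \endcode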
+  using internal::WriterInterface<W>::Write; +  bool Write(const W& msg, ::grpc::WriteOptions options) override { +    if (options.is_last_message()) { +      options.set_buffer_hint(); +    } + +    if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) { +      return false; +    } +    if (!ctx_->sent_initial_metadata_) { +      ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                             ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        ctx_->pending_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    call_->PerformOps(&ctx_->pending_ops_); +    // if this is the last message we defer the pluck until AFTER we start +    // the trailing md op. This prevents hangs. See +    // https://github.com/grpc/grpc/issues/11546 +    if (options.is_last_message()) { +      ctx_->has_pending_ops_ = true; +      return true; +    } +    ctx_->has_pending_ops_ = false; +    return call_->cq()->Pluck(&ctx_->pending_ops_); +  } + + private: +  ::grpc::internal::Call* const call_; +  ::grpc::ServerContext* const ctx_; + +  template <class ServiceType, class RequestType, class ResponseType> +  friend class internal::ServerStreamingHandler; + +  ServerWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx) +      : call_(call), ctx_(ctx) {} +}; + +/// Server-side interface for bi-directional streaming. +template <class W, class R> +class ServerReaderWriterInterface : public internal::ServerStreamingInterface, +                                    public internal::WriterInterface<W>, +                                    public internal::ReaderInterface<R> {}; + +/// Actual implementation of bi-directional streaming +namespace internal { +template <class W, class R> +class ServerReaderWriterBody final { + public: +  ServerReaderWriterBody(grpc::internal::Call* call, ::grpc::ServerContext* ctx) +      : call_(call), ctx_(ctx) {} + +  void SendInitialMetadata() { +    GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); + +    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops; +    ops.SendInitialMetadata(&ctx_->initial_metadata_, +                            ctx_->initial_metadata_flags()); +    if (ctx_->compression_level_set()) { +      ops.set_compression_level(ctx_->compression_level()); +    } +    ctx_->sent_initial_metadata_ = true; +    call_->PerformOps(&ops); +    call_->cq()->Pluck(&ops); +  } + +  bool NextMessageSize(uint32_t* sz) { +    int result = call_->max_receive_message_size(); +    *sz = (result > 0) ? 
result : UINT32_MAX; +    return true; +  } + +  bool Read(R* msg) { +    ::grpc::internal::CallOpSet<::grpc::internal::CallOpRecvMessage<R>> ops; +    ops.RecvMessage(msg); +    call_->PerformOps(&ops); +    return call_->cq()->Pluck(&ops) && ops.got_message; +  } + +  bool Write(const W& msg, ::grpc::WriteOptions options) { +    if (options.is_last_message()) { +      options.set_buffer_hint(); +    } +    if (!ctx_->pending_ops_.SendMessagePtr(&msg, options).ok()) { +      return false; +    } +    if (!ctx_->sent_initial_metadata_) { +      ctx_->pending_ops_.SendInitialMetadata(&ctx_->initial_metadata_, +                                             ctx_->initial_metadata_flags()); +      if (ctx_->compression_level_set()) { +        ctx_->pending_ops_.set_compression_level(ctx_->compression_level()); +      } +      ctx_->sent_initial_metadata_ = true; +    } +    call_->PerformOps(&ctx_->pending_ops_); +    // if this is the last message we defer the pluck until AFTER we start +    // the trailing md op. This prevents hangs. See +    // https://github.com/grpc/grpc/issues/11546 +    if (options.is_last_message()) { +      ctx_->has_pending_ops_ = true; +      return true; +    } +    ctx_->has_pending_ops_ = false; +    return call_->cq()->Pluck(&ctx_->pending_ops_); +  } + + private: +  grpc::internal::Call* const call_; +  ::grpc::ServerContext* const ctx_; +}; + +}  // namespace internal + +/// Synchronous (blocking) server-side API for a bidirectional +/// streaming call, where the incoming message stream coming from the client has +/// messages of type \a R, and the outgoing message streaming coming from +/// the server has messages of type \a W. +template <class W, class R> +class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> { + public: +  /// See the \a ServerStreamingInterface.SendInitialMetadata method +  /// for semantics. Note that initial metadata will be affected by the +  /// \a ServerContext associated with this call. +  void SendInitialMetadata() override { body_.SendInitialMetadata(); } + +  bool NextMessageSize(uint32_t* sz) override { +    return body_.NextMessageSize(sz); +  } + +  bool Read(R* msg) override { return body_.Read(msg); } + +  /// See the \a WriterInterface.Write(const W& msg, WriteOptions options) +  /// method for semantics. +  /// Side effect: +  ///   Also sends initial metadata if not already sent (using the \a +  ///   ServerContext associated with this call). +  using internal::WriterInterface<W>::Write; +  bool Write(const W& msg, ::grpc::WriteOptions options) override { +    return body_.Write(msg, options); +  } + + private: +  internal::ServerReaderWriterBody<W, R> body_; + +  friend class internal::TemplatedBidiStreamingHandler<ServerReaderWriter<W, R>, +                                                       false>; +  ServerReaderWriter(::grpc::internal::Call* call, ::grpc::ServerContext* ctx) +      : body_(call, ctx) {} +}; + +/// A class to represent a flow-controlled unary call. This is something +/// of a hybrid between conventional unary and streaming. This is invoked +/// through a unary call on the client side, but the server responds to it +/// as though it were a single-ping-pong streaming call. The server can use +/// the \a NextMessageSize method to determine an upper-bound on the size of +/// the message. A key difference relative to streaming: ServerUnaryStreamer +/// must have exactly 1 Read and exactly 1 Write, in that order, to function +/// correctly. Otherwise, the RPC is in error. 
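+///
+/// A minimal handler sketch (illustrative only; \a StreamedSayHello,
+/// \a HelloRequest and \a HelloReply are hypothetical names, and wiring the
+/// method up as a streamed-unary handler happens elsewhere in generated code):
+/// \code
+///   ::grpc::Status StreamedSayHello(
+///       ::grpc::ServerContext* ctx,
+///       ServerUnaryStreamer<HelloRequest, HelloReply>* stream) {
+///     HelloRequest req;
+///     if (!stream->Read(&req)) {
+///       return ::grpc::Status(::grpc::StatusCode::INTERNAL, "missing request");
+///     }
+///     HelloReply reply;
+///     reply.set_message("hello " + req.name());
+///     if (!stream->Write(reply)) {
+///       return ::grpc::Status(::grpc::StatusCode::INTERNAL, "write failed");
+///     }
+///     return ::grpc::Status::OK;  // exactly one Read, then exactly one Write
+///   }
+/// \endcode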
+template <class RequestType, class ResponseType> +class ServerUnaryStreamer final +    : public ServerReaderWriterInterface<ResponseType, RequestType> { + public: +  /// Block to send initial metadata to client. +  /// Implicit input parameter: +  ///    - the \a ServerContext associated with this call will be used for +  ///      sending initial metadata. +  void SendInitialMetadata() override { body_.SendInitialMetadata(); } + +  /// Get an upper bound on the request message size from the client. +  bool NextMessageSize(uint32_t* sz) override { +    return body_.NextMessageSize(sz); +  } + +  /// Read a message of type \a R into \a msg. Completion will be notified by \a +  /// tag on the associated completion queue. +  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It +  /// should not be called concurrently with other streaming APIs +  /// on the same stream. It is not meaningful to call it concurrently +  /// with another \a ReaderInterface::Read on the same stream since reads on +  /// the same stream are delivered in order. +  /// +  /// \param[out] msg Where to eventually store the read message. +  /// \param[in] tag The tag identifying the operation. +  bool Read(RequestType* request) override { +    if (read_done_) { +      return false; +    } +    read_done_ = true; +    return body_.Read(request); +  } + +  /// Block to write \a msg to the stream with WriteOptions \a options. +  /// This is thread-safe with respect to \a ReaderInterface::Read +  /// +  /// \param msg The message to be written to the stream. +  /// \param options The WriteOptions affecting the write operation. +  /// +  /// \return \a true on success, \a false when the stream has been closed. +  using internal::WriterInterface<ResponseType>::Write; +  bool Write(const ResponseType& response, +             ::grpc::WriteOptions options) override { +    if (write_done_ || !read_done_) { +      return false; +    } +    write_done_ = true; +    return body_.Write(response, options); +  } + + private: +  internal::ServerReaderWriterBody<ResponseType, RequestType> body_; +  bool read_done_; +  bool write_done_; + +  friend class internal::TemplatedBidiStreamingHandler< +      ServerUnaryStreamer<RequestType, ResponseType>, true>; +  ServerUnaryStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx) +      : body_(call, ctx), read_done_(false), write_done_(false) {} +}; + +/// A class to represent a flow-controlled server-side streaming call. +/// This is something of a hybrid between server-side and bidi streaming. +/// This is invoked through a server-side streaming call on the client side, +/// but the server responds to it as though it were a bidi streaming call that +/// must first have exactly 1 Read and then any number of Writes. +template <class RequestType, class ResponseType> +class ServerSplitStreamer final +    : public ServerReaderWriterInterface<ResponseType, RequestType> { + public: +  /// Block to send initial metadata to client. +  /// Implicit input parameter: +  ///    - the \a ServerContext associated with this call will be used for +  ///      sending initial metadata. +  void SendInitialMetadata() override { body_.SendInitialMetadata(); } + +  /// Get an upper bound on the request message size from the client. +  bool NextMessageSize(uint32_t* sz) override { +    return body_.NextMessageSize(sz); +  } + +  /// Read a message of type \a R into \a msg. Completion will be notified by \a +  /// tag on the associated completion queue. 
+  /// This is thread-safe with respect to \a Write or \a WritesDone methods. It +  /// should not be called concurrently with other streaming APIs +  /// on the same stream. It is not meaningful to call it concurrently +  /// with another \a ReaderInterface::Read on the same stream since reads on +  /// the same stream are delivered in order. +  /// +  /// \param[out] msg Where to eventually store the read message. +  /// \param[in] tag The tag identifying the operation. +  bool Read(RequestType* request) override { +    if (read_done_) { +      return false; +    } +    read_done_ = true; +    return body_.Read(request); +  } + +  /// Block to write \a msg to the stream with WriteOptions \a options. +  /// This is thread-safe with respect to \a ReaderInterface::Read +  /// +  /// \param msg The message to be written to the stream. +  /// \param options The WriteOptions affecting the write operation. +  /// +  /// \return \a true on success, \a false when the stream has been closed. +  using internal::WriterInterface<ResponseType>::Write; +  bool Write(const ResponseType& response, +             ::grpc::WriteOptions options) override { +    return read_done_ && body_.Write(response, options); +  } + + private: +  internal::ServerReaderWriterBody<ResponseType, RequestType> body_; +  bool read_done_; + +  friend class internal::TemplatedBidiStreamingHandler< +      ServerSplitStreamer<RequestType, ResponseType>, false>; +  ServerSplitStreamer(::grpc::internal::Call* call, ::grpc::ServerContext* ctx) +      : body_(call, ctx), read_done_(false) {} +}; + +}  // namespace grpc + +#endif  // GRPCPP_IMPL_CODEGEN_SYNC_STREAM_H diff --git a/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h new file mode 100644 index 00000000000..3a54db45bf1 --- /dev/null +++ b/contrib/libs/grpc/include/grpcpp/impl/codegen/time.h @@ -0,0 +1,91 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPCPP_IMPL_CODEGEN_TIME_H +#define GRPCPP_IMPL_CODEGEN_TIME_H + +#if defined(__GNUC__) +#pragma GCC system_header +#endif + +#include <chrono> + +#include <grpc/impl/codegen/grpc_types.h> +#include <grpcpp/impl/codegen/config.h> + +namespace grpc { + +/** If you are trying to use CompletionQueue::AsyncNext with a time class that +    isn't either gpr_timespec or std::chrono::system_clock::time_point, you +    will most likely be looking at this comment as your compiler will have +    fired an error below. In order to fix this issue, you have two potential +    solutions: + +      1. Use gpr_timespec or std::chrono::system_clock::time_point instead +      2. Specialize the TimePoint class with whichever time class that you +         want to use here. See below for two examples of how to do this. 
+ */
+template <typename T>
+class TimePoint {
+ public:
+  // If you see an error with the methods below, you need either
+  // i) to use one of the existing types that has a conversion class, such as
+  // gpr_timespec or std::chrono::system_clock::time_point, or
+  // ii) to write a new TimePoint<YourType> specialization to address your case.
+  TimePoint(const T& /*time*/) = delete;
+  gpr_timespec raw_time() = delete;
+};
+
+template <>
+class TimePoint<gpr_timespec> {
+ public:
+  TimePoint(const gpr_timespec& time) : time_(time) {}
+  gpr_timespec raw_time() { return time_; }
+
+ private:
+  gpr_timespec time_;
+};
+
+}  // namespace grpc
+
+namespace grpc {
+
+// \a from and \a to should be absolute times.
+void Timepoint2Timespec(const std::chrono::system_clock::time_point& from,
+                        gpr_timespec* to);
+void TimepointHR2Timespec(
+    const std::chrono::high_resolution_clock::time_point& from,
+    gpr_timespec* to);
+
+std::chrono::system_clock::time_point Timespec2Timepoint(gpr_timespec t);
+
+template <>
+class TimePoint<std::chrono::system_clock::time_point> {
+ public:
+  TimePoint(const std::chrono::system_clock::time_point& time) {
+    Timepoint2Timespec(time, &time_);
+  }
+  gpr_timespec raw_time() const { return time_; }
+
+ private:
+  gpr_timespec time_;
+};
+
+}  // namespace grpc
+
+#endif  // GRPCPP_IMPL_CODEGEN_TIME_H
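The comment at the top of this header invites users to specialize TimePoint for other time representations, and the two specializations above (for gpr_timespec and std::chrono::system_clock::time_point) show the pattern. As a further illustration only, a user-provided specialization for std::chrono::steady_clock deadlines might look like the sketch below; mapping a steady-clock deadline through the system clock is one reasonable approach, not something this header prescribes.

    // Illustrative user code, not part of time.h.
    #include <chrono>
    #include <grpcpp/grpcpp.h>  // brings in grpc::TimePoint and Timepoint2Timespec

    namespace grpc {
    template <>
    class TimePoint<std::chrono::steady_clock::time_point> {
     public:
      TimePoint(const std::chrono::steady_clock::time_point& time) {
        // Convert the remaining duration on the steady clock into an absolute
        // system-clock deadline, then into a gpr_timespec.
        const auto remaining = time - std::chrono::steady_clock::now();
        Timepoint2Timespec(
            std::chrono::system_clock::now() +
                std::chrono::duration_cast<std::chrono::system_clock::duration>(
                    remaining),
            &time_);
      }
      gpr_timespec raw_time() const { return time_; }

     private:
      gpr_timespec time_;
    };
    }  // namespace grpc

With such a specialization visible, a std::chrono::steady_clock::time_point can be passed wherever the C++ API accepts a deadline type (for example, CompletionQueue::AsyncNext, which the header comment mentions).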
