aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/grpc/test/cpp/end2end
diff options
context:
space:
mode:
authorarcadia-devtools <arcadia-devtools@yandex-team.ru>2022-03-15 21:33:41 +0300
committerarcadia-devtools <arcadia-devtools@yandex-team.ru>2022-03-15 21:33:41 +0300
commit3dd665b514943f69657b593eb51af90b99b1206b (patch)
tree0eb633e628bb1fe6c639574b1184d43def7c0a73 /contrib/libs/grpc/test/cpp/end2end
parenta68afc731202027f105bc5723ee11788017c29e2 (diff)
downloadydb-3dd665b514943f69657b593eb51af90b99b1206b.tar.gz
intermediate changes
ref:953ca886ec160075b38c0f3614de029b423f0a9e
Diffstat (limited to 'contrib/libs/grpc/test/cpp/end2end')
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt22
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/admin_services_end2end_test.cc107
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc28
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc37
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc230
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc123
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc5
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc25
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc335
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc78
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/context_allocator_end2end_test.cc334
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc5
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/end2end_test.cc155
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/exception_test.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc17
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc26
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc19
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc213
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc6
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc16
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/interceptors_util.h24
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc34
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/mock_test.cc10
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc11
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc8
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc6
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc8
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc6
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc27
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc119
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc27
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc1
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/test_service_impl.h2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc14
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/time_change_test.cc10
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/xds_credentials_end2end_test.cc127
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc7767
40 files changed, 8076 insertions, 1914 deletions
diff --git a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
index a07ea0849d..ef0151141b 100644
--- a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
@@ -12,6 +12,20 @@
* limitations under the License.
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
====================COPYRIGHT====================
* Copyright 2015 gRPC authors.
@@ -29,8 +43,16 @@
====================COPYRIGHT====================
+ * Copyright 2020 gRPC authors.
+
+
+====================COPYRIGHT====================
# Copyright 2019 gRPC authors.
====================COPYRIGHT====================
// Copyright 2019 The gRPC Authors
+
+
+====================COPYRIGHT====================
+// Copyright 2021 gRPC authors.
diff --git a/contrib/libs/grpc/test/cpp/end2end/admin_services_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/admin_services_end2end_test.cc
new file mode 100644
index 0000000000..50225f3baf
--- /dev/null
+++ b/contrib/libs/grpc/test/cpp/end2end/admin_services_end2end_test.cc
@@ -0,0 +1,107 @@
+//
+//
+// Copyright 2021 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "y_absl/strings/str_cat.h"
+
+#include <grpcpp/ext/proto_server_reflection_plugin.h>
+#include <grpcpp/grpcpp.h>
+
+#include "src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+#ifndef DISABLED_XDS_PROTO_IN_CC
+#include <grpcpp/ext/admin_services.h>
+
+namespace grpc {
+namespace testing {
+
+class AdminServicesTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ TString address =
+ y_absl::StrCat("localhost:", grpc_pick_unused_port_or_die());
+ // Create admin server
+ grpc::reflection::InitProtoReflectionServerBuilderPlugin();
+ ServerBuilder builder;
+ builder.AddListeningPort(address, InsecureServerCredentials());
+ ::grpc::AddAdminServices(&builder);
+ server_ = builder.BuildAndStart();
+ // Create channel
+ auto reflection_stub = reflection::v1alpha::ServerReflection::NewStub(
+ CreateChannel(address, InsecureChannelCredentials()));
+ stream_ = reflection_stub->ServerReflectionInfo(&reflection_ctx_);
+ }
+
+ std::vector<TString> GetServiceList() {
+ std::vector<TString> services;
+ reflection::v1alpha::ServerReflectionRequest request;
+ reflection::v1alpha::ServerReflectionResponse response;
+ request.set_list_services("");
+ stream_->Write(request);
+ stream_->Read(&response);
+ for (auto& service : response.list_services_response().service()) {
+ services.push_back(service.name());
+ }
+ return services;
+ }
+
+ private:
+ std::unique_ptr<Server> server_;
+ ClientContext reflection_ctx_;
+ std::shared_ptr<
+ ClientReaderWriter<reflection::v1alpha::ServerReflectionRequest,
+ reflection::v1alpha::ServerReflectionResponse>>
+ stream_;
+};
+
+#ifndef GRPC_NO_XDS
+// The ifndef conflicts with TEST_F and EXPECT_THAT macros, so we better isolate
+// the condition at test case level.
+TEST_F(AdminServicesTest, XdsEnabled) {
+ EXPECT_THAT(GetServiceList(),
+ ::testing::UnorderedElementsAre(
+ "envoy.service.status.v3.ClientStatusDiscoveryService",
+ "grpc.channelz.v1.Channelz",
+ "grpc.reflection.v1alpha.ServerReflection"));
+}
+#endif // GRPC_NO_XDS
+
+#ifdef GRPC_NO_XDS
+TEST_F(AdminServicesTest, XdsDisabled) {
+ EXPECT_THAT(GetServiceList(),
+ ::testing::UnorderedElementsAre(
+ "grpc.channelz.v1.Channelz",
+ "grpc.reflection.v1alpha.ServerReflection"));
+}
+#endif // GRPC_NO_XDS
+
+} // namespace testing
+} // namespace grpc
+
+#endif // DISABLED_XDS_PROTO_IN_CC
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int ret = RUN_ALL_TESTS();
+ return ret;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
index 45df8718f9..30f6d10092 100644
--- a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
@@ -32,6 +32,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/iomgr/port.h"
@@ -51,7 +53,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using grpc::testing::kTlsCredentialsType;
using std::chrono::system_clock;
namespace grpc {
@@ -59,7 +60,7 @@ namespace testing {
namespace {
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+void* tag(int t) { return reinterpret_cast<void*>(t); }
int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
class Verifier {
@@ -271,8 +272,8 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
void* ignored_tag;
bool ignored_ok;
cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
stub_.reset();
grpc_recycle_unused_port(port_);
}
@@ -282,7 +283,8 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
auto server_creds = GetCredentialsProvider()->GetServerCredentials(
GetParam().credentials_type);
builder.AddListeningPort(server_address_.str(), server_creds);
- service_.reset(new grpc::testing::EchoTestService::AsyncService());
+ service_ =
+ y_absl::make_unique<grpc::testing::EchoTestService::AsyncService>();
builder.RegisterService(service_.get());
if (GetParam().health_check_service) {
builder.RegisterService(&health_check_);
@@ -426,8 +428,8 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) {
void* ignored_tag;
bool ignored_ok;
cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
BuildAndStartServer();
// It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
// reconnect the channel.
@@ -1493,9 +1495,9 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
cli_cq.Shutdown();
- void* dummy_tag;
- bool dummy_ok;
- while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
+ void* phony_tag;
+ bool phony_ok;
+ while (cli_cq.Next(&phony_tag, &phony_ok)) {
}
}
@@ -1642,9 +1644,9 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
cli_cq.Shutdown();
- void* dummy_tag;
- bool dummy_ok;
- while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
+ void* phony_tag;
+ bool phony_ok;
+ while (cli_cq.Next(&phony_tag, &phony_ok)) {
}
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
index e6695982bd..2e258d03d4 100644
--- a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
@@ -42,7 +42,6 @@
#include "src/core/lib/gpr/env.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/debugger_macros.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -145,18 +144,6 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
return CreateCustomChannel(server_address.str(), channel_creds, args);
}
- int GetStreamID(ClientContext& context) {
- int stream_id = 0;
- grpc_call* call = context.c_call();
- if (call) {
- grpc_chttp2_stream* stream = grpc_chttp2_stream_from_call(call);
- if (stream) {
- stream_id = stream->id;
- }
- }
- return stream_id;
- }
-
void SendRpc(
const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
bool expect_success = false) {
@@ -166,13 +153,11 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
request.set_message(msg);
ClientContext context;
Status status = stub->Echo(&context, request, response.get());
- int stream_id = GetStreamID(context);
if (status.ok()) {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d succeeded", stream_id);
+ gpr_log(GPR_DEBUG, "RPC with succeeded");
EXPECT_EQ(msg, response->message());
} else {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d failed: %s", stream_id,
- status.error_message().c_str());
+ gpr_log(GPR_DEBUG, "RPC failed: %s", status.error_message().c_str());
}
if (expect_success) {
EXPECT_TRUE(status.ok());
@@ -205,9 +190,9 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
} else {
GPR_ASSERT(ret == grpc::CompletionQueue::TIMEOUT);
// This can happen if we hit the Apple CFStream bug which results in the
- // read stream hanging. We are ignoring hangs and timeouts, but these
+ // read stream freezing. We are ignoring hangs and timeouts, but these
// tests are still useful as they can catch memory memory corruptions,
- // crashes and other bugs that don't result in test hang/timeout.
+ // crashes and other bugs that don't result in test freeze/timeout.
return false;
}
}
@@ -392,17 +377,16 @@ TEST_P(CFStreamTest, NetworkFlapRpcsInFlight) {
++total_completions;
GPR_ASSERT(ok);
AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
- int stream_id = GetStreamID(call->context);
if (!call->status.ok()) {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d failed with error: %s",
- stream_id, call->status.error_message().c_str());
+ gpr_log(GPR_DEBUG, "RPC failed with error: %s",
+ call->status.error_message().c_str());
// Bring network up when RPCs start failing
if (network_down) {
NetworkUp();
network_down = false;
}
} else {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d succeeded", stream_id);
+ gpr_log(GPR_DEBUG, "RPC succeeded");
}
delete call;
}
@@ -440,13 +424,12 @@ TEST_P(CFStreamTest, ConcurrentRpc) {
++total_completions;
GPR_ASSERT(ok);
AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
- int stream_id = GetStreamID(call->context);
if (!call->status.ok()) {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d failed with error: %s",
- stream_id, call->status.error_message().c_str());
+ gpr_log(GPR_DEBUG, "RPC failed with error: %s",
+ call->status.error_message().c_str());
// Bring network up when RPCs start failing
} else {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d succeeded", stream_id);
+ gpr_log(GPR_DEBUG, "RPC succeeded");
}
delete call;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
index 9c723bebb6..e076d8cc5d 100644
--- a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
@@ -19,25 +19,35 @@
#include <grpc/support/port_platform.h>
#include <grpc/grpc.h>
+#include <grpc/grpc_security.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
+#include <grpcpp/ext/channelz_service_plugin.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
-#include <grpcpp/ext/channelz_service_plugin.h>
+#include "y_absl/memory/memory.h"
+
#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/load_file.h"
+#include "src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h"
+#include "src/core/lib/security/security_connector/ssl_utils.h"
+#include "src/core/lib/slice/slice_utils.h"
+#include "src/cpp/client/secure_credentials.h"
#include "src/proto/grpc/channelz/channelz.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/test_credentials_provider.h"
#include <gtest/gtest.h>
+using grpc::channelz::v1::Address;
using grpc::channelz::v1::GetChannelRequest;
using grpc::channelz::v1::GetChannelResponse;
using grpc::channelz::v1::GetServerRequest;
@@ -57,6 +67,14 @@ namespace grpc {
namespace testing {
namespace {
+static bool ValidateAddress(const Address& address) {
+ if (address.address_case() != Address::kTcpipAddress) {
+ return true;
+ }
+ return address.tcpip_address().ip_address().size() == 4 ||
+ address.tcpip_address().ip_address().size() == 16;
+}
+
// Proxy service supports N backends. Sends RPC to backend dictated by
// request->backend_channel_idx().
class Proxy : public ::grpc::testing::EchoTestService::Service {
@@ -100,9 +118,75 @@ class Proxy : public ::grpc::testing::EchoTestService::Service {
std::vector<std::unique_ptr<::grpc::testing::EchoTestService::Stub>> stubs_;
};
-} // namespace
+enum class CredentialsType {
+ kInsecure = 0,
+ kTls = 1,
+ kMtls = 2,
+};
+
+constexpr char kCaCertPath[] = "src/core/tsi/test_creds/ca.pem";
+constexpr char kServerCertPath[] = "src/core/tsi/test_creds/server1.pem";
+constexpr char kServerKeyPath[] = "src/core/tsi/test_creds/server1.key";
+constexpr char kClientCertPath[] = "src/core/tsi/test_creds/client.pem";
+constexpr char kClientKeyPath[] = "src/core/tsi/test_creds/client.key";
+
+TString ReadFile(const char* file_path) {
+ grpc_slice slice;
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("load_file", grpc_load_file(file_path, 0, &slice)));
+ TString file_contents(grpc_core::StringViewFromSlice(slice));
+ grpc_slice_unref(slice);
+ return file_contents;
+}
+
+grpc_core::PemKeyCertPairList ReadTlsIdentityPair(const char* key_path,
+ const char* cert_path) {
+ return grpc_core::PemKeyCertPairList{
+ grpc_core::PemKeyCertPair(ReadFile(key_path), ReadFile(cert_path))};
+}
+
+std::shared_ptr<grpc::ChannelCredentials> GetChannelCredentials(
+ CredentialsType type, ChannelArguments* args) {
+ if (type == CredentialsType::kInsecure) {
+ return InsecureChannelCredentials();
+ }
+ args->SetSslTargetNameOverride("foo.test.google.fr");
+ std::vector<experimental::IdentityKeyCertPair> identity_key_cert_pairs = {
+ {ReadFile(kClientKeyPath), ReadFile(kClientCertPath)}};
+ grpc::experimental::TlsChannelCredentialsOptions options;
+ options.set_certificate_provider(
+ std::make_shared<grpc::experimental::StaticDataCertificateProvider>(
+ ReadFile(kCaCertPath), identity_key_cert_pairs));
+ if (type == CredentialsType::kMtls) {
+ options.watch_identity_key_cert_pairs();
+ }
+ options.watch_root_certs();
+ return grpc::experimental::TlsCredentials(options);
+}
+
+std::shared_ptr<grpc::ServerCredentials> GetServerCredentials(
+ CredentialsType type) {
+ if (type == CredentialsType::kInsecure) {
+ return InsecureServerCredentials();
+ }
+ std::vector<experimental::IdentityKeyCertPair> identity_key_cert_pairs = {
+ {ReadFile(kServerKeyPath), ReadFile(kServerCertPath)}};
+ auto certificate_provider =
+ std::make_shared<grpc::experimental::StaticDataCertificateProvider>(
+ ReadFile(kCaCertPath), identity_key_cert_pairs);
+ grpc::experimental::TlsServerCredentialsOptions options(certificate_provider);
+ options.watch_root_certs();
+ options.watch_identity_key_cert_pairs();
+ options.set_cert_request_type(GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY);
+ return grpc::experimental::TlsServerCredentials(options);
+}
+
+TString RemoveWhitespaces(TString input) {
+ input.erase(remove_if(input.begin(), input.end(), isspace), input.end());
+ return input;
+}
-class ChannelzServerTest : public ::testing::Test {
+class ChannelzServerTest : public ::testing::TestWithParam<CredentialsType> {
public:
ChannelzServerTest() {}
static void SetUpTestCase() {
@@ -120,7 +204,7 @@ class ChannelzServerTest : public ::testing::Test {
ServerBuilder proxy_builder;
TString proxy_server_address = "localhost:" + to_string(proxy_port_);
proxy_builder.AddListeningPort(proxy_server_address,
- InsecureServerCredentials());
+ GetServerCredentials(GetParam()));
// forces channelz and channel tracing to be enabled.
proxy_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 1);
proxy_builder.AddChannelArgument(
@@ -139,8 +223,8 @@ class ChannelzServerTest : public ::testing::Test {
TString backend_server_address =
"localhost:" + to_string(backends_[i].port);
backend_builder.AddListeningPort(backend_server_address,
- InsecureServerCredentials());
- backends_[i].service.reset(new TestServiceImpl);
+ GetServerCredentials(GetParam()));
+ backends_[i].service = y_absl::make_unique<TestServiceImpl>();
// ensure that the backend itself has channelz disabled.
backend_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 0);
backend_builder.RegisterService(backends_[i].service.get());
@@ -152,7 +236,8 @@ class ChannelzServerTest : public ::testing::Test {
args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 1);
args.SetInt(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE, 1024);
std::shared_ptr<Channel> channel_to_backend = ::grpc::CreateCustomChannel(
- backend_server_address, InsecureChannelCredentials(), args);
+ backend_server_address, GetChannelCredentials(GetParam(), &args),
+ args);
proxy_service_.AddChannelToBackend(channel_to_backend);
}
}
@@ -162,8 +247,8 @@ class ChannelzServerTest : public ::testing::Test {
ChannelArguments args;
// disable channelz. We only want to focus on proxy to backend outbound.
args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
- std::shared_ptr<Channel> channel =
- ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
+ std::shared_ptr<Channel> channel = ::grpc::CreateCustomChannel(
+ target, GetChannelCredentials(GetParam(), &args), args);
channelz_stub_ = grpc::channelz::v1::Channelz::NewStub(channel);
echo_stub_ = grpc::testing::EchoTestService::NewStub(channel);
}
@@ -175,8 +260,8 @@ class ChannelzServerTest : public ::testing::Test {
args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
// This ensures that gRPC will not do connection sharing.
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
- std::shared_ptr<Channel> channel =
- ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
+ std::shared_ptr<Channel> channel = ::grpc::CreateCustomChannel(
+ target, GetChannelCredentials(GetParam(), &args), args);
return grpc::testing::EchoTestService::NewStub(channel);
}
@@ -258,7 +343,7 @@ class ChannelzServerTest : public ::testing::Test {
std::vector<BackendData> backends_;
};
-TEST_F(ChannelzServerTest, BasicTest) {
+TEST_P(ChannelzServerTest, BasicTest) {
ResetStubs();
ConfigureProxy(1);
GetTopChannelsRequest request;
@@ -270,7 +355,7 @@ TEST_F(ChannelzServerTest, BasicTest) {
EXPECT_EQ(response.channel_size(), 1);
}
-TEST_F(ChannelzServerTest, HighStartId) {
+TEST_P(ChannelzServerTest, HighStartId) {
ResetStubs();
ConfigureProxy(1);
GetTopChannelsRequest request;
@@ -282,7 +367,7 @@ TEST_F(ChannelzServerTest, HighStartId) {
EXPECT_EQ(response.channel_size(), 0);
}
-TEST_F(ChannelzServerTest, SuccessfulRequestTest) {
+TEST_P(ChannelzServerTest, SuccessfulRequestTest) {
ResetStubs();
ConfigureProxy(1);
SendSuccessfulEcho(0);
@@ -297,7 +382,7 @@ TEST_F(ChannelzServerTest, SuccessfulRequestTest) {
EXPECT_EQ(response.channel().data().calls_failed(), 0);
}
-TEST_F(ChannelzServerTest, FailedRequestTest) {
+TEST_P(ChannelzServerTest, FailedRequestTest) {
ResetStubs();
ConfigureProxy(1);
SendFailedEcho(0);
@@ -312,7 +397,7 @@ TEST_F(ChannelzServerTest, FailedRequestTest) {
EXPECT_EQ(response.channel().data().calls_failed(), 1);
}
-TEST_F(ChannelzServerTest, ManyRequestsTest) {
+TEST_P(ChannelzServerTest, ManyRequestsTest) {
ResetStubs();
ConfigureProxy(1);
// send some RPCs
@@ -336,7 +421,7 @@ TEST_F(ChannelzServerTest, ManyRequestsTest) {
EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
}
-TEST_F(ChannelzServerTest, ManyChannels) {
+TEST_P(ChannelzServerTest, ManyChannels) {
ResetStubs();
const int kNumChannels = 4;
ConfigureProxy(kNumChannels);
@@ -349,7 +434,7 @@ TEST_F(ChannelzServerTest, ManyChannels) {
EXPECT_EQ(response.channel_size(), kNumChannels);
}
-TEST_F(ChannelzServerTest, ManyRequestsManyChannels) {
+TEST_P(ChannelzServerTest, ManyRequestsManyChannels) {
ResetStubs();
const int kNumChannels = 4;
ConfigureProxy(kNumChannels);
@@ -418,7 +503,7 @@ TEST_F(ChannelzServerTest, ManyRequestsManyChannels) {
}
}
-TEST_F(ChannelzServerTest, ManySubchannels) {
+TEST_P(ChannelzServerTest, ManySubchannels) {
ResetStubs();
const int kNumChannels = 4;
ConfigureProxy(kNumChannels);
@@ -466,7 +551,7 @@ TEST_F(ChannelzServerTest, ManySubchannels) {
}
}
-TEST_F(ChannelzServerTest, BasicServerTest) {
+TEST_P(ChannelzServerTest, BasicServerTest) {
ResetStubs();
ConfigureProxy(1);
GetServersRequest request;
@@ -478,7 +563,7 @@ TEST_F(ChannelzServerTest, BasicServerTest) {
EXPECT_EQ(response.server_size(), 1);
}
-TEST_F(ChannelzServerTest, BasicGetServerTest) {
+TEST_P(ChannelzServerTest, BasicGetServerTest) {
ResetStubs();
ConfigureProxy(1);
GetServersRequest get_servers_request;
@@ -501,7 +586,7 @@ TEST_F(ChannelzServerTest, BasicGetServerTest) {
get_server_response.server().ref().server_id());
}
-TEST_F(ChannelzServerTest, ServerCallTest) {
+TEST_P(ChannelzServerTest, ServerCallTest) {
ResetStubs();
ConfigureProxy(1);
const int kNumSuccess = 10;
@@ -528,7 +613,7 @@ TEST_F(ChannelzServerTest, ServerCallTest) {
kNumSuccess + kNumFailed + 1);
}
-TEST_F(ChannelzServerTest, ManySubchannelsAndSockets) {
+TEST_P(ChannelzServerTest, ManySubchannelsAndSockets) {
ResetStubs();
const int kNumChannels = 4;
ConfigureProxy(kNumChannels);
@@ -594,10 +679,24 @@ TEST_F(ChannelzServerTest, ManySubchannelsAndSockets) {
// calls succeeded == messages received.
EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_succeeded(),
get_socket_resp.socket().data().messages_received());
+ switch (GetParam()) {
+ case CredentialsType::kInsecure:
+ EXPECT_FALSE(get_socket_resp.socket().has_security());
+ break;
+ case CredentialsType::kTls:
+ case CredentialsType::kMtls:
+ EXPECT_TRUE(get_socket_resp.socket().has_security());
+ EXPECT_TRUE(get_socket_resp.socket().security().has_tls());
+ EXPECT_EQ(
+ RemoveWhitespaces(
+ get_socket_resp.socket().security().tls().remote_certificate()),
+ RemoveWhitespaces(ReadFile(kServerCertPath)));
+ break;
+ }
}
}
-TEST_F(ChannelzServerTest, StreamingRPC) {
+TEST_P(ChannelzServerTest, StreamingRPC) {
ResetStubs();
ConfigureProxy(1);
const int kNumMessages = 5;
@@ -645,9 +744,24 @@ TEST_F(ChannelzServerTest, StreamingRPC) {
EXPECT_EQ(get_socket_response.socket().data().messages_sent(), kNumMessages);
EXPECT_EQ(get_socket_response.socket().data().messages_received(),
kNumMessages);
+ switch (GetParam()) {
+ case CredentialsType::kInsecure:
+ EXPECT_FALSE(get_socket_response.socket().has_security());
+ break;
+ case CredentialsType::kTls:
+ case CredentialsType::kMtls:
+ EXPECT_TRUE(get_socket_response.socket().has_security());
+ EXPECT_TRUE(get_socket_response.socket().security().has_tls());
+ EXPECT_EQ(RemoveWhitespaces(get_socket_response.socket()
+ .security()
+ .tls()
+ .remote_certificate()),
+ RemoveWhitespaces(ReadFile(kServerCertPath)));
+ break;
+ }
}
-TEST_F(ChannelzServerTest, GetServerSocketsTest) {
+TEST_P(ChannelzServerTest, GetServerSocketsTest) {
ResetStubs();
ConfigureProxy(1);
GetServersRequest get_server_request;
@@ -670,9 +784,43 @@ TEST_F(ChannelzServerTest, GetServerSocketsTest) {
EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
EXPECT_EQ(get_server_sockets_response.socket_ref_size(), 1);
EXPECT_TRUE(get_server_sockets_response.socket_ref(0).name().find("http"));
+ // Get the socket to verify security information.
+ GetSocketRequest get_socket_request;
+ GetSocketResponse get_socket_response;
+ ClientContext get_socket_context;
+ get_socket_request.set_socket_id(
+ get_server_sockets_response.socket_ref(0).socket_id());
+ s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
+ &get_socket_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_TRUE(ValidateAddress(get_socket_response.socket().remote()));
+ EXPECT_TRUE(ValidateAddress(get_socket_response.socket().local()));
+ switch (GetParam()) {
+ case CredentialsType::kInsecure:
+ EXPECT_FALSE(get_socket_response.socket().has_security());
+ break;
+ case CredentialsType::kTls:
+ case CredentialsType::kMtls:
+ EXPECT_TRUE(get_socket_response.socket().has_security());
+ EXPECT_TRUE(get_socket_response.socket().security().has_tls());
+ if (GetParam() == CredentialsType::kMtls) {
+ EXPECT_EQ(RemoveWhitespaces(get_socket_response.socket()
+ .security()
+ .tls()
+ .remote_certificate()),
+ RemoveWhitespaces(ReadFile(kClientCertPath)));
+ } else {
+ EXPECT_TRUE(get_socket_response.socket()
+ .security()
+ .tls()
+ .remote_certificate()
+ .empty());
+ }
+ break;
+ }
}
-TEST_F(ChannelzServerTest, GetServerSocketsPaginationTest) {
+TEST_P(ChannelzServerTest, GetServerSocketsPaginationTest) {
ResetStubs();
ConfigureProxy(1);
std::vector<std::unique_ptr<grpc::testing::EchoTestService::Stub>> stubs;
@@ -733,7 +881,7 @@ TEST_F(ChannelzServerTest, GetServerSocketsPaginationTest) {
}
}
-TEST_F(ChannelzServerTest, GetServerListenSocketsTest) {
+TEST_P(ChannelzServerTest, GetServerListenSocketsTest) {
ResetStubs();
ConfigureProxy(1);
GetServersRequest get_server_request;
@@ -744,19 +892,41 @@ TEST_F(ChannelzServerTest, GetServerListenSocketsTest) {
&get_server_response);
EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
EXPECT_EQ(get_server_response.server_size(), 1);
- EXPECT_EQ(get_server_response.server(0).listen_socket_size(), 1);
+ // The resolver might return one or two addresses depending on the
+ // configuration, one for ipv4 and one for ipv6.
+ int listen_socket_size = get_server_response.server(0).listen_socket_size();
+ EXPECT_TRUE(listen_socket_size == 1 || listen_socket_size == 2);
GetSocketRequest get_socket_request;
GetSocketResponse get_socket_response;
get_socket_request.set_socket_id(
get_server_response.server(0).listen_socket(0).socket_id());
EXPECT_TRUE(
get_server_response.server(0).listen_socket(0).name().find("http"));
- ClientContext get_socket_context;
- s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
+ ClientContext get_socket_context_1;
+ s = channelz_stub_->GetSocket(&get_socket_context_1, get_socket_request,
&get_socket_response);
EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+
+ EXPECT_TRUE(ValidateAddress(get_socket_response.socket().remote()));
+ EXPECT_TRUE(ValidateAddress(get_socket_response.socket().local()));
+ if (listen_socket_size == 2) {
+ get_socket_request.set_socket_id(
+ get_server_response.server(0).listen_socket(1).socket_id());
+ ClientContext get_socket_context_2;
+ EXPECT_TRUE(
+ get_server_response.server(0).listen_socket(1).name().find("http"));
+ s = channelz_stub_->GetSocket(&get_socket_context_2, get_socket_request,
+ &get_socket_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ }
}
+INSTANTIATE_TEST_SUITE_P(ChannelzServer, ChannelzServerTest,
+ ::testing::ValuesIn(std::vector<CredentialsType>(
+ {CredentialsType::kInsecure, CredentialsType::kTls,
+ CredentialsType::kMtls})));
+
+} // namespace
} // namespace testing
} // namespace grpc
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
index 12cb40a953..1d81cdea70 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
@@ -34,6 +34,7 @@
#include <sstream>
#include <thread>
+#include "y_absl/memory/memory.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -45,17 +46,6 @@
#include "test/cpp/util/string_ref_helper.h"
#include "test/cpp/util/test_credentials_provider.h"
-// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
-// should be skipped based on a decision made at SetUp time. In particular, any
-// callback tests can only be run if the iomgr can run in the background or if
-// the transport is in-process.
-#define MAYBE_SKIP_TEST \
- do { \
- if (do_not_test_) { \
- return; \
- } \
- } while (0)
-
namespace grpc {
namespace testing {
namespace {
@@ -119,21 +109,16 @@ class ClientCallbackEnd2endTest
std::vector<
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
creators;
- // Add 20 dummy server interceptors
+ // Add 20 phony server interceptors
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
}
server_ = builder.BuildAndStart();
is_server_started_ = true;
- if (GetParam().protocol == Protocol::TCP &&
- !grpc_iomgr_run_in_background()) {
- do_not_test_ = true;
- }
}
void ResetStub() {
@@ -148,7 +133,7 @@ class ClientCallbackEnd2endTest
} else {
channel_ = CreateCustomChannelWithInterceptors(
server_address_.str(), channel_creds, args,
- CreateDummyClientInterceptors());
+ CreatePhonyClientInterceptors());
}
break;
case Protocol::INPROC:
@@ -156,15 +141,15 @@ class ClientCallbackEnd2endTest
channel_ = server_->InProcessChannel(args);
} else {
channel_ = server_->experimental().InProcessChannelWithInterceptors(
- args, CreateDummyClientInterceptors());
+ args, CreatePhonyClientInterceptors());
}
break;
default:
assert(false);
}
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- generic_stub_.reset(new GenericStub(channel_));
- DummyInterceptor::Reset();
+ generic_stub_ = y_absl::make_unique<GenericStub>(channel_);
+ PhonyInterceptor::Reset();
}
void TearDown() override {
@@ -256,7 +241,7 @@ class ClientCallbackEnd2endTest
cv.notify_one();
#if GRPC_ALLOW_EXCEPTIONS
if (maybe_except) {
- throw - 1;
+ throw -1;
}
#else
GPR_ASSERT(!maybe_except);
@@ -282,7 +267,7 @@ class ClientCallbackEnd2endTest
: reuses_remaining_(reuses), do_writes_done_(do_writes_done) {
activate_ = [this, test, method_name, test_str] {
if (reuses_remaining_ > 0) {
- cli_ctx_.reset(new ClientContext);
+ cli_ctx_ = y_absl::make_unique<ClientContext>();
reuses_remaining_--;
test->generic_stub_->experimental().PrepareBidiStreamingCall(
cli_ctx_.get(), method_name, this);
@@ -337,7 +322,6 @@ class ClientCallbackEnd2endTest
rpc.Await();
}
}
- bool do_not_test_{false};
bool is_server_started_{false};
int picked_port_{0};
std::shared_ptr<Channel> channel_;
@@ -350,13 +334,11 @@ class ClientCallbackEnd2endTest
};
TEST_P(ClientCallbackEnd2endTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcs(1, false);
}
TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
@@ -391,7 +373,6 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
}
TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
- MAYBE_SKIP_TEST;
ResetStub();
// The request/response state associated with an RPC and the synchronization
@@ -433,7 +414,7 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
rpc_state[index].done = true;
rpc_state[index].cv.notify_all();
// Call the next level of nesting if possible
- if (index + 1 < rpc_state.size()) {
+ if (index + 1 < int(rpc_state.size())) {
nested_call(index + 1);
}
});
@@ -452,7 +433,6 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
}
TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
- MAYBE_SKIP_TEST;
ResetStub();
std::mutex mu;
std::condition_variable cv;
@@ -480,13 +460,11 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
}
TEST_P(ClientCallbackEnd2endTest, SequentialRpcs) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcs(10, false);
}
TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
- MAYBE_SKIP_TEST;
ResetStub();
SimpleRequest request;
SimpleResponse response;
@@ -513,51 +491,43 @@ TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
}
TEST_P(ClientCallbackEnd2endTest, SimpleRpcWithBinaryMetadata) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcs(1, true);
}
TEST_P(ClientCallbackEnd2endTest, SequentialRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcs(10, true);
}
TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcsGeneric(10, false);
}
TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) {
- MAYBE_SKIP_TEST;
ResetStub();
SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true);
}
TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) {
- MAYBE_SKIP_TEST;
ResetStub();
SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true);
}
TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) {
- MAYBE_SKIP_TEST;
ResetStub();
SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false);
}
#if GRPC_ALLOW_EXCEPTIONS
TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpcsGeneric(10, true);
}
#endif
TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<std::thread> threads;
threads.reserve(10);
@@ -570,7 +540,6 @@ TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
}
TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<std::thread> threads;
threads.reserve(10);
@@ -583,7 +552,6 @@ TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
}
TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -607,12 +575,11 @@ TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
cv.wait(l);
}
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -743,70 +710,64 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
};
TEST_P(ClientCallbackEnd2endTest, RequestStream) {
- MAYBE_SKIP_TEST;
ResetStub();
WriteClient test{stub_.get(), DO_NOT_CANCEL, 3};
test.Await();
// Make sure that the server interceptors were not notified to cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, ClientCancelsRequestStream) {
- MAYBE_SKIP_TEST;
ResetStub();
WriteClient test{stub_.get(), DO_NOT_CANCEL, 3, ClientCancelInfo{2}};
test.Await();
// Make sure that the server interceptors got the cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel before doing reading the request
TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelBeforeReads) {
- MAYBE_SKIP_TEST;
ResetStub();
WriteClient test{stub_.get(), CANCEL_BEFORE_PROCESSING, 1};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel while reading a request from the stream in parallel
TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelDuringRead) {
- MAYBE_SKIP_TEST;
ResetStub();
WriteClient test{stub_.get(), CANCEL_DURING_PROCESSING, 10};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel after reading all the requests but before returning to the
// client
TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelAfterReads) {
- MAYBE_SKIP_TEST;
ResetStub();
WriteClient test{stub_.get(), CANCEL_AFTER_PROCESSING, 4};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
- MAYBE_SKIP_TEST;
ResetStub();
class UnaryClient : public grpc::experimental::ClientUnaryReactor {
public:
- UnaryClient(grpc::testing::EchoTestService::Stub* stub) {
+ explicit UnaryClient(grpc::testing::EchoTestService::Stub* stub) {
cli_ctx_.AddMetadata("key1", "val1");
cli_ctx_.AddMetadata("key2", "val2");
request_.mutable_param()->set_echo_metadata_initially(true);
@@ -856,12 +817,11 @@ TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
- MAYBE_SKIP_TEST;
ResetStub();
const TString kMethodName("/grpc.testing.EchoTestService/Echo");
class UnaryClient : public grpc::experimental::ClientUnaryReactor {
@@ -922,7 +882,7 @@ TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
@@ -1027,18 +987,16 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
};
TEST_P(ClientCallbackEnd2endTest, ResponseStream) {
- MAYBE_SKIP_TEST;
ResetStub();
ReadClient test{stub_.get(), DO_NOT_CANCEL};
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
- MAYBE_SKIP_TEST;
ResetStub();
ReadClient test{stub_.get(), DO_NOT_CANCEL, ClientCancelInfo{2}};
test.Await();
@@ -1048,38 +1006,35 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
// Server to cancel before sending any response messages
TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelBefore) {
- MAYBE_SKIP_TEST;
ResetStub();
ReadClient test{stub_.get(), CANCEL_BEFORE_PROCESSING};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel while writing a response to the stream in parallel
TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelDuring) {
- MAYBE_SKIP_TEST;
ResetStub();
ReadClient test{stub_.get(), CANCEL_DURING_PROCESSING};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel after writing all the respones to the stream but before
// returning to the client
TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelAfter) {
- MAYBE_SKIP_TEST;
ResetStub();
ReadClient test{stub_.get(), CANCEL_AFTER_PROCESSING};
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
@@ -1238,7 +1193,6 @@ class BidiClient
};
TEST_P(ClientCallbackEnd2endTest, BidiStream) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
@@ -1246,12 +1200,11 @@ TEST_P(ClientCallbackEnd2endTest, BidiStream) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
@@ -1259,12 +1212,11 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
@@ -1272,12 +1224,11 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
@@ -1285,12 +1236,11 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(0, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
@@ -1299,27 +1249,25 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
test.Await();
// Make sure that the server interceptors were notified of a cancel
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel before reading/writing any requests/responses on the stream
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
/*cork_metadata=*/false, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel while reading/writing requests/responses on the stream in
// parallel
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
/*num_msgs_to_send=*/10, /*cork_metadata=*/false,
@@ -1327,31 +1275,29 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Server to cancel after reading/writing all requests/responses on the stream
// but before returning to the client
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
- MAYBE_SKIP_TEST;
ResetStub();
BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
/*cork_metadata=*/false, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
- MAYBE_SKIP_TEST;
ResetStub();
class Client : public grpc::experimental::ClientBidiReactor<EchoRequest,
EchoResponse> {
public:
- Client(grpc::testing::EchoTestService::Stub* stub) {
+ explicit Client(grpc::testing::EchoTestService::Stub* stub) {
request_.set_message("Hello bidi ");
stub->experimental_async()->BidiStream(&context_, this);
StartWrite(&request_);
@@ -1394,7 +1340,6 @@ TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
}
TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
- MAYBE_SKIP_TEST;
ChannelArguments args;
const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
GetParam().credentials_type, &args);
@@ -1429,12 +1374,12 @@ TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
TEST_P(ClientCallbackEnd2endTest,
ResponseStreamExtraReactionFlowReadsUntilDone) {
- MAYBE_SKIP_TEST;
ResetStub();
class ReadAllIncomingDataClient
: public grpc::experimental::ClientReadReactor<EchoResponse> {
public:
- ReadAllIncomingDataClient(grpc::testing::EchoTestService::Stub* stub) {
+ explicit ReadAllIncomingDataClient(
+ grpc::testing::EchoTestService::Stub* stub) {
request_.set_message("Hello client ");
stub->experimental_async()->ResponseStream(&context_, &request_, this);
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
index 80e1869396..7fd8a224bb 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
@@ -26,6 +26,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -36,7 +38,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
static TString g_root;
@@ -54,7 +55,7 @@ class CrashTest : public ::testing::Test {
std::ostringstream addr_stream;
addr_stream << "localhost:" << port;
auto addr = addr_stream.str();
- server_.reset(new SubProcess({
+ server_ = y_absl::make_unique<SubProcess>(std::vector<TString>({
g_root + "/client_crash_test_server",
"--address=" + addr,
}));
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
index 2d5be420f2..bf4ae9e041 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
@@ -16,31 +16,24 @@
*
*/
-#include <gflags/gflags.h>
-#include <iostream>
-#include <memory>
-#include <util/generic/string.h>
-
#include <grpc/support/log.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include <iostream>
+#include <memory>
+#include <util/generic/string.h>
+
+#include "y_absl/flags/flag.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/util/test_config.h"
-DEFINE_string(address, "", "Address to bind to");
+Y_ABSL_FLAG(TString, address, "", "Address to bind to");
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-// In some distros, gflags is in the namespace google, and in some others,
-// in gflags. This hack is enabling us to find both.
-namespace google {}
-namespace gflags {}
-using namespace google;
-using namespace gflags;
-
namespace grpc {
namespace testing {
@@ -63,10 +56,12 @@ void RunServer() {
ServiceImpl service;
ServerBuilder builder;
- builder.AddListeningPort(FLAGS_address, grpc::InsecureServerCredentials());
+ builder.AddListeningPort(y_absl::GetFlag(FLAGS_address),
+ grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
- std::cout << "Server listening on " << FLAGS_address << std::endl;
+ std::cout << "Server listening on " << y_absl::GetFlag(FLAGS_address)
+ << std::endl;
server->Wait();
}
} // namespace testing
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
index 956876d9f6..9088d07f65 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
@@ -19,16 +19,21 @@
#include <memory>
#include <vector>
+#include "y_absl/memory/memory.h"
+
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
+#include <grpcpp/create_channel_posix.h>
#include <grpcpp/generic/generic_stub.h>
#include <grpcpp/impl/codegen/proto_utils.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include <grpcpp/server_posix.h>
#include <grpcpp/support/client_interceptor.h>
+#include "src/core/lib/iomgr/port.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -37,6 +42,11 @@
#include "test/cpp/util/byte_buffer_proto_helper.h"
#include "test/cpp/util/string_ref_helper.h"
+#ifdef GRPC_POSIX_SOCKET
+#include <fcntl.h>
+#include "src/core/lib/iomgr/socket_utils_posix.h"
+#endif /* GRPC_POSIX_SOCKET */
+
#include <gtest/gtest.h>
namespace grpc {
@@ -54,17 +64,22 @@ enum class RPCType {
kAsyncCQBidiStreaming,
};
+enum class ChannelType {
+ kHttpChannel,
+ kFdChannel,
+};
+
/* Hijacks Echo RPC and fills in the expected values */
class HijackingInterceptor : public experimental::Interceptor {
public:
- HijackingInterceptor(experimental::ClientRpcInfo* info) {
+ explicit HijackingInterceptor(experimental::ClientRpcInfo* info) {
info_ = info;
// Make sure it is the right method
EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
EXPECT_EQ(info->type(), experimental::ClientRpcInfo::Type::UNARY);
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
bool hijack = false;
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
@@ -155,7 +170,7 @@ class HijackingInterceptor : public experimental::Interceptor {
class HijackingInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new HijackingInterceptor(info);
}
@@ -163,13 +178,14 @@ class HijackingInterceptorFactory
class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor {
public:
- HijackingInterceptorMakesAnotherCall(experimental::ClientRpcInfo* info) {
+ explicit HijackingInterceptorMakesAnotherCall(
+ experimental::ClientRpcInfo* info) {
info_ = info;
// Make sure it is the right method
EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
auto* map = methods->GetSendInitialMetadata();
@@ -277,7 +293,7 @@ class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor {
class HijackingInterceptorMakesAnotherCallFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new HijackingInterceptorMakesAnotherCall(info);
}
@@ -285,11 +301,12 @@ class HijackingInterceptorMakesAnotherCallFactory
class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor {
public:
- BidiStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ explicit BidiStreamingRpcHijackingInterceptor(
+ experimental::ClientRpcInfo* info) {
info_ = info;
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
bool hijack = false;
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
@@ -355,10 +372,11 @@ class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor {
class ClientStreamingRpcHijackingInterceptor
: public experimental::Interceptor {
public:
- ClientStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ explicit ClientStreamingRpcHijackingInterceptor(
+ experimental::ClientRpcInfo* info) {
info_ = info;
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
bool hijack = false;
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
@@ -400,7 +418,7 @@ bool ClientStreamingRpcHijackingInterceptor::got_failed_send_ = false;
class ClientStreamingRpcHijackingInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new ClientStreamingRpcHijackingInterceptor(info);
}
@@ -409,12 +427,13 @@ class ClientStreamingRpcHijackingInterceptorFactory
class ServerStreamingRpcHijackingInterceptor
: public experimental::Interceptor {
public:
- ServerStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ explicit ServerStreamingRpcHijackingInterceptor(
+ experimental::ClientRpcInfo* info) {
info_ = info;
got_failed_message_ = false;
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
bool hijack = false;
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
@@ -498,7 +517,7 @@ bool ServerStreamingRpcHijackingInterceptor::got_failed_message_ = false;
class ServerStreamingRpcHijackingInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new ServerStreamingRpcHijackingInterceptor(info);
}
@@ -507,7 +526,7 @@ class ServerStreamingRpcHijackingInterceptorFactory
class BidiStreamingRpcHijackingInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new BidiStreamingRpcHijackingInterceptor(info);
}
@@ -519,7 +538,7 @@ class BidiStreamingRpcHijackingInterceptorFactory
// single RPC should be made on the channel before calling the Verify methods.
class LoggingInterceptor : public experimental::Interceptor {
public:
- LoggingInterceptor(experimental::ClientRpcInfo* /*info*/) {
+ explicit LoggingInterceptor(experimental::ClientRpcInfo* /*info*/) {
pre_send_initial_metadata_ = false;
pre_send_message_count_ = 0;
pre_send_close_ = false;
@@ -528,7 +547,7 @@ class LoggingInterceptor : public experimental::Interceptor {
post_recv_status_ = false;
}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
auto* map = methods->GetSendInitialMetadata();
@@ -677,7 +696,7 @@ bool LoggingInterceptor::post_recv_status_;
class LoggingInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* info) override {
return new LoggingInterceptor(info);
}
@@ -685,22 +704,35 @@ class LoggingInterceptorFactory
class TestScenario {
public:
- explicit TestScenario(const RPCType& type) : type_(type) {}
+ explicit TestScenario(const ChannelType& channel_type,
+ const RPCType& rpc_type)
+ : channel_type_(channel_type), rpc_type_(rpc_type) {}
+
+ ChannelType channel_type() const { return channel_type_; }
- RPCType type() const { return type_; }
+ RPCType rpc_type() const { return rpc_type_; }
private:
- RPCType type_;
+ const ChannelType channel_type_;
+ const RPCType rpc_type_;
};
std::vector<TestScenario> CreateTestScenarios() {
std::vector<TestScenario> scenarios;
- scenarios.emplace_back(RPCType::kSyncUnary);
- scenarios.emplace_back(RPCType::kSyncClientStreaming);
- scenarios.emplace_back(RPCType::kSyncServerStreaming);
- scenarios.emplace_back(RPCType::kSyncBidiStreaming);
- scenarios.emplace_back(RPCType::kAsyncCQUnary);
- scenarios.emplace_back(RPCType::kAsyncCQServerStreaming);
+ std::vector<RPCType> rpc_types;
+ rpc_types.emplace_back(RPCType::kSyncUnary);
+ rpc_types.emplace_back(RPCType::kSyncClientStreaming);
+ rpc_types.emplace_back(RPCType::kSyncServerStreaming);
+ rpc_types.emplace_back(RPCType::kSyncBidiStreaming);
+ rpc_types.emplace_back(RPCType::kAsyncCQUnary);
+ rpc_types.emplace_back(RPCType::kAsyncCQServerStreaming);
+ for (const auto& rpc_type : rpc_types) {
+ scenarios.emplace_back(ChannelType::kHttpChannel, rpc_type);
+// TODO(yashykt): Maybe add support for non-posix sockets too
+#ifdef GRPC_POSIX_SOCKET
+ scenarios.emplace_back(ChannelType::kFdChannel, rpc_type);
+#endif /* GRPC_POSIX_SOCKET */
+ }
return scenarios;
}
@@ -708,19 +740,56 @@ class ParameterizedClientInterceptorsEnd2endTest
: public ::testing::TestWithParam<TestScenario> {
protected:
ParameterizedClientInterceptorsEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
+ if (GetParam().channel_type() == ChannelType::kHttpChannel) {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ = "localhost:" + ToString(port);
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ server_ = builder.BuildAndStart();
+ }
+#ifdef GRPC_POSIX_SOCKET
+ else if (GetParam().channel_type() == ChannelType::kFdChannel) {
+ int flags;
+ GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv_) == 0);
+ flags = fcntl(sv_[0], F_GETFL, 0);
+ GPR_ASSERT(fcntl(sv_[0], F_SETFL, flags | O_NONBLOCK) == 0);
+ flags = fcntl(sv_[1], F_GETFL, 0);
+ GPR_ASSERT(fcntl(sv_[1], F_SETFL, flags | O_NONBLOCK) == 0);
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv_[0]) ==
+ GRPC_ERROR_NONE);
+ GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv_[1]) ==
+ GRPC_ERROR_NONE);
+ server_ = builder.BuildAndStart();
+ AddInsecureChannelFromFd(server_.get(), sv_[1]);
+ }
+#endif /* GRPC_POSIX_SOCKET */
+ }
+
+ ~ParameterizedClientInterceptorsEnd2endTest() override {
+ server_->Shutdown();
}
- ~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); }
+ std::shared_ptr<grpc::Channel> CreateClientChannel(
+ std::vector<std::unique_ptr<
+ grpc::experimental::ClientInterceptorFactoryInterface>>
+ creators) {
+ if (GetParam().channel_type() == ChannelType::kHttpChannel) {
+ return experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), ChannelArguments(),
+ std::move(creators));
+ }
+#ifdef GRPC_POSIX_SOCKET
+ else if (GetParam().channel_type() == ChannelType::kFdChannel) {
+ return experimental::CreateCustomInsecureChannelWithInterceptorsFromFd(
+ "", sv_[0], ChannelArguments(), std::move(creators));
+ }
+#endif /* GRPC_POSIX_SOCKET */
+ return nullptr;
+ }
void SendRPC(const std::shared_ptr<Channel>& channel) {
- switch (GetParam().type()) {
+ switch (GetParam().rpc_type()) {
case RPCType::kSyncUnary:
MakeCall(channel);
break;
@@ -749,6 +818,7 @@ class ParameterizedClientInterceptorsEnd2endTest
}
TString server_address_;
+ int sv_[2];
EchoTestServiceStreamingImpl service_;
std::unique_ptr<Server> server_;
};
@@ -756,22 +826,19 @@ class ParameterizedClientInterceptorsEnd2endTest
TEST_P(ParameterizedClientInterceptorsEnd2endTest,
ClientInterceptorLoggingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ auto channel = CreateClientChannel(std::move(creators));
SendRPC(channel);
- LoggingInterceptor::VerifyCall(GetParam().type());
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ LoggingInterceptor::VerifyCall(GetParam().rpc_type());
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end,
@@ -791,7 +858,7 @@ class ClientInterceptorsEnd2endTest
server_ = builder.BuildAndStart();
}
- ~ClientInterceptorsEnd2endTest() { server_->Shutdown(); }
+ ~ClientInterceptorsEnd2endTest() override { server_->Shutdown(); }
TString server_address_;
TestServiceImpl service_;
@@ -803,8 +870,7 @@ TEST_F(ClientInterceptorsEnd2endTest,
ChannelArguments args;
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<HijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, nullptr, args, std::move(creators));
MakeCall(channel);
@@ -812,37 +878,32 @@ TEST_F(ClientInterceptorsEnd2endTest,
TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorHijackingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 20 dummy interceptors before hijacking interceptor
+ // Add 20 phony interceptors before hijacking interceptor
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
- // Add 20 dummy interceptors after hijacking interceptor
+ creators.push_back(y_absl::make_unique<HijackingInterceptorFactory>());
+ // Add 20 phony interceptors after hijacking interceptor
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeCall(channel);
- // Make sure only 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure only 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorLogThenHijackTest) {
ChannelArguments args;
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ creators.push_back(y_absl::make_unique<HijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeCall(channel);
@@ -852,22 +913,20 @@ TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorLogThenHijackTest) {
TEST_F(ClientInterceptorsEnd2endTest,
ClientInterceptorHijackingMakesAnotherCallTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 5 dummy interceptors before hijacking interceptor
+ // Add 5 phony interceptors before hijacking interceptor
creators.reserve(5);
for (auto i = 0; i < 5; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
creators.push_back(
std::unique_ptr<experimental::ClientInterceptorFactoryInterface>(
new HijackingInterceptorMakesAnotherCallFactory()));
- // Add 7 dummy interceptors after hijacking interceptor
+ // Add 7 phony interceptors after hijacking interceptor
for (auto i = 0; i < 7; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = server_->experimental().InProcessChannelWithInterceptors(
args, std::move(creators));
@@ -875,7 +934,7 @@ TEST_F(ClientInterceptorsEnd2endTest,
MakeCall(channel);
// Make sure all interceptors were run once, since the hijacking interceptor
// makes an RPC on the intercepted channel
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 12);
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 12);
}
class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
@@ -890,7 +949,7 @@ class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
server_ = builder.BuildAndStart();
}
- ~ClientInterceptorsCallbackEnd2endTest() { server_->Shutdown(); }
+ ~ClientInterceptorsCallbackEnd2endTest() override { server_->Shutdown(); }
TString server_address_;
TestServiceImpl service_;
@@ -900,45 +959,40 @@ class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
TEST_F(ClientInterceptorsCallbackEnd2endTest,
ClientInterceptorLoggingTestWithCallback) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = server_->experimental().InProcessChannelWithInterceptors(
args, std::move(creators));
MakeCallbackCall(channel);
LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ClientInterceptorsCallbackEnd2endTest,
ClientInterceptorFactoryAllowsNullptrReturn) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors and 20 null interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors and 20 null interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- creators.push_back(
- std::unique_ptr<NullInterceptorFactory>(new NullInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
+ creators.push_back(y_absl::make_unique<NullInterceptorFactory>());
}
auto channel = server_->experimental().InProcessChannelWithInterceptors(
args, std::move(creators));
MakeCallbackCall(channel);
LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
@@ -953,7 +1007,7 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
server_ = builder.BuildAndStart();
}
- ~ClientInterceptorsStreamingEnd2endTest() { server_->Shutdown(); }
+ ~ClientInterceptorsStreamingEnd2endTest() override { server_->Shutdown(); }
TString server_address_;
EchoTestServiceStreamingImpl service_;
@@ -962,42 +1016,38 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeClientStreamingCall(channel);
LoggingInterceptor::VerifyClientStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeServerStreamingCall(channel);
LoggingInterceptor::VerifyServerStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingHijackingTest) {
@@ -1005,8 +1055,7 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingHijackingTest) {
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
creators.push_back(
- std::unique_ptr<ClientStreamingRpcHijackingInterceptorFactory>(
- new ClientStreamingRpcHijackingInterceptorFactory()));
+ y_absl::make_unique<ClientStreamingRpcHijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
@@ -1031,12 +1080,11 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingHijackingTest) {
TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
creators.push_back(
- std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
- new ServerStreamingRpcHijackingInterceptorFactory()));
+ y_absl::make_unique<ServerStreamingRpcHijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeServerStreamingCall(channel);
@@ -1046,12 +1094,11 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) {
TEST_F(ClientInterceptorsStreamingEnd2endTest,
AsyncCQServerStreamingHijackingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
creators.push_back(
- std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
- new ServerStreamingRpcHijackingInterceptorFactory()));
+ y_absl::make_unique<ServerStreamingRpcHijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeAsyncCQServerStreamingCall(channel);
@@ -1060,12 +1107,11 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest,
TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
creators.push_back(
- std::unique_ptr<BidiStreamingRpcHijackingInterceptorFactory>(
- new BidiStreamingRpcHijackingInterceptorFactory()));
+ y_absl::make_unique<BidiStreamingRpcHijackingInterceptorFactory>());
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeBidiStreamingCall(channel);
@@ -1073,22 +1119,20 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) {
TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
+ creators.push_back(y_absl::make_unique<LoggingInterceptorFactory>());
+ // Add 20 phony interceptors
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeBidiStreamingCall(channel);
LoggingInterceptor::VerifyBidiStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
@@ -1103,34 +1147,33 @@ class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
server_ = builder.BuildAndStart();
}
- ~ClientGlobalInterceptorEnd2endTest() { server_->Shutdown(); }
+ ~ClientGlobalInterceptorEnd2endTest() override { server_->Shutdown(); }
TString server_address_;
TestServiceImpl service_;
std::unique_ptr<Server> server_;
};
-TEST_F(ClientGlobalInterceptorEnd2endTest, DummyGlobalInterceptor) {
+TEST_F(ClientGlobalInterceptorEnd2endTest, PhonyGlobalInterceptor) {
// We should ideally be registering a global interceptor only once per
// process, but for the purposes of testing, it should be fine to modify the
// registered global interceptor when there are no ongoing gRPC operations
- DummyInterceptorFactory global_factory;
+ PhonyInterceptorFactory global_factory;
experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 20 dummy interceptors
+ // Add 20 phony interceptors
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeCall(channel);
- // Make sure all 20 dummy interceptors were run with the global interceptor
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 21);
+ // Make sure all 20 phony interceptors were run with the global interceptor
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 21);
experimental::TestOnlyResetGlobalClientInterceptorFactory();
}
@@ -1141,21 +1184,20 @@ TEST_F(ClientGlobalInterceptorEnd2endTest, LoggingGlobalInterceptor) {
LoggingInterceptorFactory global_factory;
experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 20 dummy interceptors
+ // Add 20 phony interceptors
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeCall(channel);
LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
experimental::TestOnlyResetGlobalClientInterceptorFactory();
}
@@ -1166,20 +1208,19 @@ TEST_F(ClientGlobalInterceptorEnd2endTest, HijackingGlobalInterceptor) {
HijackingInterceptorFactory global_factory;
experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 20 dummy interceptors
+ // Add 20 phony interceptors
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
MakeCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
experimental::TestOnlyResetGlobalClientInterceptorFactory();
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
index fd08dd163d..990bab043e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
@@ -24,6 +24,7 @@
#include <util/generic/string.h>
#include <thread>
+#include "y_absl/memory/memory.h"
#include "y_absl/strings/str_cat.h"
#include <grpc/grpc.h>
@@ -58,6 +59,7 @@
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/xds/orca_load_report_for_test.pb.h"
#include "test/core/util/port.h"
+#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/test_config.h"
#include "test/core/util/test_lb_policies.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -67,7 +69,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
// defined in tcp_client.cc
extern grpc_tcp_client_vtable* grpc_tcp_client_impl;
@@ -152,12 +153,14 @@ class MyTestServiceImpl : public TestServiceImpl {
class FakeResolverResponseGeneratorWrapper {
public:
- FakeResolverResponseGeneratorWrapper()
- : response_generator_(grpc_core::MakeRefCounted<
+ explicit FakeResolverResponseGeneratorWrapper(bool ipv6_only)
+ : ipv6_only_(ipv6_only),
+ response_generator_(grpc_core::MakeRefCounted<
grpc_core::FakeResolverResponseGenerator>()) {}
FakeResolverResponseGeneratorWrapper(
FakeResolverResponseGeneratorWrapper&& other) noexcept {
+ ipv6_only_ = other.ipv6_only_;
response_generator_ = std::move(other.response_generator_);
}
@@ -167,13 +170,15 @@ class FakeResolverResponseGeneratorWrapper {
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
nullptr) {
grpc_core::ExecCtx exec_ctx;
- response_generator_->SetResponse(BuildFakeResults(
- ports, service_config_json, attribute_key, std::move(attribute)));
+ response_generator_->SetResponse(
+ BuildFakeResults(ipv6_only_, ports, service_config_json, attribute_key,
+ std::move(attribute)));
}
void SetNextResolutionUponError(const std::vector<int>& ports) {
grpc_core::ExecCtx exec_ctx;
- response_generator_->SetReresolutionResponse(BuildFakeResults(ports));
+ response_generator_->SetReresolutionResponse(
+ BuildFakeResults(ipv6_only_, ports));
}
void SetFailureOnReresolution() {
@@ -187,17 +192,18 @@ class FakeResolverResponseGeneratorWrapper {
private:
static grpc_core::Resolver::Result BuildFakeResults(
- const std::vector<int>& ports, const char* service_config_json = nullptr,
+ bool ipv6_only, const std::vector<int>& ports,
+ const char* service_config_json = nullptr,
const char* attribute_key = nullptr,
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
nullptr) {
grpc_core::Resolver::Result result;
for (const int& port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
+ y_absl::StatusOr<grpc_core::URI> lb_uri = grpc_core::URI::Parse(
+ y_absl::StrCat(ipv6_only ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", port));
+ GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
std::map<const char*,
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>>
attributes;
@@ -206,7 +212,6 @@ class FakeResolverResponseGeneratorWrapper {
}
result.addresses.emplace_back(address.addr, address.len,
nullptr /* args */, std::move(attributes));
- grpc_uri_destroy(lb_uri);
}
if (service_config_json != nullptr) {
result.service_config = grpc_core::ServiceConfig::Create(
@@ -216,6 +221,7 @@ class FakeResolverResponseGeneratorWrapper {
return result;
}
+ bool ipv6_only_ = false;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
response_generator_;
};
@@ -238,7 +244,14 @@ class ClientLbEnd2endTest : public ::testing::Test {
#endif
}
- void SetUp() override { grpc_init(); }
+ void SetUp() override {
+ grpc_init();
+ bool localhost_resolves_to_ipv4 = false;
+ bool localhost_resolves_to_ipv6 = false;
+ grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
+ &localhost_resolves_to_ipv6);
+ ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
+ }
void TearDown() override {
for (size_t i = 0; i < servers_.size(); ++i) {
@@ -246,7 +259,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
servers_.clear();
creds_.reset();
- grpc_shutdown_blocking();
+ grpc_shutdown();
}
void CreateServers(size_t num_servers,
@@ -278,7 +291,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
FakeResolverResponseGeneratorWrapper BuildResolverResponseGenerator() {
- return FakeResolverResponseGeneratorWrapper();
+ return FakeResolverResponseGeneratorWrapper(ipv6_only_);
}
std::unique_ptr<grpc::testing::EchoTestService::Stub> BuildStub(
@@ -290,7 +303,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
const TString& lb_policy_name,
const FakeResolverResponseGeneratorWrapper& response_generator,
ChannelArguments args = ChannelArguments()) {
- if (lb_policy_name.size() > 0) {
+ if (!lb_policy_name.empty()) {
args.SetLoadBalancingPolicyName(lb_policy_name);
} // else, default to pick first
args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
@@ -359,9 +372,9 @@ class ClientLbEnd2endTest : public ::testing::Test {
grpc::internal::Mutex mu;
grpc::internal::MutexLock lock(&mu);
grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
- cond.WaitUntil(&mu, [this] { return server_ready_; });
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerData::Serve, this, server_host, &mu, &cond));
+ grpc::internal::WaitUntil(&cond, &mu, [this] { return server_ready_; });
server_ready_ = false;
gpr_log(GPR_INFO, "server startup complete");
}
@@ -412,7 +425,8 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
bool WaitForChannelState(
- Channel* channel, std::function<bool(grpc_connectivity_state)> predicate,
+ Channel* channel,
+ const std::function<bool(grpc_connectivity_state)>& predicate,
bool try_to_connect = false, int timeout_seconds = 5) {
const gpr_timespec deadline =
grpc_timeout_seconds_to_deadline(timeout_seconds);
@@ -467,6 +481,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
std::vector<std::unique_ptr<ServerData>> servers_;
const TString kRequestMessage_;
std::shared_ptr<ChannelCredentials> creds_;
+ bool ipv6_only_ = false;
};
TEST_F(ClientLbEnd2endTest, ChannelStateConnectingWhenResolving) {
@@ -1637,14 +1652,17 @@ TEST_F(ClientLbEnd2endTest, ChannelIdleness) {
// The initial channel state should be IDLE.
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
// After sending RPC, channel state should be READY.
+ gpr_log(GPR_INFO, "*** SENDING RPC, CHANNEL SHOULD CONNECT ***");
response_generator.SetNextResolution(GetServersPorts());
CheckRpcSendOk(stub, DEBUG_LOCATION);
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
// After a period time not using the channel, the channel state should switch
// to IDLE.
+ gpr_log(GPR_INFO, "*** WAITING FOR CHANNEL TO GO IDLE ***");
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(1200));
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
// Sending a new RPC should awake the IDLE channel.
+ gpr_log(GPR_INFO, "*** SENDING ANOTHER RPC, CHANNEL SHOULD RECONNECT ***");
response_generator.SetNextResolution(GetServersPorts());
CheckRpcSendOk(stub, DEBUG_LOCATION);
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
@@ -1662,7 +1680,7 @@ class ClientLbPickArgsTest : public ClientLbEnd2endTest {
grpc_core::RegisterTestPickArgsLoadBalancingPolicy(SavePickArgs);
}
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
const std::vector<grpc_core::PickArgsSeen>& args_seen_list() {
grpc::internal::MutexLock lock(&mu_);
@@ -1728,7 +1746,7 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
ReportTrailerIntercepted);
}
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
int trailers_intercepted() {
grpc::internal::MutexLock lock(&mu_);
@@ -1754,7 +1772,8 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
self->trailers_intercepted_++;
self->trailing_metadata_ = args_seen.metadata;
if (backend_metric_data != nullptr) {
- self->load_report_.reset(new udpa::data::orca::v1::OrcaLoadReport);
+ self->load_report_ =
+ y_absl::make_unique<udpa::data::orca::v1::OrcaLoadReport>();
self->load_report_->set_cpu_utilization(
backend_metric_data->cpu_utilization);
self->load_report_->set_mem_utilization(
@@ -1762,13 +1781,11 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
self->load_report_->set_rps(backend_metric_data->requests_per_second);
for (const auto& p : backend_metric_data->request_cost) {
TString name = TString(p.first);
- (*self->load_report_->mutable_request_cost())[std::move(name)] =
- p.second;
+ (*self->load_report_->mutable_request_cost())[name] = p.second;
}
for (const auto& p : backend_metric_data->utilization) {
TString name = TString(p.first);
- (*self->load_report_->mutable_utilization())[std::move(name)] =
- p.second;
+ (*self->load_report_->mutable_utilization())[name] = p.second;
}
}
}
@@ -1933,7 +1950,7 @@ class ClientLbAddressTest : public ClientLbEnd2endTest {
grpc_core::RegisterAddressTestLoadBalancingPolicy(SaveAddress);
}
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
const std::vector<TString>& addresses_seen() {
grpc::internal::MutexLock lock(&mu_);
@@ -1972,8 +1989,9 @@ TEST_F(ClientLbAddressTest, Basic) {
// Make sure that the attributes wind up on the subchannels.
std::vector<TString> expected;
for (const int port : GetServersPorts()) {
- expected.emplace_back(y_absl::StrCat(
- "127.0.0.1:", port, " args={} attributes={", kAttributeKey, "=foo}"));
+ expected.emplace_back(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", port,
+ " args={} attributes={", kAttributeKey, "=foo}"));
}
EXPECT_EQ(addresses_seen(), expected);
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/context_allocator_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/context_allocator_end2end_test.cc
new file mode 100644
index 0000000000..7e20793678
--- /dev/null
+++ b/contrib/libs/grpc/test/cpp/end2end/context_allocator_end2end_test.cc
@@ -0,0 +1,334 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/impl/codegen/log.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/client_callback.h>
+#include <grpcpp/support/message_allocator.h>
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <thread>
+
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+namespace grpc {
+namespace testing {
+namespace {
+
+enum class Protocol { INPROC, TCP };
+
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+using experimental::GenericCallbackServerContext;
+#endif
+
+class TestScenario {
+ public:
+ TestScenario(Protocol protocol, const TString& creds_type)
+ : protocol(protocol), credentials_type(creds_type) {}
+ void Log() const;
+ Protocol protocol;
+ const TString credentials_type;
+};
+
+static std::ostream& operator<<(std::ostream& out,
+ const TestScenario& scenario) {
+ return out << "TestScenario{protocol="
+ << (scenario.protocol == Protocol::INPROC ? "INPROC" : "TCP")
+ << "," << scenario.credentials_type << "}";
+}
+
+void TestScenario::Log() const {
+ std::ostringstream out;
+ out << *this;
+ gpr_log(GPR_INFO, "%s", out.str().c_str());
+}
+
+class ContextAllocatorEnd2endTestBase
+ : public ::testing::TestWithParam<TestScenario> {
+ protected:
+ static void SetUpTestCase() { grpc_init(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
+ ContextAllocatorEnd2endTestBase() {}
+
+ ~ContextAllocatorEnd2endTestBase() override = default;
+
+ void SetUp() override { GetParam().Log(); }
+
+ void CreateServer(std::unique_ptr<grpc::ContextAllocator> context_allocator) {
+ ServerBuilder builder;
+
+ auto server_creds = GetCredentialsProvider()->GetServerCredentials(
+ GetParam().credentials_type);
+ if (GetParam().protocol == Protocol::TCP) {
+ picked_port_ = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << picked_port_;
+ builder.AddListeningPort(server_address_.str(), server_creds);
+ }
+ builder.experimental().SetContextAllocator(std::move(context_allocator));
+ builder.RegisterService(&callback_service_);
+
+ server_ = builder.BuildAndStart();
+ }
+
+ void DestroyServer() {
+ if (server_) {
+ server_->Shutdown();
+ server_.reset();
+ }
+ }
+
+ void ResetStub() {
+ ChannelArguments args;
+ auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ switch (GetParam().protocol) {
+ case Protocol::TCP:
+ channel_ = ::grpc::CreateCustomChannel(server_address_.str(),
+ channel_creds, args);
+ break;
+ case Protocol::INPROC:
+ channel_ = server_->InProcessChannel(args);
+ break;
+ default:
+ assert(false);
+ }
+ stub_ = EchoTestService::NewStub(channel_);
+ }
+
+ void TearDown() override {
+ DestroyServer();
+ if (picked_port_ > 0) {
+ grpc_recycle_unused_port(picked_port_);
+ }
+ }
+
+ void SendRpcs(int num_rpcs) {
+ TString test_string("");
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext cli_ctx;
+
+ test_string += TString(1024, 'x');
+ request.set_message(test_string);
+ TString val;
+ cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub_->experimental_async()->Echo(
+ &cli_ctx, &request, &response,
+ [&request, &response, &done, &mu, &cv, val](Status s) {
+ GPR_ASSERT(s.ok());
+
+ EXPECT_EQ(request.message(), response.message());
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+ }
+ }
+
+ int picked_port_{0};
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<EchoTestService::Stub> stub_;
+ CallbackTestServiceImpl callback_service_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+};
+
+class DefaultContextAllocatorTest : public ContextAllocatorEnd2endTestBase {};
+
+TEST_P(DefaultContextAllocatorTest, SimpleRpc) {
+ const int kRpcCount = 10;
+ CreateServer(nullptr);
+ ResetStub();
+ SendRpcs(kRpcCount);
+}
+
+class NullContextAllocatorTest : public ContextAllocatorEnd2endTestBase {
+ public:
+ class NullAllocator : public grpc::ContextAllocator {
+ public:
+ NullAllocator(std::atomic<int>* allocation_count,
+ std::atomic<int>* deallocation_count)
+ : allocation_count_(allocation_count),
+ deallocation_count_(deallocation_count) {}
+ grpc::CallbackServerContext* NewCallbackServerContext() override {
+ allocation_count_->fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ GenericCallbackServerContext* NewGenericCallbackServerContext() override {
+ allocation_count_->fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ void Release(
+ grpc::CallbackServerContext* /*callback_server_context*/) override {
+ deallocation_count_->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ void Release(
+ GenericCallbackServerContext* /*generic_callback_server_context*/)
+ override {
+ deallocation_count_->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ std::atomic<int>* allocation_count_;
+ std::atomic<int>* deallocation_count_;
+ };
+};
+
+TEST_P(NullContextAllocatorTest, UnaryRpc) {
+ const int kRpcCount = 10;
+ std::atomic<int> allocation_count{0};
+ std::atomic<int> deallocation_count{0};
+ std::unique_ptr<NullAllocator> allocator(
+ new NullAllocator(&allocation_count, &deallocation_count));
+ CreateServer(std::move(allocator));
+ ResetStub();
+ SendRpcs(kRpcCount);
+ // messages_deallocaton_count is updated in Release after server side
+ // OnDone.
+ DestroyServer();
+ EXPECT_EQ(kRpcCount, allocation_count);
+ EXPECT_EQ(kRpcCount, deallocation_count);
+}
+
+class SimpleContextAllocatorTest : public ContextAllocatorEnd2endTestBase {
+ public:
+ class SimpleAllocator : public grpc::ContextAllocator {
+ public:
+ SimpleAllocator(std::atomic<int>* allocation_count,
+ std::atomic<int>* deallocation_count)
+ : allocation_count_(allocation_count),
+ deallocation_count_(deallocation_count) {}
+ grpc::CallbackServerContext* NewCallbackServerContext() override {
+ allocation_count_->fetch_add(1, std::memory_order_relaxed);
+ return new grpc::CallbackServerContext();
+ }
+ GenericCallbackServerContext* NewGenericCallbackServerContext() override {
+ allocation_count_->fetch_add(1, std::memory_order_relaxed);
+ return new GenericCallbackServerContext();
+ }
+
+ void Release(
+ grpc::CallbackServerContext* callback_server_context) override {
+ deallocation_count_->fetch_add(1, std::memory_order_relaxed);
+ delete callback_server_context;
+ }
+
+ void Release(GenericCallbackServerContext* generic_callback_server_context)
+ override {
+ deallocation_count_->fetch_add(1, std::memory_order_relaxed);
+ delete generic_callback_server_context;
+ }
+
+ std::atomic<int>* allocation_count_;
+ std::atomic<int>* deallocation_count_;
+ };
+};
+
+TEST_P(SimpleContextAllocatorTest, UnaryRpc) {
+ const int kRpcCount = 10;
+ std::atomic<int> allocation_count{0};
+ std::atomic<int> deallocation_count{0};
+ std::unique_ptr<SimpleAllocator> allocator(
+ new SimpleAllocator(&allocation_count, &deallocation_count));
+ CreateServer(std::move(allocator));
+ ResetStub();
+ SendRpcs(kRpcCount);
+ // messages_deallocaton_count is updated in Release after server side
+ // OnDone.
+ DestroyServer();
+ EXPECT_EQ(kRpcCount, allocation_count);
+ EXPECT_EQ(kRpcCount, deallocation_count);
+}
+
+std::vector<TestScenario> CreateTestScenarios(bool test_insecure) {
+ std::vector<TestScenario> scenarios;
+ std::vector<TString> credentials_types{
+ GetCredentialsProvider()->GetSecureCredentialsTypeList()};
+ auto insec_ok = [] {
+ // Only allow insecure credentials type when it is registered with the
+ // provider. User may create providers that do not have insecure.
+ return GetCredentialsProvider()->GetChannelCredentials(
+ kInsecureCredentialsType, nullptr) != nullptr;
+ };
+ if (test_insecure && insec_ok()) {
+ credentials_types.push_back(kInsecureCredentialsType);
+ }
+ GPR_ASSERT(!credentials_types.empty());
+
+ Protocol parr[]{Protocol::INPROC, Protocol::TCP};
+ for (Protocol p : parr) {
+ for (const auto& cred : credentials_types) {
+ if (p == Protocol::INPROC &&
+ (cred != kInsecureCredentialsType || !insec_ok())) {
+ continue;
+ }
+ scenarios.emplace_back(p, cred);
+ }
+ }
+ return scenarios;
+}
+
+// TODO(ddyihai): adding client streaming/server streaming/bidi streaming
+// test.
+
+INSTANTIATE_TEST_SUITE_P(DefaultContextAllocatorTest,
+ DefaultContextAllocatorTest,
+ ::testing::ValuesIn(CreateTestScenarios(true)));
+INSTANTIATE_TEST_SUITE_P(NullContextAllocatorTest, NullContextAllocatorTest,
+ ::testing::ValuesIn(CreateTestScenarios(true)));
+INSTANTIATE_TEST_SUITE_P(SimpleContextAllocatorTest, SimpleContextAllocatorTest,
+ ::testing::ValuesIn(CreateTestScenarios(true)));
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int ret = RUN_ALL_TESTS();
+ return ret;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
index 5d025ecb94..424ef1979e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
@@ -45,7 +45,8 @@ namespace {
class TestChannel : public experimental::DelegatingChannel {
public:
- TestChannel(const std::shared_ptr<ChannelInterface>& delegate_channel)
+ explicit TestChannel(
+ const std::shared_ptr<ChannelInterface>& delegate_channel)
: experimental::DelegatingChannel(delegate_channel) {}
// Always returns GRPC_CHANNEL_READY
grpc_connectivity_state GetState(bool /*try_to_connect*/) override {
@@ -64,7 +65,7 @@ class DelegatingChannelTest : public ::testing::Test {
server_ = builder.BuildAndStart();
}
- ~DelegatingChannelTest() { server_->Shutdown(); }
+ ~DelegatingChannelTest() override { server_->Shutdown(); }
TString server_address_;
TestServiceImpl service_;
diff --git a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
index ad2ddb7e84..d8c20bd575 100644
--- a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
@@ -37,7 +37,10 @@
#include <mutex>
#include <thread>
+#include "y_absl/memory/memory.h"
+#include "y_absl/strings/match.h"
#include "y_absl/strings/str_format.h"
+
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/iomgr/iomgr.h"
@@ -62,17 +65,6 @@ using grpc::testing::EchoResponse;
using grpc::testing::kTlsCredentialsType;
using std::chrono::system_clock;
-// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
-// should be skipped based on a decision made at SetUp time. In particular,
-// tests that use the callback server can only be run if the iomgr can run in
-// the background or if the transport is in-process.
-#define MAYBE_SKIP_TEST \
- do { \
- if (do_not_test_) { \
- return; \
- } \
- } while (0)
-
namespace grpc {
namespace testing {
namespace {
@@ -202,7 +194,8 @@ class TestAuthMetadataProcessor : public AuthMetadataProcessor {
public:
static const char kGoodGuy[];
- TestAuthMetadataProcessor(bool is_blocking) : is_blocking_(is_blocking) {}
+ explicit TestAuthMetadataProcessor(bool is_blocking)
+ : is_blocking_(is_blocking) {}
std::shared_ptr<CallCredentials> GetCompatibleClientCreds() {
return grpc::MetadataCredentialsFromPlugin(
@@ -257,7 +250,7 @@ const char TestAuthMetadataProcessor::kIdentityPropName[] = "novel identity";
class Proxy : public ::grpc::testing::EchoTestService::Service {
public:
- Proxy(const std::shared_ptr<Channel>& channel)
+ explicit Proxy(const std::shared_ptr<Channel>& channel)
: stub_(grpc::testing::EchoTestService::NewStub(channel)) {}
Status Echo(ServerContext* server_context, const EchoRequest* request,
@@ -327,14 +320,6 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
GetParam().Log();
}
- void SetUp() override {
- if (GetParam().callback_server && !GetParam().inproc &&
- !grpc_iomgr_run_in_background()) {
- do_not_test_ = true;
- return;
- }
- }
-
void TearDown() override {
if (is_server_started_) {
server_->Shutdown();
@@ -348,7 +333,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
void StartServer(const std::shared_ptr<AuthMetadataProcessor>& processor) {
int port = grpc_pick_unused_port_or_die();
first_picked_port_ = port;
- server_address_ << "127.0.0.1:" << port;
+ server_address_ << "localhost:" << port;
// Setup server
BuildAndStartServer(processor);
}
@@ -373,11 +358,10 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
std::vector<
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
creators;
- // Add 20 dummy server interceptors
+ // Add 20 phony server interceptors
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
}
@@ -426,7 +410,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
} else {
channel_ = CreateCustomChannelWithInterceptors(
server_address_.str(), channel_creds, args,
- interceptor_creators.empty() ? CreateDummyClientInterceptors()
+ interceptor_creators.empty() ? CreatePhonyClientInterceptors()
: std::move(interceptor_creators));
}
} else {
@@ -435,7 +419,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
} else {
channel_ = server_->experimental().InProcessChannelWithInterceptors(
args, interceptor_creators.empty()
- ? CreateDummyClientInterceptors()
+ ? CreatePhonyClientInterceptors()
: std::move(interceptor_creators));
}
}
@@ -447,7 +431,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
interceptor_creators = {}) {
ResetChannel(std::move(interceptor_creators));
if (GetParam().use_proxy) {
- proxy_service_.reset(new Proxy(channel_));
+ proxy_service_ = y_absl::make_unique<Proxy>(channel_);
int port = grpc_pick_unused_port_or_die();
std::ostringstream proxyaddr;
proxyaddr << "localhost:" << port;
@@ -466,10 +450,9 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
}
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
}
- bool do_not_test_{false};
bool is_server_started_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
@@ -525,7 +508,6 @@ class End2endServerTryCancelTest : public End2endTest {
// NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
void TestRequestStreamServerCancel(
ServerTryCancelRequestPhase server_try_cancel, int num_msgs_to_send) {
- MAYBE_SKIP_TEST;
RestartServer(std::shared_ptr<AuthMetadataProcessor>());
ResetStub();
EchoRequest request;
@@ -585,7 +567,7 @@ class End2endServerTryCancelTest : public End2endTest {
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
@@ -604,7 +586,6 @@ class End2endServerTryCancelTest : public End2endTest {
// NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
void TestResponseStreamServerCancel(
ServerTryCancelRequestPhase server_try_cancel) {
- MAYBE_SKIP_TEST;
RestartServer(std::shared_ptr<AuthMetadataProcessor>());
ResetStub();
EchoRequest request;
@@ -667,7 +648,7 @@ class End2endServerTryCancelTest : public End2endTest {
EXPECT_FALSE(s.ok());
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
@@ -686,7 +667,6 @@ class End2endServerTryCancelTest : public End2endTest {
// NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
void TestBidiStreamServerCancel(ServerTryCancelRequestPhase server_try_cancel,
int num_messages) {
- MAYBE_SKIP_TEST;
RestartServer(std::shared_ptr<AuthMetadataProcessor>());
ResetStub();
EchoRequest request;
@@ -756,13 +736,12 @@ class End2endServerTryCancelTest : public End2endTest {
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
};
TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -825,7 +804,6 @@ TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelAfter) {
}
TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
- MAYBE_SKIP_TEST;
// User-Agent is an HTTP header for HTTP transports only
if (GetParam().inproc) {
return;
@@ -849,7 +827,6 @@ TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
}
TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<std::thread> threads;
threads.reserve(10);
@@ -862,7 +839,6 @@ TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
}
TEST_P(End2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<std::thread> threads;
threads.reserve(10);
@@ -875,7 +851,6 @@ TEST_P(End2endTest, MultipleRpcs) {
}
TEST_P(End2endTest, ManyStubs) {
- MAYBE_SKIP_TEST;
ResetStub();
ChannelTestPeer peer(channel_.get());
int registered_calls_pre = peer.registered_calls();
@@ -888,7 +863,6 @@ TEST_P(End2endTest, ManyStubs) {
}
TEST_P(End2endTest, EmptyBinaryMetadata) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -901,7 +875,6 @@ TEST_P(End2endTest, EmptyBinaryMetadata) {
}
TEST_P(End2endTest, ReconnectChannel) {
- MAYBE_SKIP_TEST;
if (GetParam().inproc) {
return;
}
@@ -929,7 +902,6 @@ TEST_P(End2endTest, ReconnectChannel) {
}
TEST_P(End2endTest, RequestStreamOneRequest) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -946,7 +918,6 @@ TEST_P(End2endTest, RequestStreamOneRequest) {
}
TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -962,7 +933,6 @@ TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
}
TEST_P(End2endTest, RequestStreamTwoRequests) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -979,7 +949,6 @@ TEST_P(End2endTest, RequestStreamTwoRequests) {
}
TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -996,7 +965,6 @@ TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
}
TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1013,7 +981,6 @@ TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
}
TEST_P(End2endTest, ResponseStream) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1032,7 +999,6 @@ TEST_P(End2endTest, ResponseStream) {
}
TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1054,7 +1020,6 @@ TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
// This was added to prevent regression from issue:
// https://github.com/grpc/grpc/issues/11546
TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1076,7 +1041,6 @@ TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
}
TEST_P(End2endTest, BidiStream) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1101,7 +1065,6 @@ TEST_P(End2endTest, BidiStream) {
}
TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1137,7 +1100,6 @@ TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
// This was added to prevent regression from issue:
// https://github.com/grpc/grpc/issues/11546
TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1163,7 +1125,6 @@ TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
// Talk to the two services with the same name but different package names.
// The two stubs are created on the same channel.
TEST_P(End2endTest, DiffPackageServices) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1192,7 +1153,6 @@ void CancelRpc(ClientContext* context, int delay_us, ServiceType* service) {
}
TEST_P(End2endTest, CancelRpcBeforeStart) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1203,12 +1163,11 @@ TEST_P(End2endTest, CancelRpcBeforeStart) {
EXPECT_EQ("", response.message());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(End2endTest, CancelRpcAfterStart) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1239,13 +1198,12 @@ TEST_P(End2endTest, CancelRpcAfterStart) {
EXPECT_EQ("", response.message());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Client cancels request stream after sending two messages
TEST_P(End2endTest, ClientCancelsRequestStream) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1263,13 +1221,12 @@ TEST_P(End2endTest, ClientCancelsRequestStream) {
EXPECT_EQ(response.message(), "");
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Client cancels server stream after sending some messages
TEST_P(End2endTest, ClientCancelsResponseStream) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1299,13 +1256,12 @@ TEST_P(End2endTest, ClientCancelsResponseStream) {
// who won the race.
EXPECT_GE(grpc::StatusCode::CANCELLED, s.error_code());
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
// Client cancels bidi stream after sending some messages
TEST_P(End2endTest, ClientCancelsBidi) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1336,12 +1292,11 @@ TEST_P(End2endTest, ClientCancelsBidi) {
Status s = stream->Finish();
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ EXPECT_EQ(20, PhonyInterceptor::GetNumTimesCancel());
}
}
TEST_P(End2endTest, RpcMaxMessageSize) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1356,7 +1311,7 @@ TEST_P(End2endTest, RpcMaxMessageSize) {
void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
gpr_event* ev) {
EchoResponse resp;
- gpr_event_set(ev, (void*)1);
+ gpr_event_set(ev, reinterpret_cast<void*>(1));
while (stream->Read(&resp)) {
gpr_log(GPR_INFO, "Read message");
}
@@ -1364,7 +1319,6 @@ void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
// Run a Read and a WritesDone simultaneously.
TEST_P(End2endTest, SimultaneousReadWritesDone) {
- MAYBE_SKIP_TEST;
ResetStub();
ClientContext context;
gpr_event ev;
@@ -1379,7 +1333,6 @@ TEST_P(End2endTest, SimultaneousReadWritesDone) {
}
TEST_P(End2endTest, ChannelState) {
- MAYBE_SKIP_TEST;
if (GetParam().inproc) {
return;
}
@@ -1413,7 +1366,7 @@ TEST_P(End2endTest, ChannelStateTimeout) {
}
int port = grpc_pick_unused_port_or_die();
std::ostringstream server_address;
- server_address << "127.0.0.1:" << port;
+ server_address << "localhost:" << port;
// Channel to non-existing server
auto channel =
grpc::CreateChannel(server_address.str(), InsecureChannelCredentials());
@@ -1430,7 +1383,6 @@ TEST_P(End2endTest, ChannelStateTimeout) {
// Talking to a non-existing service.
TEST_P(End2endTest, NonExistingService) {
- MAYBE_SKIP_TEST;
ResetChannel();
std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
stub = grpc::testing::UnimplementedEchoService::NewStub(channel_);
@@ -1448,7 +1400,6 @@ TEST_P(End2endTest, NonExistingService) {
// Ask the server to send back a serialized proto in trailer.
// This is an example of setting error details.
TEST_P(End2endTest, BinaryTrailerTest) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1475,7 +1426,6 @@ TEST_P(End2endTest, BinaryTrailerTest) {
}
TEST_P(End2endTest, ExpectErrorTest) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<ErrorStatus> expected_status;
@@ -1510,13 +1460,11 @@ TEST_P(End2endTest, ExpectErrorTest) {
EXPECT_EQ(iter->code(), s.error_code());
EXPECT_EQ(iter->error_message(), s.error_message());
EXPECT_EQ(iter->binary_error_details(), s.error_details());
- EXPECT_TRUE(context.debug_error_string().find("created") !=
- TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("status") !=
- TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos);
+ EXPECT_TRUE(y_absl::StrContains(context.debug_error_string(), "created"));
+ EXPECT_TRUE(y_absl::StrContains(context.debug_error_string(), "file"));
+ EXPECT_TRUE(y_absl::StrContains(context.debug_error_string(), "line"));
+ EXPECT_TRUE(y_absl::StrContains(context.debug_error_string(), "status"));
+ EXPECT_TRUE(y_absl::StrContains(context.debug_error_string(), "13"));
}
}
@@ -1527,13 +1475,11 @@ class ProxyEnd2endTest : public End2endTest {
};
TEST_P(ProxyEnd2endTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
SendRpc(stub_.get(), 1, false);
}
TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1544,7 +1490,6 @@ TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
}
TEST_P(ProxyEnd2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
ResetStub();
std::vector<std::thread> threads;
threads.reserve(10);
@@ -1558,7 +1503,6 @@ TEST_P(ProxyEnd2endTest, MultipleRpcs) {
// Set a 10us deadline and make sure proper error is returned.
TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1584,7 +1528,6 @@ TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
// Set a long but finite deadline.
TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1601,7 +1544,6 @@ TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
// Ask server to echo back the deadline it sees.
TEST_P(ProxyEnd2endTest, EchoDeadline) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1627,7 +1569,6 @@ TEST_P(ProxyEnd2endTest, EchoDeadline) {
// Ask server to echo back the deadline it sees. The rpc has no deadline.
TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1643,7 +1584,6 @@ TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
}
TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1659,7 +1599,6 @@ TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
// Client cancels rpc after 10ms
TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1694,7 +1633,6 @@ TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
// Server cancels rpc after 1ms
TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1709,7 +1647,6 @@ TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
// Make the response larger than the flow control window.
TEST_P(ProxyEnd2endTest, HugeResponse) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1727,7 +1664,6 @@ TEST_P(ProxyEnd2endTest, HugeResponse) {
}
TEST_P(ProxyEnd2endTest, Peer) {
- MAYBE_SKIP_TEST;
// Peer is not meaningful for inproc
if (GetParam().inproc) {
return;
@@ -1756,7 +1692,6 @@ class SecureEnd2endTest : public End2endTest {
};
TEST_P(SecureEnd2endTest, SimpleRpcWithHost) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
@@ -1788,7 +1723,6 @@ bool MetadataContains(
}
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
- MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(true);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub();
@@ -1814,7 +1748,6 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
}
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
- MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(true);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub();
@@ -1830,7 +1763,6 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
}
TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1856,9 +1788,10 @@ TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
class CredentialsInterceptor : public experimental::Interceptor {
public:
- CredentialsInterceptor(experimental::ClientRpcInfo* info) : info_(info) {}
+ explicit CredentialsInterceptor(experimental::ClientRpcInfo* info)
+ : info_(info) {}
- void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
std::shared_ptr<CallCredentials> creds =
@@ -1875,20 +1808,19 @@ class CredentialsInterceptor : public experimental::Interceptor {
class CredentialsInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface {
CredentialsInterceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) {
+ experimental::ClientRpcInfo* info) override {
return new CredentialsInterceptor(info);
}
};
TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
- MAYBE_SKIP_TEST;
if (!GetParam().use_interceptors) {
return;
}
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators;
- interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
- new CredentialsInterceptorFactory()));
+ interceptor_creators.push_back(
+ y_absl::make_unique<CredentialsInterceptorFactory>());
ResetStub(std::move(interceptor_creators));
EchoRequest request;
EchoResponse response;
@@ -1911,14 +1843,13 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
}
TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
- MAYBE_SKIP_TEST;
if (!GetParam().use_interceptors) {
return;
}
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators;
- interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
- new CredentialsInterceptorFactory()));
+ interceptor_creators.push_back(
+ y_absl::make_unique<CredentialsInterceptorFactory>());
ResetStub(std::move(interceptor_creators));
EchoRequest request;
EchoResponse response;
@@ -1946,7 +1877,6 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
}
TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -1984,7 +1914,6 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2005,7 +1934,6 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2025,7 +1953,6 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
request.mutable_param()->set_skip_cancelled_check(true);
@@ -2051,7 +1978,6 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
request.mutable_param()->set_skip_cancelled_check(true);
@@ -2080,7 +2006,6 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2104,7 +2029,6 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
- MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(false);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub();
@@ -2133,7 +2057,6 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
- MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(false);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub();
@@ -2152,7 +2075,6 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
}
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2176,7 +2098,6 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
}
TEST_P(SecureEnd2endTest, CompositeCallCreds) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2209,7 +2130,6 @@ TEST_P(SecureEnd2endTest, CompositeCallCreds) {
}
TEST_P(SecureEnd2endTest, ClientAuthContext) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
@@ -2245,7 +2165,7 @@ class ResourceQuotaEnd2endTest : public End2endTest {
ResourceQuotaEnd2endTest()
: server_resource_quota_("server_resource_quota") {}
- virtual void ConfigureServerBuilder(ServerBuilder* builder) override {
+ void ConfigureServerBuilder(ServerBuilder* builder) override {
builder->SetResourceQuota(server_resource_quota_);
}
@@ -2254,7 +2174,6 @@ class ResourceQuotaEnd2endTest : public End2endTest {
};
TEST_P(ResourceQuotaEnd2endTest, SimpleRequest) {
- MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
diff --git a/contrib/libs/grpc/test/cpp/end2end/exception_test.cc b/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
index cd29eb8a10..404abfad37 100644
--- a/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
@@ -41,7 +41,7 @@ class ExceptingServiceImpl : public ::grpc::testing::EchoTestService::Service {
public:
Status Echo(ServerContext* /*server_context*/, const EchoRequest* /*request*/,
EchoResponse* /*response*/) override {
- throw - 1;
+ throw -1;
}
Status RequestStream(ServerContext* /*context*/,
ServerReader<EchoRequest>* /*reader*/,
diff --git a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
index 2f26d0716c..e0ac3d325e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
@@ -34,6 +34,8 @@
#include <grpcpp/support/config.h>
#include <grpcpp/support/slice.h>
+#include "y_absl/memory/memory.h"
+
#include "src/cpp/common/channel_filter.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -44,13 +46,12 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
namespace grpc {
namespace testing {
namespace {
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+void* tag(int i) { return reinterpret_cast<void*>(i); }
void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
bool ok;
@@ -101,7 +102,7 @@ int GetCallCounterValue() {
class ChannelDataImpl : public ChannelData {
public:
grpc_error* Init(grpc_channel_element* /*elem*/,
- grpc_channel_element_args* /*args*/) {
+ grpc_channel_element_args* /*args*/) override {
IncrementConnectionCounter();
return GRPC_ERROR_NONE;
}
@@ -151,16 +152,16 @@ class FilterEnd2endTest : public ::testing::Test {
bool ignored_ok;
cli_cq_.Shutdown();
srv_cq_->Shutdown();
- while (cli_cq_.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cli_cq_.Next(&ignored_tag, &ignored_ok)) {
+ }
+ while (srv_cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
}
void ResetStub() {
std::shared_ptr<Channel> channel = grpc::CreateChannel(
server_address_.str(), InsecureChannelCredentials());
- generic_stub_.reset(new GenericStub(channel));
+ generic_stub_ = y_absl::make_unique<GenericStub>(channel);
ResetConnectionCounter();
ResetCallCounter();
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
index 3ee75952c0..8570b58a0e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
@@ -38,10 +38,11 @@
#include <random>
#include <thread>
+#include "y_absl/memory/memory.h"
+
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/gpr/env.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/debugger_macros.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -180,7 +181,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
// ip6-looopback, but ipv6 support is not enabled by default in docker.
port_ = SERVER_PORT;
- server_.reset(new ServerData(port_, GetParam().credentials_type));
+ server_ = y_absl::make_unique<ServerData>(port_, GetParam().credentials_type);
server_->Start(server_host_);
}
void StopServer() { server_->Shutdown(); }
@@ -193,7 +194,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
std::shared_ptr<Channel> BuildChannel(
const TString& lb_policy_name,
ChannelArguments args = ChannelArguments()) {
- if (lb_policy_name.size() > 0) {
+ if (!lb_policy_name.empty()) {
args.SetLoadBalancingPolicyName(lb_policy_name);
} // else, default to pick first
auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
@@ -206,7 +207,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
bool SendRpc(
const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
int timeout_ms = 0, bool wait_for_ready = false) {
- auto response = std::unique_ptr<EchoResponse>(new EchoResponse());
+ auto response = y_absl::make_unique<EchoResponse>();
EchoRequest request;
auto& msg = GetParam().message_content;
request.set_message(msg);
@@ -224,19 +225,10 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
}
Status status = stub->Echo(&context, request, response.get());
auto ok = status.ok();
- int stream_id = 0;
- grpc_call* call = context.c_call();
- if (call) {
- grpc_chttp2_stream* stream = grpc_chttp2_stream_from_call(call);
- if (stream) {
- stream_id = stream->id;
- }
- }
if (ok) {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d succeeded", stream_id);
+ gpr_log(GPR_DEBUG, "RPC succeeded");
} else {
- gpr_log(GPR_DEBUG, "RPC with stream_id %d failed: %s", stream_id,
- status.error_message().c_str());
+ gpr_log(GPR_DEBUG, "RPC failed: %s", status.error_message().c_str());
}
return ok;
}
@@ -257,8 +249,8 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
std::mutex mu;
std::unique_lock<std::mutex> lock(mu);
std::condition_variable cond;
- thread_.reset(new std::thread(
- std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerData::Serve, this, server_host, &mu, &cond));
cond.wait(lock, [this] { return server_ready_; });
server_ready_ = false;
gpr_log(GPR_INFO, "server startup complete");
diff --git a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
index 59eec49fb2..4d9dcbade4 100644
--- a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
@@ -32,6 +32,8 @@
#include <grpcpp/server_context.h>
#include <grpcpp/support/slice.h>
+#include "y_absl/memory/memory.h"
+
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -41,13 +43,12 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
namespace grpc {
namespace testing {
namespace {
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+void* tag(int i) { return reinterpret_cast<void*>(i); }
void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
bool ok;
@@ -85,10 +86,10 @@ class GenericEnd2endTest : public ::testing::Test {
bool ignored_ok;
cli_cq_.Shutdown();
srv_cq_->Shutdown();
- while (cli_cq_.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cli_cq_.Next(&ignored_tag, &ignored_ok)) {
+ }
+ while (srv_cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
shut_down_ = true;
}
}
@@ -98,7 +99,7 @@ class GenericEnd2endTest : public ::testing::Test {
std::shared_ptr<Channel> channel = grpc::CreateChannel(
server_address_.str(), InsecureChannelCredentials());
stub_ = grpc::testing::EchoTestService::NewStub(channel);
- generic_stub_.reset(new GenericStub(channel));
+ generic_stub_ = y_absl::make_unique<GenericStub>(channel);
}
void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); }
@@ -285,8 +286,8 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
SerializeToByteBuffer(&send_request);
std::thread request_call([this]() { server_ok(4); });
std::unique_ptr<GenericClientAsyncResponseReader> call =
- generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName,
- *cli_send_buffer.get(), &cli_cq_);
+ generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName, *cli_send_buffer,
+ &cli_cq_);
call->StartCall();
ByteBuffer cli_recv_buffer;
call->Finish(&cli_recv_buffer, &recv_status, tag(1));
diff --git a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
index 6208dc2535..8cd73aa9a9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
@@ -24,6 +24,7 @@
#include <util/generic/string.h>
#include <thread>
+#include "y_absl/memory/memory.h"
#include "y_absl/strings/str_cat.h"
#include "y_absl/strings/str_format.h"
@@ -53,6 +54,7 @@
#include "src/cpp/server/secure_server_credentials.h"
#include "test/core/util/port.h"
+#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -184,6 +186,12 @@ TString Ip4ToPackedString(const char* ip_str) {
return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
}
+TString Ip6ToPackedString(const char* ip_str) {
+ struct in6_addr ip6;
+ GPR_ASSERT(inet_pton(AF_INET6, ip_str, &ip6) == 1);
+ return TString(reinterpret_cast<const char*>(&ip6), sizeof(ip6));
+}
+
struct ClientStats {
size_t num_calls_started = 0;
size_t num_calls_finished = 0;
@@ -266,7 +274,8 @@ class BalancerServiceImpl : public BalancerService {
}
{
grpc::internal::MutexLock lock(&mu_);
- serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
+ grpc::internal::WaitUntil(&serverlist_cond_, &mu_,
+ [this] { return serverlist_done_; });
}
if (client_load_reporting_interval_seconds_ > 0) {
@@ -321,35 +330,13 @@ class BalancerServiceImpl : public BalancerService {
gpr_log(GPR_INFO, "LB[%p]: shut down", this);
}
- static LoadBalanceResponse BuildResponseForBackends(
- const std::vector<int>& backend_ports,
- const std::map<TString, size_t>& drop_token_counts) {
- LoadBalanceResponse response;
- for (const auto& drop_token_count : drop_token_counts) {
- for (size_t i = 0; i < drop_token_count.second; ++i) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_drop(true);
- server->set_load_balance_token(drop_token_count.first);
- }
- }
- for (const int& backend_port : backend_ports) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
- server->set_port(backend_port);
- static int token_count = 0;
- server->set_load_balance_token(
- y_absl::StrFormat("token%03d", ++token_count));
- }
- return response;
- }
-
ClientStats WaitForLoadReport() {
grpc::internal::MutexLock lock(&mu_);
grpc::internal::CondVar cv;
if (load_report_queue_.empty()) {
load_report_cond_ = &cv;
- load_report_cond_->WaitUntil(
- &mu_, [this] { return !load_report_queue_.empty(); });
+ grpc::internal::WaitUntil(load_report_cond_, &mu_,
+ [this] { return !load_report_queue_.empty(); });
load_report_cond_ = nullptr;
}
ClientStats load_report = std::move(load_report_queue_.front());
@@ -361,7 +348,7 @@ class BalancerServiceImpl : public BalancerService {
grpc::internal::MutexLock lock(&mu_);
if (!serverlist_done_) {
serverlist_done_ = true;
- serverlist_cond_.Broadcast();
+ serverlist_cond_.SignalAll();
}
}
@@ -418,6 +405,11 @@ class GrpclbEnd2endTest : public ::testing::Test {
static void TearDownTestCase() { grpc_shutdown(); }
void SetUp() override {
+ bool localhost_resolves_to_ipv4 = false;
+ bool localhost_resolves_to_ipv6 = false;
+ grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
+ &localhost_resolves_to_ipv6);
+ ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
// Start the backends.
@@ -546,26 +538,26 @@ class GrpclbEnd2endTest : public ::testing::Test {
TString balancer_name;
};
- static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
+ grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
const std::vector<AddressData>& address_data) {
grpc_core::ServerAddressList addresses;
for (const auto& addr : address_data) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
+ y_absl::StatusOr<grpc_core::URI> lb_uri =
+ grpc_core::URI::Parse(y_absl::StrCat(
+ ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", addr.port));
+ GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg(
addr.balancer_name.c_str());
grpc_channel_args* args =
grpc_channel_args_copy_and_add(nullptr, &arg, 1);
addresses.emplace_back(address.addr, address.len, args);
- grpc_uri_destroy(lb_uri);
}
return addresses;
}
- static grpc_core::Resolver::Result MakeResolverResult(
+ grpc_core::Resolver::Result MakeResolverResult(
const std::vector<AddressData>& balancer_address_data,
const std::vector<AddressData>& backend_address_data = {},
const char* service_config_json = kDefaultServiceConfig) {
@@ -612,8 +604,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
response_generator_->SetReresolutionResponse(std::move(result));
}
- const std::vector<int> GetBackendPorts(size_t start_index = 0,
- size_t stop_index = 0) const {
+ std::vector<int> GetBackendPorts(size_t start_index = 0,
+ size_t stop_index = 0) const {
if (stop_index == 0) stop_index = backends_.size();
std::vector<int> backend_ports;
for (size_t i = start_index; i < stop_index; ++i) {
@@ -628,6 +620,29 @@ class GrpclbEnd2endTest : public ::testing::Test {
balancers_[i]->service_.add_response(response, delay_ms);
}
+ LoadBalanceResponse BuildResponseForBackends(
+ const std::vector<int>& backend_ports,
+ const std::map<TString, size_t>& drop_token_counts) {
+ LoadBalanceResponse response;
+ for (const auto& drop_token_count : drop_token_counts) {
+ for (size_t i = 0; i < drop_token_count.second; ++i) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_drop(true);
+ server->set_load_balance_token(drop_token_count.first);
+ }
+ }
+ for (const int& backend_port : backend_ports) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_ip_address(ipv6_only_ ? Ip6ToPackedString("::1")
+ : Ip4ToPackedString("127.0.0.1"));
+ server->set_port(backend_port);
+ static int token_count = 0;
+ server->set_load_balance_token(
+ y_absl::StrFormat("token%03d", ++token_count));
+ }
+ return response;
+ }
+
Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
bool wait_for_ready = false,
const Status& expected_status = Status::OK) {
@@ -682,8 +697,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
// by ServerThread::Serve from firing before the wait below is hit.
grpc::internal::MutexLock lock(&mu);
grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerThread::Serve, this, server_host, &mu, &cond));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
}
@@ -726,6 +741,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
const size_t num_backends_;
const size_t num_balancers_;
const int client_load_reporting_interval_seconds_;
+ bool ipv6_only_ = false;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_;
@@ -745,8 +761,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -771,8 +786,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
TEST_F(SingleBalancerTest, ReturnServerStatus) {
SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// We need to wait for all backends to come online.
WaitForAllBackends();
// Send a request that the backend will fail, and make sure we get
@@ -793,8 +807,7 @@ TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
balancers_[0]->service_.NotifyDoneWithServerlists();
// The balancer got a single request.
@@ -841,8 +854,7 @@ TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const size_t kNumRpcs = num_backends_ * 2;
CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
balancers_[0]->service_.NotifyDoneWithServerlists();
@@ -872,8 +884,7 @@ TEST_F(SingleBalancerTest, SwapChildPolicy) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const size_t kNumRpcs = num_backends_ * 2;
CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
// Check that all requests went to the first backend. This verifies
@@ -908,8 +919,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
ports.push_back(backends_[0]->port_);
ports.push_back(backends_[0]->port_);
const size_t kNumRpcsPerAddress = 10;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(ports, {}), 0);
// We need to wait for the backend to come online.
WaitForBackend(0);
// Send kNumRpcsPerAddress RPCs per server.
@@ -927,8 +937,7 @@ TEST_F(SingleBalancerTest, SecureNaming) {
SetNextResolution({AddressData{balancers_[0]->port_, "lb"}});
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -970,8 +979,7 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0);
// Send non-empty serverlist only after kServerlistDelayMs
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- kServerlistDelayMs);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), kServerlistDelayMs);
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */);
@@ -997,8 +1005,7 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
for (size_t i = 0; i < kNumUnreachableServers; ++i) {
ports.push_back(grpc_pick_unused_port_or_die());
}
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(ports, {}), 0);
const Status status = SendRpc();
// The error shouldn't be DEADLINE_EXCEEDED.
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
@@ -1027,7 +1034,7 @@ TEST_F(SingleBalancerTest, Fallback) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(kNumBackendsInResolution /* start_index */), {}),
kServerlistDelayMs);
@@ -1096,7 +1103,7 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(kNumBackendsInResolution +
kNumBackendsInResolutionUpdate /* start_index */),
{}),
@@ -1201,10 +1208,9 @@ TEST_F(SingleBalancerTest,
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
// Try to connect.
channel_->GetState(true /* try_to_connect */);
WaitForAllBackends(1 /* num_requests_multiple_of */,
@@ -1234,10 +1240,9 @@ TEST_F(SingleBalancerTest,
// Now start the balancer again. This should cause us to exit
// fallback mode.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
WaitForAllBackends(1 /* num_requests_multiple_of */,
kNumFallbackBackends /* start_index */);
}
@@ -1256,10 +1261,9 @@ TEST_F(SingleBalancerTest,
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
// Try to connect.
channel_->GetState(true /* try_to_connect */);
WaitForAllBackends(1 /* num_requests_multiple_of */,
@@ -1287,10 +1291,9 @@ TEST_F(SingleBalancerTest,
// Now start the balancer again. This should cause us to exit
// fallback mode.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
WaitForAllBackends(1 /* num_requests_multiple_of */,
kNumFallbackBackends /* start_index */);
}
@@ -1358,7 +1361,7 @@ TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) {
// then sends the serverlist again.
// The serverlist points to backend 1.
LoadBalanceResponse serverlist_resp =
- BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {});
+ BuildResponseForBackends({backends_[1]->port_}, {});
LoadBalanceResponse fallback_resp;
fallback_resp.mutable_fallback_response();
ScheduleResponseForBalancer(0, serverlist_resp, 0);
@@ -1375,8 +1378,7 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// Send kNumRpcsPerAddress RPCs per server.
@@ -1406,8 +1408,7 @@ TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
SetNextResolutionAllBalancers(kServiceConfigWithTarget);
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -1424,10 +1425,10 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
SetNextResolutionAllBalancers();
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Wait until the first backend is ready.
WaitForBackend(0);
@@ -1482,10 +1483,10 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[0]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Wait until the first backend is ready.
WaitForBackend(0);
@@ -1555,10 +1556,10 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -1688,10 +1689,10 @@ class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Ask channel to connect to trigger resolver creation.
channel_->GetState(true);
@@ -1767,7 +1768,7 @@ TEST_F(SingleBalancerTest, Drop) {
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(),
{{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
@@ -1806,7 +1807,7 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
{}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
@@ -1818,13 +1819,12 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
TEST_F(SingleBalancerTest, DropAll) {
SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
{}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
1000);
@@ -1850,8 +1850,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -1892,8 +1891,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
// Balancer returns backends starting at index 1.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(0, kNumBackendsFirstPass), {}),
+ BuildResponseForBackends(GetBackendPorts(0, kNumBackendsFirstPass), {}),
0);
// Wait until all backends returned by the balancer are ready.
int num_ok = 0;
@@ -1922,10 +1920,9 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
}
// Now restart the balancer, this time pointing to all backends.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendsFirstPass), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumBackendsFirstPass), {}),
+ 0);
// Wait for queries to start going to one of the new backends.
// This tells us that we're now using the new serverlist.
do {
@@ -1955,7 +1952,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(),
{{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
diff --git a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
index e4ebee8e93..4c9896522e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
@@ -49,7 +49,7 @@ using ::grpc::experimental::GenericCallbackServerContext;
using ::grpc::experimental::ServerGenericBidiReactor;
#endif
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+void* tag(int i) { return reinterpret_cast<void*>(i); }
bool VerifyReturnSuccess(CompletionQueue* cq, int i) {
void* got_tag;
@@ -305,8 +305,8 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
bool ignored_ok;
for (auto it = cqs_.begin(); it != cqs_.end(); ++it) {
(*it)->Shutdown();
- while ((*it)->Next(&ignored_tag, &ignored_ok))
- ;
+ while ((*it)->Next(&ignored_tag, &ignored_ok)) {
+ }
}
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
index ff88953651..ca22077a5c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
@@ -17,14 +17,17 @@
*/
#include "test/cpp/end2end/interceptors_util.h"
+
+#include "y_absl/memory/memory.h"
+
#include <util/string/cast.h>
namespace grpc {
namespace testing {
-std::atomic<int> DummyInterceptor::num_times_run_;
-std::atomic<int> DummyInterceptor::num_times_run_reverse_;
-std::atomic<int> DummyInterceptor::num_times_cancel_;
+std::atomic<int> PhonyInterceptor::num_times_run_;
+std::atomic<int> PhonyInterceptor::num_times_run_reverse_;
+std::atomic<int> PhonyInterceptor::num_times_cancel_;
void MakeCall(const std::shared_ptr<Channel>& channel) {
auto stub = grpc::testing::EchoTestService::NewStub(channel);
@@ -198,14 +201,13 @@ bool CheckMetadata(const std::multimap<TString, TString>& map,
}
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
-CreateDummyClientInterceptors() {
+CreatePhonyClientInterceptors() {
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
creators;
- // Add 20 dummy interceptors before hijacking interceptor
+ // Add 20 phony interceptors before hijacking interceptor
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
return creators;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
index c95170bbbc..d0c91ea9b7 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
@@ -29,11 +29,11 @@ namespace grpc {
namespace testing {
/* This interceptor does nothing. Just keeps a global count on the number of
* times it was invoked. */
-class DummyInterceptor : public experimental::Interceptor {
+class PhonyInterceptor : public experimental::Interceptor {
public:
- DummyInterceptor() {}
+ PhonyInterceptor() {}
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
num_times_run_++;
@@ -67,18 +67,18 @@ class DummyInterceptor : public experimental::Interceptor {
static std::atomic<int> num_times_cancel_;
};
-class DummyInterceptorFactory
+class PhonyInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface,
public experimental::ServerInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* /*info*/) override {
- return new DummyInterceptor();
+ return new PhonyInterceptor();
}
- virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::Interceptor* CreateServerInterceptor(
experimental::ServerRpcInfo* /*info*/) override {
- return new DummyInterceptor();
+ return new PhonyInterceptor();
}
};
@@ -87,12 +87,12 @@ class NullInterceptorFactory
: public experimental::ClientInterceptorFactoryInterface,
public experimental::ServerInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::Interceptor* CreateClientInterceptor(
experimental::ClientRpcInfo* /*info*/) override {
return nullptr;
}
- virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::Interceptor* CreateServerInterceptor(
experimental::ServerRpcInfo* /*info*/) override {
return nullptr;
}
@@ -189,9 +189,9 @@ bool CheckMetadata(const std::multimap<TString, TString>& map,
const string& key, const string& value);
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
-CreateDummyClientInterceptors();
+CreatePhonyClientInterceptors();
-inline void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+inline void* tag(int i) { return reinterpret_cast<void*>(i); }
inline int detag(void* p) {
return static_cast<int>(reinterpret_cast<intptr_t>(p));
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
index 4bf755206e..d6a1444501 100644
--- a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
@@ -45,17 +45,6 @@
#include "test/core/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
-// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
-// should be skipped based on a decision made at SetUp time. In particular, any
-// callback tests can only be run if the iomgr can run in the background or if
-// the transport is in-process.
-#define MAYBE_SKIP_TEST \
- do { \
- if (do_not_test_) { \
- return; \
- } \
- } while (0)
-
namespace grpc {
namespace testing {
namespace {
@@ -69,7 +58,7 @@ class CallbackTestServiceImpl
std::function<void(experimental::RpcAllocatorState* allocator_state,
const EchoRequest* req, EchoResponse* resp)>
mutator) {
- allocator_mutator_ = mutator;
+ allocator_mutator_ = std::move(mutator);
}
experimental::ServerUnaryReactor* Echo(
@@ -117,17 +106,9 @@ void TestScenario::Log() const {
class MessageAllocatorEnd2endTestBase
: public ::testing::TestWithParam<TestScenario> {
protected:
- MessageAllocatorEnd2endTestBase() {
- GetParam().Log();
- if (GetParam().protocol == Protocol::TCP) {
- if (!grpc_iomgr_run_in_background()) {
- do_not_test_ = true;
- return;
- }
- }
- }
+ MessageAllocatorEnd2endTestBase() { GetParam().Log(); }
- ~MessageAllocatorEnd2endTestBase() = default;
+ ~MessageAllocatorEnd2endTestBase() override = default;
void CreateServer(
experimental::MessageAllocator<EchoRequest, EchoResponse>* allocator) {
@@ -210,7 +191,6 @@ class MessageAllocatorEnd2endTestBase
}
}
- bool do_not_test_{false};
int picked_port_{0};
std::shared_ptr<Channel> channel_;
std::unique_ptr<EchoTestService::Stub> stub_;
@@ -222,7 +202,6 @@ class MessageAllocatorEnd2endTestBase
class NullAllocatorTest : public MessageAllocatorEnd2endTestBase {};
TEST_P(NullAllocatorTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
CreateServer(nullptr);
ResetStub();
SendRpcs(1);
@@ -278,7 +257,6 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase {
};
TEST_P(SimpleAllocatorTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
CreateServer(allocator.get());
@@ -293,7 +271,6 @@ TEST_P(SimpleAllocatorTest, SimpleRpc) {
}
TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) {
- MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
auto mutator = [](experimental::RpcAllocatorState* allocator_state,
@@ -318,7 +295,6 @@ TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) {
}
TEST_P(SimpleAllocatorTest, RpcWithReleaseRequest) {
- MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
std::vector<EchoRequest*> released_requests;
@@ -378,7 +354,6 @@ class ArenaAllocatorTest : public MessageAllocatorEnd2endTestBase {
};
TEST_P(ArenaAllocatorTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<ArenaAllocator> allocator(new ArenaAllocator);
CreateServer(allocator.get());
@@ -429,10 +404,7 @@ INSTANTIATE_TEST_SUITE_P(ArenaAllocatorTest, ArenaAllocatorTest,
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
- // The grpc_init is to cover the MAYBE_SKIP_TEST.
- grpc_init();
::testing::InitGoogleTest(&argc, argv);
int ret = RUN_ALL_TESTS();
- grpc_shutdown();
return ret;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
index a3d61c4e98..da3f48fa64 100644
--- a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
@@ -48,11 +48,9 @@ using grpc::testing::EchoResponse;
using grpc::testing::EchoTestService;
using grpc::testing::MockClientReaderWriter;
using std::vector;
-using std::chrono::system_clock;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
-using ::testing::Invoke;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgPointee;
@@ -207,7 +205,7 @@ TEST_F(MockCallbackTest, MockedCallSucceedsWithWait) {
req.set_message("mock 1");
auto* reactor = service_.Echo(&ctx, &req, &resp);
- cv.WaitUntil(&mu, [&] {
+ grpc::internal::WaitUntil(&cv, &mu, [&] {
grpc::internal::MutexLock l(&mu);
return status_set;
});
@@ -290,7 +288,7 @@ class TestServiceImpl : public EchoTestService::Service {
}
private:
- const vector<TString> split(const TString& input) {
+ vector<TString> split(const TString& input) {
TString buff("");
vector<TString> result;
@@ -299,11 +297,11 @@ class TestServiceImpl : public EchoTestService::Service {
buff += n;
continue;
}
- if (buff == "") continue;
+ if (buff.empty()) continue;
result.push_back(buff);
buff = "";
}
- if (buff != "") result.push_back(buff);
+ if (!buff.empty()) result.push_back(buff);
return result;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
index 4be070ec71..73e8fa2e71 100644
--- a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
@@ -25,6 +25,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/iomgr/port.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -99,8 +101,8 @@ class NonblockingTest : public ::testing::Test {
void* ignored_tag;
bool ignored_ok;
cq_->Shutdown();
- while (LoopForTag(&ignored_tag, &ignored_ok))
- ;
+ while (LoopForTag(&ignored_tag, &ignored_ok)) {
+ }
stub_.reset();
grpc_recycle_unused_port(port_);
}
@@ -109,7 +111,8 @@ class NonblockingTest : public ::testing::Test {
ServerBuilder builder;
builder.AddListeningPort(server_address_.str(),
grpc::InsecureServerCredentials());
- service_.reset(new grpc::testing::EchoTestService::AsyncService());
+ service_ =
+ y_absl::make_unique<grpc::testing::EchoTestService::AsyncService>();
builder.RegisterService(service_.get());
cq_ = builder.AddCompletionQueue();
server_ = builder.BuildAndStart();
@@ -209,6 +212,8 @@ int main(int argc, char** argv) {
gpr_tls_destroy(&g_is_nonblocking_poll);
return ret;
#else // GRPC_POSIX_SOCKET
+ (void)argc;
+ (void)argv;
return 0;
#endif // GRPC_POSIX_SOCKET
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
index d79b33da70..44b8f9211e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
@@ -27,6 +27,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -57,8 +59,8 @@ class ProtoServerReflectionTest : public ::testing::Test {
std::shared_ptr<Channel> channel =
grpc::CreateChannel(target, InsecureChannelCredentials());
stub_ = grpc::testing::EchoTestService::NewStub(channel);
- desc_db_.reset(new ProtoReflectionDescriptorDatabase(channel));
- desc_pool_.reset(new protobuf::DescriptorPool(desc_db_.get()));
+ desc_db_ = y_absl::make_unique<ProtoReflectionDescriptorDatabase>(channel);
+ desc_pool_ = y_absl::make_unique<protobuf::DescriptorPool>(desc_db_.get());
}
string to_string(const int number) {
@@ -133,7 +135,7 @@ TEST_F(ProtoServerReflectionTest, CheckResponseWithLocalDescriptorPool) {
std::vector<TString> services;
desc_db_->GetServices(&services);
// The service list has at least one service (reflection servcie).
- EXPECT_TRUE(services.size() > 0);
+ EXPECT_TRUE(!services.empty());
for (auto it = services.begin(); it != services.end(); ++it) {
CompareService(*it);
diff --git a/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
index 184dc1e5f5..d6300ecad6 100644
--- a/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
@@ -50,7 +50,7 @@ namespace testing {
namespace {
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+void* tag(int i) { return reinterpret_cast<void*>(i); }
int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
class Verifier {
@@ -110,8 +110,8 @@ class RawEnd2EndTest : public ::testing::Test {
void* ignored_tag;
bool ignored_ok;
cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
stub_.reset();
grpc_recycle_unused_port(port_);
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
index 004902cad3..cf47dade7c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
@@ -31,6 +31,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -138,7 +140,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
void SetUp() override {
port_ = grpc_pick_unused_port_or_die();
- builder_.reset(new ServerBuilder());
+ builder_ = y_absl::make_unique<ServerBuilder>();
}
void InsertPlugin() {
@@ -227,8 +229,8 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
void RunCQ() {
void* tag;
bool ok;
- while (cq_->Next(&tag, &ok))
- ;
+ while (cq_->Next(&tag, &ok)) {
+ }
}
};
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
index 3616d680f9..1f5103cb51 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
@@ -26,6 +26,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -36,7 +38,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
static TString g_root;
@@ -99,7 +100,8 @@ class CrashTest : public ::testing::Test {
std::ostringstream addr_stream;
addr_stream << "localhost:" << port;
auto addr = addr_stream.str();
- client_.reset(new SubProcess({g_root + "/server_crash_test_client",
+ client_ = y_absl::make_unique<SubProcess>(
+ std::vector<TString>({g_root + "/server_crash_test_client",
"--address=" + addr, "--mode=" + mode}));
GPR_ASSERT(client_);
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
index 202fb2836c..ee9b3d7b88 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
@@ -16,37 +16,37 @@
*
*/
-#include <gflags/gflags.h>
-#include <iostream>
-#include <memory>
-#include <sstream>
-#include <util/generic/string.h>
-
#include <grpc/support/log.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <util/generic/string.h>
+
+#include "y_absl/flags/flag.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/util/test_config.h"
-DEFINE_string(address, "", "Address to connect to");
-DEFINE_string(mode, "", "Test mode to use");
+Y_ABSL_FLAG(TString, address, "", "Address to connect to");
+Y_ABSL_FLAG(TString, mode, "", "Test mode to use");
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
- auto stub = grpc::testing::EchoTestService::NewStub(
- grpc::CreateChannel(FLAGS_address, grpc::InsecureChannelCredentials()));
+ auto stub = grpc::testing::EchoTestService::NewStub(grpc::CreateChannel(
+ y_absl::GetFlag(FLAGS_address), grpc::InsecureChannelCredentials()));
EchoRequest request;
EchoResponse response;
grpc::ClientContext context;
context.set_wait_for_ready(true);
- if (FLAGS_mode == "bidi") {
+ if (y_absl::GetFlag(FLAGS_mode) == "bidi") {
auto stream = stub->BidiStream(&context);
for (int i = 0;; i++) {
std::ostringstream msg;
@@ -56,7 +56,7 @@ int main(int argc, char** argv) {
GPR_ASSERT(stream->Read(&response));
GPR_ASSERT(response.message() == request.message());
}
- } else if (FLAGS_mode == "response") {
+ } else if (y_absl::GetFlag(FLAGS_mode) == "response") {
EchoRequest request;
request.set_message("Hello");
auto stream = stub->ResponseStream(&context, request);
@@ -64,7 +64,8 @@ int main(int argc, char** argv) {
GPR_ASSERT(stream->Read(&response));
}
} else {
- gpr_log(GPR_ERROR, "invalid test mode '%s'", FLAGS_mode.c_str());
+ gpr_log(GPR_ERROR, "invalid test mode '%s'",
+ y_absl::GetFlag(FLAGS_mode).c_str());
return 1;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
index 0f340516b0..543dcf64e4 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
@@ -115,7 +115,7 @@ class ServerEarlyReturnTest : public ::testing::Test {
void SetUp() override {
int port = grpc_pick_unused_port_or_die();
picked_port_ = port;
- server_address_ << "127.0.0.1:" << port;
+ server_address_ << "localhost:" << port;
ServerBuilder builder;
builder.AddListeningPort(server_address_.str(),
InsecureServerCredentials());
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
index 6d2dc772ef..a57f471280 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
@@ -29,6 +29,9 @@
#include <grpcpp/server_context.h>
#include <grpcpp/support/server_interceptor.h>
+#include "y_absl/memory/memory.h"
+#include "y_absl/strings/match.h"
+
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@@ -44,7 +47,7 @@ namespace {
class LoggingInterceptor : public experimental::Interceptor {
public:
- LoggingInterceptor(experimental::ServerRpcInfo* info) {
+ explicit LoggingInterceptor(experimental::ServerRpcInfo* info) {
info_ = info;
// Check the method name and compare to the type
@@ -96,8 +99,8 @@ class LoggingInterceptor : public experimental::Interceptor {
bool found = false;
// Check that we received the metadata as an echo
for (const auto& pair : *map) {
- found = pair.first.find("testkey") == 0 &&
- pair.second.find("testvalue") == 0;
+ found = y_absl::StartsWith(pair.first, "testkey") &&
+ y_absl::StartsWith(pair.second, "testvalue");
if (found) break;
}
EXPECT_EQ(found, true);
@@ -138,7 +141,7 @@ class LoggingInterceptor : public experimental::Interceptor {
class LoggingInterceptorFactory
: public experimental::ServerInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::Interceptor* CreateServerInterceptor(
experimental::ServerRpcInfo* info) override {
return new LoggingInterceptor(info);
}
@@ -147,7 +150,7 @@ class LoggingInterceptorFactory
// Test if SendMessage function family works as expected for sync/callback apis
class SyncSendMessageTester : public experimental::Interceptor {
public:
- SyncSendMessageTester(experimental::ServerRpcInfo* /*info*/) {}
+ explicit SyncSendMessageTester(experimental::ServerRpcInfo* /*info*/) {}
void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
@@ -168,7 +171,7 @@ class SyncSendMessageTester : public experimental::Interceptor {
class SyncSendMessageTesterFactory
: public experimental::ServerInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::Interceptor* CreateServerInterceptor(
experimental::ServerRpcInfo* info) override {
return new SyncSendMessageTester(info);
}
@@ -177,7 +180,7 @@ class SyncSendMessageTesterFactory
// Test if SendMessage function family works as expected for sync/callback apis
class SyncSendMessageVerifier : public experimental::Interceptor {
public:
- SyncSendMessageVerifier(experimental::ServerRpcInfo* /*info*/) {}
+ explicit SyncSendMessageVerifier(experimental::ServerRpcInfo* /*info*/) {}
void Intercept(experimental::InterceptorBatchMethods* methods) override {
if (methods->QueryInterceptionHookPoint(
@@ -203,7 +206,7 @@ class SyncSendMessageVerifier : public experimental::Interceptor {
class SyncSendMessageVerifierFactory
: public experimental::ServerInterceptorFactoryInterface {
public:
- virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::Interceptor* CreateServerInterceptor(
experimental::ServerRpcInfo* info) override {
return new SyncSendMessageVerifier(info);
}
@@ -249,12 +252,10 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
creators.push_back(
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
new LoggingInterceptorFactory()));
- // Add 20 dummy interceptor factories and null interceptor factories
+ // Add 20 phony interceptor factories and null interceptor factories
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- creators.push_back(std::unique_ptr<NullInterceptorFactory>(
- new NullInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
+ creators.push_back(y_absl::make_unique<NullInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
server_ = builder.BuildAndStart();
@@ -266,12 +267,12 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
TEST_F(ServerInterceptorsEnd2endSyncUnaryTest, UnaryTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
auto channel =
grpc::CreateChannel(server_address_, InsecureChannelCredentials());
MakeCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
@@ -297,8 +298,7 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
new LoggingInterceptorFactory()));
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
server_ = builder.BuildAndStart();
@@ -310,38 +310,38 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ClientStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
auto channel =
grpc::CreateChannel(server_address_, InsecureChannelCredentials());
MakeClientStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ServerStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
auto channel =
grpc::CreateChannel(server_address_, InsecureChannelCredentials());
MakeServerStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, BidiStreamingTest) {
ChannelArguments args;
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
auto channel =
grpc::CreateChannel(server_address_, InsecureChannelCredentials());
MakeBidiStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
}
class ServerInterceptorsAsyncEnd2endTest : public ::testing::Test {};
TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
int port = 5006; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
@@ -354,8 +354,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
new LoggingInterceptorFactory()));
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
auto cq = builder.AddCompletionQueue();
@@ -401,20 +400,20 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
"testvalue"));
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
server->Shutdown();
cq->Shutdown();
void* ignored_tag;
bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq->Next(&ignored_tag, &ignored_ok)) {
+ }
// grpc_recycle_unused_port(port);
}
TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
int port = 5007; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
@@ -427,8 +426,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
new LoggingInterceptorFactory()));
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
auto cq = builder.AddCompletionQueue();
@@ -484,20 +482,20 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
"testvalue"));
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
server->Shutdown();
cq->Shutdown();
void* ignored_tag;
bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq->Next(&ignored_tag, &ignored_ok)) {
+ }
// grpc_recycle_unused_port(port);
}
TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
int port = 5008; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
@@ -508,8 +506,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
creators;
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
auto srv_cq = builder.AddCompletionQueue();
@@ -591,21 +588,21 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
"testvalue"));
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
server->Shutdown();
void* ignored_tag;
bool ignored_ok;
- while (cli_cq.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cli_cq.Next(&ignored_tag, &ignored_ok)) {
+ }
+ while (srv_cq->Next(&ignored_tag, &ignored_ok)) {
+ }
// grpc_recycle_unused_port(port);
}
TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
int port = 5009; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
@@ -614,8 +611,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
creators;
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
auto cq = builder.AddCompletionQueue();
@@ -641,15 +637,15 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
EXPECT_EQ("", recv_status.error_message());
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
server->Shutdown();
cq->Shutdown();
void* ignored_tag;
bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq->Next(&ignored_tag, &ignored_ok)) {
+ }
// grpc_recycle_unused_port(port);
}
@@ -657,7 +653,7 @@ class ServerInterceptorsSyncUnimplementedEnd2endTest : public ::testing::Test {
};
TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
- DummyInterceptor::Reset();
+ PhonyInterceptor::Reset();
int port = 5010; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
@@ -668,8 +664,7 @@ TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
creators;
creators.reserve(20);
for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
+ creators.push_back(y_absl::make_unique<PhonyInterceptorFactory>());
}
builder.experimental().SetInterceptorCreators(std::move(creators));
auto server = builder.BuildAndStart();
@@ -690,8 +685,8 @@ TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
EXPECT_EQ("", recv_status.error_message());
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ // Make sure all 20 phony interceptors were run
+ EXPECT_EQ(PhonyInterceptor::GetNumTimesRun(), 20);
server->Shutdown();
// grpc_recycle_unused_port(port);
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
index 13833cf66c..e97b51f21e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
@@ -96,7 +96,7 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test {
auto stub = EchoTestService::NewStub(
grpc::CreateChannel(server_address_, InsecureChannelCredentials()));
TString lb_token = lb_id + lb_tag;
- for (int i = 0; i < num_requests; ++i) {
+ for (size_t i = 0; i < num_requests; ++i) {
ClientContext ctx;
if (!lb_token.empty()) ctx.AddMetadata(GRPC_LB_TOKEN_MD_KEY, lb_token);
EchoRequest request;
diff --git a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
index cee33343c1..12dde9f35e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
@@ -24,6 +24,7 @@
#include <util/generic/string.h>
#include <thread>
+#include "y_absl/memory/memory.h"
#include "y_absl/strings/str_cat.h"
#include <grpc/grpc.h>
@@ -56,6 +57,7 @@
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
+#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -64,7 +66,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
namespace grpc {
namespace testing {
@@ -131,6 +132,11 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
grpc_init();
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
+ bool localhost_resolves_to_ipv4 = false;
+ bool localhost_resolves_to_ipv6 = false;
+ grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
+ &localhost_resolves_to_ipv6);
+ ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
}
void TearDown() override {
@@ -143,7 +149,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
stub_.reset();
servers_.clear();
creds_.reset();
- grpc_shutdown_blocking();
+ grpc_shutdown();
}
void CreateServers(size_t num_servers,
@@ -169,14 +175,14 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
grpc_core::Resolver::Result BuildFakeResults(const std::vector<int>& ports) {
grpc_core::Resolver::Result result;
for (const int& port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
+ TString lb_uri_str =
+ y_absl::StrCat(ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", port);
+ y_absl::StatusOr<grpc_core::URI> lb_uri = grpc_core::URI::Parse(lb_uri_str);
+ GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
result.addresses.emplace_back(address.addr, address.len,
nullptr /* args */);
- grpc_uri_destroy(lb_uri);
}
return result;
}
@@ -311,9 +317,9 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
grpc::internal::Mutex mu;
grpc::internal::MutexLock lock(&mu);
grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
- cond.WaitUntil(&mu, [this] { return server_ready_; });
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerData::Serve, this, server_host, &mu, &cond));
+ grpc::internal::WaitUntil(&cond, &mu, [this] { return server_ready_; });
server_ready_ = false;
gpr_log(GPR_INFO, "server startup complete");
}
@@ -422,6 +428,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
return "{\"version\": \"invalid_default\"";
}
+ bool ipv6_only_ = false;
const TString server_host_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::vector<std::unique_ptr<ServerData>> servers_;
diff --git a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
index 3aa7a766c4..734b9344dd 100644
--- a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
@@ -48,7 +48,7 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
Status Echo(ServerContext* context, const EchoRequest* /*request*/,
EchoResponse* /*response*/) override {
- gpr_event_set(ev_, (void*)1);
+ gpr_event_set(ev_, reinterpret_cast<void*>(1));
while (!context->IsCancelled()) {
}
return Status::OK;
diff --git a/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc b/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
index f2252063fb..bfc0d1e370 100644
--- a/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
@@ -42,7 +42,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
const char* kLargeString =
"("
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
index 5f207f1979..eb4583ff87 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
+++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
@@ -38,8 +38,6 @@
#include <util/string/cast.h>
-using std::chrono::system_clock;
-
namespace grpc {
namespace testing {
diff --git a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
index 8acb953729..1a0ca6c0ab 100644
--- a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
@@ -42,7 +42,6 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
const int kNumThreads = 100; // Number of threads
const int kNumAsyncSendThreads = 2;
@@ -207,8 +206,8 @@ class CommonStressTestAsyncServer : public BaseClass {
void* ignored_tag;
bool ignored_ok;
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq_->Next(&ignored_tag, &ignored_ok)) {
+ }
this->TearDownEnd();
}
@@ -246,7 +245,7 @@ class CommonStressTestAsyncServer : public BaseClass {
service_.RequestEcho(contexts_[i].srv_ctx.get(),
&contexts_[i].recv_request,
contexts_[i].response_writer.get(), cq_.get(),
- cq_.get(), (void*)static_cast<intptr_t>(i));
+ cq_.get(), reinterpret_cast<void*>(i));
}
}
struct Context {
@@ -342,8 +341,8 @@ class AsyncClientEnd2endTest : public ::testing::Test {
void TearDown() override {
void* ignored_tag;
bool ignored_ok;
- while (cq_.Next(&ignored_tag, &ignored_ok))
- ;
+ while (cq_.Next(&ignored_tag, &ignored_ok)) {
+ }
common_.TearDown();
}
@@ -370,8 +369,7 @@ class AsyncClientEnd2endTest : public ::testing::Test {
request.set_message(TString("Hello: " + grpc::to_string(i)).c_str());
call->response_reader =
common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
- call->response_reader->Finish(&call->response, &call->status,
- (void*)call);
+ call->response_reader->Finish(&call->response, &call->status, call);
grpc::internal::MutexLock l(&mu_);
rpcs_outstanding_++;
diff --git a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
index 48b9eace12..ef879f899c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
@@ -26,6 +26,8 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
+#include "y_absl/memory/memory.h"
+
#include "src/core/lib/iomgr/timer.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
@@ -95,7 +97,7 @@ namespace {
// gpr_now() is called with invalid clock_type
TEST(TimespecTest, GprNowInvalidClockType) {
// initialize to some junk value
- gpr_clock_type invalid_clock_type = (gpr_clock_type)32641;
+ gpr_clock_type invalid_clock_type = static_cast<gpr_clock_type>(32641);
EXPECT_DEATH(gpr_now(invalid_clock_type), ".*");
}
@@ -133,7 +135,7 @@ class TimeChangeTest : public ::testing::Test {
std::ostringstream addr_stream;
addr_stream << "localhost:" << port;
server_address_ = addr_stream.str();
- server_.reset(new SubProcess({
+ server_ = y_absl::make_unique<SubProcess>(std::vector<TString>({
g_root + "/client_crash_test_server",
"--address=" + server_address_,
}));
@@ -148,14 +150,14 @@ class TimeChangeTest : public ::testing::Test {
static void TearDownTestCase() { server_.reset(); }
- void SetUp() {
+ void SetUp() override {
channel_ =
grpc::CreateChannel(server_address_, InsecureChannelCredentials());
GPR_ASSERT(channel_);
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
- void TearDown() { reset_now_offset(); }
+ void TearDown() override { reset_now_offset(); }
std::unique_ptr<grpc::testing::EchoTestService::Stub> CreateStub() {
return grpc::testing::EchoTestService::NewStub(channel_);
diff --git a/contrib/libs/grpc/test/cpp/end2end/xds_credentials_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/xds_credentials_end2end_test.cc
new file mode 100644
index 0000000000..fb5cfc9f4a
--- /dev/null
+++ b/contrib/libs/grpc/test/cpp/end2end/xds_credentials_end2end_test.cc
@@ -0,0 +1,127 @@
+//
+//
+// Copyright 2020 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <grpc/grpc.h>
+#include <grpcpp/server_builder.h>
+
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+namespace grpc {
+namespace testing {
+namespace {
+
+class XdsCredentialsEnd2EndFallbackTest
+ : public ::testing::TestWithParam<const char*> {
+ protected:
+ XdsCredentialsEnd2EndFallbackTest() {
+ int port = grpc_pick_unused_port_or_die();
+ ServerBuilder builder;
+ server_address_ = "localhost:" + ToString(port);
+ builder.AddListeningPort(
+ server_address_,
+ GetCredentialsProvider()->GetServerCredentials(GetParam()));
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ TString server_address_;
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_P(XdsCredentialsEnd2EndFallbackTest, NoXdsSchemeInTarget) {
+ // Target does not use 'xds:///' scheme and should result in using fallback
+ // credentials.
+ ChannelArguments args;
+ auto channel = grpc::CreateCustomChannel(
+ server_address_,
+ grpc::experimental::XdsCredentials(
+ GetCredentialsProvider()->GetChannelCredentials(GetParam(), &args)),
+ args);
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ req.set_message("Hello");
+ EchoResponse resp;
+ Status s = stub->Echo(&ctx, req, &resp);
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp.message(), "Hello");
+}
+
+class XdsServerCredentialsEnd2EndFallbackTest
+ : public ::testing::TestWithParam<const char*> {
+ protected:
+ XdsServerCredentialsEnd2EndFallbackTest() {
+ int port = grpc_pick_unused_port_or_die();
+ // Build a server that is not xDS enabled but uses XdsServerCredentials.
+ ServerBuilder builder;
+ server_address_ = "localhost:" + ToString(port);
+ builder.AddListeningPort(
+ server_address_,
+ grpc::experimental::XdsServerCredentials(
+ GetCredentialsProvider()->GetServerCredentials(GetParam())));
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ TString server_address_;
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_P(XdsServerCredentialsEnd2EndFallbackTest, Basic) {
+ ChannelArguments args;
+ auto channel = grpc::CreateCustomChannel(
+ server_address_,
+ GetCredentialsProvider()->GetChannelCredentials(GetParam(), &args), args);
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ req.set_message("Hello");
+ EchoResponse resp;
+ Status s = stub->Echo(&ctx, req, &resp);
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp.message(), "Hello");
+}
+
+INSTANTIATE_TEST_SUITE_P(XdsCredentialsEnd2EndFallback,
+ XdsCredentialsEnd2EndFallbackTest,
+ ::testing::ValuesIn(std::vector<const char*>(
+ {kInsecureCredentialsType, kTlsCredentialsType})));
+
+INSTANTIATE_TEST_SUITE_P(XdsServerCredentialsEnd2EndFallback,
+ XdsServerCredentialsEnd2EndFallbackTest,
+ ::testing::ValuesIn(std::vector<const char*>(
+ {kInsecureCredentialsType, kTlsCredentialsType})));
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
+ const auto result = RUN_ALL_TESTS();
+ return result;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
index 603e6186bf..8ca7f8e6f2 100644
--- a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
@@ -29,56 +29,74 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include "y_absl/functional/bind_front.h"
+#include "y_absl/memory/memory.h"
#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_join.h"
#include "y_absl/types/optional.h"
#include <grpc/grpc.h>
+#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
+#include <grpcpp/security/tls_certificate_provider.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
+#include <grpcpp/xds_server_builder.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_args.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/xds/certificate_provider_registry.h"
#include "src/core/ext/xds/xds_api.h"
#include "src/core/ext/xds/xds_channel_args.h"
#include "src/core/ext/xds/xds_client.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gpr/time_precise.h"
#include "src/core/lib/gpr/tmpfile.h"
-#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/gprpp/time_util.h"
+#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
#include "src/cpp/server/secure_server_credentials.h"
-
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/eds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h"
-
#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/aggregate_cluster.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/fault.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/router.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/tls.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/resolve_localhost_ip46.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#ifndef DISABLED_XDS_PROTO_IN_CC
+#include "src/cpp/server/csds/csds.h"
+#include "src/proto/grpc/testing/xds/v3/csds.grpc.pb.h"
+#endif // DISABLED_XDS_PROTO_IN_CC
namespace grpc {
namespace testing {
@@ -86,15 +104,27 @@ namespace {
using std::chrono::system_clock;
+#ifndef DISABLED_XDS_PROTO_IN_CC
+using ::envoy::admin::v3::ClientResourceStatus;
+#endif // DISABLED_XDS_PROTO_IN_CC
using ::envoy::config::cluster::v3::CircuitBreakers;
using ::envoy::config::cluster::v3::Cluster;
+using ::envoy::config::cluster::v3::CustomClusterType;
using ::envoy::config::cluster::v3::RoutingPriority;
using ::envoy::config::endpoint::v3::ClusterLoadAssignment;
using ::envoy::config::endpoint::v3::HealthStatus;
+using ::envoy::config::listener::v3::FilterChainMatch;
using ::envoy::config::listener::v3::Listener;
using ::envoy::config::route::v3::RouteConfiguration;
+using ::envoy::extensions::clusters::aggregate::v3::ClusterConfig;
+using ::envoy::extensions::filters::http::fault::v3::HTTPFault;
using ::envoy::extensions::filters::network::http_connection_manager::v3::
HttpConnectionManager;
+using ::envoy::extensions::filters::network::http_connection_manager::v3::
+ HttpFilter;
+using ::envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext;
+using ::envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext;
+using ::envoy::type::matcher::v3::StringMatcher;
using ::envoy::type::v3::FractionalPercent;
constexpr char kLdsTypeUrl[] =
@@ -129,9 +159,12 @@ constexpr char kDefaultServiceConfig[] =
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
- " { \"eds_experimental\":{\n"
- " \"clusterName\": \"server.example.com\",\n"
- " \"lrsLoadReportingServerName\": \"\"\n"
+ " { \"xds_cluster_resolver_experimental\":{\n"
+ " \"discoveryMechanisms\": [\n"
+ " { \"clusterName\": \"server.example.com\",\n"
+ " \"type\": \"EDS\",\n"
+ " \"lrsLoadReportingServerName\": \"\"\n"
+ " } ]\n"
" } }\n"
" ]\n"
"}";
@@ -139,8 +172,11 @@ constexpr char kDefaultServiceConfigWithoutLoadReporting[] =
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
- " { \"eds_experimental\":{\n"
- " \"clusterName\": \"server.example.com\"\n"
+ " { \"xds_cluster_resolver_experimental\":{\n"
+ " \"discoveryMechanisms\": [\n"
+ " { \"clusterName\": \"server.example.com\",\n"
+ " \"type\": \"EDS\"\n"
+ " } ]\n"
" } }\n"
" ]\n"
"}";
@@ -167,7 +203,25 @@ constexpr char kBootstrapFileV3[] =
" \"locality\": {\n"
" \"region\": \"corp\",\n"
" \"zone\": \"svl\",\n"
- " \"subzone\": \"mp3\"\n"
+ " \"sub_zone\": \"mp3\"\n"
+ " }\n"
+ " },\n"
+ " \"server_listener_resource_name_template\": "
+ "\"grpc/server?xds.resource.listening_address=%s\",\n"
+ " \"certificate_providers\": {\n"
+ " \"fake_plugin1\": {\n"
+ " \"plugin_name\": \"fake1\"\n"
+ " },\n"
+ " \"fake_plugin2\": {\n"
+ " \"plugin_name\": \"fake2\"\n"
+ " },\n"
+ " \"file_plugin\": {\n"
+ " \"plugin_name\": \"file_watcher\",\n"
+ " \"config\": {\n"
+ " \"certificate_file\": \"src/core/tsi/test_creds/client.pem\",\n"
+ " \"private_key_file\": \"src/core/tsi/test_creds/client.key\",\n"
+ " \"ca_certificate_file\": \"src/core/tsi/test_creds/ca.pem\"\n"
+ " }"
" }\n"
" }\n"
"}\n";
@@ -193,10 +247,17 @@ constexpr char kBootstrapFileV2[] =
" \"locality\": {\n"
" \"region\": \"corp\",\n"
" \"zone\": \"svl\",\n"
- " \"subzone\": \"mp3\"\n"
+ " \"sub_zone\": \"mp3\"\n"
" }\n"
" }\n"
"}\n";
+constexpr char kCaCertPath[] = "src/core/tsi/test_creds/ca.pem";
+constexpr char kServerCertPath[] = "src/core/tsi/test_creds/server1.pem";
+constexpr char kServerKeyPath[] = "src/core/tsi/test_creds/server1.key";
+constexpr char kClientCertPath[] = "src/core/tsi/test_creds/client.pem";
+constexpr char kClientKeyPath[] = "src/core/tsi/test_creds/client.key";
+constexpr char kBadClientCertPath[] = "src/core/tsi/test_creds/badclient.pem";
+constexpr char kBadClientKeyPath[] = "src/core/tsi/test_creds/badclient.key";
char* g_bootstrap_file_v3;
char* g_bootstrap_file_v2;
@@ -213,25 +274,6 @@ void WriteBootstrapFiles() {
g_bootstrap_file_v2 = bootstrap_file;
}
-// Helper class to minimize the number of unique ports we use for this test.
-class PortSaver {
- public:
- int GetPort() {
- if (idx_ >= ports_.size()) {
- ports_.push_back(grpc_pick_unused_port_or_die());
- }
- return ports_[idx_++];
- }
-
- void Reset() { idx_ = 0; }
-
- private:
- std::vector<int> ports_;
- size_t idx_ = 0;
-};
-
-PortSaver* g_port_saver = nullptr;
-
template <typename ServiceType>
class CountedService : public ServiceType {
public:
@@ -266,9 +308,6 @@ class CountedService : public ServiceType {
size_t response_count_ = 0;
};
-const char g_kCallCredsMdKey[] = "Balancer should not ...";
-const char g_kCallCredsMdValue[] = "... receive me";
-
template <typename RpcService>
class BackendServiceImpl
: public CountedService<TestMultipleServiceImpl<RpcService>> {
@@ -277,19 +316,20 @@ class BackendServiceImpl
Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
- // Backend should receive the call credentials metadata.
- auto call_credentials_entry =
- context->client_metadata().find(g_kCallCredsMdKey);
- EXPECT_NE(call_credentials_entry, context->client_metadata().end());
- if (call_credentials_entry != context->client_metadata().end()) {
- EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue);
- }
+ auto peer_identity = context->auth_context()->GetPeerIdentity();
CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount();
const auto status =
TestMultipleServiceImpl<RpcService>::Echo(context, request, response);
CountedService<
TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount();
- AddClient(context->peer());
+ {
+ grpc_core::MutexLock lock(&mu_);
+ clients_.insert(context->peer());
+ last_peer_identity_.clear();
+ for (const auto& entry : peer_identity) {
+ last_peer_identity_.emplace_back(entry.data(), entry.size());
+ }
+ }
return status;
}
@@ -307,18 +347,19 @@ class BackendServiceImpl
void Shutdown() {}
std::set<TString> clients() {
- grpc_core::MutexLock lock(&clients_mu_);
+ grpc_core::MutexLock lock(&mu_);
return clients_;
}
- private:
- void AddClient(const TString& client) {
- grpc_core::MutexLock lock(&clients_mu_);
- clients_.insert(client);
+ const std::vector<TString>& last_peer_identity() {
+ grpc_core::MutexLock lock(&mu_);
+ return last_peer_identity_;
}
- grpc_core::Mutex clients_mu_;
+ private:
+ grpc_core::Mutex mu_;
std::set<TString> clients_;
+ std::vector<TString> last_peer_identity_;
};
class ClientStats {
@@ -328,7 +369,7 @@ class ClientStats {
// Converts from proto message class.
template <class UpstreamLocalityStats>
- LocalityStats(const UpstreamLocalityStats& upstream_locality_stats)
+ explicit LocalityStats(const UpstreamLocalityStats& upstream_locality_stats)
: total_successful_requests(
upstream_locality_stats.total_successful_requests()),
total_requests_in_progress(
@@ -440,7 +481,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
struct EdsResourceArgs {
struct Locality {
- Locality(const TString& sub_zone, std::vector<int> ports,
+ Locality(TString sub_zone, std::vector<int> ports,
int lb_weight = kDefaultLocalityWeight,
int priority = kDefaultLocalityPriority,
std::vector<HealthStatus> health_statuses = {})
@@ -467,32 +508,9 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
FractionalPercent::MILLION;
};
- explicit AdsServiceImpl(bool enable_load_reporting)
+ AdsServiceImpl()
: v2_rpc_service_(this, /*is_v2=*/true),
- v3_rpc_service_(this, /*is_v2=*/false) {
- // Construct RDS response data.
- default_route_config_.set_name(kDefaultRouteConfigurationName);
- auto* virtual_host = default_route_config_.add_virtual_hosts();
- virtual_host->add_domains("*");
- auto* route = virtual_host->add_routes();
- route->mutable_match()->set_prefix("");
- route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRdsResource(default_route_config_);
- // Construct LDS response data (with inlined RDS result).
- default_listener_ = BuildListener(default_route_config_);
- SetLdsResource(default_listener_);
- // Construct CDS response data.
- default_cluster_.set_name(kDefaultClusterName);
- default_cluster_.set_type(Cluster::EDS);
- auto* eds_config = default_cluster_.mutable_eds_cluster_config();
- eds_config->mutable_eds_config()->mutable_ads();
- eds_config->set_service_name(kDefaultEdsServiceName);
- default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN);
- if (enable_load_reporting) {
- default_cluster_.mutable_lrs_server()->mutable_self();
- }
- SetCdsResource(default_cluster_);
- }
+ v3_rpc_service_(this, /*is_v2=*/false) {}
bool seen_v2_client() const { return seen_v2_client_; }
bool seen_v3_client() const { return seen_v3_client_; }
@@ -507,12 +525,6 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
return &v3_rpc_service_;
}
- Listener default_listener() const { return default_listener_; }
- RouteConfiguration default_route_config() const {
- return default_route_config_;
- }
- Cluster default_cluster() const { return default_cluster_; }
-
ResponseState lds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
return resource_type_response_state_[kLdsTypeUrl];
@@ -538,14 +550,24 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
resource_types_to_ignore_.emplace(type_url);
}
+ void SetResourceMinVersion(const TString& type_url, int version) {
+ grpc_core::MutexLock lock(&ads_mu_);
+ resource_type_min_versions_[type_url] = version;
+ }
+
void UnsetResource(const TString& type_url, const TString& name) {
grpc_core::MutexLock lock(&ads_mu_);
- ResourceState& state = resource_map_[type_url][name];
- ++state.version;
- state.resource.reset();
- gpr_log(GPR_INFO, "ADS[%p]: Unsetting %s resource %s to version %u", this,
- type_url.c_str(), name.c_str(), state.version);
- for (SubscriptionState* subscription : state.subscriptions) {
+ ResourceTypeState& resource_type_state = resource_map_[type_url];
+ ++resource_type_state.resource_type_version;
+ ResourceState& resource_state = resource_type_state.resource_name_map[name];
+ resource_state.resource_type_version =
+ resource_type_state.resource_type_version;
+ resource_state.resource.reset();
+ gpr_log(GPR_INFO,
+ "ADS[%p]: Unsetting %s resource %s; resource_type_version now %u",
+ this, type_url.c_str(), name.c_str(),
+ resource_type_state.resource_type_version);
+ for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
}
}
@@ -553,12 +575,17 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
void SetResource(google::protobuf::Any resource, const TString& type_url,
const TString& name) {
grpc_core::MutexLock lock(&ads_mu_);
- ResourceState& state = resource_map_[type_url][name];
- ++state.version;
- state.resource = std::move(resource);
- gpr_log(GPR_INFO, "ADS[%p]: Updating %s resource %s to version %u", this,
- type_url.c_str(), name.c_str(), state.version);
- for (SubscriptionState* subscription : state.subscriptions) {
+ ResourceTypeState& resource_type_state = resource_map_[type_url];
+ ++resource_type_state.resource_type_version;
+ ResourceState& resource_state = resource_type_state.resource_name_map[name];
+ resource_state.resource_type_version =
+ resource_type_state.resource_type_version;
+ resource_state.resource = std::move(resource);
+ gpr_log(GPR_INFO,
+ "ADS[%p]: Updating %s resource %s; resource_type_version now %u",
+ this, type_url.c_str(), name.c_str(),
+ resource_type_state.resource_type_version);
+ for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
}
}
@@ -587,68 +614,6 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name());
}
- void SetLdsToUseDynamicRds() {
- auto listener = default_listener_;
- HttpConnectionManager http_connection_manager;
- auto* rds = http_connection_manager.mutable_rds();
- rds->set_route_config_name(kDefaultRouteConfigurationName);
- rds->mutable_config_source()->mutable_ads();
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- SetLdsResource(listener);
- }
-
- static Listener BuildListener(const RouteConfiguration& route_config) {
- HttpConnectionManager http_connection_manager;
- *(http_connection_manager.mutable_route_config()) = route_config;
- Listener listener;
- listener.set_name(kServerName);
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- return listener;
- }
-
- static ClusterLoadAssignment BuildEdsResource(
- const EdsResourceArgs& args,
- const char* eds_service_name = kDefaultEdsServiceName) {
- ClusterLoadAssignment assignment;
- assignment.set_cluster_name(eds_service_name);
- for (const auto& locality : args.locality_list) {
- auto* endpoints = assignment.add_endpoints();
- endpoints->mutable_load_balancing_weight()->set_value(locality.lb_weight);
- endpoints->set_priority(locality.priority);
- endpoints->mutable_locality()->set_region(kDefaultLocalityRegion);
- endpoints->mutable_locality()->set_zone(kDefaultLocalityZone);
- endpoints->mutable_locality()->set_sub_zone(locality.sub_zone);
- for (size_t i = 0; i < locality.ports.size(); ++i) {
- const int& port = locality.ports[i];
- auto* lb_endpoints = endpoints->add_lb_endpoints();
- if (locality.health_statuses.size() > i &&
- locality.health_statuses[i] != HealthStatus::UNKNOWN) {
- lb_endpoints->set_health_status(locality.health_statuses[i]);
- }
- auto* endpoint = lb_endpoints->mutable_endpoint();
- auto* address = endpoint->mutable_address();
- auto* socket_address = address->mutable_socket_address();
- socket_address->set_address("127.0.0.1");
- socket_address->set_port_value(port);
- }
- }
- if (!args.drop_categories.empty()) {
- auto* policy = assignment.mutable_policy();
- for (const auto& p : args.drop_categories) {
- const TString& name = p.first;
- const uint32_t parts_per_million = p.second;
- auto* drop_overload = policy->add_drop_overloads();
- drop_overload->set_category(name);
- auto* drop_percentage = drop_overload->mutable_drop_percentage();
- drop_percentage->set_numerator(parts_per_million);
- drop_percentage->set_denominator(args.drop_denominator);
- }
- }
- return assignment;
- }
-
void Start() {
grpc_core::MutexLock lock(&ads_mu_);
ads_done_ = false;
@@ -671,7 +636,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
void NotifyDoneWithAdsCallLocked() {
if (!ads_done_) {
ads_done_ = true;
- ads_cond_.Broadcast();
+ ads_cond_.SignalAll();
}
}
@@ -688,8 +653,6 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
// A struct representing a client's subscription to a particular resource.
struct SubscriptionState {
- // Version that the client currently knows about.
- int current_version = 0;
// The queue upon which to place updates when the resource is updated.
UpdateQueue* update_queue;
};
@@ -700,20 +663,32 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
using SubscriptionMap =
std::map<TString /* type_url */, SubscriptionNameMap>;
- // A struct representing the current state for a resource:
- // - the version of the resource that is set by the SetResource() methods.
- // - a list of subscriptions interested in this resource.
+ // Sent state for a given resource type.
+ struct SentState {
+ int nonce = 0;
+ int resource_type_version = 0;
+ };
+
+ // A struct representing the current state for an individual resource.
struct ResourceState {
- int version = 0;
+ // The resource itself, if present.
y_absl::optional<google::protobuf::Any> resource;
+ // The resource type version that this resource was last updated in.
+ int resource_type_version = 0;
+ // A list of subscriptions to this resource.
std::set<SubscriptionState*> subscriptions;
};
- // A struct representing the current state for all resources:
- // LDS, CDS, EDS, and RDS for the class as a whole.
+ // The current state for all individual resources of a given type.
using ResourceNameMap =
std::map<TString /* resource_name */, ResourceState>;
- using ResourceMap = std::map<TString /* type_url */, ResourceNameMap>;
+
+ struct ResourceTypeState {
+ int resource_type_version = 0;
+ ResourceNameMap resource_name_map;
+ };
+
+ using ResourceMap = std::map<TString /* type_url */, ResourceTypeState>;
template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse>
class RpcService : public RpcApi::Service {
@@ -732,201 +707,99 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
} else {
parent_->seen_v3_client_ = true;
}
+ // Take a reference of the AdsServiceImpl object, which will go
+ // out of scope when this request handler returns. This ensures
+ // that the parent won't be destroyed until this stream is complete.
+ std::shared_ptr<AdsServiceImpl> ads_service_impl =
+ parent_->shared_from_this();
// Resources (type/name pairs) that have changed since the client
// subscribed to them.
UpdateQueue update_queue;
// Resources that the client will be subscribed to keyed by resource type
// url.
SubscriptionMap subscription_map;
- [&]() {
+ // Sent state for each resource type.
+ std::map<TString /*type_url*/, SentState> sent_state_map;
+ // Spawn a thread to read requests from the stream.
+ // Requests will be delivered to this thread in a queue.
+ std::deque<DiscoveryRequest> requests;
+ bool stream_closed = false;
+ std::thread reader(std::bind(&RpcService::BlockingRead, this, stream,
+ &requests, &stream_closed));
+ // Main loop to process requests and updates.
+ while (true) {
+ // Boolean to keep track if the loop received any work to do: a
+ // request or an update; regardless whether a response was actually
+ // sent out.
+ bool did_work = false;
+ // Look for new requests and and decide what to handle.
+ y_absl::optional<DiscoveryResponse> response;
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (parent_->ads_done_) return;
- }
- // Balancer shouldn't receive the call credentials metadata.
- EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
- context->client_metadata().end());
- // Current Version map keyed by resource type url.
- std::map<TString, int> resource_type_version;
- // Creating blocking thread to read from stream.
- std::deque<DiscoveryRequest> requests;
- bool stream_closed = false;
- // Take a reference of the AdsServiceImpl object, reference will go
- // out of scope after the reader thread is joined.
- std::shared_ptr<AdsServiceImpl> ads_service_impl =
- parent_->shared_from_this();
- std::thread reader(std::bind(&RpcService::BlockingRead, this, stream,
- &requests, &stream_closed));
- // Main loop to look for requests and updates.
- while (true) {
- // Look for new requests and and decide what to handle.
- y_absl::optional<DiscoveryResponse> response;
- // Boolean to keep track if the loop received any work to do: a
- // request or an update; regardless whether a response was actually
- // sent out.
- bool did_work = false;
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (stream_closed) break;
- if (!requests.empty()) {
- DiscoveryRequest request = std::move(requests.front());
- requests.pop_front();
- did_work = true;
- gpr_log(GPR_INFO,
- "ADS[%p]: Received request for type %s with content %s",
- this, request.type_url().c_str(),
- request.DebugString().c_str());
- const TString v3_resource_type =
- TypeUrlToV3(request.type_url());
- // As long as we are not in shutdown, identify ACK and NACK by
- // looking for version information and comparing it to nonce (this
- // server ensures they are always set to the same in a response.)
- auto it =
- parent_->resource_type_response_state_.find(v3_resource_type);
- if (it != parent_->resource_type_response_state_.end()) {
- if (!request.response_nonce().empty()) {
- it->second.state =
- (!request.version_info().empty() &&
- request.version_info() == request.response_nonce())
- ? ResponseState::ACKED
- : ResponseState::NACKED;
- }
- if (request.has_error_detail()) {
- it->second.error_message = request.error_detail().message();
- }
- }
- // As long as the test did not tell us to ignore this type of
- // request, look at all the resource names.
- if (parent_->resource_types_to_ignore_.find(v3_resource_type) ==
- parent_->resource_types_to_ignore_.end()) {
- auto& subscription_name_map =
- subscription_map[v3_resource_type];
- auto& resource_name_map =
- parent_->resource_map_[v3_resource_type];
- std::set<TString> resources_in_current_request;
- std::set<TString> resources_added_to_response;
- for (const TString& resource_name :
- request.resource_names()) {
- resources_in_current_request.emplace(resource_name);
- auto& subscription_state =
- subscription_name_map[resource_name];
- auto& resource_state = resource_name_map[resource_name];
- // Subscribe if needed.
- parent_->MaybeSubscribe(v3_resource_type, resource_name,
- &subscription_state, &resource_state,
- &update_queue);
- // Send update if needed.
- if (ClientNeedsResourceUpdate(resource_state,
- &subscription_state)) {
- gpr_log(GPR_INFO,
- "ADS[%p]: Sending update for type=%s name=%s "
- "version=%d",
- this, request.type_url().c_str(),
- resource_name.c_str(), resource_state.version);
- resources_added_to_response.emplace(resource_name);
- if (!response.has_value()) response.emplace();
- if (resource_state.resource.has_value()) {
- auto* resource = response->add_resources();
- resource->CopyFrom(resource_state.resource.value());
- if (is_v2_) {
- resource->set_type_url(request.type_url());
- }
- }
- } else {
- gpr_log(GPR_INFO,
- "ADS[%p]: client does not need update for "
- "type=%s name=%s version=%d",
- this, request.type_url().c_str(),
- resource_name.c_str(), resource_state.version);
- }
- }
- // Process unsubscriptions for any resource no longer
- // present in the request's resource list.
- parent_->ProcessUnsubscriptions(
- v3_resource_type, resources_in_current_request,
- &subscription_name_map, &resource_name_map);
- // Send response if needed.
- if (!resources_added_to_response.empty()) {
- CompleteBuildingDiscoveryResponse(
- v3_resource_type, request.type_url(),
- ++resource_type_version[v3_resource_type],
- subscription_name_map, resources_added_to_response,
- &response.value());
- }
- }
- }
- }
- if (response.has_value()) {
- gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
- response->DebugString().c_str());
- stream->Write(response.value());
+ // If the stream has been closed or our parent is being shut
+ // down, stop immediately.
+ if (stream_closed || parent_->ads_done_) break;
+ // Otherwise, see if there's a request to read from the queue.
+ if (!requests.empty()) {
+ DiscoveryRequest request = std::move(requests.front());
+ requests.pop_front();
+ did_work = true;
+ gpr_log(GPR_INFO,
+ "ADS[%p]: Received request for type %s with content %s",
+ this, request.type_url().c_str(),
+ request.DebugString().c_str());
+ const TString v3_resource_type =
+ TypeUrlToV3(request.type_url());
+ SentState& sent_state = sent_state_map[v3_resource_type];
+ // Process request.
+ ProcessRequest(request, v3_resource_type, &update_queue,
+ &subscription_map, &sent_state, &response);
}
- response.reset();
- // Look for updates and decide what to handle.
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (!update_queue.empty()) {
- const TString resource_type =
- std::move(update_queue.front().first);
- const TString resource_name =
- std::move(update_queue.front().second);
- update_queue.pop_front();
- const TString v2_resource_type = TypeUrlToV2(resource_type);
- did_work = true;
- gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s",
- this, resource_type.c_str(), resource_name.c_str());
- auto& subscription_name_map = subscription_map[resource_type];
- auto& resource_name_map = parent_->resource_map_[resource_type];
- auto it = subscription_name_map.find(resource_name);
- if (it != subscription_name_map.end()) {
- SubscriptionState& subscription_state = it->second;
- ResourceState& resource_state =
- resource_name_map[resource_name];
- if (ClientNeedsResourceUpdate(resource_state,
- &subscription_state)) {
- gpr_log(
- GPR_INFO,
- "ADS[%p]: Sending update for type=%s name=%s version=%d",
- this, resource_type.c_str(), resource_name.c_str(),
- resource_state.version);
- response.emplace();
- if (resource_state.resource.has_value()) {
- auto* resource = response->add_resources();
- resource->CopyFrom(resource_state.resource.value());
- if (is_v2_) {
- resource->set_type_url(v2_resource_type);
- }
- }
- CompleteBuildingDiscoveryResponse(
- resource_type, v2_resource_type,
- ++resource_type_version[resource_type],
- subscription_name_map, {resource_name},
- &response.value());
- }
- }
- }
- }
- if (response.has_value()) {
- gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
- response->DebugString().c_str());
- stream->Write(response.value());
+ }
+ if (response.has_value()) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
+ response->DebugString().c_str());
+ stream->Write(response.value());
+ }
+ response.reset();
+ // Look for updates and decide what to handle.
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (!update_queue.empty()) {
+ const TString resource_type =
+ std::move(update_queue.front().first);
+ const TString resource_name =
+ std::move(update_queue.front().second);
+ update_queue.pop_front();
+ did_work = true;
+ SentState& sent_state = sent_state_map[resource_type];
+ ProcessUpdate(resource_type, resource_name, &subscription_map,
+ &sent_state, &response);
}
- // If we didn't find anything to do, delay before the next loop
- // iteration; otherwise, check whether we should exit and then
- // immediately continue.
- gpr_timespec deadline =
- grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10);
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (!parent_->ads_cond_.WaitUntil(
- &parent_->ads_mu_, [this] { return parent_->ads_done_; },
- deadline)) {
- break;
- }
+ }
+ if (response.has_value()) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
+ response->DebugString().c_str());
+ stream->Write(response.value());
+ }
+ // If we didn't find anything to do, delay before the next loop
+ // iteration; otherwise, check whether we should exit and then
+ // immediately continue.
+ gpr_timespec deadline =
+ grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10);
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (!grpc_core::WaitUntilWithDeadline(
+ &parent_->ads_cond_, &parent_->ads_mu_,
+ [this] { return parent_->ads_done_; },
+ grpc_core::ToAbslTime(deadline))) {
+ break;
}
}
- reader.join();
- }();
+ }
+ // Done with main loop. Clean up before returning.
+ // Join reader thread.
+ reader.join();
// Clean up any subscriptions that were still active when the call
// finished.
{
@@ -937,8 +810,9 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
for (auto& q : subscription_name_map) {
const TString& resource_name = q.first;
SubscriptionState& subscription_state = q.second;
- ResourceState& resource_state =
- parent_->resource_map_[type_url][resource_name];
+ ResourceNameMap& resource_name_map =
+ parent_->resource_map_[type_url].resource_name_map;
+ ResourceState& resource_state = resource_name_map[resource_name];
resource_state.subscriptions.erase(&subscription_state);
}
}
@@ -949,20 +823,140 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
}
private:
- static TString TypeUrlToV2(const TString& resource_type) {
- if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl;
- if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl;
- if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl;
- if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl;
- return resource_type;
+ // Processes a response read from the client.
+ // Populates response if needed.
+ void ProcessRequest(const DiscoveryRequest& request,
+ const TString& v3_resource_type,
+ UpdateQueue* update_queue,
+ SubscriptionMap* subscription_map,
+ SentState* sent_state,
+ y_absl::optional<DiscoveryResponse>* response) {
+ // Check the nonce sent by the client, if any.
+ // (This will be absent on the first request on a stream.)
+ if (request.response_nonce().empty()) {
+ int client_resource_type_version = 0;
+ if (!request.version_info().empty()) {
+ GPR_ASSERT(y_absl::SimpleAtoi(request.version_info(),
+ &client_resource_type_version));
+ }
+ EXPECT_GE(client_resource_type_version,
+ parent_->resource_type_min_versions_[v3_resource_type])
+ << "resource_type: " << v3_resource_type;
+ } else {
+ int client_nonce;
+ GPR_ASSERT(y_absl::SimpleAtoi(request.response_nonce(), &client_nonce));
+ // Ignore requests with stale nonces.
+ if (client_nonce < sent_state->nonce) return;
+ // Check for ACK or NACK.
+ auto it = parent_->resource_type_response_state_.find(v3_resource_type);
+ if (it != parent_->resource_type_response_state_.end()) {
+ if (!request.has_error_detail()) {
+ it->second.state = ResponseState::ACKED;
+ it->second.error_message.clear();
+ gpr_log(GPR_INFO,
+ "ADS[%p]: client ACKed resource_type=%s version=%s", this,
+ request.type_url().c_str(), request.version_info().c_str());
+ } else {
+ it->second.state = ResponseState::NACKED;
+ EXPECT_EQ(request.error_detail().code(),
+ GRPC_STATUS_INVALID_ARGUMENT);
+ it->second.error_message = request.error_detail().message();
+ gpr_log(GPR_INFO,
+ "ADS[%p]: client NACKed resource_type=%s version=%s: %s",
+ this, request.type_url().c_str(),
+ request.version_info().c_str(),
+ it->second.error_message.c_str());
+ }
+ }
+ }
+ // Ignore resource types as requested by tests.
+ if (parent_->resource_types_to_ignore_.find(v3_resource_type) !=
+ parent_->resource_types_to_ignore_.end()) {
+ return;
+ }
+ // Look at all the resource names in the request.
+ auto& subscription_name_map = (*subscription_map)[v3_resource_type];
+ auto& resource_type_state = parent_->resource_map_[v3_resource_type];
+ auto& resource_name_map = resource_type_state.resource_name_map;
+ std::set<TString> resources_in_current_request;
+ std::set<TString> resources_added_to_response;
+ for (const TString& resource_name : request.resource_names()) {
+ resources_in_current_request.emplace(resource_name);
+ auto& subscription_state = subscription_name_map[resource_name];
+ auto& resource_state = resource_name_map[resource_name];
+ // Subscribe if needed.
+ // Send the resource in the response if either (a) this is
+ // a new subscription or (b) there is an updated version of
+ // this resource to send.
+ if (parent_->MaybeSubscribe(v3_resource_type, resource_name,
+ &subscription_state, &resource_state,
+ update_queue) ||
+ ClientNeedsResourceUpdate(resource_type_state, resource_state,
+ sent_state->resource_type_version)) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
+ request.type_url().c_str(), resource_name.c_str());
+ resources_added_to_response.emplace(resource_name);
+ if (!response->has_value()) response->emplace();
+ if (resource_state.resource.has_value()) {
+ auto* resource = (*response)->add_resources();
+ resource->CopyFrom(resource_state.resource.value());
+ if (is_v2_) {
+ resource->set_type_url(request.type_url());
+ }
+ }
+ } else {
+ gpr_log(GPR_INFO,
+ "ADS[%p]: client does not need update for type=%s name=%s",
+ this, request.type_url().c_str(), resource_name.c_str());
+ }
+ }
+ // Process unsubscriptions for any resource no longer
+ // present in the request's resource list.
+ parent_->ProcessUnsubscriptions(
+ v3_resource_type, resources_in_current_request,
+ &subscription_name_map, &resource_name_map);
+ // Construct response if needed.
+ if (!resources_added_to_response.empty()) {
+ CompleteBuildingDiscoveryResponse(
+ v3_resource_type, request.type_url(),
+ resource_type_state.resource_type_version, subscription_name_map,
+ resources_added_to_response, sent_state, &response->value());
+ }
}
- static TString TypeUrlToV3(const TString& resource_type) {
- if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl;
- if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl;
- if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl;
- if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl;
- return resource_type;
+ // Processes a resource update from the test.
+ // Populates response if needed.
+ void ProcessUpdate(const TString& resource_type,
+ const TString& resource_name,
+ SubscriptionMap* subscription_map, SentState* sent_state,
+ y_absl::optional<DiscoveryResponse>* response) {
+ const TString v2_resource_type = TypeUrlToV2(resource_type);
+ gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s", this,
+ resource_type.c_str(), resource_name.c_str());
+ auto& subscription_name_map = (*subscription_map)[resource_type];
+ auto& resource_type_state = parent_->resource_map_[resource_type];
+ auto& resource_name_map = resource_type_state.resource_name_map;
+ auto it = subscription_name_map.find(resource_name);
+ if (it != subscription_name_map.end()) {
+ ResourceState& resource_state = resource_name_map[resource_name];
+ if (ClientNeedsResourceUpdate(resource_type_state, resource_state,
+ sent_state->resource_type_version)) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
+ resource_type.c_str(), resource_name.c_str());
+ response->emplace();
+ if (resource_state.resource.has_value()) {
+ auto* resource = (*response)->add_resources();
+ resource->CopyFrom(resource_state.resource.value());
+ if (is_v2_) {
+ resource->set_type_url(v2_resource_type);
+ }
+ }
+ CompleteBuildingDiscoveryResponse(
+ resource_type, v2_resource_type,
+ resource_type_state.resource_type_version, subscription_name_map,
+ {resource_name}, sent_state, &response->value());
+ }
+ }
}
// Starting a thread to do blocking read on the stream until cancel.
@@ -989,29 +983,21 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
*stream_closed = true;
}
- static void CheckBuildVersion(
- const ::envoy::api::v2::DiscoveryRequest& request) {
- EXPECT_FALSE(request.node().build_version().empty());
- }
-
- static void CheckBuildVersion(
- const ::envoy::service::discovery::v3::DiscoveryRequest& request) {}
-
// Completing the building a DiscoveryResponse by adding common information
// for all resources and by adding all subscribed resources for LDS and CDS.
void CompleteBuildingDiscoveryResponse(
const TString& resource_type, const TString& v2_resource_type,
const int version, const SubscriptionNameMap& subscription_name_map,
const std::set<TString>& resources_added_to_response,
- DiscoveryResponse* response) {
+ SentState* sent_state, DiscoveryResponse* response) {
auto& response_state =
parent_->resource_type_response_state_[resource_type];
if (response_state.state == ResponseState::NOT_SENT) {
response_state.state = ResponseState::SENT;
}
response->set_type_url(is_v2_ ? v2_resource_type : resource_type);
- response->set_version_info(y_absl::StrCat(version));
- response->set_nonce(y_absl::StrCat(version));
+ response->set_version_info(ToString(version));
+ response->set_nonce(ToString(++sent_state->nonce));
if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) {
// For LDS and CDS we must send back all subscribed resources
// (even the unchanged ones)
@@ -1019,8 +1005,10 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
const TString& resource_name = p.first;
if (resources_added_to_response.find(resource_name) ==
resources_added_to_response.end()) {
+ ResourceNameMap& resource_name_map =
+ parent_->resource_map_[resource_type].resource_name_map;
const ResourceState& resource_state =
- parent_->resource_map_[resource_type][resource_name];
+ resource_name_map[resource_name];
if (resource_state.resource.has_value()) {
auto* resource = response->add_resources();
resource->CopyFrom(resource_state.resource.value());
@@ -1031,39 +1019,64 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
}
}
}
+ sent_state->resource_type_version = version;
}
+ static TString TypeUrlToV2(const TString& resource_type) {
+ if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl;
+ if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl;
+ if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl;
+ if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl;
+ return resource_type;
+ }
+
+ static TString TypeUrlToV3(const TString& resource_type) {
+ if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl;
+ if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl;
+ if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl;
+ if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl;
+ return resource_type;
+ }
+
+ static void CheckBuildVersion(
+ const ::envoy::api::v2::DiscoveryRequest& request) {
+ EXPECT_FALSE(request.node().build_version().empty());
+ }
+
+ static void CheckBuildVersion(
+ const ::envoy::service::discovery::v3::DiscoveryRequest& /*request*/) {}
+
AdsServiceImpl* parent_;
const bool is_v2_;
};
// Checks whether the client needs to receive a newer version of
- // the resource. If so, updates subscription_state->current_version and
- // returns true.
- static bool ClientNeedsResourceUpdate(const ResourceState& resource_state,
- SubscriptionState* subscription_state) {
- if (subscription_state->current_version < resource_state.version) {
- subscription_state->current_version = resource_state.version;
- return true;
- }
- return false;
+ // the resource.
+ static bool ClientNeedsResourceUpdate(
+ const ResourceTypeState& resource_type_state,
+ const ResourceState& resource_state, int client_resource_type_version) {
+ return client_resource_type_version <
+ resource_type_state.resource_type_version &&
+ resource_state.resource_type_version <=
+ resource_type_state.resource_type_version;
}
// Subscribes to a resource if not already subscribed:
// 1. Sets the update_queue field in subscription_state.
// 2. Adds subscription_state to resource_state->subscriptions.
- void MaybeSubscribe(const TString& resource_type,
+ bool MaybeSubscribe(const TString& resource_type,
const TString& resource_name,
SubscriptionState* subscription_state,
ResourceState* resource_state,
UpdateQueue* update_queue) {
// The update_queue will be null if we were not previously subscribed.
- if (subscription_state->update_queue != nullptr) return;
+ if (subscription_state->update_queue != nullptr) return false;
subscription_state->update_queue = update_queue;
resource_state->subscriptions.emplace(subscription_state);
gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p",
this, resource_type.c_str(), resource_name.c_str(),
&subscription_state);
+ return true;
}
// Removes subscriptions for resources no longer present in the
@@ -1123,12 +1136,10 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
// Protect the members below.
grpc_core::Mutex ads_mu_;
bool ads_done_ = false;
- Listener default_listener_;
- RouteConfiguration default_route_config_;
- Cluster default_cluster_;
std::map<TString /* type_url */, ResponseState>
resource_type_response_state_;
std::set<TString /*resource_type*/> resource_types_to_ignore_;
+ std::map<TString /*resource_type*/, int> resource_type_min_versions_;
// An instance data member containing the current state of all resources.
// Note that an entry will exist whenever either of the following is true:
// - The resource exists (i.e., has been created by SetResource() and has not
@@ -1193,8 +1204,8 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
grpc_core::CondVar cv;
if (result_queue_.empty()) {
load_report_cond_ = &cv;
- load_report_cond_->WaitUntil(&load_report_mu_,
- [this] { return !result_queue_.empty(); });
+ grpc_core::WaitUntil(load_report_cond_, &load_report_mu_,
+ [this] { return !result_queue_.empty(); });
load_report_cond_ = nullptr;
}
std::vector<ClientStats> result = std::move(result_queue_.front());
@@ -1261,8 +1272,8 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
}
// Wait until notified done.
grpc_core::MutexLock lock(&parent_->lrs_mu_);
- parent_->lrs_cv_.WaitUntil(&parent_->lrs_mu_,
- [this] { return parent_->lrs_done_; });
+ grpc_core::WaitUntil(&parent_->lrs_cv_, &parent_->lrs_mu_,
+ [this] { return parent_->lrs_done_; });
}
gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this);
return Status::OK;
@@ -1275,7 +1286,7 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
void NotifyDoneWithLrsCallLocked() {
if (!lrs_done_) {
lrs_done_ = true;
- lrs_cv_.Broadcast();
+ lrs_cv_.SignalAll();
}
}
@@ -1303,60 +1314,382 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
class TestType {
public:
- TestType(bool use_xds_resolver, bool enable_load_reporting,
- bool enable_rds_testing = false, bool use_v2 = false)
- : use_xds_resolver_(use_xds_resolver),
- enable_load_reporting_(enable_load_reporting),
- enable_rds_testing_(enable_rds_testing),
- use_v2_(use_v2) {}
-
- bool use_xds_resolver() const { return use_xds_resolver_; }
+ enum FilterConfigSetup {
+ // Set the fault injection filter directly from LDS
+ kHTTPConnectionManagerOriginal,
+ // Enable the fault injection filter in LDS, but override the filter config
+ // in route.
+ kRouteOverride,
+ };
+
+ TestType& set_use_fake_resolver() {
+ use_fake_resolver_ = true;
+ return *this;
+ }
+
+ TestType& set_enable_load_reporting() {
+ enable_load_reporting_ = true;
+ return *this;
+ }
+
+ TestType& set_enable_rds_testing() {
+ enable_rds_testing_ = true;
+ return *this;
+ }
+
+ TestType& set_use_v2() {
+ use_v2_ = true;
+ return *this;
+ }
+
+ TestType& set_use_xds_credentials() {
+ use_xds_credentials_ = true;
+ return *this;
+ }
+
+ TestType& set_use_csds_streaming() {
+ use_csds_streaming_ = true;
+ return *this;
+ }
+
+ TestType& set_filter_config_setup(const FilterConfigSetup& setup) {
+ filter_config_setup_ = setup;
+ return *this;
+ }
+
+ bool use_fake_resolver() const { return use_fake_resolver_; }
bool enable_load_reporting() const { return enable_load_reporting_; }
bool enable_rds_testing() const { return enable_rds_testing_; }
bool use_v2() const { return use_v2_; }
+ bool use_xds_credentials() const { return use_xds_credentials_; }
+ bool use_csds_streaming() const { return use_csds_streaming_; }
+ const FilterConfigSetup& filter_config_setup() const {
+ return filter_config_setup_;
+ }
TString AsString() const {
- TString retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver");
+ TString retval = (use_fake_resolver_ ? "FakeResolver" : "XdsResolver");
retval += (use_v2_ ? "V2" : "V3");
if (enable_load_reporting_) retval += "WithLoadReporting";
if (enable_rds_testing_) retval += "Rds";
+ if (use_xds_credentials_) retval += "XdsCreds";
+ if (use_csds_streaming_) retval += "CsdsStreaming";
+ if (filter_config_setup_ == kRouteOverride) {
+ retval += "FilterPerRouteOverride";
+ }
return retval;
}
private:
- const bool use_xds_resolver_;
- const bool enable_load_reporting_;
- const bool enable_rds_testing_;
- const bool use_v2_;
+ bool use_fake_resolver_ = false;
+ bool enable_load_reporting_ = false;
+ bool enable_rds_testing_ = false;
+ bool use_v2_ = false;
+ bool use_xds_credentials_ = false;
+ bool use_csds_streaming_ = false;
+ FilterConfigSetup filter_config_setup_ = kHTTPConnectionManagerOriginal;
+};
+
+TString ReadFile(const char* file_path) {
+ grpc_slice slice;
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("load_file", grpc_load_file(file_path, 0, &slice)));
+ TString file_contents(grpc_core::StringViewFromSlice(slice));
+ grpc_slice_unref(slice);
+ return file_contents;
+}
+
+grpc_core::PemKeyCertPairList ReadTlsIdentityPair(const char* key_path,
+ const char* cert_path) {
+ return grpc_core::PemKeyCertPairList{
+ grpc_core::PemKeyCertPair(ReadFile(key_path), ReadFile(cert_path))};
+}
+
+// Based on StaticDataCertificateProvider, but provides alternate certificates
+// if the certificate name is not empty.
+class FakeCertificateProvider final : public grpc_tls_certificate_provider {
+ public:
+ struct CertData {
+ TString root_certificate;
+ grpc_core::PemKeyCertPairList identity_key_cert_pairs;
+ };
+
+ using CertDataMap = std::map<TString /*cert_name */, CertData>;
+
+ explicit FakeCertificateProvider(CertDataMap cert_data_map)
+ : distributor_(
+ grpc_core::MakeRefCounted<grpc_tls_certificate_distributor>()),
+ cert_data_map_(std::move(cert_data_map)) {
+ distributor_->SetWatchStatusCallback([this](TString cert_name,
+ bool root_being_watched,
+ bool identity_being_watched) {
+ if (!root_being_watched && !identity_being_watched) return;
+ auto it = cert_data_map_.find(cert_name);
+ if (it == cert_data_map_.end()) {
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(
+ y_absl::StrCat("No certificates available for cert_name \"",
+ cert_name, "\"")
+ .c_str());
+ distributor_->SetErrorForCert(cert_name, GRPC_ERROR_REF(error),
+ GRPC_ERROR_REF(error));
+ GRPC_ERROR_UNREF(error);
+ } else {
+ y_absl::optional<TString> root_certificate;
+ y_absl::optional<grpc_core::PemKeyCertPairList> pem_key_cert_pairs;
+ if (root_being_watched) {
+ root_certificate = it->second.root_certificate;
+ }
+ if (identity_being_watched) {
+ pem_key_cert_pairs = it->second.identity_key_cert_pairs;
+ }
+ distributor_->SetKeyMaterials(cert_name, std::move(root_certificate),
+ std::move(pem_key_cert_pairs));
+ }
+ });
+ }
+
+ ~FakeCertificateProvider() override {
+ distributor_->SetWatchStatusCallback(nullptr);
+ }
+
+ grpc_core::RefCountedPtr<grpc_tls_certificate_distributor> distributor()
+ const override {
+ return distributor_;
+ }
+
+ private:
+ grpc_core::RefCountedPtr<grpc_tls_certificate_distributor> distributor_;
+ CertDataMap cert_data_map_;
+};
+
+class FakeCertificateProviderFactory
+ : public grpc_core::CertificateProviderFactory {
+ public:
+ class Config : public grpc_core::CertificateProviderFactory::Config {
+ public:
+ explicit Config(const char* name) : name_(name) {}
+
+ const char* name() const override { return name_; }
+
+ TString ToString() const override { return "{}"; }
+
+ private:
+ const char* name_;
+ };
+
+ FakeCertificateProviderFactory(
+ const char* name, FakeCertificateProvider::CertDataMap** cert_data_map)
+ : name_(name), cert_data_map_(cert_data_map) {
+ GPR_ASSERT(cert_data_map != nullptr);
+ }
+
+ const char* name() const override { return name_; }
+
+ grpc_core::RefCountedPtr<grpc_core::CertificateProviderFactory::Config>
+ CreateCertificateProviderConfig(const grpc_core::Json& /*config_json*/,
+ grpc_error** /*error*/) override {
+ return grpc_core::MakeRefCounted<Config>(name_);
+ }
+
+ grpc_core::RefCountedPtr<grpc_tls_certificate_provider>
+ CreateCertificateProvider(
+ grpc_core::RefCountedPtr<grpc_core::CertificateProviderFactory::Config>
+ /*config*/) override {
+ if (*cert_data_map_ == nullptr) return nullptr;
+ return grpc_core::MakeRefCounted<FakeCertificateProvider>(**cert_data_map_);
+ }
+
+ private:
+ const char* name_;
+ FakeCertificateProvider::CertDataMap** cert_data_map_;
};
+// Global variables for each provider.
+FakeCertificateProvider::CertDataMap* g_fake1_cert_data_map = nullptr;
+FakeCertificateProvider::CertDataMap* g_fake2_cert_data_map = nullptr;
+
+int ServerAuthCheckSchedule(void* /* config_user_data */,
+ grpc_tls_server_authorization_check_arg* arg) {
+ arg->success = 1;
+ arg->status = GRPC_STATUS_OK;
+ return 0; /* synchronous check */
+}
+
+std::shared_ptr<ChannelCredentials> CreateTlsFallbackCredentials() {
+ // TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
+ grpc_tls_credentials_options* options = grpc_tls_credentials_options_create();
+ grpc_tls_credentials_options_set_server_verification_option(
+ options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
+ grpc_tls_credentials_options_set_certificate_provider(
+ options,
+ grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
+ ReadFile(kCaCertPath),
+ ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
+ .get());
+ grpc_tls_credentials_options_watch_root_certs(options);
+ grpc_tls_credentials_options_watch_identity_key_cert_pairs(options);
+ grpc_tls_server_authorization_check_config* check_config =
+ grpc_tls_server_authorization_check_config_create(
+ nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
+ grpc_tls_credentials_options_set_server_authorization_check_config(
+ options, check_config);
+ auto channel_creds = std::make_shared<SecureChannelCredentials>(
+ grpc_tls_credentials_create(options));
+ grpc_tls_server_authorization_check_config_release(check_config);
+ return channel_creds;
+}
+
+// A No-op HTTP filter used for verifying parsing logic.
+class NoOpHttpFilter : public grpc_core::XdsHttpFilterImpl {
+ public:
+ NoOpHttpFilter(TString name, bool supported_on_clients,
+ bool supported_on_servers)
+ : name_(std::move(name)),
+ supported_on_clients_(supported_on_clients),
+ supported_on_servers_(supported_on_servers) {}
+
+ void PopulateSymtab(upb_symtab* /* symtab */) const override {}
+
+ y_absl::StatusOr<grpc_core::XdsHttpFilterImpl::FilterConfig>
+ GenerateFilterConfig(upb_strview /* serialized_filter_config */,
+ upb_arena* /* arena */) const override {
+ return grpc_core::XdsHttpFilterImpl::FilterConfig{name_, grpc_core::Json()};
+ }
+
+ y_absl::StatusOr<grpc_core::XdsHttpFilterImpl::FilterConfig>
+ GenerateFilterConfigOverride(upb_strview /*serialized_filter_config*/,
+ upb_arena* /*arena*/) const override {
+ return grpc_core::XdsHttpFilterImpl::FilterConfig{name_, grpc_core::Json()};
+ }
+
+ const grpc_channel_filter* channel_filter() const override { return nullptr; }
+
+ y_absl::StatusOr<grpc_core::XdsHttpFilterImpl::ServiceConfigJsonEntry>
+ GenerateServiceConfig(
+ const FilterConfig& /*hcm_filter_config*/,
+ const FilterConfig* /*filter_config_override*/) const override {
+ return grpc_core::XdsHttpFilterImpl::ServiceConfigJsonEntry{name_, ""};
+ }
+
+ bool IsSupportedOnClients() const override { return supported_on_clients_; }
+
+ bool IsSupportedOnServers() const override { return supported_on_servers_; }
+
+ private:
+ const TString name_;
+ const bool supported_on_clients_;
+ const bool supported_on_servers_;
+};
+
+namespace {
+
+void* response_generator_arg_copy(void* p) {
+ auto* generator = static_cast<grpc_core::FakeResolverResponseGenerator*>(p);
+ generator->Ref().release();
+ return p;
+}
+
+void response_generator_arg_destroy(void* p) {
+ auto* generator = static_cast<grpc_core::FakeResolverResponseGenerator*>(p);
+ generator->Unref();
+}
+
+int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
+
+const grpc_arg_pointer_vtable
+ kLogicalDnsClusterResolverResponseGeneratorVtable = {
+ response_generator_arg_copy, response_generator_arg_destroy,
+ response_generator_cmp};
+
+// There is slight difference between time fetched by GPR and by C++ system
+// clock API. It's unclear if they are using the same syscall, but we do know
+// GPR round the number at millisecond-level. This creates a 1ms difference,
+// which could cause flake.
+grpc_millis NowFromCycleCounter() {
+ gpr_cycle_counter now = gpr_get_cycle_counter();
+ return grpc_cycle_counter_to_millis_round_up(now);
+}
+
+} // namespace
+
class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
protected:
+ // TODO(roth): We currently set the number of backends and number of
+ // balancers on a per-test-suite basis, not a per-test-case basis.
+ // However, not every individual test case in a given test suite uses
+ // the same number of backends or balancers, so we wind up having to
+ // set the numbers for the test suite to the max number needed by any
+ // one test case in that test suite. This results in starting more
+ // servers (and using more ports) than we actually need. When we have
+ // time, change each test to directly start the number of backends and
+ // balancers that it needs, so that we aren't wasting resources.
XdsEnd2endTest(size_t num_backends, size_t num_balancers,
- int client_load_reporting_interval_seconds = 100)
+ int client_load_reporting_interval_seconds = 100,
+ bool use_xds_enabled_server = false,
+ bool bootstrap_contents_from_env_var = false)
: num_backends_(num_backends),
num_balancers_(num_balancers),
client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds) {}
-
- static void SetUpTestCase() {
- // Make the backup poller poll very frequently in order to pick up
- // updates from all the subchannels's FDs.
- GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- grpc_init();
- }
-
- static void TearDownTestCase() { grpc_shutdown(); }
+ client_load_reporting_interval_seconds),
+ use_xds_enabled_server_(use_xds_enabled_server),
+ bootstrap_contents_from_env_var_(bootstrap_contents_from_env_var) {}
void SetUp() override {
- gpr_setenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT", "true");
- gpr_setenv("GRPC_XDS_BOOTSTRAP",
- GetParam().use_v2() ? g_bootstrap_file_v2 : g_bootstrap_file_v3);
- g_port_saver->Reset();
+ if (bootstrap_contents_from_env_var_) {
+ gpr_setenv("GRPC_XDS_BOOTSTRAP_CONFIG",
+ GetParam().use_v2() ? kBootstrapFileV2 : kBootstrapFileV3);
+ } else {
+ gpr_setenv("GRPC_XDS_BOOTSTRAP", GetParam().use_v2()
+ ? g_bootstrap_file_v2
+ : g_bootstrap_file_v3);
+ }
+ bool localhost_resolves_to_ipv4 = false;
+ bool localhost_resolves_to_ipv6 = false;
+ grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
+ &localhost_resolves_to_ipv6);
+ ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
+ // Initialize default xDS resources.
+ // Construct LDS resource.
+ default_listener_.set_name(kServerName);
+ HttpConnectionManager http_connection_manager;
+ if (!GetParam().use_v2()) {
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("router");
+ filter->mutable_typed_config()->PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ }
+ default_listener_.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ // Construct RDS resource.
+ default_route_config_.set_name(kDefaultRouteConfigurationName);
+ auto* virtual_host = default_route_config_.add_virtual_hosts();
+ virtual_host->add_domains("*");
+ auto* route = virtual_host->add_routes();
+ route->mutable_match()->set_prefix("");
+ route->mutable_route()->set_cluster(kDefaultClusterName);
+ // Construct CDS resource.
+ default_cluster_.set_name(kDefaultClusterName);
+ default_cluster_.set_type(Cluster::EDS);
+ auto* eds_config = default_cluster_.mutable_eds_cluster_config();
+ eds_config->mutable_eds_config()->mutable_ads();
+ eds_config->set_service_name(kDefaultEdsServiceName);
+ default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN);
+ if (GetParam().enable_load_reporting()) {
+ default_cluster_.mutable_lrs_server()->mutable_self();
+ }
+ // Start the load balancers.
+ for (size_t i = 0; i < num_balancers_; ++i) {
+ balancers_.emplace_back(
+ new BalancerServerThread(GetParam().enable_load_reporting()
+ ? client_load_reporting_interval_seconds_
+ : 0));
+ balancers_.back()->Start();
+ // Initialize resources.
+ SetListenerAndRouteConfiguration(i, default_listener_,
+ default_route_config_);
+ balancers_.back()->ads_service()->SetCdsResource(default_cluster_);
+ }
+ // Initialize XdsClient state.
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
// Inject xDS channel response generator.
@@ -1365,6 +1698,9 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
xds_channel_args_to_add_.emplace_back(
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
lb_channel_response_generator_.get()));
+ // Inject xDS logical cluster resolver response generator.
+ logical_dns_cluster_resolver_response_generator_ =
+ grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
if (xds_resource_does_not_exist_timeout_ms_ > 0) {
xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS),
@@ -1383,25 +1719,16 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
grpc_core::internal::UnsetGlobalXdsClientForTest();
// Start the backends.
for (size_t i = 0; i < num_backends_; ++i) {
- backends_.emplace_back(new BackendServerThread);
+ backends_.emplace_back(new BackendServerThread(use_xds_enabled_server_));
backends_.back()->Start();
}
- // Start the load balancers.
- for (size_t i = 0; i < num_balancers_; ++i) {
- balancers_.emplace_back(
- new BalancerServerThread(GetParam().enable_load_reporting()
- ? client_load_reporting_interval_seconds_
- : 0));
- balancers_.back()->Start();
- if (GetParam().enable_rds_testing()) {
- balancers_[i]->ads_service()->SetLdsToUseDynamicRds();
- }
- }
+ // Create channel and stub.
ResetStub();
}
const char* DefaultEdsServiceName() const {
- return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName;
+ return GetParam().use_fake_resolver() ? kServerName
+ : kDefaultEdsServiceName;
}
void TearDown() override {
@@ -1410,6 +1737,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
// Clear global xDS channel args, since they will go out of scope
// when this test object is destroyed.
grpc_core::internal::SetXdsChannelArgsForTest(nullptr);
+ gpr_unsetenv("GRPC_XDS_BOOTSTRAP");
+ gpr_unsetenv("GRPC_XDS_BOOTSTRAP_CONFIG");
}
void StartAllBackends() {
@@ -1432,31 +1761,33 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
}
std::shared_ptr<Channel> CreateChannel(
- int failover_timeout = 0, const char* server_name = kServerName) {
+ int failover_timeout = 0, const char* server_name = kServerName,
+ grpc_core::FakeResolverResponseGenerator* response_generator = nullptr) {
ChannelArguments args;
if (failover_timeout > 0) {
args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
}
// If the parent channel is using the fake resolver, we inject the
// response generator here.
- if (!GetParam().use_xds_resolver()) {
+ if (GetParam().use_fake_resolver()) {
+ if (response_generator == nullptr) {
+ response_generator = response_generator_.get();
+ }
args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
- response_generator_.get());
+ response_generator);
}
+ args.SetPointerWithVtable(
+ GRPC_ARG_XDS_LOGICAL_DNS_CLUSTER_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ logical_dns_cluster_resolver_response_generator_.get(),
+ &kLogicalDnsClusterResolverResponseGeneratorVtable);
TString uri = y_absl::StrCat(
- GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name);
- // TODO(dgq): templatize tests to run everything using both secure and
- // insecure channel credentials.
- grpc_channel_credentials* channel_creds =
- grpc_fake_transport_security_credentials_create();
- grpc_call_credentials* call_creds = grpc_md_only_test_credentials_create(
- g_kCallCredsMdKey, g_kCallCredsMdValue, false);
- std::shared_ptr<ChannelCredentials> creds(
- new SecureChannelCredentials(grpc_composite_channel_credentials_create(
- channel_creds, call_creds, nullptr)));
- call_creds->Unref();
- channel_creds->Unref();
- return ::grpc::CreateCustomChannel(uri, creds, args);
+ GetParam().use_fake_resolver() ? "fake" : "xds", ":///", server_name);
+ std::shared_ptr<ChannelCredentials> channel_creds =
+ GetParam().use_xds_credentials()
+ ? experimental::XdsCredentials(CreateTlsFallbackCredentials())
+ : std::make_shared<SecureChannelCredentials>(
+ grpc_fake_transport_security_credentials_create());
+ return ::grpc::CreateCustomChannel(uri, channel_creds, args);
}
enum RpcService {
@@ -1478,6 +1809,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
bool wait_for_ready = false;
bool server_fail = false;
std::vector<std::pair<TString, TString>> metadata;
+ int client_cancel_after_us = 0;
+ bool skip_cancelled_check = false;
RpcOptions() {}
@@ -1506,11 +1839,45 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
return *this;
}
+ RpcOptions& set_skip_cancelled_check(bool rpc_skip_cancelled_check) {
+ skip_cancelled_check = rpc_skip_cancelled_check;
+ return *this;
+ }
+
RpcOptions& set_metadata(
std::vector<std::pair<TString, TString>> rpc_metadata) {
- metadata = rpc_metadata;
+ metadata = std::move(rpc_metadata);
return *this;
}
+
+ RpcOptions& set_client_cancel_after_us(int rpc_client_cancel_after_us) {
+ client_cancel_after_us = rpc_client_cancel_after_us;
+ return *this;
+ }
+
+ // Populates context and request.
+ void SetupRpc(ClientContext* context, EchoRequest* request) const {
+ for (const auto& item : metadata) {
+ context->AddMetadata(item.first, item.second);
+ }
+ if (timeout_ms != 0) {
+ context->set_deadline(
+ grpc_timeout_milliseconds_to_deadline(timeout_ms));
+ }
+ if (wait_for_ready) context->set_wait_for_ready(true);
+ request->set_message(kRequestMessage);
+ if (server_fail) {
+ request->mutable_param()->mutable_expected_error()->set_code(
+ GRPC_STATUS_FAILED_PRECONDITION);
+ }
+ if (client_cancel_after_us != 0) {
+ request->mutable_param()->set_client_cancel_after_us(
+ client_cancel_after_us);
+ }
+ if (skip_cancelled_check) {
+ request->mutable_param()->set_skip_cancelled_check(true);
+ }
+ }
};
template <typename Stub>
@@ -1525,6 +1892,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
case METHOD_ECHO2:
return (*stub)->Echo2(context, request, response);
}
+ GPR_UNREACHABLE_CODE();
}
void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) {
@@ -1542,16 +1910,19 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
for (size_t i = start_index; i < stop_index; ++i) {
switch (rpc_options.service) {
case SERVICE_ECHO:
- if (backends_[i]->backend_service()->request_count() == 0)
+ if (backends_[i]->backend_service()->request_count() == 0) {
return false;
+ }
break;
case SERVICE_ECHO1:
- if (backends_[i]->backend_service1()->request_count() == 0)
+ if (backends_[i]->backend_service1()->request_count() == 0) {
return false;
+ }
break;
case SERVICE_ECHO2:
- if (backends_[i]->backend_service2()->request_count() == 0)
+ if (backends_[i]->backend_service2()->request_count() == 0) {
return false;
+ }
break;
}
}
@@ -1560,12 +1931,14 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
int* num_drops,
- const RpcOptions& rpc_options = RpcOptions()) {
+ const RpcOptions& rpc_options = RpcOptions(),
+ const char* drop_error_message =
+ "Call dropped by load balancing policy") {
const Status status = SendRpc(rpc_options);
if (status.ok()) {
++*num_ok;
} else {
- if (status.error_message() == "Call dropped by load balancing policy") {
+ if (status.error_message() == drop_error_message) {
++*num_drops;
} else {
++*num_failure;
@@ -1615,19 +1988,20 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
const std::vector<int>& ports) {
grpc_core::ServerAddressList addresses;
for (int port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
+ y_absl::StatusOr<grpc_core::URI> lb_uri = grpc_core::URI::Parse(
+ y_absl::StrCat(ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", port));
+ GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
addresses.emplace_back(address.addr, address.len, nullptr);
- grpc_uri_destroy(lb_uri);
}
return addresses;
}
- void SetNextResolution(const std::vector<int>& ports) {
- if (GetParam().use_xds_resolver()) return; // Not used with xds resolver.
+ void SetNextResolution(
+ const std::vector<int>& ports,
+ grpc_core::FakeResolverResponseGenerator* response_generator = nullptr) {
+ if (!GetParam().use_fake_resolver()) return; // Not used with xds resolver.
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(ports);
@@ -1640,7 +2014,10 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
ASSERT_NE(result.service_config.get(), nullptr);
- response_generator_->SetResponse(std::move(result));
+ if (response_generator == nullptr) {
+ response_generator = response_generator_.get();
+ }
+ response_generator->SetResponse(std::move(result));
}
void SetNextResolutionForLbChannelAllBalancers(
@@ -1683,8 +2060,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
response_generator_->SetReresolutionResponse(std::move(result));
}
- const std::vector<int> GetBackendPorts(size_t start_index = 0,
- size_t stop_index = 0) const {
+ std::vector<int> GetBackendPorts(size_t start_index = 0,
+ size_t stop_index = 0) const {
if (stop_index == 0) stop_index = backends_.size();
std::vector<int> backend_ports;
for (size_t i = start_index; i < stop_index; ++i) {
@@ -1697,21 +2074,9 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
EchoResponse* response = nullptr) {
const bool local_response = (response == nullptr);
if (local_response) response = new EchoResponse;
- EchoRequest request;
ClientContext context;
- for (const auto& metadata : rpc_options.metadata) {
- context.AddMetadata(metadata.first, metadata.second);
- }
- if (rpc_options.timeout_ms != 0) {
- context.set_deadline(
- grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms));
- }
- if (rpc_options.wait_for_ready) context.set_wait_for_ready(true);
- request.set_message(kRequestMessage);
- if (rpc_options.server_fail) {
- request.mutable_param()->mutable_expected_error()->set_code(
- GRPC_STATUS_FAILED_PRECONDITION);
- }
+ EchoRequest request;
+ rpc_options.SetupRpc(&context, &request);
Status status;
switch (rpc_options.service) {
case SERVICE_ECHO:
@@ -1742,20 +2107,97 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
}
}
- void CheckRpcSendFailure(const size_t times = 1,
- const RpcOptions& rpc_options = RpcOptions()) {
+ void CheckRpcSendFailure(
+ const size_t times = 1, const RpcOptions& rpc_options = RpcOptions(),
+ const StatusCode expected_error_code = StatusCode::OK) {
for (size_t i = 0; i < times; ++i) {
const Status status = SendRpc(rpc_options);
EXPECT_FALSE(status.ok());
+ if (expected_error_code != StatusCode::OK) {
+ EXPECT_EQ(expected_error_code, status.error_code());
+ }
}
}
+ static Listener BuildListener(const RouteConfiguration& route_config) {
+ HttpConnectionManager http_connection_manager;
+ *(http_connection_manager.mutable_route_config()) = route_config;
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("router");
+ filter->mutable_typed_config()->PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ Listener listener;
+ listener.set_name(kServerName);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ return listener;
+ }
+
+ ClusterLoadAssignment BuildEdsResource(
+ const AdsServiceImpl::EdsResourceArgs& args,
+ const char* eds_service_name = kDefaultEdsServiceName) {
+ ClusterLoadAssignment assignment;
+ assignment.set_cluster_name(eds_service_name);
+ for (const auto& locality : args.locality_list) {
+ auto* endpoints = assignment.add_endpoints();
+ endpoints->mutable_load_balancing_weight()->set_value(locality.lb_weight);
+ endpoints->set_priority(locality.priority);
+ endpoints->mutable_locality()->set_region(kDefaultLocalityRegion);
+ endpoints->mutable_locality()->set_zone(kDefaultLocalityZone);
+ endpoints->mutable_locality()->set_sub_zone(locality.sub_zone);
+ for (size_t i = 0; i < locality.ports.size(); ++i) {
+ const int& port = locality.ports[i];
+ auto* lb_endpoints = endpoints->add_lb_endpoints();
+ if (locality.health_statuses.size() > i &&
+ locality.health_statuses[i] != HealthStatus::UNKNOWN) {
+ lb_endpoints->set_health_status(locality.health_statuses[i]);
+ }
+ auto* endpoint = lb_endpoints->mutable_endpoint();
+ auto* address = endpoint->mutable_address();
+ auto* socket_address = address->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(port);
+ }
+ }
+ if (!args.drop_categories.empty()) {
+ auto* policy = assignment.mutable_policy();
+ for (const auto& p : args.drop_categories) {
+ const TString& name = p.first;
+ const uint32_t parts_per_million = p.second;
+ auto* drop_overload = policy->add_drop_overloads();
+ drop_overload->set_category(name);
+ auto* drop_percentage = drop_overload->mutable_drop_percentage();
+ drop_percentage->set_numerator(parts_per_million);
+ drop_percentage->set_denominator(args.drop_denominator);
+ }
+ }
+ return assignment;
+ }
+
+ void SetListenerAndRouteConfiguration(
+ int idx, Listener listener, const RouteConfiguration& route_config) {
+ auto* api_listener =
+ listener.mutable_api_listener()->mutable_api_listener();
+ HttpConnectionManager http_connection_manager;
+ api_listener->UnpackTo(&http_connection_manager);
+ if (GetParam().enable_rds_testing()) {
+ auto* rds = http_connection_manager.mutable_rds();
+ rds->set_route_config_name(kDefaultRouteConfigurationName);
+ rds->mutable_config_source()->mutable_ads();
+ balancers_[idx]->ads_service()->SetRdsResource(route_config);
+ } else {
+ *http_connection_manager.mutable_route_config() = route_config;
+ }
+ api_listener->PackFrom(http_connection_manager);
+ balancers_[idx]->ads_service()->SetLdsResource(listener);
+ }
+
void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) {
if (GetParam().enable_rds_testing()) {
balancers_[idx]->ads_service()->SetRdsResource(route_config);
} else {
balancers_[idx]->ads_service()->SetLdsResource(
- AdsServiceImpl::BuildListener(route_config));
+ BuildListener(route_config));
}
}
@@ -1779,9 +2221,36 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
}
protected:
+ class XdsServingStatusNotifier
+ : public grpc::experimental::XdsServerServingStatusNotifierInterface {
+ public:
+ void OnServingStatusChange(TString uri, grpc::Status status) override {
+ grpc_core::MutexLock lock(&mu_);
+ status_map[uri] = status;
+ cond_.Signal();
+ }
+
+ void WaitOnServingStatusChange(TString uri,
+ grpc::StatusCode expected_status) {
+ grpc_core::MutexLock lock(&mu_);
+ std::map<TString, grpc::Status>::iterator it;
+ while ((it = status_map.find(uri)) == status_map.end() ||
+ it->second.error_code() != expected_status) {
+ cond_.Wait(&mu_);
+ }
+ }
+
+ private:
+ grpc_core::Mutex mu_;
+ grpc_core::CondVar cond_;
+ std::map<TString, grpc::Status> status_map;
+ };
+
class ServerThread {
public:
- ServerThread() : port_(g_port_saver->GetPort()) {}
+ explicit ServerThread(bool use_xds_enabled_server = false)
+ : port_(grpc_pick_unused_port_or_die()),
+ use_xds_enabled_server_(use_xds_enabled_server) {}
virtual ~ServerThread(){};
void Start() {
@@ -1794,8 +2263,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
// by ServerThread::Serve from firing before the wait below is hit.
grpc_core::MutexLock lock(&mu);
grpc_core::CondVar cond;
- thread_.reset(
- new std::thread(std::bind(&ServerThread::Serve, this, &mu, &cond)));
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerThread::Serve, this, &mu, &cond));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", Type());
}
@@ -1806,12 +2275,18 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
grpc_core::MutexLock lock(mu);
std::ostringstream server_address;
server_address << "localhost:" << port_;
- ServerBuilder builder;
- std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
- grpc_fake_transport_security_server_credentials_create()));
- builder.AddListeningPort(server_address.str(), creds);
- RegisterAllServices(&builder);
- server_ = builder.BuildAndStart();
+ if (use_xds_enabled_server_) {
+ experimental::XdsServerBuilder builder;
+ builder.set_status_notifier(&notifier_);
+ builder.AddListeningPort(server_address.str(), Credentials());
+ RegisterAllServices(&builder);
+ server_ = builder.BuildAndStart();
+ } else {
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address.str(), Credentials());
+ RegisterAllServices(&builder);
+ server_ = builder.BuildAndStart();
+ }
cond->Signal();
}
@@ -1825,8 +2300,17 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
running_ = false;
}
+ virtual std::shared_ptr<ServerCredentials> Credentials() {
+ return std::make_shared<SecureServerCredentials>(
+ grpc_fake_transport_security_server_credentials_create());
+ }
+
int port() const { return port_; }
+ bool use_xds_enabled_server() const { return use_xds_enabled_server_; }
+
+ XdsServingStatusNotifier* notifier() { return &notifier_; }
+
private:
virtual void RegisterAllServices(ServerBuilder* builder) = 0;
virtual void StartAllServices() = 0;
@@ -1836,12 +2320,17 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
const int port_;
std::unique_ptr<Server> server_;
+ XdsServingStatusNotifier notifier_;
std::unique_ptr<std::thread> thread_;
bool running_ = false;
+ const bool use_xds_enabled_server_;
};
class BackendServerThread : public ServerThread {
public:
+ explicit BackendServerThread(bool use_xds_enabled_server)
+ : ServerThread(use_xds_enabled_server) {}
+
BackendServiceImpl<::grpc::testing::EchoTestService::Service>*
backend_service() {
return &backend_service_;
@@ -1855,6 +2344,34 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
return &backend_service2_;
}
+ std::shared_ptr<ServerCredentials> Credentials() override {
+ if (GetParam().use_xds_credentials()) {
+ if (use_xds_enabled_server()) {
+ // We are testing server's use of XdsServerCredentials
+ return experimental::XdsServerCredentials(
+ InsecureServerCredentials());
+ } else {
+ // We are testing client's use of XdsCredentials
+ TString root_cert = ReadFile(kCaCertPath);
+ TString identity_cert = ReadFile(kServerCertPath);
+ TString private_key = ReadFile(kServerKeyPath);
+ std::vector<experimental::IdentityKeyCertPair>
+ identity_key_cert_pairs = {{private_key, identity_cert}};
+ auto certificate_provider = std::make_shared<
+ grpc::experimental::StaticDataCertificateProvider>(
+ root_cert, identity_key_cert_pairs);
+ grpc::experimental::TlsServerCredentialsOptions options(
+ certificate_provider);
+ options.watch_root_certs();
+ options.watch_identity_key_cert_pairs();
+ options.set_cert_request_type(
+ GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY);
+ return grpc::experimental::TlsServerCredentials(options);
+ }
+ }
+ return ServerThread::Credentials();
+ }
+
private:
void RegisterAllServices(ServerBuilder* builder) override {
builder->RegisterService(&backend_service_);
@@ -1887,7 +2404,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
class BalancerServerThread : public ServerThread {
public:
explicit BalancerServerThread(int client_load_reporting_interval = 0)
- : ads_service_(new AdsServiceImpl(client_load_reporting_interval > 0)),
+ : ads_service_(new AdsServiceImpl()),
lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {}
AdsServiceImpl* ads_service() { return ads_service_.get(); }
@@ -1917,9 +2434,55 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
std::shared_ptr<LrsServiceImpl> lrs_service_;
};
+#ifndef DISABLED_XDS_PROTO_IN_CC
+ class AdminServerThread : public ServerThread {
+ private:
+ void RegisterAllServices(ServerBuilder* builder) override {
+ builder->RegisterService(&csds_service_);
+ }
+ void StartAllServices() override {}
+ void ShutdownAllServices() override {}
+
+ const char* Type() override { return "Admin"; }
+
+ grpc::xds::experimental::ClientStatusDiscoveryService csds_service_;
+ };
+#endif // DISABLED_XDS_PROTO_IN_CC
+
+ class LongRunningRpc {
+ public:
+ void StartRpc(grpc::testing::EchoTestService::Stub* stub,
+ const RpcOptions& rpc_options =
+ RpcOptions().set_client_cancel_after_us(1 * 1000 *
+ 1000)) {
+ sender_thread_ = std::thread([this, stub, rpc_options]() {
+ EchoRequest request;
+ EchoResponse response;
+ rpc_options.SetupRpc(&context_, &request);
+ status_ = stub->Echo(&context_, request, &response);
+ });
+ }
+
+ void CancelRpc() {
+ context_.TryCancel();
+ if (sender_thread_.joinable()) sender_thread_.join();
+ }
+
+ Status GetStatus() {
+ if (sender_thread_.joinable()) sender_thread_.join();
+ return status_;
+ }
+
+ private:
+ std::thread sender_thread_;
+ ClientContext context_;
+ Status status_;
+ };
+
const size_t num_backends_;
const size_t num_balancers_;
const int client_load_reporting_interval_seconds_;
+ bool ipv6_only_ = false;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_;
@@ -1930,9 +2493,17 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
response_generator_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
lb_channel_response_generator_;
+ grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
+ logical_dns_cluster_resolver_response_generator_;
int xds_resource_does_not_exist_timeout_ms_ = 0;
y_absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_;
grpc_channel_args xds_channel_args_;
+
+ Listener default_listener_;
+ RouteConfiguration default_route_config_;
+ Cluster default_cluster_;
+ bool use_xds_enabled_server_;
+ bool bootstrap_contents_from_env_var_;
};
class BasicTest : public XdsEnd2endTest {
@@ -1950,7 +2521,7 @@ TEST_P(BasicTest, Vanilla) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -1963,9 +2534,10 @@ TEST_P(BasicTest, Vanilla) {
backends_[i]->backend_service()->request_count());
}
// Check LB policy name for the channel.
- EXPECT_EQ((GetParam().use_xds_resolver() ? "xds_cluster_manager_experimental"
- : "eds_experimental"),
- channel_->GetLoadBalancingPolicyName());
+ EXPECT_EQ(
+ (GetParam().use_fake_resolver() ? "xds_cluster_resolver_experimental"
+ : "xds_cluster_manager_experimental"),
+ channel_->GetLoadBalancingPolicyName());
}
TEST_P(BasicTest, IgnoresUnhealthyEndpoints) {
@@ -1980,7 +2552,7 @@ TEST_P(BasicTest, IgnoresUnhealthyEndpoints) {
{HealthStatus::DRAINING}},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -2006,7 +2578,7 @@ TEST_P(BasicTest, SameBackendListedMultipleTimes) {
});
const size_t kNumRpcsPerAddress = 10;
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// We need to wait for the backend to come online.
WaitForBackend(0);
// Send kNumRpcsPerAddress RPCs per server.
@@ -2031,15 +2603,14 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) {
empty_locality,
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Send non-empty serverlist only after kServerlistDelayMs.
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts()},
});
- std::thread delayed_resource_setter(
- std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
- kServerlistDelayMs));
+ std::thread delayed_resource_setter(std::bind(
+ &BasicTest::SetEdsResourceWithDelay, this, 0,
+ BuildEdsResource(args, DefaultEdsServiceName()), kServerlistDelayMs));
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
CheckRpcSendOk(
@@ -2063,13 +2634,13 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) {
const size_t kNumUnreachableServers = 5;
std::vector<int> ports;
for (size_t i = 0; i < kNumUnreachableServers; ++i) {
- ports.push_back(g_port_saver->GetPort());
+ ports.push_back(grpc_pick_unused_port_or_die());
}
AdsServiceImpl::EdsResourceArgs args({
{"locality0", ports},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
const Status status = SendRpc();
// The error shouldn't be DEADLINE_EXCEEDED.
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
@@ -2084,7 +2655,7 @@ TEST_P(BasicTest, BackendsRestart) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Stop backends. RPCs should fail.
ShutdownAllBackends();
@@ -2111,7 +2682,7 @@ TEST_P(BasicTest, IgnoresDuplicateUpdates) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for all backends to come online.
WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server, but send an EDS update in
@@ -2121,7 +2692,7 @@ TEST_P(BasicTest, IgnoresDuplicateUpdates) {
for (size_t i = 0; i < kNumRpcsPerAddress; ++i) {
CheckRpcSendOk(2);
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
CheckRpcSendOk(2);
}
// Each backend should have gotten the right number of requests.
@@ -2133,6 +2704,34 @@ TEST_P(BasicTest, IgnoresDuplicateUpdates) {
using XdsResolverOnlyTest = BasicTest;
+TEST_P(XdsResolverOnlyTest, ResourceTypeVersionPersistsAcrossStreamRestarts) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ // Wait for backends to come online.
+ WaitForAllBackends(0, 1);
+ // Stop balancer.
+ balancers_[0]->Shutdown();
+ // Tell balancer to require minimum version 1 for all resource types.
+ balancers_[0]->ads_service()->SetResourceMinVersion(kLdsTypeUrl, 1);
+ balancers_[0]->ads_service()->SetResourceMinVersion(kRdsTypeUrl, 1);
+ balancers_[0]->ads_service()->SetResourceMinVersion(kCdsTypeUrl, 1);
+ balancers_[0]->ads_service()->SetResourceMinVersion(kEdsTypeUrl, 1);
+ // Update backend, just so we can be sure that the client has
+ // reconnected to the balancer.
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args2));
+ // Restart balancer.
+ balancers_[0]->Start();
+ // Make sure client has reconnected.
+ WaitForAllBackends(1, 2);
+}
+
// Tests switching over from one cluster to another.
TEST_P(XdsResolverOnlyTest, ChangeClusters) {
const char* kNewClusterName = "new_cluster_name";
@@ -2142,8 +2741,7 @@ TEST_P(XdsResolverOnlyTest, ChangeClusters) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends(0, 2);
// Populate new EDS resource.
@@ -2151,23 +2749,20 @@ TEST_P(XdsResolverOnlyTest, ChangeClusters) {
{"locality0", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ BuildEdsResource(args2, kNewEdsServiceName));
// Populate new CDS resource.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(kNewClusterName);
- Listener listener =
- balancers_[0]->ads_service()->BuildListener(new_route_config);
- balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetListenerAndRouteConfiguration(0, default_listener_, new_route_config);
// Wait for all new backends to be used.
std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
// Make sure no RPCs failed in the transition.
@@ -2181,8 +2776,7 @@ TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset CDS resource.
@@ -2199,7 +2793,18 @@ TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
// Tests that we restart all xDS requests when we reestablish the ADS call.
TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
- balancers_[0]->ads_service()->SetLdsToUseDynamicRds();
+ // Manually configure use of RDS.
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* rds = http_connection_manager.mutable_rds();
+ rds->set_route_config_name(kDefaultRouteConfigurationName);
+ rds->mutable_config_source()->mutable_ads();
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ balancers_[0]->ads_service()->SetRdsResource(default_route_config_);
const char* kNewClusterName = "new_cluster_name";
const char* kNewEdsServiceName = "new_eds_service_name";
SetNextResolution({});
@@ -2207,8 +2812,7 @@ TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends(0, 2);
// Now shut down and restart the balancer. When the client
@@ -2223,16 +2827,15 @@ TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
{"locality0", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ BuildEdsResource(args2, kNewEdsServiceName));
// Populate new CDS resource.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
@@ -2245,52 +2848,23 @@ TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
}
TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_match()
->set_prefix("/");
- balancers_[0]->ads_service()->SetLdsResource(
- AdsServiceImpl::BuildListener(route_config));
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
}
TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
- class TestRpc {
- public:
- TestRpc() {}
-
- void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
- sender_thread_ = std::thread([this, stub]() {
- EchoResponse response;
- EchoRequest request;
- request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
- request.set_message(kRequestMessage);
- status_ = stub->Echo(&context_, request, &response);
- });
- }
-
- void CancelRpc() {
- context_.TryCancel();
- sender_thread_.join();
- }
-
- private:
- std::thread sender_thread_;
- ClientContext context_;
- Status status_;
- };
-
- gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true");
constexpr size_t kMaxConcurrentRequests = 10;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@@ -2298,17 +2872,16 @@ TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Update CDS resource to set max concurrent request.
CircuitBreakers circuit_breaks;
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster cluster = default_cluster_;
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Send exactly max_concurrent_requests long RPCs.
- TestRpc rpcs[kMaxConcurrentRequests];
+ LongRunningRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].StartRpc(stub_.get());
}
@@ -2333,55 +2906,38 @@ TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
// Make sure RPCs go to the correct backend:
EXPECT_EQ(kMaxConcurrentRequests + 1,
backends_[0]->backend_service()->request_count());
- gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING");
}
-TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
- class TestRpc {
- public:
- TestRpc() {}
-
- void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
- sender_thread_ = std::thread([this, stub]() {
- EchoResponse response;
- EchoRequest request;
- request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
- request.set_message(kRequestMessage);
- status_ = stub->Echo(&context_, request, &response);
- });
- }
-
- void CancelRpc() {
- context_.TryCancel();
- sender_thread_.join();
- }
-
- private:
- std::thread sender_thread_;
- ClientContext context_;
- Status status_;
- };
-
+TEST_P(XdsResolverOnlyTest, CircuitBreakingMultipleChannelsShareCallCounter) {
constexpr size_t kMaxConcurrentRequests = 10;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Update CDS resource to set max concurrent request.
CircuitBreakers circuit_breaks;
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster cluster = default_cluster_;
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
- // Send exactly max_concurrent_requests long RPCs.
- TestRpc rpcs[kMaxConcurrentRequests];
+ // Create second channel.
+ auto response_generator2 =
+ grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
+ auto channel2 = CreateChannel(
+ /*failover_timeout=*/0, /*server_name=*/kServerName,
+ response_generator2.get());
+ auto stub2 = grpc::testing::EchoTestService::NewStub(channel2);
+ // Set resolution results for both channels and for the xDS channel.
+ SetNextResolution({});
+ SetNextResolution({}, response_generator2.get());
+ SetNextResolutionForLbChannelAllBalancers();
+ // Send exactly max_concurrent_requests long RPCs, alternating between
+ // the two channels.
+ LongRunningRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
- rpcs[i].StartRpc(stub_.get());
+ rpcs[i].StartRpc(i % 2 == 0 ? stub_.get() : stub2.get());
}
// Wait for all RPCs to be in flight.
while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
@@ -2389,10 +2945,16 @@ TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
}
- // Sending a RPC now should not fail as circuit breaking is disabled.
+ // Sending a RPC now should fail, the error message should tell us
+ // we hit the max concurrent requests limit and got dropped.
Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
+ // Cancel one RPC to allow another one through
+ rpcs[0].CancelRpc();
+ status = SendRpc();
EXPECT_TRUE(status.ok());
- for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
+ for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
rpcs[i].CancelRpc();
}
// Make sure RPCs go to the correct backend:
@@ -2402,16 +2964,15 @@ TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) {
const char* kNewServerName = "new-server.example.com";
- Listener listener = balancers_[0]->ads_service()->default_listener();
+ Listener listener = default_listener_;
listener.set_name(kNewServerName);
- balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
WaitForAllBackends();
// Create second channel and tell it to connect to kNewServerName.
auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName);
@@ -2439,16 +3000,15 @@ TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// cluster kNewClusterName -> locality1 -> backends 2 and 3
AdsServiceImpl::EdsResourceArgs args2({
{"locality1", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ BuildEdsResource(args2, kNewEdsServiceName));
// CDS resource for kNewClusterName.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
@@ -2485,15 +3045,12 @@ TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) {
::testing::Property(&ClientStats::total_dropped_requests,
num_drops))));
// Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(kNewClusterName);
- Listener listener =
- balancers_[0]->ads_service()->BuildListener(new_route_config);
- balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetListenerAndRouteConfiguration(0, default_listener_, new_route_config);
// Wait for all new backends to be used.
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4);
// The load report received at the balancer should be correct.
@@ -2569,7 +3126,7 @@ TEST_P(SecureNamingTest, TargetNameIsExpected) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
CheckRpcSendOk();
}
@@ -2583,7 +3140,7 @@ TEST_P(SecureNamingTest, TargetNameIsUnexpected) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that we blow up (via abort() from the security connector) when
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, "");
@@ -2594,23 +3151,27 @@ using LdsTest = BasicTest;
// Tests that LDS client should send a NACK if there is no API listener in the
// Listener in the LDS response.
TEST_P(LdsTest, NoApiListener) {
- auto listener = balancers_[0]->ads_service()->default_listener();
+ auto listener = default_listener_;
listener.clear_api_listener();
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "Listener has no ApiListener.");
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Listener has neither address nor ApiListener"));
}
// Tests that LDS client should send a NACK if the route_specifier in the
// http_connection_manager is neither inlined route_config nor RDS.
TEST_P(LdsTest, WrongRouteSpecifier) {
- auto listener = balancers_[0]->ads_service()->default_listener();
+ auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
http_connection_manager.mutable_scoped_routes();
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
@@ -2618,18 +3179,22 @@ TEST_P(LdsTest, WrongRouteSpecifier) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager neither has inlined route_config nor RDS.");
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "HttpConnectionManager neither has inlined route_config nor RDS."));
}
// Tests that LDS client should send a NACK if the rds message in the
// http_connection_manager is missing the config_source field.
TEST_P(LdsTest, RdsMissingConfigSource) {
- auto listener = balancers_[0]->ads_service()->default_listener();
+ auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
http_connection_manager.mutable_rds()->set_route_config_name(
kDefaultRouteConfigurationName);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
@@ -2638,18 +3203,21 @@ TEST_P(LdsTest, RdsMissingConfigSource) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager missing config_source for RDS.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "HttpConnectionManager missing config_source for RDS."));
}
// Tests that LDS client should send a NACK if the rds message in the
// http_connection_manager has a config_source field that does not specify ADS.
TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) {
- auto listener = balancers_[0]->ads_service()->default_listener();
+ auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
auto* rds = http_connection_manager.mutable_rds();
rds->set_route_config_name(kDefaultRouteConfigurationName);
rds->mutable_config_source()->mutable_self();
@@ -2659,11 +3227,358 @@ TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "HttpConnectionManager ConfigSource for RDS does not specify ADS."));
+}
+
+// Tests that the NACK for multiple bad LDS resources includes both errors.
+TEST_P(LdsTest, MultipleBadResources) {
+ constexpr char kServerName2[] = "server.other.com";
+ auto listener = default_listener_;
+ listener.clear_api_listener();
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ listener.set_name(kServerName2);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ // Need to create a second channel to subscribe to a second LDS resource.
+ auto channel2 = CreateChannel(0, kServerName2);
+ auto stub2 = grpc::testing::EchoTestService::NewStub(channel2);
+ ClientContext context;
+ EchoRequest request;
+ request.set_message(kRequestMessage);
+ EchoResponse response;
+ grpc::Status status = stub2->Echo(&context, request, &response);
+ EXPECT_FALSE(status.ok());
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::AllOf(
+ ::testing::HasSubstr(y_absl::StrCat(
+ kServerName, ": Listener has neither address nor ApiListener")),
+ ::testing::HasSubstr(
+ y_absl::StrCat(kServerName2,
+ ": Listener has neither address nor ApiListener"))));
+}
+
+// Tests that we ignore filters after the router filter.
+TEST_P(LdsTest, IgnoresHttpFiltersAfterRouterFilter) {
+ SetNextResolutionForLbChannelAllBalancers();
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->mutable_typed_config()->set_type_url(
+ "grpc.testing.client_only_http_filter");
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ WaitForAllBackends();
+}
+
+// Test that we fail RPCs if there is no router filter.
+TEST_P(LdsTest, FailRpcsIfNoHttpRouterFilter) {
+ SetNextResolutionForLbChannelAllBalancers();
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ http_connection_manager.clear_http_filters();
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ Status status = SendRpc();
+ EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
+ EXPECT_EQ(status.error_message(), "no xDS HTTP router filter configured");
+ // Wait until xDS server sees ACK.
+ while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT) {
+ CheckRpcSendFailure();
+ }
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK empty filter names.
+TEST_P(LdsTest, RejectsEmptyHttpFilterName) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->mutable_typed_config()->PackFrom(Listener());
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("empty filter name at index 1"));
+}
+
+// Test that we NACK duplicate HTTP filter names.
+TEST_P(LdsTest, RejectsDuplicateHttpFilterName) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ *http_connection_manager.add_http_filters() =
+ http_connection_manager.http_filters(0);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager ConfigSource for RDS does not specify ADS.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("duplicate HTTP filter name: router"));
+}
+
+// Test that we NACK unknown filter types.
+TEST_P(LdsTest, RejectsUnknownHttpFilterType) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->mutable_typed_config()->PackFrom(Listener());
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("no filter registered for config type "
+ "envoy.config.listener.v3.Listener"));
+}
+
+// Test that we ignore optional unknown filter types.
+TEST_P(LdsTest, IgnoresOptionalUnknownHttpFilterType) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->mutable_typed_config()->PackFrom(Listener());
+ filter->set_is_optional(true);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK filters without configs.
+TEST_P(LdsTest, RejectsHttpFilterWithoutConfig) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we ignore optional filters without configs.
+TEST_P(LdsTest, IgnoresOptionalHttpFilterWithoutConfig) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->set_is_optional(true);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK unparseable filter configs.
+TEST_P(LdsTest, RejectsUnparseableHttpFilterType) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->mutable_typed_config()->PackFrom(listener);
+ filter->mutable_typed_config()->set_type_url(
+ "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router");
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "filter config for type "
+ "envoy.extensions.filters.http.router.v3.Router failed to parse"));
+}
+
+// Test that we NACK HTTP filters unsupported on client-side.
+TEST_P(LdsTest, RejectsHttpFiltersNotSupportedOnClients) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("grpc.testing.server_only_http_filter");
+ filter->mutable_typed_config()->set_type_url(
+ "grpc.testing.server_only_http_filter");
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Filter grpc.testing.server_only_http_filter is not "
+ "supported on clients"));
+}
+
+// Test that we ignore optional HTTP filters unsupported on client-side.
+TEST_P(LdsTest, IgnoresOptionalHttpFiltersNotSupportedOnClients) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("grpc.testing.server_only_http_filter");
+ filter->mutable_typed_config()->set_type_url(
+ "grpc.testing.server_only_http_filter");
+ filter->set_is_optional(true);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForBackend(0);
+ EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+using LdsV2Test = LdsTest;
+
+// Tests that we ignore the HTTP filter list in v2.
+// TODO(roth): The test framework is not set up to allow us to test
+// the server sending v2 resources when the client requests v3, so this
+// just tests a pure v2 setup. When we have time, fix this.
+TEST_P(LdsV2Test, IgnoresHttpFilters) {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ auto* filter = http_connection_manager.add_http_filters();
+ filter->set_name("unknown");
+ filter->mutable_typed_config()->PackFrom(Listener());
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendOk();
}
using LdsRdsTest = BasicTest;
@@ -2690,8 +3605,7 @@ TEST_P(LdsRdsTest, ListenerRemoved) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset LDS resource.
@@ -2709,8 +3623,7 @@ TEST_P(LdsRdsTest, ListenerRemoved) {
// Tests that LDS client ACKs but fails if matching domain can't be found in
// the LDS response.
TEST_P(LdsRdsTest, NoMatchedDomain) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)->clear_domains();
route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
SetRouteConfiguration(0, route_config);
@@ -2719,15 +3632,14 @@ TEST_P(LdsRdsTest, NoMatchedDomain) {
CheckRpcSendFailure();
// Do a bit of polling, to allow the ACK to get to the ADS server.
channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100));
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should choose the virtual host with matching domain if
// multiple virtual hosts exist in the LDS response.
TEST_P(LdsRdsTest, ChooseMatchedDomain) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
*(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0);
route_config.mutable_virtual_hosts(0)->clear_domains();
route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
@@ -2742,8 +3654,7 @@ TEST_P(LdsRdsTest, ChooseMatchedDomain) {
// Tests that LDS client should choose the last route in the virtual host if
// multiple routes exist in the LDS response.
TEST_P(LdsRdsTest, ChooseLastRoute) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
*(route_config.mutable_virtual_hosts(0)->add_routes()) =
route_config.virtual_hosts(0).routes(0);
route_config.mutable_virtual_hosts(0)
@@ -2758,27 +3669,9 @@ TEST_P(LdsRdsTest, ChooseLastRoute) {
AdsServiceImpl::ResponseState::ACKED);
}
-// Tests that LDS client should send a NACK if route match has a case_sensitive
-// set to false.
-TEST_P(LdsRdsTest, RouteMatchHasCaseSensitiveFalse) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->mutable_case_sensitive()->set_value(false);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "case_sensitive if set must be set to true.");
-}
-
// Tests that LDS client should ignore route which has query_parameters.
TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_match()->add_query_parameters();
@@ -2786,16 +3679,16 @@ TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should send a ACK if route match has a prefix
// that is either empty or a single slash
TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
@@ -2805,159 +3698,158 @@ TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should ignore route which has a path
// prefix string does not start with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has a prefix
// string with more than 2 slashes.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has a prefix
// string "//".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("//");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// but it's empty.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// string does not start with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// string that has too many slashes; for example, ends with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// string that has only 1 slash: missing "/" between service and method.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// string that is missing service.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("//Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore route which has path
// string that is missing method.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No valid routes specified."));
}
// Test that LDS client should reject route which has invalid path regex.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) {
const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]");
route1->mutable_route()->set_cluster(kNewCluster1Name);
@@ -2965,30 +3857,30 @@ TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid regex string specified in path matcher.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "path matcher: Invalid regex string specified in matcher."));
}
// Tests that LDS client should send a NACK if route has an action other than
// RouteAction in the LDS response.
TEST_P(LdsRdsTest, RouteHasNoRouteAction) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect();
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No RouteAction found in route.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("No RouteAction found in route."));
}
TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster("");
@@ -2999,17 +3891,17 @@ TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction cluster contains empty cluster name.");
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("RouteAction cluster contains empty cluster name."));
}
TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) {
const size_t kWeight75 = 75;
const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3027,16 +3919,44 @@ TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction weighted_cluster has incorrect total weight");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "RouteAction weighted_cluster has incorrect total weight"));
+}
+
+TEST_P(LdsRdsTest, RouteActionWeightedClusterHasZeroTotalWeight) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config = default_route_config_;
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(0);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(0);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "RouteAction weighted_cluster has no valid clusters specified."));
}
TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) {
const size_t kWeight75 = 75;
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3054,18 +3974,18 @@ TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(
+ EXPECT_THAT(
response_state.error_message,
- "RouteAction weighted_cluster cluster contains empty cluster name.");
+ ::testing::HasSubstr(
+ "RouteAction weighted_cluster cluster contains empty cluster name."));
}
TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) {
const size_t kWeight75 = 75;
const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3082,16 +4002,16 @@ TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction weighted_cluster cluster missing weight");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "RouteAction weighted_cluster cluster missing weight"));
}
TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) {
const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
@@ -3102,16 +4022,17 @@ TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid regex string specified in header matcher.");
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "header matcher: Invalid regex string specified in matcher."));
}
TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
@@ -3123,11 +4044,13 @@ TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid range header matcher specifier specified: end "
- "cannot be smaller than start.");
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "header matcher: Invalid range specifier specified: end cannot be "
+ "smaller than start."));
}
// Tests that LDS client should choose the default route (with no matching
@@ -3152,26 +4075,24 @@ TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
route1->mutable_route()->set_cluster(kNewCluster1Name);
@@ -3210,6 +4131,70 @@ TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
+TEST_P(LdsRdsTest, XdsRoutingPathMatchingCaseInsensitive) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config = default_route_config_;
+ // First route will not match, since it's case-sensitive.
+ // Second route will match with same path.
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/GrPc.TeStInG.EcHoTeSt1SErViCe/EcHo1");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_path("/GrPc.TeStInG.EcHoTeSt1SErViCe/EcHo1");
+ route2->mutable_match()->mutable_case_sensitive()->set_value(false);
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+}
+
TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
@@ -3230,26 +4215,24 @@ TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster(kNewCluster1Name);
@@ -3283,6 +4266,70 @@ TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
+TEST_P(LdsRdsTest, XdsRoutingPrefixMatchingCaseInsensitive) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config = default_route_config_;
+ // First route will not match, since it's case-sensitive.
+ // Second route will match with same path.
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/GrPc.TeStInG.EcHoTeSt1SErViCe");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_prefix("/GrPc.TeStInG.EcHoTeSt1SErViCe");
+ route2->mutable_match()->mutable_case_sensitive()->set_value(false);
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+}
+
TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
@@ -3303,26 +4350,24 @@ TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
// Will match "/grpc.testing.EchoTest1Service/"
route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*");
@@ -3358,11 +4403,78 @@ TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
+TEST_P(LdsRdsTest, XdsRoutingPathRegexMatchingCaseInsensitive) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config = default_route_config_;
+ // First route will not match, since it's case-sensitive.
+ // Second route will match with same path.
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->mutable_safe_regex()->set_regex(
+ ".*EcHoTeSt1SErViCe.*");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->mutable_safe_regex()->set_regex(
+ ".*EcHoTeSt1SErViCe.*");
+ route2->mutable_match()->mutable_case_sensitive()->set_value(false);
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+}
+
TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kNotUsedClusterName = "not_used_cluster";
const size_t kNumEcho1Rpcs = 1000;
const size_t kNumEchoRpcs = 10;
const size_t kWeight75 = 75;
@@ -3379,26 +4491,24 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3409,6 +4519,11 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster2->set_name(kNewCluster2Name);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ // Cluster with weight 0 will not be used.
+ auto* weighted_cluster3 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster3->set_name(kNotUsedClusterName);
+ weighted_cluster3->mutable_weight()->set_value(0);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
@@ -3431,21 +4546,23 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
const int weight_25_request_count =
backends_[2]->backend_service1()->request_count();
const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ EXPECT_THAT(
+ weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 + kErrorTolerance))));
+ // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
+ ::testing::AllOf(
+ ::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 + kErrorToleranceSmallLoad))));
}
TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
@@ -3468,26 +4585,24 @@ TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* weighted_cluster1 =
@@ -3512,21 +4627,23 @@ TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
const int weight_25_request_count =
backends_[2]->backend_service()->request_count();
const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEchoRpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ EXPECT_THAT(
+ weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEchoRpcs) *
+ kWeight75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEchoRpcs) *
+ kWeight75 / 100 * (1 + kErrorTolerance))));
+ // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEchoRpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
+ ::testing::AllOf(
+ ::testing::Ge(static_cast<double>(kNumEchoRpcs) * kWeight25 /
+ 100 * (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(static_cast<double>(kNumEchoRpcs) * kWeight25 /
+ 100 * (1 + kErrorToleranceSmallLoad))));
}
TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
@@ -3556,33 +4673,31 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3618,21 +4733,23 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ EXPECT_THAT(
+ weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 + kErrorTolerance))));
+ // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
+ ::testing::AllOf(
+ ::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 + kErrorToleranceSmallLoad))));
// Change Route Configurations: same clusters different weights.
weighted_cluster1->mutable_weight()->set_value(kWeight50);
weighted_cluster2->mutable_weight()->set_value(kWeight50);
@@ -3655,16 +4772,18 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
backends_[2]->backend_service1()->request_count();
EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_THAT(weight_50_request_count_1,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
- EXPECT_THAT(weight_50_request_count_2,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
+ EXPECT_THAT(
+ weight_50_request_count_1,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 + kErrorTolerance))));
+ EXPECT_THAT(
+ weight_50_request_count_2,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 + kErrorTolerance))));
}
TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
@@ -3694,33 +4813,31 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
@@ -3755,21 +4872,23 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ EXPECT_THAT(
+ weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 + kErrorTolerance))));
+ // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
+ ::testing::AllOf(
+ ::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 + kErrorToleranceSmallLoad))));
// Change Route Configurations: new set of clusters with different weights.
weighted_cluster1->mutable_weight()->set_value(kWeight50);
weighted_cluster2->set_name(kNewCluster2Name);
@@ -3790,16 +4909,18 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
backends_[2]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_THAT(weight_50_request_count_1,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
- EXPECT_THAT(weight_50_request_count_2,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
+ EXPECT_THAT(
+ weight_50_request_count_1,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 + kErrorTolerance))));
+ EXPECT_THAT(
+ weight_50_request_count_2,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight50 / 100 * (1 + kErrorTolerance))));
// Change Route Configurations.
weighted_cluster1->mutable_weight()->set_value(kWeight75);
weighted_cluster2->set_name(kNewCluster3Name);
@@ -3818,20 +4939,22 @@ TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
weight_25_request_count = backends_[3]->backend_service1()->request_count();
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ EXPECT_THAT(
+ weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
+ kWeight75 / 100 * (1 + kErrorTolerance))));
+ // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
+ ::testing::AllOf(
+ ::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
+ 100 * (1 + kErrorToleranceSmallLoad))));
}
TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) {
@@ -3847,19 +4970,17 @@ TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Send Route Configuration.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 1);
CheckRpcSendOk(kNumEchoRpcs);
@@ -3888,12 +5009,11 @@ TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
@@ -3903,8 +5023,7 @@ TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
ShutdownBackend(0);
// Send a RouteConfiguration with a default route that points to
// backend 0.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration new_route_config = default_route_config_;
SetRouteConfiguration(0, new_route_config);
// Send exactly one RPC with no deadline and with wait_for_ready=true.
// This RPC will not complete until after backend 0 is started.
@@ -3932,6 +5051,305 @@ TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
EXPECT_EQ(1, backends_[1]->backend_service()->request_count());
}
+TEST_P(LdsRdsTest, XdsRoutingApplyXdsTimeout) {
+ const int64_t kTimeoutMillis = 500;
+ const int64_t kTimeoutNano = kTimeoutMillis * 1000000;
+ const int64_t kTimeoutGrpcTimeoutHeaderMaxSecond = 1;
+ const int64_t kTimeoutMaxStreamDurationSecond = 2;
+ const int64_t kTimeoutHttpMaxStreamDurationSecond = 3;
+ const int64_t kTimeoutApplicationSecond = 4;
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kNewCluster3Name = "new_cluster_3";
+ const char* kNewEdsService3Name = "new_eds_service_name_3";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ AdsServiceImpl::EdsResourceArgs args3({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args3, kNewEdsService3Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ Cluster new_cluster3 = default_cluster_;
+ new_cluster3.set_name(kNewCluster3Name);
+ new_cluster3.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService3Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
+ // Construct listener.
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ // Set up HTTP max_stream_duration of 3.5 seconds
+ auto* duration =
+ http_connection_manager.mutable_common_http_protocol_options()
+ ->mutable_max_stream_duration();
+ duration->set_seconds(kTimeoutHttpMaxStreamDurationSecond);
+ duration->set_nanos(kTimeoutNano);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ // Construct route config.
+ RouteConfiguration new_route_config = default_route_config_;
+ // route 1: Set max_stream_duration of 2.5 seconds, Set
+ // grpc_timeout_header_max of 1.5
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* max_stream_duration =
+ route1->mutable_route()->mutable_max_stream_duration();
+ duration = max_stream_duration->mutable_max_stream_duration();
+ duration->set_seconds(kTimeoutMaxStreamDurationSecond);
+ duration->set_nanos(kTimeoutNano);
+ duration = max_stream_duration->mutable_grpc_timeout_header_max();
+ duration->set_seconds(kTimeoutGrpcTimeoutHeaderMaxSecond);
+ duration->set_nanos(kTimeoutNano);
+ // route 2: Set max_stream_duration of 2.5 seconds
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ max_stream_duration = route2->mutable_route()->mutable_max_stream_duration();
+ duration = max_stream_duration->mutable_max_stream_duration();
+ duration->set_seconds(kTimeoutMaxStreamDurationSecond);
+ duration->set_nanos(kTimeoutNano);
+ // route 3: No timeout values in route configuration
+ auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route3->mutable_match()->set_path("/grpc.testing.EchoTestService/Echo");
+ route3->mutable_route()->set_cluster(kNewCluster3Name);
+ // Set listener and route config.
+ SetListenerAndRouteConfiguration(0, std::move(listener), new_route_config);
+ // Test grpc_timeout_header_max of 1.5 seconds applied
+ grpc_millis t0 = NowFromCycleCounter();
+ grpc_millis t1 =
+ t0 + kTimeoutGrpcTimeoutHeaderMaxSecond * 1000 + kTimeoutMillis;
+ grpc_millis t2 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis;
+ CheckRpcSendFailure(1,
+ RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true)
+ .set_timeout_ms(kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ t0 = NowFromCycleCounter();
+ EXPECT_GE(t0, t1);
+ EXPECT_LT(t0, t2);
+ // Test max_stream_duration of 2.5 seconds applied
+ t0 = NowFromCycleCounter();
+ t1 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis;
+ t2 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis;
+ CheckRpcSendFailure(1,
+ RpcOptions()
+ .set_rpc_service(SERVICE_ECHO2)
+ .set_rpc_method(METHOD_ECHO2)
+ .set_wait_for_ready(true)
+ .set_timeout_ms(kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ t0 = NowFromCycleCounter();
+ EXPECT_GE(t0, t1);
+ EXPECT_LT(t0, t2);
+ // Test http_stream_duration of 3.5 seconds applied
+ t0 = NowFromCycleCounter();
+ t1 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis;
+ t2 = t0 + kTimeoutApplicationSecond * 1000 + kTimeoutMillis;
+ CheckRpcSendFailure(1,
+ RpcOptions().set_wait_for_ready(true).set_timeout_ms(
+ kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ t0 = NowFromCycleCounter();
+ EXPECT_GE(t0, t1);
+ EXPECT_LT(t0, t2);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingApplyApplicationTimeoutWhenXdsTimeoutExplicit0) {
+ const int64_t kTimeoutNano = 500000000;
+ const int64_t kTimeoutMaxStreamDurationSecond = 2;
+ const int64_t kTimeoutHttpMaxStreamDurationSecond = 3;
+ const int64_t kTimeoutApplicationSecond = 4;
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Construct listener.
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ // Set up HTTP max_stream_duration of 3.5 seconds
+ auto* duration =
+ http_connection_manager.mutable_common_http_protocol_options()
+ ->mutable_max_stream_duration();
+ duration->set_seconds(kTimeoutHttpMaxStreamDurationSecond);
+ duration->set_nanos(kTimeoutNano);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ // Construct route config.
+ RouteConfiguration new_route_config = default_route_config_;
+ // route 1: Set max_stream_duration of 2.5 seconds, Set
+ // grpc_timeout_header_max of 0
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* max_stream_duration =
+ route1->mutable_route()->mutable_max_stream_duration();
+ duration = max_stream_duration->mutable_max_stream_duration();
+ duration->set_seconds(kTimeoutMaxStreamDurationSecond);
+ duration->set_nanos(kTimeoutNano);
+ duration = max_stream_duration->mutable_grpc_timeout_header_max();
+ duration->set_seconds(0);
+ duration->set_nanos(0);
+ // route 2: Set max_stream_duration to 0
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ max_stream_duration = route2->mutable_route()->mutable_max_stream_duration();
+ duration = max_stream_duration->mutable_max_stream_duration();
+ duration->set_seconds(0);
+ duration->set_nanos(0);
+ // Set listener and route config.
+ SetListenerAndRouteConfiguration(0, std::move(listener), new_route_config);
+ // Test application timeout is applied for route 1
+ auto t0 = system_clock::now();
+ CheckRpcSendFailure(1,
+ RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true)
+ .set_timeout_ms(kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ auto ellapsed_nano_seconds =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
+ t0);
+ EXPECT_GT(ellapsed_nano_seconds.count(),
+ kTimeoutApplicationSecond * 1000000000);
+ // Test application timeout is applied for route 2
+ t0 = system_clock::now();
+ CheckRpcSendFailure(1,
+ RpcOptions()
+ .set_rpc_service(SERVICE_ECHO2)
+ .set_rpc_method(METHOD_ECHO2)
+ .set_wait_for_ready(true)
+ .set_timeout_ms(kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ ellapsed_nano_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ system_clock::now() - t0);
+ EXPECT_GT(ellapsed_nano_seconds.count(),
+ kTimeoutApplicationSecond * 1000000000);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingApplyApplicationTimeoutWhenHttpTimeoutExplicit0) {
+ const int64_t kTimeoutApplicationSecond = 4;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
+ &http_connection_manager);
+ // Set up HTTP max_stream_duration to be explicit 0
+ auto* duration =
+ http_connection_manager.mutable_common_http_protocol_options()
+ ->mutable_max_stream_duration();
+ duration->set_seconds(0);
+ duration->set_nanos(0);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ // Set listener and route config.
+ SetListenerAndRouteConfiguration(0, std::move(listener),
+ default_route_config_);
+ // Test application timeout is applied for route 1
+ auto t0 = system_clock::now();
+ CheckRpcSendFailure(1,
+ RpcOptions().set_wait_for_ready(true).set_timeout_ms(
+ kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ auto ellapsed_nano_seconds =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
+ t0);
+ EXPECT_GT(ellapsed_nano_seconds.count(),
+ kTimeoutApplicationSecond * 1000000000);
+}
+
+// Test to ensure application-specified deadline won't be affected when
+// the xDS config does not specify a timeout.
+TEST_P(LdsRdsTest, XdsRoutingWithOnlyApplicationTimeout) {
+ const int64_t kTimeoutApplicationSecond = 4;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {grpc_pick_unused_port_or_die()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
+ auto t0 = system_clock::now();
+ CheckRpcSendFailure(1,
+ RpcOptions().set_wait_for_ready(true).set_timeout_ms(
+ kTimeoutApplicationSecond * 1000),
+ StatusCode::DEADLINE_EXCEEDED);
+ auto ellapsed_nano_seconds =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
+ t0);
+ EXPECT_GT(ellapsed_nano_seconds.count(),
+ kTimeoutApplicationSecond * 1000000000);
+}
+
TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
@@ -3946,19 +5364,17 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
@@ -3976,20 +5392,27 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
header_matcher4->set_present_match(false);
auto* header_matcher5 = route1->mutable_match()->add_headers();
header_matcher5->set_name("header5");
- header_matcher5->set_prefix_match("/grpc");
+ header_matcher5->set_present_match(true);
auto* header_matcher6 = route1->mutable_match()->add_headers();
header_matcher6->set_name("header6");
- header_matcher6->set_suffix_match(".cc");
- header_matcher6->set_invert_match(true);
+ header_matcher6->set_prefix_match("/grpc");
+ auto* header_matcher7 = route1->mutable_match()->add_headers();
+ header_matcher7->set_name("header7");
+ header_matcher7->set_suffix_match(".cc");
+ header_matcher7->set_invert_match(true);
route1->mutable_route()->set_cluster(kNewClusterName);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
std::vector<std::pair<TString, TString>> metadata = {
- {"header1", "POST"}, {"header2", "blah"},
- {"header3", "1"}, {"header5", "/grpc.testing.EchoTest1Service/"},
- {"header1", "PUT"}, {"header6", "grpc.java"},
+ {"header1", "POST"},
+ {"header2", "blah"},
+ {"header3", "1"},
+ {"header5", "anything"},
+ {"header6", "/grpc.testing.EchoTest1Service/"},
+ {"header1", "PUT"},
+ {"header7", "grpc.java"},
{"header1", "GET"},
};
const auto header_match_rpc_options = RpcOptions()
@@ -4008,7 +5431,7 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
@@ -4025,19 +5448,17 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* header_matcher1 = route1->mutable_match()->add_headers();
@@ -4057,15 +5478,13 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) {
CheckRpcSendOk(kNumEchoRpcs);
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEchoRpcs = 100;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
@@ -4076,41 +5495,23 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args1, kNewEdsService1Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("grpc-foo-bin");
header_matcher1->set_present_match(true);
route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
- route2->mutable_match()->set_prefix("");
- auto* header_matcher2 = route2->mutable_match()->add_headers();
- header_matcher2->set_name("grpc-previous-rpc-attempts");
- header_matcher2->set_present_match(true);
- route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
@@ -4118,7 +5519,6 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
// Send headers which will mismatch each route
std::vector<std::pair<TString, TString>> metadata = {
{"grpc-foo-bin", "grpc-foo-bin"},
- {"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"},
};
WaitForAllBackends(0, 1);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
@@ -4126,8 +5526,7 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
// were mismatched.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
@@ -4144,19 +5543,17 @@ TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()
->mutable_runtime_fraction()
@@ -4174,15 +5571,19 @@ TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
const int matched_backend_count =
backends_[1]->backend_service()->request_count();
const double kErrorTolerance = 0.2;
- EXPECT_THAT(default_backend_count,
- ::testing::AllOf(
- ::testing::Ge(kNumRpcs * 75 / 100 * (1 - kErrorTolerance)),
- ::testing::Le(kNumRpcs * 75 / 100 * (1 + kErrorTolerance))));
- EXPECT_THAT(matched_backend_count,
- ::testing::AllOf(
- ::testing::Ge(kNumRpcs * 25 / 100 * (1 - kErrorTolerance)),
- ::testing::Le(kNumRpcs * 25 / 100 * (1 + kErrorTolerance))));
- const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_THAT(
+ default_backend_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumRpcs) * 75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumRpcs) * 75 / 100 *
+ (1 + kErrorTolerance))));
+ EXPECT_THAT(
+ matched_backend_count,
+ ::testing::AllOf(::testing::Ge(static_cast<double>(kNumRpcs) * 25 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(static_cast<double>(kNumRpcs) * 25 / 100 *
+ (1 + kErrorTolerance))));
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
@@ -4210,33 +5611,31 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) {
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
@@ -4283,7 +5682,7 @@ TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) {
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
+ const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
@@ -4299,19 +5698,17 @@ TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) {
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
+ RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster(kNewClusterName);
@@ -4352,6 +5749,454 @@ TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) {
EXPECT_EQ(1, backends_[1]->backend_service2()->request_count());
}
+// Test that we NACK unknown filter types in VirtualHost.
+TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(Listener());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("no filter registered for config type "
+ "envoy.config.listener.v3.Listener"));
+}
+
+// Test that we ignore optional unknown filter types in VirtualHost.
+TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.mutable_config()->PackFrom(Listener());
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK filters without configs in VirtualHost.
+TEST_P(LdsRdsTest, RejectsHttpFilterWithoutConfigInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"];
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we NACK filters without configs in FilterConfig in VirtualHost.
+TEST_P(LdsRdsTest, RejectsHttpFilterWithoutConfigInFilterConfigInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ ::envoy::config::route::v3::FilterConfig());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we ignore optional filters without configs in VirtualHost.
+TEST_P(LdsRdsTest, IgnoresOptionalHttpFilterWithoutConfigInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK unparseable filter types in VirtualHost.
+TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInVirtualHost) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config =
+ route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("router filter does not support config override"));
+}
+
+// Test that we NACK unknown filter types in Route.
+TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(Listener());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("no filter registered for config type "
+ "envoy.config.listener.v3.Listener"));
+}
+
+// Test that we ignore optional unknown filter types in Route.
+TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.mutable_config()->PackFrom(Listener());
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK filters without configs in Route.
+TEST_P(LdsRdsTest, RejectsHttpFilterWithoutConfigInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"];
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we NACK filters without configs in FilterConfig in Route.
+TEST_P(LdsRdsTest, RejectsHttpFilterWithoutConfigInFilterConfigInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ ::envoy::config::route::v3::FilterConfig());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we ignore optional filters without configs in Route.
+TEST_P(LdsRdsTest, IgnoresOptionalHttpFilterWithoutConfigInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK unparseable filter types in Route.
+TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInRoute) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* per_filter_config = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("router filter does not support config override"));
+}
+
+// Test that we NACK unknown filter types in ClusterWeight.
+TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(Listener());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("no filter registered for config type "
+ "envoy.config.listener.v3.Listener"));
+}
+
+// Test that we ignore optional unknown filter types in ClusterWeight.
+TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.mutable_config()->PackFrom(Listener());
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK filters without configs in ClusterWeight.
+TEST_P(LdsRdsTest, RejectsHttpFilterWithoutConfigInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"];
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we NACK filters without configs in FilterConfig in ClusterWeight.
+TEST_P(LdsRdsTest,
+ RejectsHttpFilterWithoutConfigInFilterConfigInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ ::envoy::config::route::v3::FilterConfig());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "no filter config specified for filter name unknown"));
+}
+
+// Test that we ignore optional filters without configs in ClusterWeight.
+TEST_P(LdsRdsTest, IgnoresOptionalHttpFilterWithoutConfigInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ ::envoy::config::route::v3::FilterConfig filter_config;
+ filter_config.set_is_optional(true);
+ (*per_filter_config)["unknown"].PackFrom(filter_config);
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ WaitForAllBackends();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Test that we NACK unparseable filter types in ClusterWeight.
+TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInClusterWeight) {
+ if (GetParam().use_v2()) return; // Filters supported in v3 only.
+ RouteConfiguration route_config = default_route_config_;
+ auto* cluster_weight = route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_weighted_clusters()
+ ->add_clusters();
+ cluster_weight->set_name(kDefaultClusterName);
+ cluster_weight->mutable_weight()->set_value(100);
+ auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
+ (*per_filter_config)["unknown"].PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ SetListenerAndRouteConfiguration(0, default_listener_, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Wait until xDS server sees NACK.
+ do {
+ CheckRpcSendFailure();
+ } while (RouteConfigurationResponseState(0).state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("router filter does not support config override"));
+}
+
using CdsTest = BasicTest;
// Tests that CDS client should send an ACK upon correct CDS response.
@@ -4363,64 +6208,2470 @@ TEST_P(CdsTest, Vanilla) {
AdsServiceImpl::ResponseState::ACKED);
}
+TEST_P(CdsTest, LogicalDNSClusterType) {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
+ "true");
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create Logical DNS Cluster
+ auto cluster = default_cluster_;
+ cluster.set_type(Cluster::LOGICAL_DNS);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Set Logical DNS result
+ {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_core::Resolver::Result result;
+ result.addresses = CreateAddressListFromPortList(GetBackendPorts(1, 2));
+ logical_dns_cluster_resolver_response_generator_->SetResponse(
+ std::move(result));
+ }
+ // Wait for traffic to go to backend 1.
+ WaitForBackend(1);
+ gpr_unsetenv(
+ "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
+}
+
+TEST_P(CdsTest, AggregateClusterType) {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
+ "true");
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Create Aggregate Cluster
+ auto cluster = default_cluster_;
+ CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
+ custom_cluster->set_name("envoy.clusters.aggregate");
+ ClusterConfig cluster_config;
+ cluster_config.add_clusters(kNewCluster1Name);
+ cluster_config.add_clusters(kNewCluster2Name);
+ custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Wait for traffic to go to backend 1.
+ WaitForBackend(1);
+ // Shutdown backend 1 and wait for all traffic to go to backend 2.
+ ShutdownBackend(1);
+ WaitForBackend(2);
+ EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+  // Bring backend 1 back and ensure all traffic goes back to it.
+ StartBackend(1);
+ WaitForBackend(1);
+ gpr_unsetenv(
+ "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
+}
+
+TEST_P(CdsTest, AggregateClusterEdsToLogicalDns) {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
+ "true");
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kLogicalDNSClusterName = "logical_dns_cluster";
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args1, kNewEdsService1Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = default_cluster_;
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ // Create Logical DNS Cluster
+ auto logical_dns_cluster = default_cluster_;
+ logical_dns_cluster.set_name(kLogicalDNSClusterName);
+ logical_dns_cluster.set_type(Cluster::LOGICAL_DNS);
+ balancers_[0]->ads_service()->SetCdsResource(logical_dns_cluster);
+ // Create Aggregate Cluster
+ auto cluster = default_cluster_;
+ CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
+ custom_cluster->set_name("envoy.clusters.aggregate");
+ ClusterConfig cluster_config;
+ cluster_config.add_clusters(kNewCluster1Name);
+ cluster_config.add_clusters(kLogicalDNSClusterName);
+ custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Set Logical DNS result
+ {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_core::Resolver::Result result;
+ result.addresses = CreateAddressListFromPortList(GetBackendPorts(2, 3));
+ logical_dns_cluster_resolver_response_generator_->SetResponse(
+ std::move(result));
+ }
+ // Wait for traffic to go to backend 1.
+ WaitForBackend(1);
+ // Shutdown backend 1 and wait for all traffic to go to backend 2.
+ ShutdownBackend(1);
+ WaitForBackend(2);
+ EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+ // Bring backend 1 back and ensure all traffic go back to it.
+ StartBackend(1);
+ WaitForBackend(1);
+ gpr_unsetenv(
+ "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
+}
+
+TEST_P(CdsTest, AggregateClusterLogicalDnsToEds) {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
+ "true");
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kLogicalDNSClusterName = "logical_dns_cluster";
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster2 = default_cluster_;
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Create Logical DNS Cluster
+ auto logical_dns_cluster = default_cluster_;
+ logical_dns_cluster.set_name(kLogicalDNSClusterName);
+ logical_dns_cluster.set_type(Cluster::LOGICAL_DNS);
+ balancers_[0]->ads_service()->SetCdsResource(logical_dns_cluster);
+ // Create Aggregate Cluster
+ auto cluster = default_cluster_;
+ CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
+ custom_cluster->set_name("envoy.clusters.aggregate");
+ ClusterConfig cluster_config;
+ cluster_config.add_clusters(kLogicalDNSClusterName);
+ cluster_config.add_clusters(kNewCluster2Name);
+ custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Set Logical DNS result
+ {
+ grpc_core::ExecCtx exec_ctx;
+ grpc_core::Resolver::Result result;
+ result.addresses = CreateAddressListFromPortList(GetBackendPorts(1, 2));
+ logical_dns_cluster_resolver_response_generator_->SetResponse(
+ std::move(result));
+ }
+ // Wait for traffic to go to backend 1.
+ WaitForBackend(1);
+ // Shutdown backend 1 and wait for all traffic to go to backend 2.
+ ShutdownBackend(1);
+ WaitForBackend(2);
+ EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+ // Bring backend 1 back and ensure all traffic go back to it.
+ StartBackend(1);
+ WaitForBackend(1);
+ gpr_unsetenv(
+ "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
+}
+
+// Test that CDS client should send a NACK if cluster type is Logical DNS but
+// the feature is not yet supported.
+TEST_P(CdsTest, LogicalDNSClusterTypeDisabled) {
+ auto cluster = default_cluster_;
+ cluster.set_type(Cluster::LOGICAL_DNS);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("DiscoveryType is not valid."));
+}
+
+// Test that CDS client should send a NACK if cluster type is AGGREGATE but
+// the feature is not yet supported.
+TEST_P(CdsTest, AggregateClusterTypeDisabled) {
+ auto cluster = default_cluster_;
+ CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
+ custom_cluster->set_name("envoy.clusters.aggregate");
+ ClusterConfig cluster_config;
+ cluster_config.add_clusters("cluster1");
+ cluster_config.add_clusters("cluster2");
+ custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
+ cluster.set_type(Cluster::LOGICAL_DNS);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("DiscoveryType is not valid."));
+}
+
// Tests that CDS client should send a NACK if the cluster type in CDS response
-// is other than EDS.
-TEST_P(CdsTest, WrongClusterType) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
+// is unsupported.
+TEST_P(CdsTest, UnsupportedClusterType) {
+ auto cluster = default_cluster_;
cluster.set_type(Cluster::STATIC);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "DiscoveryType is not EDS.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("DiscoveryType is not valid."));
+}
+
+// Tests that the NACK for multiple bad resources includes both errors.
+TEST_P(CdsTest, MultipleBadResources) {
+ constexpr char kClusterName2[] = "cluster_name_2";
+ // Use unsupported type for default cluster.
+ auto cluster = default_cluster_;
+ cluster.set_type(Cluster::STATIC);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Add second cluster with the same error.
+ cluster.set_name(kClusterName2);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Change RouteConfig to point to both clusters.
+ RouteConfiguration route_config = default_route_config_;
+ auto* route = route_config.mutable_virtual_hosts(0)->add_routes();
+ route->mutable_match()->set_prefix("");
+ route->mutable_route()->set_cluster(kClusterName2);
+ SetRouteConfiguration(0, route_config);
+ // Send RPC.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::AllOf(
+ ::testing::HasSubstr(y_absl::StrCat(
+ kDefaultClusterName, ": DiscoveryType is not valid.")),
+ ::testing::HasSubstr(y_absl::StrCat(
+ kClusterName2, ": DiscoveryType is not valid."))));
}
// Tests that CDS client should send a NACK if the eds_config in CDS response is
// other than ADS.
TEST_P(CdsTest, WrongEdsConfig) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
+ auto cluster = default_cluster_;
cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "EDS ConfigSource is not ADS.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("EDS ConfigSource is not ADS."));
}
// Tests that CDS client should send a NACK if the lb_policy in CDS response is
// other than ROUND_ROBIN.
TEST_P(CdsTest, WrongLbPolicy) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
+ auto cluster = default_cluster_;
cluster.set_lb_policy(Cluster::LEAST_REQUEST);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "LB policy is not ROUND_ROBIN.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("LB policy is not supported."));
}
// Tests that CDS client should send a NACK if the lrs_server in CDS response is
// other than SELF.
TEST_P(CdsTest, WrongLrsServer) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
+ auto cluster = default_cluster_;
cluster.mutable_lrs_server()->mutable_ads();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("LRS ConfigSource is not self."));
+}
+
+class XdsSecurityTest : public BasicTest {
+ protected:
+ static void SetUpTestCase() {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", "true");
+ BasicTest::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ BasicTest::TearDownTestCase();
+ gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT");
+ }
+
+ void SetUp() override {
+ BasicTest::SetUp();
+ root_cert_ = ReadFile(kCaCertPath);
+ bad_root_cert_ = ReadFile(kBadClientCertPath);
+ identity_pair_ = ReadTlsIdentityPair(kClientKeyPath, kClientCertPath);
+ // TODO(yashykt): Use different client certs here instead of reusing server
+ // certs after https://github.com/grpc/grpc/pull/24876 is merged
+ fallback_identity_pair_ =
+ ReadTlsIdentityPair(kServerKeyPath, kServerCertPath);
+ bad_identity_pair_ =
+ ReadTlsIdentityPair(kBadClientKeyPath, kBadClientCertPath);
+ server_san_exact_.set_exact("*.test.google.fr");
+ server_san_prefix_.set_prefix("waterzooi.test.google");
+ server_san_suffix_.set_suffix("google.fr");
+ server_san_contains_.set_contains("google");
+ server_san_regex_.mutable_safe_regex()->mutable_google_re2();
+ server_san_regex_.mutable_safe_regex()->set_regex(
+ "(foo|waterzooi).test.google.(fr|be)");
+ bad_san_1_.set_exact("192.168.1.4");
+ bad_san_2_.set_exact("foo.test.google.in");
+ authenticated_identity_ = {"testclient"};
+ fallback_authenticated_identity_ = {"*.test.google.fr",
+ "waterzooi.test.google.be",
+ "*.test.youtube.com", "192.168.1.3"};
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolutionForLbChannelAllBalancers();
+ }
+
+ void TearDown() override {
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+ BasicTest::TearDown();
+ }
+
+ // Sends CDS updates with the new security configuration and verifies that
+ // after propagation, this new configuration is used for connections. If \a
+ // identity_instance_name and \a root_instance_name are both empty,
+ // connections are expected to use fallback credentials.
+ void UpdateAndVerifyXdsSecurityConfiguration(
+ y_absl::string_view root_instance_name,
+ y_absl::string_view root_certificate_name,
+ y_absl::string_view identity_instance_name,
+ y_absl::string_view identity_certificate_name,
+ const std::vector<StringMatcher>& san_matchers,
+ const std::vector<TString>& expected_authenticated_identity,
+ bool test_expects_failure = false) {
+ auto cluster = default_cluster_;
+ if (!identity_instance_name.empty() || !root_instance_name.empty()) {
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ if (!identity_instance_name.empty()) {
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_instance_name(TString(identity_instance_name));
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_certificate_name(TString(identity_certificate_name));
+ }
+ if (!root_instance_name.empty()) {
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_instance_name(TString(root_instance_name));
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_certificate_name(TString(root_certificate_name));
+ }
+ if (!san_matchers.empty()) {
+ auto* validation_context =
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_default_validation_context();
+ for (const auto& san_matcher : san_matchers) {
+ *validation_context->add_match_subject_alt_names() = san_matcher;
+ }
+ }
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ }
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // The updates might take time to have an effect, so use a retry loop.
+ constexpr int kRetryCount = 100;
+ int num_tries = 0;
+ for (; num_tries < kRetryCount; num_tries++) {
+ // Give some time for the updates to propagate.
+ gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
+ if (test_expects_failure) {
+ // Restart the servers to force a reconnection so that previously
+ // connected subchannels are not used for the RPC.
+ ShutdownBackend(0);
+ StartBackend(0);
+ if (SendRpc().ok()) {
+ gpr_log(GPR_ERROR, "RPC succeeded. Failure expected. Trying again.");
+ continue;
+ }
+ } else {
+ WaitForBackend(0);
+ Status status = SendRpc();
+ if (!status.ok()) {
+ gpr_log(GPR_ERROR, "RPC failed. code=%d message=%s Trying again.",
+ status.error_code(), status.error_message().c_str());
+ continue;
+ }
+ if (backends_[0]->backend_service()->last_peer_identity() !=
+ expected_authenticated_identity) {
+ gpr_log(
+ GPR_ERROR,
+ "Expected client identity does not match. (actual) %s vs "
+ "(expected) %s Trying again.",
+ y_absl::StrJoin(
+ backends_[0]->backend_service()->last_peer_identity(), ",")
+ .c_str(),
+ y_absl::StrJoin(expected_authenticated_identity, ",").c_str());
+ continue;
+ }
+ }
+ break;
+ }
+ EXPECT_LT(num_tries, kRetryCount);
+ }
+
+ TString root_cert_;
+ TString bad_root_cert_;
+ grpc_core::PemKeyCertPairList identity_pair_;
+ grpc_core::PemKeyCertPairList fallback_identity_pair_;
+ grpc_core::PemKeyCertPairList bad_identity_pair_;
+ StringMatcher server_san_exact_;
+ StringMatcher server_san_prefix_;
+ StringMatcher server_san_suffix_;
+ StringMatcher server_san_contains_;
+ StringMatcher server_san_regex_;
+ StringMatcher bad_san_1_;
+ StringMatcher bad_san_2_;
+ std::vector<TString> authenticated_identity_;
+ std::vector<TString> fallback_authenticated_identity_;
+};
+
+TEST_P(XdsSecurityTest,
+ TLSConfigurationWithoutValidationContextCertificateProviderInstance) {
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "TLS configuration provided but no "
+ "validation_context_certificate_provider_instance found."));
+}
+
+TEST_P(
+ XdsSecurityTest,
+ MatchSubjectAltNamesProvidedWithoutValidationContextCertificateProviderInstance) {
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ auto* validation_context = upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_default_validation_context();
+ *validation_context->add_match_subject_alt_names() = server_san_exact_;
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "TLS configuration provided but no "
+ "validation_context_certificate_provider_instance found."));
+}
+
+TEST_P(
+ XdsSecurityTest,
+ TlsCertificateCertificateProviderInstanceWithoutValidationContextCertificateProviderInstance) {
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_instance_name(TString("instance_name"));
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure();
+ const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "LRS ConfigSource is not self.");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "TLS configuration provided but no "
+ "validation_context_certificate_provider_instance found."));
+}
+
+TEST_P(XdsSecurityTest, RegexSanMatcherDoesNotAllowIgnoreCase) {
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_instance_name(TString("fake_plugin1"));
+ auto* validation_context = upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_default_validation_context();
+ StringMatcher matcher;
+ matcher.mutable_safe_regex()->mutable_google_re2();
+ matcher.mutable_safe_regex()->set_regex(
+ "(foo|waterzooi).test.google.(fr|be)");
+ matcher.set_ignore_case(true);
+ *validation_context->add_match_subject_alt_names() = matcher;
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure();
+ const auto response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr(
+ "StringMatcher: ignore_case has no effect for SAFE_REGEX."));
+}
+
+TEST_P(XdsSecurityTest, UnknownRootCertificateProvider) {
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_instance_name("unknown");
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure(1, RpcOptions(), StatusCode::UNAVAILABLE);
+}
+
+TEST_P(XdsSecurityTest, UnknownIdentityCertificateProvider) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ auto cluster = default_cluster_;
+ auto* transport_socket = cluster.mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ UpstreamTlsContext upstream_tls_context;
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_instance_name("unknown");
+ upstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_instance_name("fake_plugin1");
+ transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ CheckRpcSendFailure(1, RpcOptions(), StatusCode::UNAVAILABLE);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithNoSanMatchers) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {}, authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithExactSanMatcher) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithPrefixSanMatcher) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_prefix_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithSuffixSanMatcher) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_suffix_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithContainsSanMatcher) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_contains_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRegexSanMatcher) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_regex_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithSanMatchersUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "fake_plugin1", "",
+ {server_san_exact_, server_san_prefix_}, authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {bad_san_1_, bad_san_2_}, {},
+ true /* failure */);
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "fake_plugin1", "",
+ {server_san_prefix_, server_san_regex_}, authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRootPluginUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ FakeCertificateProvider::CertDataMap fake2_cert_map = {
+ {"", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake2_cert_data_map = &fake2_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin2" /* bad root */, "",
+ "fake_plugin1", "", {}, {},
+ true /* failure */);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithIdentityPluginUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ FakeCertificateProvider::CertDataMap fake2_cert_map = {
+ {"", {root_cert_, fallback_identity_pair_}}};
+ g_fake2_cert_data_map = &fake2_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin2",
+ "", {server_san_exact_},
+ fallback_authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithBothPluginsUpdated) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ FakeCertificateProvider::CertDataMap fake2_cert_map = {
+ {"", {bad_root_cert_, bad_identity_pair_}},
+ {"good", {root_cert_, fallback_identity_pair_}}};
+ g_fake2_cert_data_map = &fake2_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin2", "", "fake_plugin2",
+ "", {}, {}, true /* failure */);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_prefix_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin2", "good", "fake_plugin2", "good", {server_san_prefix_},
+ fallback_authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRootCertificateNameUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}},
+ {"bad", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_regex_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "fake_plugin1",
+ "", {server_san_regex_}, {},
+ true /* failure */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest,
+ TestMtlsConfigurationWithIdentityCertificateNameUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}},
+ {"bad", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "bad", {server_san_exact_}, {},
+ true /* failure */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest,
+ TestMtlsConfigurationWithIdentityCertificateNameUpdateGoodCerts) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}},
+ {"good", {root_cert_, fallback_identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "good", {server_san_exact_},
+ fallback_authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsConfigurationWithBothCertificateNamesUpdated) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}},
+ {"bad", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "fake_plugin1",
+ "bad", {server_san_prefix_}, {},
+ true /* failure */);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_prefix_},
+ authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestTlsConfigurationWithNoSanMatchers) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "", {},
+ {} /* unauthenticated */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestTlsConfigurationWithSanMatchers) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "", "",
+ {server_san_exact_, server_san_prefix_, server_san_regex_},
+ {} /* unauthenticated */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestTlsConfigurationWithSanMatchersUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "", "", {server_san_exact_, server_san_prefix_},
+ {} /* unauthenticated */);
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "", "", {bad_san_1_, bad_san_2_},
+ {} /* unauthenticated */, true /* failure */);
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin1", "", "", "", {server_san_prefix_, server_san_regex_},
+ {} /* unauthenticated */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestTlsConfigurationWithRootCertificateNameUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}},
+ {"bad", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
+ {server_san_exact_},
+ {} /* unauthenticated */);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "", "",
+ {server_san_exact_}, {},
+ true /* failure */);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestTlsConfigurationWithRootPluginUpdate) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ FakeCertificateProvider::CertDataMap fake2_cert_map = {
+ {"", {bad_root_cert_, bad_identity_pair_}}};
+ g_fake2_cert_data_map = &fake2_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
+ {server_san_exact_},
+ {} /* unauthenticated */);
+ UpdateAndVerifyXdsSecurityConfiguration(
+ "fake_plugin2", "", "", "", {server_san_exact_}, {}, true /* failure */);
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestFallbackConfiguration) {
+ UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
+ fallback_authenticated_identity_);
+ g_fake1_cert_data_map = nullptr;
+}
+
+TEST_P(XdsSecurityTest, TestMtlsToTls) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
+ "", {server_san_exact_},
+ authenticated_identity_);
+ UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
+ {server_san_exact_},
+ {} /* unauthenticated */);
+ g_fake1_cert_data_map = nullptr;
+}
+
// Verifies a dynamic switch from mTLS to the fallback (non-xDS) credentials.
TEST_P(XdsSecurityTest, TestMtlsToFallback) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
                                          "", {server_san_exact_},
                                          authenticated_identity_);
  UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
                                          fallback_authenticated_identity_);
  g_fake1_cert_data_map = nullptr;
}
+
// Verifies a dynamic upgrade from plain TLS to mTLS via an xDS config update.
TEST_P(XdsSecurityTest, TestTlsToMtls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
                                          {server_san_exact_},
                                          {} /* unauthenticated */);
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
                                          "", {server_san_exact_},
                                          authenticated_identity_);
  g_fake1_cert_data_map = nullptr;
}
+
// Verifies a dynamic switch from plain TLS to the fallback credentials.
TEST_P(XdsSecurityTest, TestTlsToFallback) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
                                          {server_san_exact_},
                                          {} /* unauthenticated */);
  UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
                                          fallback_authenticated_identity_);
  g_fake1_cert_data_map = nullptr;
}
+
// Verifies a dynamic switch from the fallback credentials to mTLS.
TEST_P(XdsSecurityTest, TestFallbackToMtls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
                                          fallback_authenticated_identity_);
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
                                          "", {server_san_exact_},
                                          authenticated_identity_);
  g_fake1_cert_data_map = nullptr;
}
+
// Verifies a dynamic switch from the fallback credentials to plain TLS.
TEST_P(XdsSecurityTest, TestFallbackToTls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
                                          fallback_authenticated_identity_);
  UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
                                          {server_san_exact_},
                                          {} /* unauthenticated */);
  g_fake1_cert_data_map = nullptr;
}
+
// Verifies mTLS using the built-in file_watcher certificate provider plugin
// instead of the fake test plugins.
TEST_P(XdsSecurityTest, TestFileWatcherCertificateProvider) {
  UpdateAndVerifyXdsSecurityConfiguration("file_plugin", "", "file_plugin", "",
                                          {server_san_exact_},
                                          authenticated_identity_);
}
+
// Fixture that starts one xDS-enabled server backend and installs a default
// EDS resource pointing at it, so individual tests only vary the LDS resource.
class XdsEnabledServerTest : public XdsEnd2endTest {
 protected:
  XdsEnabledServerTest()
      : XdsEnd2endTest(1, 1, 100, true /* use_xds_enabled_server */) {}

  void SetUp() override {
    XdsEnd2endTest::SetUp();
    // Single locality containing only backend 0.
    AdsServiceImpl::EdsResourceArgs args({
        {"locality0", GetBackendPorts(0, 1)},
    });
    balancers_[0]->ads_service()->SetEdsResource(
        BuildEdsResource(args, DefaultEdsServiceName()));
    SetNextResolution({});
    SetNextResolutionForLbChannelAllBalancers();
  }
};
+
// Sanity check: a well-formed server Listener (address + HttpConnectionManager
// filter) is accepted and traffic reaches the backend.
TEST_P(XdsEnabledServerTest, Basic) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  balancers_[0]->ads_service()->SetLdsResource(listener);
  WaitForBackend(0);
}
+
// A Listener with neither an address nor an ApiListener must be NACKed.
TEST_P(XdsEnabledServerTest, BadLdsUpdateNoApiListenerNorAddress) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  balancers_[0]->ads_service()->SetLdsResource(listener);
  // Keep issuing failing RPCs until the server has processed (and rejected)
  // the update, i.e. until the response state moves past SENT.
  do {
    CheckRpcSendFailure();
  } while (balancers_[0]->ads_service()->lds_response_state().state ==
           AdsServiceImpl::ResponseState::SENT);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(
      response_state.error_message,
      ::testing::HasSubstr("Listener has neither address nor ApiListener"));
}
+
// A Listener carrying both an address and an ApiListener must be NACKed.
TEST_P(XdsEnabledServerTest, BadLdsUpdateBothApiListenerAndAddress) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  // NOTE(review): this first SetLdsResource installs the listener before the
  // address/ApiListener are populated — presumably to seed an initial
  // resource; confirm it is intentional.
  balancers_[0]->ads_service()->SetLdsResource(listener);
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  auto* filter_chain = listener.add_filter_chains();
  filter_chain->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  // Adding the ApiListener alongside the address makes the resource invalid.
  listener.mutable_api_listener();
  balancers_[0]->ads_service()->SetLdsResource(listener);
  do {
    CheckRpcSendFailure();
  } while (balancers_[0]->ads_service()->lds_response_state().state ==
           AdsServiceImpl::ResponseState::SENT);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(
      response_state.error_message,
      ::testing::HasSubstr("Listener has both address and ApiListener"));
}
+
+TEST_P(XdsEnabledServerTest, UnsupportedL4Filter) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ listener.mutable_address()->mutable_socket_address()->set_address(
+ ipv6_only_ ? "::1" : "127.0.0.1");
+ listener.mutable_address()->mutable_socket_address()->set_port_value(
+ backends_[0]->port());
+ listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(default_listener_ /* any proto object other than HttpConnectionManager */);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("Unsupported filter type"));
+}
+
// An HTTP filter whose config type has no registered filter implementation
// must be NACKed.
TEST_P(XdsEnabledServerTest, UnsupportedHttpFilter) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  HttpConnectionManager http_connection_manager;
  auto* http_filter = http_connection_manager.add_http_filters();
  http_filter->set_name("grpc.testing.unsupported_http_filter");
  http_filter->mutable_typed_config()->set_type_url(
      "grpc.testing.unsupported_http_filter");
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      http_connection_manager);
  balancers_[0]->ads_service()->SetLdsResource(listener);
  // NOTE(review): this re-sends the same listener under the [::1] resource
  // name as well — presumably to cover the IPv6 listener; confirm.
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
                     backends_[0]->port()));
  balancers_[0]->ads_service()->SetLdsResource(listener);
  do {
    CheckRpcSendFailure();
  } while (balancers_[0]->ads_service()->lds_response_state().state ==
           AdsServiceImpl::ResponseState::SENT);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(response_state.error_message,
              ::testing::HasSubstr("no filter registered for config type "
                                   "grpc.testing.unsupported_http_filter"));
}
+
// An HTTP filter that is registered but only supported on the client side
// must be NACKed when it appears in a server Listener.
TEST_P(XdsEnabledServerTest, HttpFilterNotSupportedOnServer) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  HttpConnectionManager http_connection_manager;
  auto* http_filter = http_connection_manager.add_http_filters();
  http_filter->set_name("grpc.testing.client_only_http_filter");
  http_filter->mutable_typed_config()->set_type_url(
      "grpc.testing.client_only_http_filter");
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      http_connection_manager);
  balancers_[0]->ads_service()->SetLdsResource(listener);
  // Also install the same listener under the [::1] resource name.
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
                     backends_[0]->port()));
  balancers_[0]->ads_service()->SetLdsResource(listener);
  do {
    CheckRpcSendFailure();
  } while (balancers_[0]->ads_service()->lds_response_state().state ==
           AdsServiceImpl::ResponseState::SENT);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(
      response_state.error_message,
      ::testing::HasSubstr("Filter grpc.testing.client_only_http_filter is not "
                           "supported on servers"));
}
+
// A client-only HTTP filter marked is_optional must be skipped (not NACKed)
// in a server Listener, and the resource ACKed.
TEST_P(XdsEnabledServerTest,
       HttpFilterNotSupportedOnServerIgnoredWhenOptional) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  HttpConnectionManager http_connection_manager;
  auto* http_filter = http_connection_manager.add_http_filters();
  http_filter->set_name("grpc.testing.client_only_http_filter");
  http_filter->mutable_typed_config()->set_type_url(
      "grpc.testing.client_only_http_filter");
  // Optional filters that are unsupported are ignored rather than rejected.
  http_filter->set_is_optional(true);
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      http_connection_manager);
  balancers_[0]->ads_service()->SetLdsResource(listener);
  // Also install the same listener under the [::1] resource name.
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
                     backends_[0]->port()));
  balancers_[0]->ads_service()->SetLdsResource(listener);
  WaitForBackend(0);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
+
// Verify that a mismatch of listening address results in "not serving" status.
TEST_P(XdsEnabledServerTest, ListenerAddressMismatch) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  balancers_[0]->ads_service()->SetLdsResource(listener);
  WaitForBackend(0);
  // Set a different listening address in the LDS update
  listener.mutable_address()->mutable_socket_address()->set_address(
      "192.168.1.1");
  balancers_[0]->ads_service()->SetLdsResource(listener);
  // The server should transition to FAILED_PRECONDITION for this address.
  backends_[0]->notifier()->WaitOnServingStatusChange(
      y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()),
      grpc::StatusCode::FAILED_PRECONDITION);
}
+
// A Listener with use_original_dst=true must be NACKed, since gRPC servers do
// not support that Envoy feature.
TEST_P(XdsEnabledServerTest, UseOriginalDstNotSupported) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  balancers_[0]->ads_service()->SetLdsResource(listener);
  listener.mutable_address()->mutable_socket_address()->set_address(
      ipv6_only_ ? "::1" : "127.0.0.1");
  listener.mutable_address()->mutable_socket_address()->set_port_value(
      backends_[0]->port());
  listener.add_filter_chains()->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  listener.mutable_use_original_dst()->set_value(true);
  balancers_[0]->ads_service()->SetLdsResource(listener);
  do {
    CheckRpcSendFailure();
  } while (balancers_[0]->ads_service()->lds_response_state().state ==
           AdsServiceImpl::ResponseState::SENT);
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(
      response_state.error_message,
      ::testing::HasSubstr("Field \'use_original_dst\' is not supported."));
}
+
+class XdsServerSecurityTest : public XdsEnd2endTest {
+ protected:
+ XdsServerSecurityTest()
+ : XdsEnd2endTest(1, 1, 100, true /* use_xds_enabled_server */) {}
+
+ static void SetUpTestCase() {
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", "true");
+ XdsEnd2endTest::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ XdsEnd2endTest::TearDownTestCase();
+ gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT");
+ }
+
+ void SetUp() override {
+ XdsEnd2endTest::SetUp();
+ root_cert_ = ReadFile(kCaCertPath);
+ bad_root_cert_ = ReadFile(kBadClientCertPath);
+ identity_pair_ = ReadTlsIdentityPair(kServerKeyPath, kServerCertPath);
+ bad_identity_pair_ =
+ ReadTlsIdentityPair(kBadClientKeyPath, kBadClientCertPath);
+ identity_pair_2_ = ReadTlsIdentityPair(kClientKeyPath, kClientCertPath);
+ server_authenticated_identity_ = {"*.test.google.fr",
+ "waterzooi.test.google.be",
+ "*.test.youtube.com", "192.168.1.3"};
+ server_authenticated_identity_2_ = {"testclient"};
+ client_authenticated_identity_ = {"*.test.google.fr",
+ "waterzooi.test.google.be",
+ "*.test.youtube.com", "192.168.1.3"};
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ }
+
+ void TearDown() override {
+ g_fake1_cert_data_map = nullptr;
+ g_fake2_cert_data_map = nullptr;
+ XdsEnd2endTest::TearDown();
+ }
+
+ void SetLdsUpdate(y_absl::string_view root_instance_name,
+ y_absl::string_view root_certificate_name,
+ y_absl::string_view identity_instance_name,
+ y_absl::string_view identity_certificate_name,
+ bool require_client_certificates) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=127.0.0.1:",
+ backends_[0]->port()));
+ listener.mutable_address()->mutable_socket_address()->set_address(
+ "127.0.0.1");
+ listener.mutable_address()->mutable_socket_address()->set_port_value(
+ backends_[0]->port());
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ if (!identity_instance_name.empty()) {
+ auto* transport_socket = filter_chain->mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ DownstreamTlsContext downstream_tls_context;
+ downstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_instance_name(TString(identity_instance_name));
+ downstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_certificate_name(TString(identity_certificate_name));
+ if (!root_instance_name.empty()) {
+ downstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_instance_name(TString(root_instance_name));
+ downstream_tls_context.mutable_common_tls_context()
+ ->mutable_combined_validation_context()
+ ->mutable_validation_context_certificate_provider_instance()
+ ->set_certificate_name(TString(root_certificate_name));
+ downstream_tls_context.mutable_require_client_certificate()->set_value(
+ require_client_certificates);
+ }
+ transport_socket->mutable_typed_config()->PackFrom(
+ downstream_tls_context);
+ }
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
+ backends_[0]->port()));
+ listener.mutable_address()->mutable_socket_address()->set_address("[::1]");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ }
+
+ std::shared_ptr<grpc::Channel> CreateMtlsChannel() {
+ ChannelArguments args;
+ // Override target name for host name check
+ args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
+ ipv6_only_ ? "::1" : "127.0.0.1");
+ args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
+ TString uri = y_absl::StrCat(
+ ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
+ // TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
+ grpc_tls_credentials_options* options =
+ grpc_tls_credentials_options_create();
+ grpc_tls_credentials_options_set_server_verification_option(
+ options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
+ grpc_tls_credentials_options_set_certificate_provider(
+ options,
+ grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
+ ReadFile(kCaCertPath),
+ ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
+ .get());
+ grpc_tls_credentials_options_watch_root_certs(options);
+ grpc_tls_credentials_options_watch_identity_key_cert_pairs(options);
+ grpc_tls_server_authorization_check_config* check_config =
+ grpc_tls_server_authorization_check_config_create(
+ nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
+ grpc_tls_credentials_options_set_server_authorization_check_config(
+ options, check_config);
+ auto channel_creds = std::make_shared<SecureChannelCredentials>(
+ grpc_tls_credentials_create(options));
+ grpc_tls_server_authorization_check_config_release(check_config);
+ return CreateCustomChannel(uri, channel_creds, args);
+ }
+
+ std::shared_ptr<grpc::Channel> CreateTlsChannel() {
+ ChannelArguments args;
+ // Override target name for host name check
+ args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
+ ipv6_only_ ? "::1" : "127.0.0.1");
+ args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
+ TString uri = y_absl::StrCat(
+ ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
+ // TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
+ grpc_tls_credentials_options* options =
+ grpc_tls_credentials_options_create();
+ grpc_tls_credentials_options_set_server_verification_option(
+ options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
+ grpc_tls_credentials_options_set_certificate_provider(
+ options,
+ grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
+ ReadFile(kCaCertPath),
+ ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
+ .get());
+ grpc_tls_credentials_options_watch_root_certs(options);
+ grpc_tls_server_authorization_check_config* check_config =
+ grpc_tls_server_authorization_check_config_create(
+ nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
+ grpc_tls_credentials_options_set_server_authorization_check_config(
+ options, check_config);
+ auto channel_creds = std::make_shared<SecureChannelCredentials>(
+ grpc_tls_credentials_create(options));
+ grpc_tls_server_authorization_check_config_release(check_config);
+ return CreateCustomChannel(uri, channel_creds, args);
+ }
+
+ std::shared_ptr<grpc::Channel> CreateInsecureChannel() {
+ ChannelArguments args;
+ // Override target name for host name check
+ args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
+ ipv6_only_ ? "::1" : "127.0.0.1");
+ args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
+ TString uri = y_absl::StrCat(
+ ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
+ return CreateCustomChannel(uri, InsecureChannelCredentials(), args);
+ }
+
+ void SendRpc(std::function<std::shared_ptr<grpc::Channel>()> channel_creator,
+ std::vector<TString> expected_server_identity,
+ std::vector<TString> expected_client_identity,
+ bool test_expects_failure = false) {
+ gpr_log(GPR_INFO, "Sending RPC");
+ int num_tries = 0;
+ constexpr int kRetryCount = 10;
+ for (; num_tries < kRetryCount; num_tries++) {
+ auto channel = channel_creator();
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext context;
+ context.set_wait_for_ready(true);
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(2000));
+ EchoRequest request;
+ request.set_message(kRequestMessage);
+ EchoResponse response;
+ Status status = stub->Echo(&context, request, &response);
+ if (test_expects_failure) {
+ if (status.ok()) {
+ gpr_log(GPR_ERROR, "RPC succeeded. Failure expected. Trying again.");
+ continue;
+ }
+ } else {
+ if (!status.ok()) {
+ gpr_log(GPR_ERROR, "RPC failed. code=%d message=%s Trying again.",
+ status.error_code(), status.error_message().c_str());
+ continue;
+ }
+ EXPECT_EQ(response.message(), kRequestMessage);
+ std::vector<TString> peer_identity;
+ for (const auto& entry : context.auth_context()->GetPeerIdentity()) {
+ peer_identity.emplace_back(
+ TString(entry.data(), entry.size()).c_str());
+ }
+ if (peer_identity != expected_server_identity) {
+ gpr_log(GPR_ERROR,
+ "Expected server identity does not match. (actual) %s vs "
+ "(expected) %s Trying again.",
+ y_absl::StrJoin(peer_identity, ",").c_str(),
+ y_absl::StrJoin(expected_server_identity, ",").c_str());
+ continue;
+ }
+ if (backends_[0]->backend_service()->last_peer_identity() !=
+ expected_client_identity) {
+ gpr_log(
+ GPR_ERROR,
+ "Expected client identity does not match. (actual) %s vs "
+ "(expected) %s Trying again.",
+ y_absl::StrJoin(
+ backends_[0]->backend_service()->last_peer_identity(), ",")
+ .c_str(),
+ y_absl::StrJoin(expected_client_identity, ",").c_str());
+ continue;
+ }
+ }
+ break;
+ }
+ EXPECT_LT(num_tries, kRetryCount);
+ }
+
+ TString root_cert_;
+ TString bad_root_cert_;
+ grpc_core::PemKeyCertPairList identity_pair_;
+ grpc_core::PemKeyCertPairList bad_identity_pair_;
+ grpc_core::PemKeyCertPairList identity_pair_2_;
+ std::vector<TString> server_authenticated_identity_;
+ std::vector<TString> server_authenticated_identity_2_;
+ std::vector<TString> client_authenticated_identity_;
+};
+
// A DownstreamTlsContext without a certificate provider instance must be
// NACKed.
TEST_P(XdsServerSecurityTest, TlsConfigurationWithoutRootProviderInstance) {
  Listener listener;
  listener.set_name(
      y_absl::StrCat("grpc/server?xds.resource.listening_address=",
                     ipv6_only_ ? "[::1]:" : "127.0.0.1:",
                     backends_[0]->port()));
  balancers_[0]->ads_service()->SetLdsResource(listener);
  auto* socket_address = listener.mutable_address()->mutable_socket_address();
  socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
  socket_address->set_port_value(backends_[0]->port());
  auto* filter_chain = listener.add_filter_chains();
  filter_chain->add_filters()->mutable_typed_config()->PackFrom(
      HttpConnectionManager());
  auto* transport_socket = filter_chain->mutable_transport_socket();
  transport_socket->set_name("envoy.transport_sockets.tls");
  // Empty TLS context: no tls_certificate_certificate_provider_instance.
  DownstreamTlsContext downstream_tls_context;
  transport_socket->mutable_typed_config()->PackFrom(downstream_tls_context);
  balancers_[0]->ads_service()->SetLdsResource(listener);
  CheckRpcSendFailure(1, RpcOptions().set_wait_for_ready(true));
  const auto response_state =
      balancers_[0]->ads_service()->lds_response_state();
  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
  EXPECT_THAT(response_state.error_message,
              ::testing::HasSubstr(
                  "TLS configuration provided but no "
                  "tls_certificate_certificate_provider_instance found."));
}
+
// An identity provider instance name with no registered plugin must make
// RPCs fail.
TEST_P(XdsServerSecurityTest, UnknownIdentityCertificateProvider) {
  SetLdsUpdate("", "", "unknown", "", false);
  SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
          true /* test_expects_failure */);
}
+
+TEST_P(XdsServerSecurityTest, UnknownRootCertificateProvider) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ SetLdsUpdate("unknown", "", "fake_plugin1", "", false);
+ SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
// Basic server-side mTLS: both peers authenticate successfully.
TEST_P(XdsServerSecurityTest, TestMtls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
}
+
// Switching the server's root provider to a plugin serving a bad root cert
// must make subsequent mTLS handshakes fail.
TEST_P(XdsServerSecurityTest, TestMtlsWithRootPluginUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  FakeCertificateProvider::CertDataMap fake2_cert_map = {
      {"", {bad_root_cert_, bad_identity_pair_}}};
  g_fake2_cert_data_map = &fake2_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin2", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
          true /* test_expects_failure */);
}
+
// Switching the server's identity provider changes the identity the client
// sees, while mTLS keeps working.
TEST_P(XdsServerSecurityTest, TestMtlsWithIdentityPluginUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  FakeCertificateProvider::CertDataMap fake2_cert_map = {
      {"", {root_cert_, identity_pair_2_}}};
  g_fake2_cert_data_map = &fake2_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin1", "", "fake_plugin2", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_2_, client_authenticated_identity_);
}
+
// Exercises updating root and identity providers together: bad plugin2
// default entry fails, plugin1 works, then plugin2's "good" entry works and
// yields the second identity.
TEST_P(XdsServerSecurityTest, TestMtlsWithBothPluginsUpdated) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  FakeCertificateProvider::CertDataMap fake2_cert_map = {
      {"good", {root_cert_, identity_pair_2_}},
      {"", {bad_root_cert_, bad_identity_pair_}}};
  g_fake2_cert_data_map = &fake2_cert_map;
  SetLdsUpdate("fake_plugin2", "", "fake_plugin2", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
          true /* test_expects_failure */);
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin2", "good", "fake_plugin2", "good", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_2_, client_authenticated_identity_);
}
+
// Updating only the root certificate name to the "bad" entry breaks mTLS.
TEST_P(XdsServerSecurityTest, TestMtlsWithRootCertificateNameUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}},
      {"bad", {bad_root_cert_, bad_identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin1", "bad", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
          true /* test_expects_failure */);
}
+
// Updating only the identity certificate name switches the server identity.
TEST_P(XdsServerSecurityTest, TestMtlsWithIdentityCertificateNameUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}},
      {"good", {root_cert_, identity_pair_2_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "good", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_2_, client_authenticated_identity_);
}
+
// Updating root and identity certificate names together still works and
// switches the server identity.
TEST_P(XdsServerSecurityTest, TestMtlsWithBothCertificateNamesUpdated) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}},
      {"good", {root_cert_, identity_pair_2_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
  SetLdsUpdate("fake_plugin1", "good", "fake_plugin1", "good", true);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_2_, client_authenticated_identity_);
}
+
// Server does not require client certs, but the client provides them anyway:
// the client identity is still authenticated.
TEST_P(XdsServerSecurityTest, TestMtlsNotRequiringButProvidingClientCerts) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateMtlsChannel(); },
          server_authenticated_identity_, client_authenticated_identity_);
}
+
// Server does not require client certs and the client provides none: the RPC
// succeeds with an unauthenticated client.
TEST_P(XdsServerSecurityTest, TestMtlsNotRequiringAndNotProvidingClientCerts) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_, {});
}
+
// Basic server-side TLS (no client cert validation configured).
TEST_P(XdsServerSecurityTest, TestTls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_, {});
}
+
// Switching the identity provider plugin under TLS switches the server
// identity the client sees.
TEST_P(XdsServerSecurityTest, TestTlsWithIdentityPluginUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  FakeCertificateProvider::CertDataMap fake2_cert_map = {
      {"", {root_cert_, identity_pair_2_}}};
  g_fake2_cert_data_map = &fake2_cert_map;
  SetLdsUpdate("", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_, {});
  SetLdsUpdate("", "", "fake_plugin2", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_2_, {});
}
+
// Switching the identity certificate name under TLS switches the server
// identity the client sees.
TEST_P(XdsServerSecurityTest, TestTlsWithIdentityCertificateNameUpdate) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}},
      {"good", {root_cert_, identity_pair_2_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_, {});
  SetLdsUpdate("", "", "fake_plugin1", "good", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_2_, {});
}
+
// With no TLS configured in the Listener, the server serves plaintext.
TEST_P(XdsServerSecurityTest, TestFallback) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("", "", "", "", false);
  SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
}
+
// Server downgrade mTLS -> TLS: a TLS-only client fails while client certs
// are required, then succeeds after the downgrade.
TEST_P(XdsServerSecurityTest, TestMtlsToTls) {
  FakeCertificateProvider::CertDataMap fake1_cert_map = {
      {"", {root_cert_, identity_pair_}}};
  g_fake1_cert_data_map = &fake1_cert_map;
  SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
  SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
          true /* test_expects_failure */);
  SetLdsUpdate("", "", "fake_plugin1", "", false);
  SendRpc([this]() { return CreateTlsChannel(); },
          server_authenticated_identity_, {});
}
+
+TEST_P(XdsServerSecurityTest, TestTlsToMtls) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ SetLdsUpdate("", "", "fake_plugin1", "", false);
+ SendRpc([this]() { return CreateTlsChannel(); },
+ server_authenticated_identity_, {});
+ SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
+ SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsServerSecurityTest, TestMtlsToFallback) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
+ SendRpc([this]() { return CreateMtlsChannel(); },
+ server_authenticated_identity_, client_authenticated_identity_);
+ SetLdsUpdate("", "", "", "", false);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerSecurityTest, TestFallbackToMtls) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ SetLdsUpdate("", "", "", "", false);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
+ SendRpc([this]() { return CreateMtlsChannel(); },
+ server_authenticated_identity_, client_authenticated_identity_);
+}
+
+TEST_P(XdsServerSecurityTest, TestTlsToFallback) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ SetLdsUpdate("", "", "fake_plugin1", "", false);
+ SendRpc([this]() { return CreateTlsChannel(); },
+ server_authenticated_identity_, {});
+ SetLdsUpdate("", "", "", "", false);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerSecurityTest, TestFallbackToTls) {
+ FakeCertificateProvider::CertDataMap fake1_cert_map = {
+ {"", {root_cert_, identity_pair_}}};
+ g_fake1_cert_data_map = &fake1_cert_map;
+ SetLdsUpdate("", "", "", "", false);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ SetLdsUpdate("", "", "fake_plugin1", "", false);
+ SendRpc([this]() { return CreateTlsChannel(); },
+ server_authenticated_identity_, {});
+}
+
+class XdsEnabledServerStatusNotificationTest : public XdsServerSecurityTest {
+ protected:
+ void SetValidLdsUpdate() { SetLdsUpdate("", "", "", "", false); }
+
+ void SetInvalidLdsUpdate() {
+ Listener listener;
+ listener.set_name(y_absl::StrCat(
+ "grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ }
+
+ void UnsetLdsUpdate() {
+ balancers_[0]->ads_service()->UnsetResource(
+ kLdsTypeUrl, y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:",
+ backends_[0]->port()));
+ }
+};
+
+TEST_P(XdsEnabledServerStatusNotificationTest, ServingStatus) {
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsEnabledServerStatusNotificationTest, NotServingStatus) {
+ SetInvalidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::UNAVAILABLE);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsEnabledServerStatusNotificationTest, ErrorUpdateWhenAlreadyServing) {
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ // Invalid update does not lead to a change in the serving status.
+ SetInvalidLdsUpdate();
+ do {
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsEnabledServerStatusNotificationTest,
+ NotServingStatusToServingStatusTransition) {
+ SetInvalidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::UNAVAILABLE);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+ // Send a valid LDS update to change to serving status
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+// This test verifies that the resource getting deleted when already serving
+// results in future connections being dropped.
+TEST_P(XdsEnabledServerStatusNotificationTest,
+ ServingStatusToNonServingStatusTransition) {
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ // Deleting the resource should result in a non-serving status.
+ UnsetLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::NOT_FOUND);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsEnabledServerStatusNotificationTest, RepeatedServingStatusChanges) {
+ for (int i = 0; i < 5; i++) {
+ // Send a valid LDS update to get the server to start listening
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:",
+ backends_[0]->port()),
+ grpc::StatusCode::OK);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+ // Deleting the resource will make the server start rejecting connections
+ UnsetLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:",
+ backends_[0]->port()),
+ grpc::StatusCode::NOT_FOUND);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+ }
+}
+
+TEST_P(XdsEnabledServerStatusNotificationTest, ExistingRpcsOnResourceDeletion) {
+ // Send a valid LDS update to get the server to start listening
+ SetValidLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::OK);
+ constexpr int kNumChannels = 10;
+ struct StreamingRpc {
+ std::shared_ptr<Channel> channel;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub;
+ ClientContext context;
+ std::unique_ptr<ClientWriter<EchoRequest>> writer;
+ } streaming_rpcs[kNumChannels];
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ for (int i = 0; i < kNumChannels; i++) {
+ streaming_rpcs[i].channel = CreateInsecureChannel();
+ streaming_rpcs[i].stub =
+ grpc::testing::EchoTestService::NewStub(streaming_rpcs[i].channel);
+ streaming_rpcs[i].context.set_wait_for_ready(true);
+ streaming_rpcs[i].writer = streaming_rpcs[i].stub->RequestStream(
+ &streaming_rpcs[i].context, &response);
+ EXPECT_TRUE(streaming_rpcs[i].writer->Write(request));
+ }
+ // Deleting the resource will make the server start rejecting connections
+ UnsetLdsUpdate();
+ backends_[0]->notifier()->WaitOnServingStatusChange(
+ y_absl::StrCat(ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()),
+ grpc::StatusCode::NOT_FOUND);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+ for (int i = 0; i < kNumChannels; i++) {
+ EXPECT_TRUE(streaming_rpcs[i].writer->Write(request));
+ EXPECT_TRUE(streaming_rpcs[i].writer->WritesDone());
+ EXPECT_TRUE(streaming_rpcs[i].writer->Finish().ok());
+ // New RPCs on the existing channels should fail.
+ ClientContext new_context;
+ new_context.set_deadline(grpc_timeout_milliseconds_to_deadline(1000));
+ EXPECT_FALSE(
+ streaming_rpcs[i].stub->Echo(&new_context, request, &response).ok());
+ }
+}
+
+using XdsServerFilterChainMatchTest = XdsServerSecurityTest;
+
+TEST_P(XdsServerFilterChainMatchTest,
+ DefaultFilterChainUsedWhenNoFilterChainMentioned) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ listener.mutable_default_filter_chain()
+ ->add_filters()
+ ->mutable_typed_config()
+ ->PackFrom(HttpConnectionManager());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ DefaultFilterChainUsedWhenOtherFilterChainsDontMatch) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add a filter chain that will never get matched
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()
+ ->mutable_destination_port()
+ ->set_value(8080);
+ // Add default filter chain that should get used
+ listener.mutable_default_filter_chain()
+ ->add_filters()
+ ->mutable_typed_config()
+ ->PackFrom(HttpConnectionManager());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithDestinationPortDontMatch) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with destination port that should never get matched
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()
+ ->mutable_destination_port()
+ ->set_value(8080);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // RPC should fail since no matching filter chain was found and no default
+ // filter chain is configured.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsServerFilterChainMatchTest, FilterChainsWithServerNamesDontMatch) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with server name that should never get matched
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_server_names("server_name");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // RPC should fail since no matching filter chain was found and no default
+ // filter chain is configured.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithTransportProtocolsOtherThanRawBufferDontMatch) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with transport protocol "tls" that should never match
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_transport_protocol("tls");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // RPC should fail since no matching filter chain was found and no default
+ // filter chain is configured.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithApplicationProtocolsDontMatch) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with application protocol that should never get matched
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_application_protocols("h2");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // RPC should fail since no matching filter chain was found and no default
+ // filter chain is configured.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {},
+ true /* test_expects_failure */);
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithTransportProtocolRawBufferIsPreferred) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with "raw_buffer" transport protocol
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_transport_protocol(
+ "raw_buffer");
+ // Add another filter chain with no transport protocol set but application
+ // protocol set (fails match)
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_application_protocols("h2");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // A successful RPC proves that filter chains that mention "raw_buffer" as the
+ // transport protocol are chosen as the best match in the round.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithMoreSpecificDestinationPrefixRangesArePreferred) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with prefix range (length 4 and 16) but with server name
+ // mentioned. (Prefix range is matched first.)
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ auto* prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(4);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(16);
+ filter_chain->mutable_filter_chain_match()->add_server_names("server_name");
+ // Add filter chain with two prefix ranges (length 8 and 24). Since 24 is the
+ // highest match, it should be chosen.
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(8);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(24);
+ // Add another filter chain with a non-matching prefix range (with length 30)
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix("192.168.1.1");
+ prefix_range->mutable_prefix_len()->set_value(30);
+ filter_chain->mutable_filter_chain_match()->add_server_names("server_name");
+ // Add another filter chain with no prefix range mentioned
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_server_names("server_name");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // A successful RPC proves that the filter chain with the longest matching
+ // prefix range was the best match.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsThatMentionSourceTypeArePreferred) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with the local source type (best match)
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::SAME_IP_OR_LOOPBACK);
+ // Add filter chain with the external source type but bad source port.
+ // Note that backends_[0]->port() will never be a match for the source port
+ // because it is already being used by a backend.
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::EXTERNAL);
+ filter_chain->mutable_filter_chain_match()->add_source_ports(
+ backends_[0]->port());
+ // Add filter chain with the default source type (ANY) but bad source port.
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_source_ports(
+ backends_[0]->port());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // A successful RPC proves that the filter chain with the matching local
+ // source type (SAME_IP_OR_LOOPBACK) was the best match.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithMoreSpecificSourcePrefixRangesArePreferred) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with source prefix ranges (length 4 and 16) but with a
+ // bad source port mentioned. (Prefix range is matched first.)
+ // Note that backends_[0]->port() will never be a match for the source port
+ // because it is already being used by a backend.
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ auto* source_prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ source_prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ source_prefix_range->mutable_prefix_len()->set_value(4);
+ source_prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ source_prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ source_prefix_range->mutable_prefix_len()->set_value(16);
+ filter_chain->mutable_filter_chain_match()->add_source_ports(
+ backends_[0]->port());
+ // Add filter chain with two source prefix ranges (length 8 and 24). Since 24
+ // is the highest match, it should be chosen.
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ source_prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ source_prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ source_prefix_range->mutable_prefix_len()->set_value(8);
+ source_prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ source_prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ source_prefix_range->mutable_prefix_len()->set_value(24);
+ // Add another filter chain with a non-matching source prefix range (with
+ // length 30) and bad source port
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ source_prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ source_prefix_range->set_address_prefix("192.168.1.1");
+ source_prefix_range->mutable_prefix_len()->set_value(30);
+ filter_chain->mutable_filter_chain_match()->add_source_ports(
+ backends_[0]->port());
+ // Add another filter chain with no source prefix range mentioned and bad
+ // source port
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_source_ports(
+ backends_[0]->port());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // A successful RPC proves that the filter chain with the longest matching
+ // source prefix range was the best match.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ FilterChainsWithMoreSpecificSourcePortArePreferred) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ // Since we don't know which port will be used by the channel, just add all
+ // ports except for 0.
+ for (int i = 1; i < 65536; i++) {
+ filter_chain->mutable_filter_chain_match()->add_source_ports(i);
+ }
+ // Add another filter chain with no source prefix range mentioned with a bad
+ // DownstreamTlsContext configuration.
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ auto* transport_socket = filter_chain->mutable_transport_socket();
+ transport_socket->set_name("envoy.transport_sockets.tls");
+ DownstreamTlsContext downstream_tls_context;
+ downstream_tls_context.mutable_common_tls_context()
+ ->mutable_tls_certificate_certificate_provider_instance()
+ ->set_instance_name("unknown");
+ transport_socket->mutable_typed_config()->PackFrom(downstream_tls_context);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // A successful RPC proves that the filter chain with matching source port
+ // was chosen.
+ SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
+}
+
+TEST_P(XdsServerFilterChainMatchTest, DuplicateMatchNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ // Add a duplicate filter chain
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "Duplicate matching rules detected when adding filter chain: {}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest, DuplicateMatchOnPrefixRangesNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with prefix range
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ auto* prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(16);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(24);
+ // Add a filter chain with a duplicate prefix range entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(16);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(32);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "Duplicate matching rules detected when adding filter chain: "
+ "{prefix_ranges={{address_prefix=127.0.0.0:0, prefix_len=16}, "
+ "{address_prefix=127.0.0.1:0, prefix_len=32}}}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest, DuplicateMatchOnTransportProtocolNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with "raw_buffer" transport protocol
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_transport_protocol(
+ "raw_buffer");
+ // Add a duplicate filter chain with the same "raw_buffer" transport protocol
+ // entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_transport_protocol(
+ "raw_buffer");
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Duplicate matching rules detected when adding "
+ "filter chain: {transport_protocol=raw_buffer}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest, DuplicateMatchOnLocalSourceTypeNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with the local source type
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::SAME_IP_OR_LOOPBACK);
+ // Add a duplicate filter chain with the same local source type entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::SAME_IP_OR_LOOPBACK);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Duplicate matching rules detected when adding "
+ "filter chain: {source_type=SAME_IP_OR_LOOPBACK}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ DuplicateMatchOnExternalSourceTypeNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with the external source type
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::EXTERNAL);
+ // Add a duplicate filter chain with the same external source type entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->set_source_type(
+ FilterChainMatch::EXTERNAL);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Duplicate matching rules detected when adding "
+ "filter chain: {source_type=EXTERNAL}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest,
+ DuplicateMatchOnSourcePrefixRangesNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with source prefix range
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ auto* prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(16);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(24);
+ // Add a filter chain with a duplicate source prefix range entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(16);
+ prefix_range =
+ filter_chain->mutable_filter_chain_match()->add_source_prefix_ranges();
+ prefix_range->set_address_prefix(ipv6_only_ ? "[::1]" : "127.0.0.1");
+ prefix_range->mutable_prefix_len()->set_value(32);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr(
+ "Duplicate matching rules detected when adding filter chain: "
+ "{source_prefix_ranges={{address_prefix=127.0.0.0:0, prefix_len=16}, "
+ "{address_prefix=127.0.0.1:0, prefix_len=32}}}"));
+}
+
+TEST_P(XdsServerFilterChainMatchTest, DuplicateMatchOnSourcePortNacked) {
+ Listener listener;
+ listener.set_name(
+ y_absl::StrCat("grpc/server?xds.resource.listening_address=",
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
+ auto* socket_address = listener.mutable_address()->mutable_socket_address();
+ socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
+ socket_address->set_port_value(backends_[0]->port());
+ // Add filter chain with a source port
+ auto* filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_source_ports(8080);
+ // Add a duplicate filter chain with the same source port entry
+ filter_chain = listener.add_filter_chains();
+ filter_chain->add_filters()->mutable_typed_config()->PackFrom(
+ HttpConnectionManager());
+ filter_chain->mutable_filter_chain_match()->add_source_ports(8080);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ do {
+ CheckRpcSendFailure();
+ } while (balancers_[0]->ads_service()->lds_response_state().state ==
+ AdsServiceImpl::ResponseState::SENT);
+ const auto response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_THAT(
+ response_state.error_message,
+ ::testing::HasSubstr("Duplicate matching rules detected when adding "
+ "filter chain: {source_ports={8080}}"));
}
using EdsTest = BasicTest;
@@ -4433,14 +8684,13 @@ TEST_P(EdsTest, NacksSparsePriorityList) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
CheckRpcSendFailure();
- const auto& response_state =
+ const auto response_state =
balancers_[0]->ads_service()->eds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "EDS update includes sparse priority list");
+ EXPECT_THAT(response_state.error_message,
+ ::testing::HasSubstr("sparse priority list"));
}
// In most of our tests, we use different names for different resource
@@ -4453,8 +8703,8 @@ TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, kDefaultClusterName));
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ BuildEdsResource(args, kDefaultClusterName));
+ Cluster cluster = default_cluster_;
cluster.mutable_eds_cluster_config()->clear_service_name();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
@@ -4521,7 +8771,7 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) {
{"locality1", GetBackendPorts(1, 2), kLocalityWeight1},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for both backends to be ready.
WaitForAllBackends(0, 2);
// Send kNumRpcs RPCs.
@@ -4555,7 +8805,7 @@ TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) {
{"locality1", {}},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for both backends to be ready.
WaitForAllBackends();
// Send kNumRpcs RPCs.
@@ -4576,7 +8826,7 @@ TEST_P(LocalityMapTest, NoLocalities) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource({}, DefaultEdsServiceName()));
+ BuildEdsResource({}, DefaultEdsServiceName()));
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
@@ -4598,15 +8848,14 @@ TEST_P(LocalityMapTest, StressTest) {
args.locality_list.emplace_back(std::move(locality));
}
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// The second ADS response contains 1 locality, which contains backend 1.
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(1, 2)},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
- 60 * 1000));
+ BuildEdsResource(args, DefaultEdsServiceName()), 60 * 1000));
// Wait until backend 0 is ready, before which kNumLocalities localities are
// received and handled by the xds policy.
WaitForBackend(0, /*reset_counters=*/false);
@@ -4628,6 +8877,7 @@ TEST_P(LocalityMapTest, UpdateMap) {
const double kTotalLocalityWeight0 =
std::accumulate(kLocalityWeights0.begin(), kLocalityWeights0.end(), 0);
std::vector<double> locality_weight_rate_0;
+ locality_weight_rate_0.reserve(kLocalityWeights0.size());
for (int weight : kLocalityWeights0) {
locality_weight_rate_0.push_back(weight / kTotalLocalityWeight0);
}
@@ -4647,7 +8897,7 @@ TEST_P(LocalityMapTest, UpdateMap) {
{"locality2", GetBackendPorts(2, 3), 4},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for the first 3 backends to be ready.
WaitForAllBackends(0, 3);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -4678,7 +8928,7 @@ TEST_P(LocalityMapTest, UpdateMap) {
{"locality3", GetBackendPorts(3, 4), 6},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Backend 3 hasn't received any request.
EXPECT_EQ(0U, backends_[3]->backend_service()->request_count());
// Wait until the locality update has been processed, as signaled by backend 3
@@ -4718,13 +8968,13 @@ TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) {
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality1", GetBackendPorts(1, 2)},
});
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 5000));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ BuildEdsResource(args, DefaultEdsServiceName()), 5000));
// Wait for the first backend to be ready.
WaitForBackend(0);
// Keep sending RPCs until we switch over to backend 1, which tells us
@@ -4753,7 +9003,7 @@ TEST_P(FailoverTest, ChooseHighestPriority) {
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
@@ -4771,7 +9021,7 @@ TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) {
{"locality3", {}, kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(0, false);
for (size_t i = 1; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
@@ -4787,7 +9037,7 @@ TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) {
{"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for all backends to be used.
std::tuple<int, int, int> counts = WaitForAllBackends();
// Make sure no RPCs failed in the transition.
@@ -4808,7 +9058,7 @@ TEST_P(FailoverTest, Failover) {
ShutdownBackend(3);
ShutdownBackend(0);
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
@@ -4828,10 +9078,11 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) {
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ WaitForBackend(3);
ShutdownBackend(3);
ShutdownBackend(0);
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
@@ -4853,7 +9104,7 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) {
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
@@ -4862,9 +9113,9 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) {
});
ShutdownBackend(0);
ShutdownBackend(1);
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ BuildEdsResource(args, DefaultEdsServiceName()), 1000));
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(500, GPR_TIMESPAN));
// Send 0.5 second worth of RPCs.
@@ -4892,16 +9143,16 @@ TEST_P(FailoverTest, UpdatePriority) {
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 2},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 0},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 1},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 3},
});
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ BuildEdsResource(args, DefaultEdsServiceName()), 1000));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
@@ -4925,7 +9176,7 @@ TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) {
{"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Second update:
// - Priority 0 contains both localities 0 and 1.
// - Priority 1 is not present.
@@ -4935,9 +9186,9 @@ TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) {
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0},
});
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ BuildEdsResource(args, DefaultEdsServiceName()), 1000));
// When we get the first update, all backends in priority 0 are down,
// so we will create priority 1. Backends 1 and 2 should have traffic,
// but backend 3 should not.
@@ -4971,7 +9222,7 @@ TEST_P(DropTest, Vanilla) {
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
@@ -5011,7 +9262,7 @@ TEST_P(DropTest, DropPerHundred) {
args.drop_categories = {{kLbDropType, kDropPerHundredForLb}};
args.drop_denominator = FractionalPercent::HUNDRED;
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
@@ -5050,7 +9301,7 @@ TEST_P(DropTest, DropPerTenThousand) {
args.drop_categories = {{kLbDropType, kDropPerTenThousandForLb}};
args.drop_denominator = FractionalPercent::TEN_THOUSAND;
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
@@ -5092,7 +9343,7 @@ TEST_P(DropTest, Update) {
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb}};
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
@@ -5123,7 +9374,7 @@ TEST_P(DropTest, Update) {
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the drop rate increases to the middle of the two configs, which
// implies that the update has been in effect.
const double kDropRateThreshold =
@@ -5181,7 +9432,7 @@ TEST_P(DropTest, DropAll) {
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Send kNumRpcs RPCs and all of them are dropped.
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
@@ -5205,12 +9456,12 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
@@ -5268,12 +9519,12 @@ TEST_P(BalancerUpdateTest, Repeated) {
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
@@ -5338,12 +9589,12 @@ TEST_P(BalancerUpdateTest, DeadUpdate) {
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
@@ -5413,14 +9664,6 @@ TEST_P(BalancerUpdateTest, DeadUpdate) {
<< balancers_[2]->ads_service()->eds_response_state().error_message;
}
-// The re-resolution tests are deferred because they rely on the fallback mode,
-// which hasn't been supported.
-
-// TODO(juanlishen): Add TEST_P(BalancerUpdateTest, ReresolveDeadBackend).
-
-// TODO(juanlishen): Add TEST_P(UpdatesWithClientLoadReportingTest,
-// ReresolveDeadBalancer)
-
class ClientLoadReportingTest : public XdsEnd2endTest {
public:
ClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {}
@@ -5428,7 +9671,7 @@ class ClientLoadReportingTest : public XdsEnd2endTest {
// Tests that the load report received at the balancer is correct.
TEST_P(ClientLoadReportingTest, Vanilla) {
- if (!GetParam().use_xds_resolver()) {
+ if (GetParam().use_fake_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
@@ -5441,7 +9684,7 @@ TEST_P(ClientLoadReportingTest, Vanilla) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5488,7 +9731,7 @@ TEST_P(ClientLoadReportingTest, SendAllClusters) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5533,7 +9776,7 @@ TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5558,7 +9801,7 @@ TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
// Tests that if the balancer restarts, the client load report contains the
// stats before and after the restart correctly.
TEST_P(ClientLoadReportingTest, BalancerRestart) {
- if (!GetParam().use_xds_resolver()) {
+ if (GetParam().use_fake_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
@@ -5570,7 +9813,7 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) {
{"locality0", GetBackendPorts(0, kNumBackendsFirstPass)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends returned by the balancer are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5608,7 +9851,7 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) {
{"locality0", GetBackendPorts(kNumBackendsFirstPass)},
});
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for queries to start going to one of the new backends.
// This tells us that we're now using the new serverlist.
std::tie(num_ok, num_failure, num_drops) =
@@ -5634,7 +9877,7 @@ class ClientLoadReportingWithDropTest : public XdsEnd2endTest {
// Tests that the drop stats are correctly reported by client load reporting.
TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
- if (!GetParam().use_xds_resolver()) {
+ if (GetParam().use_fake_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
@@ -5653,7 +9896,7 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ BuildEdsResource(args, DefaultEdsServiceName()));
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
@@ -5706,117 +9949,1375 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
kDropRateForThrottle * (1 + kErrorTolerance))));
}
+class FaultInjectionTest : public XdsEnd2endTest {
+ public:
+ FaultInjectionTest() : XdsEnd2endTest(1, 1) {}
+
+  // Builds a Listener with Fault Injection filter config. If http_fault is
+  // not provided, an empty filter config is assigned. This filter config is
+  // required to enable the fault injection features.
+ static Listener BuildListenerWithFaultInjection(
+ const HTTPFault& http_fault = HTTPFault()) {
+ HttpConnectionManager http_connection_manager;
+ Listener listener;
+ listener.set_name(kServerName);
+ HttpFilter* fault_filter = http_connection_manager.add_http_filters();
+ fault_filter->set_name("envoy.fault");
+ fault_filter->mutable_typed_config()->PackFrom(http_fault);
+ HttpFilter* router_filter = http_connection_manager.add_http_filters();
+ router_filter->set_name("router");
+ router_filter->mutable_typed_config()->PackFrom(
+ envoy::extensions::filters::http::router::v3::Router());
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ return listener;
+ }
+
+ RouteConfiguration BuildRouteConfigurationWithFaultInjection(
+ const HTTPFault& http_fault) {
+ // Package as Any
+ google::protobuf::Any filter_config;
+ filter_config.PackFrom(http_fault);
+ // Plug into the RouteConfiguration
+ RouteConfiguration new_route_config = default_route_config_;
+ auto* config_map = new_route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_typed_per_filter_config();
+ (*config_map)["envoy.fault"] = std::move(filter_config);
+ return new_route_config;
+ }
+
+ void SetFilterConfig(HTTPFault& http_fault) {
+ switch (GetParam().filter_config_setup()) {
+ case TestType::FilterConfigSetup::kRouteOverride: {
+ Listener listener = BuildListenerWithFaultInjection();
+ RouteConfiguration route =
+ BuildRouteConfigurationWithFaultInjection(http_fault);
+ SetListenerAndRouteConfiguration(0, listener, route);
+ break;
+ }
+ case TestType::FilterConfigSetup::kHTTPConnectionManagerOriginal: {
+ Listener listener = BuildListenerWithFaultInjection(http_fault);
+ SetListenerAndRouteConfiguration(0, listener, default_route_config_);
+ }
+ };
+ }
+};
+
+// Test to ensure the most basic fault injection config works.
+TEST_P(FaultInjectionTest, XdsFaultInjectionAlwaysAbort) {
+ const uint32_t kAbortPercentagePerHundred = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* abort_percentage = http_fault.mutable_abort()->mutable_percentage();
+ abort_percentage->set_numerator(kAbortPercentagePerHundred);
+ abort_percentage->set_denominator(FractionalPercent::HUNDRED);
+ http_fault.mutable_abort()->set_grpc_status(
+ static_cast<uint32_t>(StatusCode::ABORTED));
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Fire several RPCs, and expect all of them to be aborted.
+ CheckRpcSendFailure(5, RpcOptions().set_wait_for_ready(true),
+ StatusCode::ABORTED);
+}
+
+// Without the listener config, the fault injection won't be enabled.
+TEST_P(FaultInjectionTest, XdsFaultInjectionWithoutListenerFilter) {
+ const uint32_t kAbortPercentagePerHundred = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* abort_percentage = http_fault.mutable_abort()->mutable_percentage();
+ abort_percentage->set_numerator(kAbortPercentagePerHundred);
+ abort_percentage->set_denominator(FractionalPercent::HUNDRED);
+ http_fault.mutable_abort()->set_grpc_status(
+ static_cast<uint32_t>(StatusCode::ABORTED));
+ // Turn on fault injection
+ RouteConfiguration route =
+ BuildRouteConfigurationWithFaultInjection(http_fault);
+ SetListenerAndRouteConfiguration(0, default_listener_, route);
+  // Fire several RPCs, and expect all of them to pass.
+ CheckRpcSendOk(5, RpcOptions().set_wait_for_ready(true));
+}
+
+TEST_P(FaultInjectionTest, XdsFaultInjectionPercentageAbort) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kAbortPercentagePerHundred = 50;
+ const double kAbortRate = kAbortPercentagePerHundred / 100.0;
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* abort_percentage = http_fault.mutable_abort()->mutable_percentage();
+ abort_percentage->set_numerator(kAbortPercentagePerHundred);
+ abort_percentage->set_denominator(FractionalPercent::HUNDRED);
+ http_fault.mutable_abort()->set_grpc_status(
+ static_cast<uint32_t>(StatusCode::ABORTED));
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the aborts.
+ int num_total = 0, num_ok = 0, num_failure = 0, num_aborted = 0;
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_aborted,
+ RpcOptions(), "Fault injected");
+ }
+ EXPECT_EQ(kNumRpcs, num_total);
+ EXPECT_EQ(0, num_failure);
+ // The abort rate should be roughly equal to the expectation.
+ const double seen_abort_rate = static_cast<double>(num_aborted) / kNumRpcs;
+ EXPECT_THAT(seen_abort_rate,
+ ::testing::AllOf(::testing::Ge(kAbortRate - kErrorTolerance),
+ ::testing::Le(kAbortRate + kErrorTolerance)));
+}
+
+TEST_P(FaultInjectionTest, XdsFaultInjectionPercentageAbortViaHeaders) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kAbortPercentageCap = 100;
+ const uint32_t kAbortPercentage = 50;
+ const double kAbortRate = kAbortPercentage / 100.0;
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ http_fault.mutable_abort()->mutable_header_abort();
+ http_fault.mutable_abort()->mutable_percentage()->set_numerator(
+ kAbortPercentageCap);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the aborts.
+ std::vector<std::pair<TString, TString>> metadata = {
+ {"x-envoy-fault-abort-grpc-request", "10"},
+ {"x-envoy-fault-abort-percentage", ToString(kAbortPercentage)},
+ };
+ int num_total = 0, num_ok = 0, num_failure = 0, num_aborted = 0;
+ RpcOptions options = RpcOptions().set_metadata(metadata);
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_aborted, options,
+ "Fault injected");
+ }
+ EXPECT_EQ(kNumRpcs, num_total);
+ EXPECT_EQ(0, num_failure);
+ // The abort rate should be roughly equal to the expectation.
+ const double seen_abort_rate = static_cast<double>(num_aborted) / kNumRpcs;
+ EXPECT_THAT(seen_abort_rate,
+ ::testing::AllOf(::testing::Ge(kAbortRate - kErrorTolerance),
+ ::testing::Le(kAbortRate + kErrorTolerance)));
+}
+
+// TODO(lidiz) reduce the error tolerance to a lower level without dramatically
+// increasing the duration of fault injection tests.
+TEST_P(FaultInjectionTest, XdsFaultInjectionPercentageDelay) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kFixedDelaySeconds = 100;
+ const uint32_t kRpcTimeoutMilliseconds = 10; // 10 ms
+ const uint32_t kDelayPercentagePerHundred = 95;
+ const double kDelayRate = kDelayPercentagePerHundred / 100.0;
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* delay_percentage = http_fault.mutable_delay()->mutable_percentage();
+ delay_percentage->set_numerator(kDelayPercentagePerHundred);
+ delay_percentage->set_denominator(FractionalPercent::HUNDRED);
+ auto* fixed_delay = http_fault.mutable_delay()->mutable_fixed_delay();
+ fixed_delay->set_seconds(kFixedDelaySeconds);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the delays.
+ int num_total = 0, num_ok = 0, num_delayed = 0, num_dropped = 0;
+ RpcOptions options = RpcOptions()
+ .set_timeout_ms(kRpcTimeoutMilliseconds)
+ .set_skip_cancelled_check(true);
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ SendRpcAndCount(&num_total, &num_ok, &num_delayed, &num_dropped, options);
+ }
+ EXPECT_EQ(kNumRpcs, num_total);
+ EXPECT_EQ(0, num_dropped);
+ // The delay rate should be roughly equal to the expectation.
+ const double seen_delay_rate = static_cast<double>(num_delayed) / kNumRpcs;
+ EXPECT_THAT(seen_delay_rate,
+ ::testing::AllOf(::testing::Ge(kDelayRate - kErrorTolerance),
+ ::testing::Le(kDelayRate + kErrorTolerance)));
+}
+
+TEST_P(FaultInjectionTest, XdsFaultInjectionPercentageDelayViaHeaders) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kFixedDelayMilliseconds = 100000; // 100 seconds
+ const uint32_t kRpcTimeoutMilliseconds = 10; // 10 ms
+ const uint32_t kDelayPercentageCap = 100;
+ const uint32_t kDelayPercentage = 50;
+ const double kDelayRate = kDelayPercentage / 100.0;
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ http_fault.mutable_delay()->mutable_header_delay();
+ http_fault.mutable_delay()->mutable_percentage()->set_numerator(
+ kDelayPercentageCap);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the delays.
+ std::vector<std::pair<TString, TString>> metadata = {
+ {"x-envoy-fault-delay-request", ToString(kFixedDelayMilliseconds)},
+ {"x-envoy-fault-delay-request-percentage",
+ ToString(kDelayPercentage)},
+ };
+ int num_total = 0, num_ok = 0, num_delayed = 0, num_dropped = 0;
+ RpcOptions options = RpcOptions()
+ .set_metadata(metadata)
+ .set_timeout_ms(kRpcTimeoutMilliseconds)
+ .set_skip_cancelled_check(true);
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ SendRpcAndCount(&num_total, &num_ok, &num_delayed, &num_dropped, options);
+ }
+ // The delay rate should be roughly equal to the expectation.
+ const double seen_delay_rate = static_cast<double>(num_delayed) / kNumRpcs;
+ EXPECT_THAT(seen_delay_rate,
+ ::testing::AllOf(::testing::Ge(kDelayRate - kErrorTolerance),
+ ::testing::Le(kDelayRate + kErrorTolerance)));
+}
+
+TEST_P(FaultInjectionTest, XdsFaultInjectionAlwaysDelayPercentageAbort) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kAbortPercentagePerHundred = 50;
+ const double kAbortRate = kAbortPercentagePerHundred / 100.0;
+ const uint32_t kFixedDelayNanos = 10 * 1000 * 1000; // 10 ms
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* abort_percentage = http_fault.mutable_abort()->mutable_percentage();
+ abort_percentage->set_numerator(kAbortPercentagePerHundred);
+ abort_percentage->set_denominator(FractionalPercent::HUNDRED);
+ http_fault.mutable_abort()->set_grpc_status(
+ static_cast<uint32_t>(StatusCode::ABORTED));
+ auto* delay_percentage = http_fault.mutable_delay()->mutable_percentage();
+ delay_percentage->set_numerator(1000000); // Always inject DELAY!
+ delay_percentage->set_denominator(FractionalPercent::MILLION);
+ auto* fixed_delay = http_fault.mutable_delay()->mutable_fixed_delay();
+ fixed_delay->set_nanos(kFixedDelayNanos);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the aborts.
+ int num_total = 0, num_ok = 0, num_failure = 0, num_aborted = 0;
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ grpc_millis t0 = NowFromCycleCounter();
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_aborted,
+ RpcOptions(), "Fault injected");
+ grpc_millis t1 = NowFromCycleCounter();
+ EXPECT_GE(t1, t0 + kFixedDelayNanos / 1000 / 1000);
+ }
+ EXPECT_EQ(kNumRpcs, num_total);
+ EXPECT_EQ(0, num_failure);
+ // The abort rate should be roughly equal to the expectation.
+ const double seen_abort_rate = static_cast<double>(num_aborted) / kNumRpcs;
+ EXPECT_THAT(seen_abort_rate,
+ ::testing::AllOf(::testing::Ge(kAbortRate - kErrorTolerance),
+ ::testing::Le(kAbortRate + kErrorTolerance)));
+}
+
+// This test and the above test apply different denominators to delay and abort.
+// This ensures that we are using the right denominator for each injected fault
+// in our code.
+TEST_P(FaultInjectionTest,
+ XdsFaultInjectionAlwaysDelayPercentageAbortSwitchDenominator) {
+ const size_t kNumRpcs = 100;
+ const uint32_t kAbortPercentagePerMillion = 500000;
+ const double kAbortRate = kAbortPercentagePerMillion / 1000000.0;
+ const uint32_t kFixedDelayNanos = 10 * 1000 * 1000; // 10 ms
+ const double kErrorTolerance = 0.2;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* abort_percentage = http_fault.mutable_abort()->mutable_percentage();
+ abort_percentage->set_numerator(kAbortPercentagePerMillion);
+ abort_percentage->set_denominator(FractionalPercent::MILLION);
+ http_fault.mutable_abort()->set_grpc_status(
+ static_cast<uint32_t>(StatusCode::ABORTED));
+ auto* delay_percentage = http_fault.mutable_delay()->mutable_percentage();
+ delay_percentage->set_numerator(100); // Always inject DELAY!
+ delay_percentage->set_denominator(FractionalPercent::HUNDRED);
+ auto* fixed_delay = http_fault.mutable_delay()->mutable_fixed_delay();
+ fixed_delay->set_nanos(kFixedDelayNanos);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Send kNumRpcs RPCs and count the aborts.
+ int num_total = 0, num_ok = 0, num_failure = 0, num_aborted = 0;
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ grpc_millis t0 = NowFromCycleCounter();
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_aborted,
+ RpcOptions(), "Fault injected");
+ grpc_millis t1 = NowFromCycleCounter();
+ EXPECT_GE(t1, t0 + kFixedDelayNanos / 1000 / 1000);
+ }
+ EXPECT_EQ(kNumRpcs, num_total);
+ EXPECT_EQ(0, num_failure);
+ // The abort rate should be roughly equal to the expectation.
+ const double seen_abort_rate = static_cast<double>(num_aborted) / kNumRpcs;
+ EXPECT_THAT(seen_abort_rate,
+ ::testing::AllOf(::testing::Ge(kAbortRate - kErrorTolerance),
+ ::testing::Le(kAbortRate + kErrorTolerance)));
+}
+
+TEST_P(FaultInjectionTest, XdsFaultInjectionMaxFault) {
+ const uint32_t kMaxFault = 10;
+ const uint32_t kNumRpcs = 30; // kNumRpcs should be bigger than kMaxFault
+ const uint32_t kRpcTimeoutMs = 2000; // 2 seconds
+ const uint32_t kLongDelaySeconds = 100; // 100 seconds
+ const uint32_t kAlwaysDelayPercentage = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create an EDS resource
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Construct the fault injection filter config
+ HTTPFault http_fault;
+ auto* delay_percentage = http_fault.mutable_delay()->mutable_percentage();
+ delay_percentage->set_numerator(
+ kAlwaysDelayPercentage); // Always inject DELAY!
+ delay_percentage->set_denominator(FractionalPercent::HUNDRED);
+ auto* fixed_delay = http_fault.mutable_delay()->mutable_fixed_delay();
+ fixed_delay->set_seconds(kLongDelaySeconds);
+ http_fault.mutable_max_active_faults()->set_value(kMaxFault);
+ // Config fault injection via different setup
+ SetFilterConfig(http_fault);
+ // Sends a batch of long running RPCs with long timeout to consume all
+ // active faults quota.
+ int num_ok = 0, num_delayed = 0;
+ LongRunningRpc rpcs[kNumRpcs];
+ RpcOptions rpc_options = RpcOptions().set_timeout_ms(kRpcTimeoutMs);
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ rpcs[i].StartRpc(stub_.get(), rpc_options);
+ }
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ Status status = rpcs[i].GetStatus();
+ if (status.ok()) {
+ ++num_ok;
+ } else {
+ EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, status.error_code());
+ ++num_delayed;
+ }
+ }
+  // Only kMaxFault number of RPCs should be fault injected.
+ EXPECT_EQ(kMaxFault, num_delayed);
+ // Other RPCs should be ok.
+ EXPECT_EQ(kNumRpcs - kMaxFault, num_ok);
+}
+
+class BootstrapContentsFromEnvVarTest : public XdsEnd2endTest {
+ public:
+ BootstrapContentsFromEnvVarTest() : XdsEnd2endTest(4, 1, 100, false, true) {}
+};
+
+TEST_P(BootstrapContentsFromEnvVarTest, Vanilla) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ WaitForAllBackends();
+}
+
+#ifndef DISABLED_XDS_PROTO_IN_CC
+class ClientStatusDiscoveryServiceTest : public XdsEnd2endTest {
+ public:
+ ClientStatusDiscoveryServiceTest() : XdsEnd2endTest(1, 1) {}
+
+ void SetUp() override {
+ XdsEnd2endTest::SetUp();
+ admin_server_thread_ = y_absl::make_unique<AdminServerThread>();
+ admin_server_thread_->Start();
+ TString admin_server_address = y_absl::StrCat(
+ ipv6_only_ ? "[::1]:" : "127.0.0.1:", admin_server_thread_->port());
+ admin_channel_ = grpc::CreateChannel(
+ admin_server_address,
+ std::make_shared<SecureChannelCredentials>(
+ grpc_fake_transport_security_credentials_create()));
+ csds_stub_ =
+ envoy::service::status::v3::ClientStatusDiscoveryService::NewStub(
+ admin_channel_);
+ if (GetParam().use_csds_streaming()) {
+ stream_ = csds_stub_->StreamClientStatus(&stream_context_);
+ }
+ }
+
+ void TearDown() override {
+ if (stream_ != nullptr) {
+ EXPECT_TRUE(stream_->WritesDone());
+ Status status = stream_->Finish();
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ }
+ admin_server_thread_->Shutdown();
+ XdsEnd2endTest::TearDown();
+ }
+
+ envoy::service::status::v3::ClientStatusResponse FetchCsdsResponse() {
+ envoy::service::status::v3::ClientStatusResponse response;
+ if (!GetParam().use_csds_streaming()) {
+ // Fetch through unary pulls
+ ClientContext context;
+ Status status = csds_stub_->FetchClientStatus(
+ &context, envoy::service::status::v3::ClientStatusRequest(),
+ &response);
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ } else {
+ // Fetch through streaming pulls
+ EXPECT_TRUE(
+ stream_->Write(envoy::service::status::v3::ClientStatusRequest()));
+ EXPECT_TRUE(stream_->Read(&response));
+ }
+ return response;
+ }
+
+ private:
+ std::unique_ptr<AdminServerThread> admin_server_thread_;
+ std::shared_ptr<Channel> admin_channel_;
+ std::unique_ptr<
+ envoy::service::status::v3::ClientStatusDiscoveryService::Stub>
+ csds_stub_;
+ ClientContext stream_context_;
+ std::unique_ptr<
+ ClientReaderWriter<envoy::service::status::v3::ClientStatusRequest,
+ envoy::service::status::v3::ClientStatusResponse>>
+ stream_;
+};
+
+MATCHER_P4(EqNode, id, user_agent_name, user_agent_version, client_features,
+ "equals Node") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(id, arg.id(), result_listener);
+ ok &= ::testing::ExplainMatchResult(user_agent_name, arg.user_agent_name(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ user_agent_version, arg.user_agent_version(), result_listener);
+ ok &= ::testing::ExplainMatchResult(client_features, arg.client_features(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqListenersConfigDump, version_info, dynamic_listeners,
+ "equals ListenerConfigDump") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(::testing::ElementsAre(),
+ arg.static_listeners(), result_listener);
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(dynamic_listeners,
+ arg.dynamic_listeners(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqDynamicListenerState, version_info, listener,
+ "equals DynamicListenerState") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ ok &=
+ ::testing::ExplainMatchResult(listener, arg.listener(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqListener, name, api_listener, "equals Listener") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(name, arg.name(), result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ api_listener, arg.api_listener().api_listener(), result_listener);
+ return ok;
+}
+
+MATCHER_P(EqHttpConnectionManagerNotRds, route_config,
+ "equals HttpConnectionManager") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(route_config, arg.route_config(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P(EqRouteConfigurationName, name, "equals RouteConfiguration") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(name, arg.name(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqRouteConfiguration, name, cluster_name,
+ "equals RouteConfiguration") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(name, arg.name(), result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ ::testing::ElementsAre(::testing::Property(
+ &envoy::config::route::v3::VirtualHost::routes,
+ ::testing::ElementsAre(::testing::Property(
+ &envoy::config::route::v3::Route::route,
+ ::testing::Property(
+ &envoy::config::route::v3::RouteAction::cluster,
+ cluster_name))))),
+ arg.virtual_hosts(), result_listener);
+ return ok;
+}
+
+MATCHER_P(EqRoutesConfigDump, dynamic_route_configs,
+ "equals RoutesConfigDump") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(
+ ::testing::ElementsAre(), arg.static_route_configs(), result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ dynamic_route_configs, arg.dynamic_route_configs(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqClustersConfigDump, version_info, dynamic_active_clusters,
+ "equals ClustersConfigDump") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(::testing::ElementsAre(),
+ arg.static_clusters(), result_listener);
+ ok &= ::testing::ExplainMatchResult(::testing::ElementsAre(),
+ arg.dynamic_warming_clusters(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ dynamic_active_clusters, arg.dynamic_active_clusters(), result_listener);
+ return ok;
+}
+
+MATCHER_P(EqCluster, name, "equals Cluster") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(name, arg.name(), result_listener);
+ return ok;
+}
+
+MATCHER_P(EqEndpointsConfigDump, dynamic_endpoint_configs,
+ "equals EndpointsConfigDump") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(dynamic_endpoint_configs,
+ arg.dynamic_endpoint_configs(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P(EqEndpoint, port, "equals Endpoint") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(
+ port, arg.address().socket_address().port_value(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqLocalityLbEndpoints, port, weight, "equals LocalityLbEndpoints") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(
+ ::testing::ElementsAre(::testing::Property(
+ &envoy::config::endpoint::v3::LbEndpoint::endpoint,
+ EqEndpoint(port))),
+ arg.lb_endpoints(), result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ weight, arg.load_balancing_weight().value(), result_listener);
+ return ok;
+}
+
+MATCHER_P(EqClusterLoadAssignmentName, cluster_name,
+ "equals ClusterLoadAssignment") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(cluster_name, arg.cluster_name(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P3(EqClusterLoadAssignment, cluster_name, port, weight,
+ "equals ClusterLoadAssignment") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(cluster_name, arg.cluster_name(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(
+ ::testing::ElementsAre(EqLocalityLbEndpoints(port, weight)),
+ arg.endpoints(), result_listener);
+ return ok;
+}
+
+MATCHER_P2(EqUpdateFailureState, details, version_info,
+ "equals UpdateFailureState") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(details, arg.details(), result_listener);
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P(UnpackListener, matcher, "is a Listener") {
+ Listener config;
+ if (!::testing::ExplainMatchResult(true, arg.UnpackTo(&config),
+ result_listener)) {
+ return false;
+ }
+ return ::testing::ExplainMatchResult(matcher, config, result_listener);
+}
+
+MATCHER_P(UnpackRouteConfiguration, matcher, "is a RouteConfiguration") {
+ RouteConfiguration config;
+ if (!::testing::ExplainMatchResult(true, arg.UnpackTo(&config),
+ result_listener)) {
+ return false;
+ }
+ return ::testing::ExplainMatchResult(matcher, config, result_listener);
+}
+
+MATCHER_P(UnpackHttpConnectionManager, matcher, "is a HttpConnectionManager") {
+ HttpConnectionManager config;
+ if (!::testing::ExplainMatchResult(true, arg.UnpackTo(&config),
+ result_listener)) {
+ return false;
+ }
+ return ::testing::ExplainMatchResult(matcher, config, result_listener);
+}
+
+MATCHER_P(UnpackCluster, matcher, "is a Cluster") {
+ Cluster config;
+ if (!::testing::ExplainMatchResult(true, arg.UnpackTo(&config),
+ result_listener)) {
+ return false;
+ }
+ return ::testing::ExplainMatchResult(matcher, config, result_listener);
+}
+
+MATCHER_P(UnpackClusterLoadAssignment, matcher, "is a ClusterLoadAssignment") {
+ ClusterLoadAssignment config;
+ if (!::testing::ExplainMatchResult(true, arg.UnpackTo(&config),
+ result_listener)) {
+ return false;
+ }
+ return ::testing::ExplainMatchResult(matcher, config, result_listener);
+}
+
+MATCHER_P5(EqDynamicListener, name, version_info, client_status,
+ api_listener_matcher, error_state, "equals DynamicListener") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(false, arg.has_warming_state(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(false, arg.has_draining_state(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(name, arg.name(), result_listener);
+ ok &= ::testing::ExplainMatchResult(client_status, arg.client_status(),
+ result_listener);
+ if (client_status == ClientResourceStatus::ACKED ||
+ client_status == ClientResourceStatus::NACKED) {
+ ok &= ::testing::ExplainMatchResult(
+ EqDynamicListenerState(version_info, UnpackListener(EqListener(
+ name, api_listener_matcher))),
+ arg.active_state(), result_listener);
+ }
+ ok &= ::testing::ExplainMatchResult(error_state, arg.error_state(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P5(EqDynamicRouteConfig, name, version_info, client_status,
+ cluster_name, error_state, "equals DynamicRouteConfig") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ if (client_status == ClientResourceStatus::REQUESTED ||
+ client_status == ClientResourceStatus::DOES_NOT_EXIST) {
+ ok &= ::testing::ExplainMatchResult(
+ UnpackRouteConfiguration(EqRouteConfigurationName(name)),
+ arg.route_config(), result_listener);
+ } else {
+ ok &= ::testing::ExplainMatchResult(
+ UnpackRouteConfiguration(EqRouteConfiguration(name, cluster_name)),
+ arg.route_config(), result_listener);
+ }
+ ok &= ::testing::ExplainMatchResult(error_state, arg.error_state(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(client_status, arg.client_status(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P4(EqDynamicCluster, name, version_info, client_status, error_state,
+ "equals DynamicCluster") {
+ bool ok = true;
+ ok &= ::testing::ExplainMatchResult(UnpackCluster(EqCluster(name)),
+ arg.cluster(), result_listener);
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(client_status, arg.client_status(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(error_state, arg.error_state(),
+ result_listener);
+ return ok;
+}
+
+MATCHER_P6(EqDynamicEndpointConfig, name, version_info, client_status, port,
+ weight, error_state, "equals DynamicEndpointConfig") {
+ bool ok = true;
+ if (client_status == ClientResourceStatus::REQUESTED ||
+ client_status == ClientResourceStatus::DOES_NOT_EXIST) {
+ ok &= ::testing::ExplainMatchResult(
+ UnpackClusterLoadAssignment(EqClusterLoadAssignmentName(name)),
+ arg.endpoint_config(), result_listener);
+ } else {
+ ok &= ::testing::ExplainMatchResult(
+ UnpackClusterLoadAssignment(
+ EqClusterLoadAssignment(name, port, weight)),
+ arg.endpoint_config(), result_listener);
+ }
+ ok &= ::testing::ExplainMatchResult(version_info, arg.version_info(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(client_status, arg.client_status(),
+ result_listener);
+ ok &= ::testing::ExplainMatchResult(error_state, arg.error_state(),
+ result_listener);
+ return ok;
+}
+
+MATCHER(IsRdsEnabledHCM, "is a RDS enabled HttpConnectionManager") {
+ return ::testing::ExplainMatchResult(
+ UnpackHttpConnectionManager(
+ ::testing::Property(&HttpConnectionManager::has_rds, true)),
+ arg, result_listener);
+}
+
+MATCHER_P2(EqNoRdsHCM, route_configuration_name, cluster_name,
+ "equals RDS disabled HttpConnectionManager") {
+ return ::testing::ExplainMatchResult(
+ UnpackHttpConnectionManager(EqHttpConnectionManagerNotRds(
+ EqRouteConfiguration(route_configuration_name, cluster_name))),
+ arg, result_listener);
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpVanilla) {
+ const size_t kNumRpcs = 5;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {backends_[0]->port()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Send several RPCs to ensure the xDS setup works
+ CheckRpcSendOk(kNumRpcs);
+ // Fetches the client config
+ auto csds_response = FetchCsdsResponse();
+ gpr_log(GPR_INFO, "xDS config dump: %s", csds_response.DebugString().c_str());
+ EXPECT_EQ(1, csds_response.config_size());
+ const auto& client_config = csds_response.config(0);
+ // Validate the Node information
+ EXPECT_THAT(client_config.node(),
+ EqNode("xds_end2end_test", ::testing::HasSubstr("C-core"),
+ ::testing::HasSubstr(grpc_version_string()),
+ ::testing::ElementsAre(
+ "envoy.lb.does_not_support_overprovisioning")));
+ // Prepare matches for RDS on or off
+ ::testing::Matcher<google::protobuf::Any> api_listener_matcher;
+ ::testing::Matcher<envoy::admin::v3::RoutesConfigDump>
+ route_config_dump_matcher;
+ if (GetParam().enable_rds_testing()) {
+ api_listener_matcher = IsRdsEnabledHCM();
+ route_config_dump_matcher =
+ EqRoutesConfigDump(::testing::ElementsAre(EqDynamicRouteConfig(
+ kDefaultRouteConfigurationName, "1", ClientResourceStatus::ACKED,
+ kDefaultClusterName, ::testing::_)));
+ } else {
+ api_listener_matcher =
+ EqNoRdsHCM(kDefaultRouteConfigurationName, kDefaultClusterName);
+ route_config_dump_matcher = EqRoutesConfigDump(::testing::ElementsAre());
+ }
+ // Validate the dumped xDS configs
+ EXPECT_THAT(
+ client_config.xds_config(),
+ ::testing::UnorderedElementsAre(
+ ::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::listener_config,
+ EqListenersConfigDump(
+ "1", ::testing::ElementsAre(EqDynamicListener(
+ kServerName, "1", ClientResourceStatus::ACKED,
+ api_listener_matcher, ::testing::_)))),
+ ::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::route_config,
+ route_config_dump_matcher),
+ ::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::cluster_config,
+ EqClustersConfigDump(
+ "1", ::testing::ElementsAre(EqDynamicCluster(
+ kDefaultClusterName, "1",
+ ClientResourceStatus::ACKED, ::testing::_)))),
+ ::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::endpoint_config,
+ EqEndpointsConfigDump(
+ ::testing::ElementsAre(EqDynamicEndpointConfig(
+ kDefaultEdsServiceName, "1", ClientResourceStatus::ACKED,
+ backends_[0]->port(), kDefaultLocalityWeight,
+ ::testing::_))))));
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpEmpty) {
+ // The CSDS service should not fail if XdsClient is not initialized or there
+ // is no working xDS configs.
+ FetchCsdsResponse();
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpListenerError) {
+ int kFetchConfigRetries = 3;
+ int kFetchIntervalMilliseconds = 200;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {backends_[0]->port()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Ensure the xDS resolver has working configs.
+ CheckRpcSendOk();
+ // Bad Listener should be rejected.
+ Listener listener;
+ listener.set_name(kServerName);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // The old xDS configs should still be effective.
+ CheckRpcSendOk();
+ ::testing::Matcher<google::protobuf::Any> api_listener_matcher;
+ if (GetParam().enable_rds_testing()) {
+ api_listener_matcher = IsRdsEnabledHCM();
+ } else {
+ api_listener_matcher =
+ EqNoRdsHCM(kDefaultRouteConfigurationName, kDefaultClusterName);
+ }
+ for (int o = 0; o < kFetchConfigRetries; o++) {
+ auto csds_response = FetchCsdsResponse();
+ // Check if error state is propagated
+ bool ok = ::testing::Value(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::listener_config,
+ EqListenersConfigDump(
+ "1",
+ ::testing::ElementsAre(EqDynamicListener(
+ kServerName, "1", ClientResourceStatus::NACKED,
+ api_listener_matcher,
+ EqUpdateFailureState(
+ ::testing::HasSubstr(
+ "Listener has neither address nor ApiListener"),
+ "2")))))));
+ if (ok) return; // TEST PASSED!
+ gpr_sleep_until(
+ grpc_timeout_milliseconds_to_deadline(kFetchIntervalMilliseconds));
+ }
+ FAIL() << "error_state not seen in CSDS responses";
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpRouteError) {
+ int kFetchConfigRetries = 3;
+ int kFetchIntervalMilliseconds = 200;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {backends_[0]->port()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Ensure the xDS resolver has working configs.
+ CheckRpcSendOk();
+ // Bad route config will be rejected.
+ RouteConfiguration route_config;
+ route_config.set_name(kDefaultRouteConfigurationName);
+ route_config.add_virtual_hosts();
+ SetRouteConfiguration(0, route_config);
+ // The old xDS configs should still be effective.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendOk();
+ for (int o = 0; o < kFetchConfigRetries; o++) {
+ auto csds_response = FetchCsdsResponse();
+ bool ok = false;
+ if (GetParam().enable_rds_testing()) {
+ ok = ::testing::Value(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::route_config,
+ EqRoutesConfigDump(::testing::ElementsAre(EqDynamicRouteConfig(
+ kDefaultRouteConfigurationName, "1",
+ ClientResourceStatus::NACKED, kDefaultClusterName,
+ EqUpdateFailureState(
+ ::testing::HasSubstr("VirtualHost has no domains"),
+ "2")))))));
+ } else {
+ ok = ::testing::Value(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::listener_config,
+ EqListenersConfigDump(
+ "1",
+ ::testing::ElementsAre(EqDynamicListener(
+ kServerName, "1", ClientResourceStatus::NACKED,
+ EqNoRdsHCM(kDefaultRouteConfigurationName,
+ kDefaultClusterName),
+ EqUpdateFailureState(
+ ::testing::HasSubstr("VirtualHost has no domains"),
+ "2")))))));
+ }
+ if (ok) return; // TEST PASSED!
+ gpr_sleep_until(
+ grpc_timeout_milliseconds_to_deadline(kFetchIntervalMilliseconds));
+ }
+ FAIL() << "error_state not seen in CSDS responses";
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpClusterError) {
+ int kFetchConfigRetries = 3;
+ int kFetchIntervalMilliseconds = 200;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {backends_[0]->port()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Ensure the xDS resolver has working configs.
+ CheckRpcSendOk();
+ // Listener without any route, will be rejected.
+ Cluster cluster;
+ cluster.set_name(kDefaultClusterName);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // The old xDS configs should still be effective.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendOk();
+ for (int o = 0; o < kFetchConfigRetries; o++) {
+ auto csds_response = FetchCsdsResponse();
+ // Check if error state is propagated
+ bool ok = ::testing::Value(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::cluster_config,
+ EqClustersConfigDump(
+ "1", ::testing::ElementsAre(EqDynamicCluster(
+ kDefaultClusterName, "1", ClientResourceStatus::NACKED,
+ EqUpdateFailureState(
+ ::testing::HasSubstr("DiscoveryType not found"),
+ "2")))))));
+ if (ok) return; // TEST PASSED!
+ gpr_sleep_until(
+ grpc_timeout_milliseconds_to_deadline(kFetchIntervalMilliseconds));
+ }
+ FAIL() << "error_state not seen in CSDS responses";
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpEndpointError) {
+ int kFetchConfigRetries = 3;
+ int kFetchIntervalMilliseconds = 200;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {backends_[0]->port()}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ BuildEdsResource(args, DefaultEdsServiceName()));
+ // Ensure the xDS resolver has working configs.
+ CheckRpcSendOk();
+ // Bad endpoint config will be rejected.
+ ClusterLoadAssignment cluster_load_assignment;
+ cluster_load_assignment.set_cluster_name(kDefaultEdsServiceName);
+ auto* endpoints = cluster_load_assignment.add_endpoints();
+ endpoints->mutable_load_balancing_weight()->set_value(1);
+ auto* endpoint = endpoints->add_lb_endpoints()->mutable_endpoint();
+ endpoint->mutable_address()->mutable_socket_address()->set_port_value(1 << 1);
+ balancers_[0]->ads_service()->SetEdsResource(cluster_load_assignment);
+ // The old xDS configs should still be effective.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendOk();
+ for (int o = 0; o < kFetchConfigRetries; o++) {
+ auto csds_response = FetchCsdsResponse();
+
+ // Check if error state is propagated
+ bool ok = ::testing::Value(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::endpoint_config,
+ EqEndpointsConfigDump(
+ ::testing::ElementsAre(EqDynamicEndpointConfig(
+ kDefaultEdsServiceName, "1", ClientResourceStatus::NACKED,
+ backends_[0]->port(), kDefaultLocalityWeight,
+ EqUpdateFailureState(::testing::HasSubstr("Empty locality"),
+ "2")))))));
+ if (ok) return; // TEST PASSED!
+ gpr_sleep_until(
+ grpc_timeout_milliseconds_to_deadline(kFetchIntervalMilliseconds));
+ }
+ FAIL() << "error_state not seen in CSDS responses";
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpListenerRequested) {
+ int kTimeoutMillisecond = 1000;
+ balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName);
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::DEADLINE_EXCEEDED);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::listener_config,
+ EqListenersConfigDump(
+ ::testing::_, ::testing::ElementsAre(EqDynamicListener(
+ kServerName, ::testing::_,
+ ClientResourceStatus::REQUESTED,
+ ::testing::_, ::testing::_))))));
+}
+
+TEST_P(ClientStatusDiscoveryServiceTest, XdsConfigDumpClusterRequested) {
+ int kTimeoutMillisecond = 1000;
+ TString kClusterName1 = "cluster-1";
+ TString kClusterName2 = "cluster-2";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Create a route config requesting two non-existing clusters
+ RouteConfiguration route_config;
+ route_config.set_name(kDefaultRouteConfigurationName);
+ auto* vh = route_config.add_virtual_hosts();
+ // The VirtualHost must match the domain name, otherwise will cause resolver
+ // transient failure.
+ vh->add_domains("*");
+ auto* routes1 = vh->add_routes();
+ routes1->mutable_match()->set_prefix("");
+ routes1->mutable_route()->set_cluster(kClusterName1);
+ auto* routes2 = vh->add_routes();
+ routes2->mutable_match()->set_prefix("");
+ routes2->mutable_route()->set_cluster(kClusterName2);
+ SetRouteConfiguration(0, route_config);
+ // Try to get the configs plumb through
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::DEADLINE_EXCEEDED);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::cluster_config,
+ EqClustersConfigDump(
+ ::testing::_,
+ ::testing::UnorderedElementsAre(
+ EqDynamicCluster(kClusterName1, ::testing::_,
+ ClientResourceStatus::REQUESTED,
+ ::testing::_),
+ EqDynamicCluster(kClusterName2, ::testing::_,
+ ClientResourceStatus::REQUESTED,
+ ::testing::_))))));
+}
+
+class CsdsShortAdsTimeoutTest : public ClientStatusDiscoveryServiceTest {
+ void SetUp() override {
+ // Shorten the ADS subscription timeout to speed up the test run.
+ xds_resource_does_not_exist_timeout_ms_ = 500;
+ ClientStatusDiscoveryServiceTest::SetUp();
+ }
+};
+
+TEST_P(CsdsShortAdsTimeoutTest, XdsConfigDumpListenerDoesNotExist) {
+ int kTimeoutMillisecond = 1000000; // 1000s wait for the transient failure.
+ balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName);
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::UNAVAILABLE);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::listener_config,
+ EqListenersConfigDump(
+ ::testing::_, ::testing::ElementsAre(EqDynamicListener(
+ kServerName, ::testing::_,
+ ClientResourceStatus::DOES_NOT_EXIST,
+ ::testing::_, ::testing::_))))));
+}
+
+TEST_P(CsdsShortAdsTimeoutTest, XdsConfigDumpRouteConfigDoesNotExist) {
+ if (!GetParam().enable_rds_testing()) return;
+ int kTimeoutMillisecond = 1000000; // 1000s wait for the transient failure.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ balancers_[0]->ads_service()->UnsetResource(kRdsTypeUrl,
+ kDefaultRouteConfigurationName);
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::UNAVAILABLE);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::route_config,
+ EqRoutesConfigDump(::testing::ElementsAre(
+ EqDynamicRouteConfig(kDefaultRouteConfigurationName, ::testing::_,
+ ClientResourceStatus::DOES_NOT_EXIST,
+ ::testing::_, ::testing::_))))));
+}
+
+TEST_P(CsdsShortAdsTimeoutTest, XdsConfigDumpClusterDoesNotExist) {
+ int kTimeoutMillisecond = 1000000; // 1000s wait for the transient failure.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName);
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::UNAVAILABLE);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::cluster_config,
+ EqClustersConfigDump(::testing::_,
+ ::testing::ElementsAre(EqDynamicCluster(
+ kDefaultClusterName, ::testing::_,
+ ClientResourceStatus::DOES_NOT_EXIST,
+ ::testing::_))))));
+}
+
+TEST_P(CsdsShortAdsTimeoutTest, XdsConfigDumpEndpointDoesNotExist) {
+ int kTimeoutMillisecond = 1000000; // 1000s wait for the transient failure.
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ balancers_[0]->ads_service()->UnsetResource(kEdsTypeUrl,
+ kDefaultEdsServiceName);
+ CheckRpcSendFailure(1, RpcOptions().set_timeout_ms(kTimeoutMillisecond),
+ grpc::UNAVAILABLE);
+ auto csds_response = FetchCsdsResponse();
+ EXPECT_THAT(
+ csds_response.config(0).xds_config(),
+ ::testing::Contains(::testing::Property(
+ &envoy::service::status::v3::PerXdsConfig::endpoint_config,
+ EqEndpointsConfigDump(::testing::ElementsAre(EqDynamicEndpointConfig(
+ kDefaultEdsServiceName, ::testing::_,
+ ClientResourceStatus::DOES_NOT_EXIST, ::testing::_, ::testing::_,
+ ::testing::_))))));
+}
+#endif // DISABLED_XDS_PROTO_IN_CC
+
TString TestTypeName(const ::testing::TestParamInfo<TestType>& info) {
return info.param.AsString();
}
-// TestType params:
-// - use_xds_resolver
-// - enable_load_reporting
-// - enable_rds_testing = false
-// - use_v2 = false
-
-INSTANTIATE_TEST_SUITE_P(XdsTest, BasicTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false),
- TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
+// Run with all combinations of xds/fake resolver and enabling load reporting.
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, BasicTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_load_reporting(),
+ TestType().set_use_fake_resolver(),
+ TestType().set_use_fake_resolver().set_enable_load_reporting()),
+ &TestTypeName);
// Run with both fake resolver and xds resolver.
// Don't run with load reporting or v2 or RDS, since they are irrelevant to
// the tests.
INSTANTIATE_TEST_SUITE_P(XdsTest, SecureNamingTest,
- ::testing::Values(TestType(false, false),
- TestType(true, false)),
+ ::testing::Values(TestType(),
+ TestType().set_use_fake_resolver()),
&TestTypeName);
// LDS depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
+INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest, ::testing::Values(TestType()),
+ &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(XdsTest, LdsV2Test,
+ ::testing::Values(TestType().set_use_v2()),
&TestTypeName);
// LDS/RDS commmon tests depend on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true),
- TestType(true, false, true),
- TestType(true, true, true),
- // Also test with xDS v2.
- TestType(true, true, true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, LdsRdsTest,
+ ::testing::Values(TestType(), TestType().set_enable_rds_testing(),
+ // Also test with xDS v2.
+ TestType().set_enable_rds_testing().set_use_v2()),
+ &TestTypeName);
+
+// CDS depends on XdsResolver.
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, CdsTest,
+ ::testing::Values(TestType(), TestType().set_enable_load_reporting()),
+ &TestTypeName);
// CDS depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
+// Security depends on v3.
+// Not enabling load reporting or RDS, since those are irrelevant to these
+// tests.
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, XdsSecurityTest,
+ ::testing::Values(TestType().set_use_xds_credentials()), &TestTypeName);
+
+// We are only testing the server here.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsEnabledServerTest,
+ ::testing::Values(TestType()), &TestTypeName);
+
+// We are only testing the server here.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsServerSecurityTest,
+ ::testing::Values(TestType()
+ .set_use_fake_resolver()
+ .set_use_xds_credentials()),
+ &TestTypeName);
+
+// We are only testing the server here.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsEnabledServerStatusNotificationTest,
+ ::testing::Values(TestType()
+ .set_use_fake_resolver()
+ .set_use_xds_credentials()),
+ &TestTypeName);
+
+// We are only testing the server here.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsServerFilterChainMatchTest,
+ ::testing::Values(TestType()
+ .set_use_fake_resolver()
+ .set_use_xds_credentials()),
&TestTypeName);
// EDS could be tested with or without XdsResolver, but the tests would
// be the same either way, so we test it only with XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, EdsTest,
+ ::testing::Values(TestType(), TestType().set_enable_load_reporting()),
+ &TestTypeName);
// Test initial resource timeouts for each resource type.
// Do this only for XdsResolver with RDS enabled, so that we can test
// all resource types.
// Run with V3 only, since the functionality is no different in V2.
INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest,
- ::testing::Values(TestType(true, false, true)),
+ ::testing::Values(TestType().set_enable_rds_testing()),
&TestTypeName);
// XdsResolverOnlyTest depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, XdsResolverOnlyTest,
+ ::testing::Values(TestType(), TestType().set_enable_load_reporting()),
+ &TestTypeName);
// XdsResolverLoadReprtingOnlyTest depends on XdsResolver and load reporting.
-INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest,
- ::testing::Values(TestType(true, true)),
- &TestTypeName);
-
-INSTANTIATE_TEST_SUITE_P(XdsTest, LocalityMapTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false),
- TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-INSTANTIATE_TEST_SUITE_P(XdsTest, FailoverTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false),
- TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false),
- TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-INSTANTIATE_TEST_SUITE_P(XdsTest, BalancerUpdateTest,
- ::testing::Values(TestType(false, true),
- TestType(false, false),
- TestType(true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, XdsResolverLoadReportingOnlyTest,
+ ::testing::Values(TestType().set_enable_load_reporting()), &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, LocalityMapTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_load_reporting(),
+ TestType().set_use_fake_resolver(),
+ TestType().set_use_fake_resolver().set_enable_load_reporting()),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, FailoverTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_load_reporting(),
+ TestType().set_use_fake_resolver(),
+ TestType().set_use_fake_resolver().set_enable_load_reporting()),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, DropTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_load_reporting(),
+ TestType().set_use_fake_resolver(),
+ TestType().set_use_fake_resolver().set_enable_load_reporting()),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, BalancerUpdateTest,
+ ::testing::Values(
+ TestType().set_use_fake_resolver(),
+ TestType().set_use_fake_resolver().set_enable_load_reporting(),
+ TestType().set_enable_load_reporting()),
+ &TestTypeName);
// Load reporting tests are not run with load reporting disabled.
-INSTANTIATE_TEST_SUITE_P(XdsTest, ClientLoadReportingTest,
- ::testing::Values(TestType(false, true),
- TestType(true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, ClientLoadReportingTest,
+ ::testing::Values(
+ TestType().set_enable_load_reporting(),
+ TestType().set_enable_load_reporting().set_use_fake_resolver()),
+ &TestTypeName);
// Load reporting tests are not run with load reporting disabled.
-INSTANTIATE_TEST_SUITE_P(XdsTest, ClientLoadReportingWithDropTest,
- ::testing::Values(TestType(false, true),
- TestType(true, true)),
- &TestTypeName);
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, ClientLoadReportingWithDropTest,
+ ::testing::Values(
+ TestType().set_enable_load_reporting(),
+ TestType().set_enable_load_reporting().set_use_fake_resolver()),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, FaultInjectionTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_rds_testing(),
+ TestType().set_filter_config_setup(
+ TestType::FilterConfigSetup::kRouteOverride),
+ TestType().set_enable_rds_testing().set_filter_config_setup(
+ TestType::FilterConfigSetup::kRouteOverride)),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(XdsTest, BootstrapContentsFromEnvVarTest,
+ ::testing::Values(TestType()), &TestTypeName);
+
+#ifndef DISABLED_XDS_PROTO_IN_CC
+// Run CSDS tests with RDS enabled and disabled.
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, ClientStatusDiscoveryServiceTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_rds_testing(),
+ TestType().set_use_csds_streaming(),
+ TestType().set_enable_rds_testing().set_use_csds_streaming()),
+ &TestTypeName);
+
+INSTANTIATE_TEST_SUITE_P(
+ XdsTest, CsdsShortAdsTimeoutTest,
+ ::testing::Values(
+ TestType(), TestType().set_enable_rds_testing(),
+ TestType().set_use_csds_streaming(),
+ TestType().set_enable_rds_testing().set_use_csds_streaming()),
+ &TestTypeName);
+#endif // DISABLED_XDS_PROTO_IN_CC
} // namespace
} // namespace testing
@@ -5826,7 +11327,29 @@ int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv);
grpc::testing::WriteBootstrapFiles();
- grpc::testing::g_port_saver = new grpc::testing::PortSaver();
+ // Make the backup poller poll very frequently in order to pick up
+ // updates from all the subchannels's FDs.
+ GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ grpc_core::CertificateProviderRegistry::RegisterCertificateProviderFactory(
+ y_absl::make_unique<grpc::testing::FakeCertificateProviderFactory>(
+ "fake1", &grpc::testing::g_fake1_cert_data_map));
+ grpc_core::CertificateProviderRegistry::RegisterCertificateProviderFactory(
+ y_absl::make_unique<grpc::testing::FakeCertificateProviderFactory>(
+ "fake2", &grpc::testing::g_fake2_cert_data_map));
+ grpc_init();
+ grpc_core::XdsHttpFilterRegistry::RegisterFilter(
+ y_absl::make_unique<grpc::testing::NoOpHttpFilter>(
+ "grpc.testing.client_only_http_filter", true, false),
+ {"grpc.testing.client_only_http_filter"});
+ grpc_core::XdsHttpFilterRegistry::RegisterFilter(
+ y_absl::make_unique<grpc::testing::NoOpHttpFilter>(
+ "grpc.testing.server_only_http_filter", false, true),
+ {"grpc.testing.server_only_http_filter"});
const auto result = RUN_ALL_TESTS();
+ grpc_shutdown();
return result;
}