aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/grpc/test/cpp/end2end
diff options
context:
space:
mode:
authorheretic <heretic@yandex-team.ru>2022-02-10 16:45:46 +0300
committerDaniil Cherednik <dcherednik@yandex-team.ru>2022-02-10 16:45:46 +0300
commit81eddc8c0b55990194e112b02d127b87d54164a9 (patch)
tree9142afc54d335ea52910662635b898e79e192e49 /contrib/libs/grpc/test/cpp/end2end
parent397cbe258b9e064f49c4ca575279f02f39fef76e (diff)
downloadydb-81eddc8c0b55990194e112b02d127b87d54164a9.tar.gz
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/grpc/test/cpp/end2end')
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt72
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc174
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc28
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc4
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc586
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc6
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc354
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc456
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc4
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/end2end_test.cc366
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc16
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc38
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc22
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc544
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/health/ya.make2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc24
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc50
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc120
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/interceptors_util.h40
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc60
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/mock_test.cc36
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc48
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc14
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc10
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc14
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc8
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc4
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc32
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc16
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc52
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc6
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h4
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc248
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/test_service_impl.h782
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc2
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/time_change_test.cc10
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc8498
-rw-r--r--contrib/libs/grpc/test/cpp/end2end/ya.make14
42 files changed, 6386 insertions, 6386 deletions
diff --git a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
index ff3e2fd911..a07ea0849d 100644
--- a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt
@@ -1,36 +1,36 @@
-====================Apache-2.0====================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-
-
-====================COPYRIGHT====================
- * Copyright 2015 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2016 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2017 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2018 gRPC authors.
-
-
-====================COPYRIGHT====================
-# Copyright 2019 gRPC authors.
-
-
-====================COPYRIGHT====================
-// Copyright 2019 The gRPC Authors
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================COPYRIGHT====================
+ * Copyright 2015 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2016 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2017 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2018 gRPC authors.
+
+
+====================COPYRIGHT====================
+# Copyright 2019 gRPC authors.
+
+
+====================COPYRIGHT====================
+// Copyright 2019 The gRPC Authors
diff --git a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
index 20f2946565..45df8718f9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
@@ -120,7 +120,7 @@ class Verifier {
while (!expectations_.empty()) {
Next(cq, ignore_ok);
}
- maybe_expectations_.clear();
+ maybe_expectations_.clear();
}
// This version of Verify stops after a certain deadline
@@ -140,7 +140,7 @@ class Verifier {
GotTag(got_tag, ok, false);
}
}
- maybe_expectations_.clear();
+ maybe_expectations_.clear();
}
// This version of Verify stops after a certain deadline, and uses the
@@ -163,7 +163,7 @@ class Verifier {
GotTag(got_tag, ok, false);
}
}
- maybe_expectations_.clear();
+ maybe_expectations_.clear();
}
private:
@@ -184,7 +184,7 @@ class Verifier {
if (!ignore_ok) {
EXPECT_EQ(it2->second.ok, ok);
}
- maybe_expectations_.erase(it2);
+ maybe_expectations_.erase(it2);
} else {
gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
abort();
@@ -224,8 +224,8 @@ class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption {
class TestScenario {
public:
- TestScenario(bool inproc_stub, const TString& creds_type, bool hcs,
- const TString& content)
+ TestScenario(bool inproc_stub, const TString& creds_type, bool hcs,
+ const TString& content)
: inproc(inproc_stub),
health_check_service(hcs),
credentials_type(creds_type),
@@ -233,8 +233,8 @@ class TestScenario {
void Log() const;
bool inproc;
bool health_check_service;
- const TString credentials_type;
- const TString message_content;
+ const TString credentials_type;
+ const TString message_content;
};
static std::ostream& operator<<(std::ostream& out,
@@ -355,52 +355,52 @@ TEST_P(AsyncEnd2endTest, SimpleRpc) {
SendRpc(1);
}
-TEST_P(AsyncEnd2endTest, SimpleRpcWithExpectedError) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
- ErrorStatus error_status;
-
- send_request.set_message(GetParam().message_content);
- error_status.set_code(1); // CANCELLED
- error_status.set_error_message("cancel error message");
- *send_request.mutable_param()->mutable_expected_error() = error_status;
-
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
-
- srv_ctx.AsyncNotifyWhenDone(tag(5));
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
-
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(
- send_response,
- Status(
- static_cast<StatusCode>(recv_request.param().expected_error().code()),
- recv_request.param().expected_error().error_message()),
- tag(3));
- Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get());
-
- EXPECT_EQ(recv_response.message(), "");
- EXPECT_EQ(recv_status.error_code(), error_status.code());
- EXPECT_EQ(recv_status.error_message(), error_status.error_message());
- EXPECT_FALSE(srv_ctx.IsCancelled());
-}
-
+TEST_P(AsyncEnd2endTest, SimpleRpcWithExpectedError) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+ ErrorStatus error_status;
+
+ send_request.set_message(GetParam().message_content);
+ error_status.set_code(1); // CANCELLED
+ error_status.set_error_message("cancel error message");
+ *send_request.mutable_param()->mutable_expected_error() = error_status;
+
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ srv_ctx.AsyncNotifyWhenDone(tag(5));
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(
+ send_response,
+ Status(
+ static_cast<StatusCode>(recv_request.param().expected_error().code()),
+ recv_request.param().expected_error().error_message()),
+ tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get());
+
+ EXPECT_EQ(recv_response.message(), "");
+ EXPECT_EQ(recv_status.error_code(), error_status.code());
+ EXPECT_EQ(recv_status.error_message(), error_status.error_message());
+ EXPECT_FALSE(srv_ctx.IsCancelled());
+}
+
TEST_P(AsyncEnd2endTest, SequentialRpcs) {
ResetStub();
SendRpc(10);
@@ -931,9 +931,9 @@ TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message(GetParam().message_content);
- std::pair<TString, TString> meta1("key1", "val1");
- std::pair<TString, TString> meta2("key2", "val2");
- std::pair<TString, TString> meta3("g.r.d-bin", "xyz");
+ std::pair<TString, TString> meta1("key1", "val1");
+ std::pair<TString, TString> meta2("key2", "val2");
+ std::pair<TString, TString> meta3("g.r.d-bin", "xyz");
cli_ctx.AddMetadata(meta1.first, meta1.second);
cli_ctx.AddMetadata(meta2.first, meta2.second);
cli_ctx.AddMetadata(meta3.first, meta3.second);
@@ -977,8 +977,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message(GetParam().message_content);
- std::pair<TString, TString> meta1("key1", "val1");
- std::pair<TString, TString> meta2("key2", "val2");
+ std::pair<TString, TString> meta1("key1", "val1");
+ std::pair<TString, TString> meta2("key2", "val2");
std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
@@ -1020,8 +1020,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreaming) {
ServerContext srv_ctx;
ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
- std::pair<::TString, ::TString> meta1("key1", "val1");
- std::pair<::TString, ::TString> meta2("key2", "val2");
+ std::pair<::TString, ::TString> meta1("key1", "val1");
+ std::pair<::TString, ::TString> meta2("key2", "val2");
std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
@@ -1075,8 +1075,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreamingImplicit) {
ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
send_request.set_message(GetParam().message_content);
- std::pair<::TString, ::TString> meta1("key1", "val1");
- std::pair<::TString, ::TString> meta2("key2", "val2");
+ std::pair<::TString, ::TString> meta1("key1", "val1");
+ std::pair<::TString, ::TString> meta2("key2", "val2");
std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
@@ -1130,8 +1130,8 @@ TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message(GetParam().message_content);
- std::pair<TString, TString> meta1("key1", "val1");
- std::pair<TString, TString> meta2("key2", "val2");
+ std::pair<TString, TString> meta1("key1", "val1");
+ std::pair<TString, TString> meta2("key2", "val2");
std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
@@ -1175,19 +1175,19 @@ TEST_P(AsyncEnd2endTest, MetadataRpc) {
grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
send_request.set_message(GetParam().message_content);
- std::pair<TString, TString> meta1("key1", "val1");
- std::pair<TString, TString> meta2(
+ std::pair<TString, TString> meta1("key1", "val1");
+ std::pair<TString, TString> meta2(
"key2-bin",
- TString("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13));
- std::pair<TString, TString> meta3("key3", "val3");
- std::pair<TString, TString> meta6(
+ TString("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13));
+ std::pair<TString, TString> meta3("key3", "val3");
+ std::pair<TString, TString> meta6(
"key4-bin",
- TString("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
- 14));
- std::pair<TString, TString> meta5("key5", "val5");
- std::pair<TString, TString> meta4(
+ TString("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
+ 14));
+ std::pair<TString, TString> meta5("key5", "val5");
+ std::pair<TString, TString> meta4(
"key6-bin",
- TString(
+ TString(
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15));
cli_ctx.AddMetadata(meta1.first, meta1.second);
@@ -1407,7 +1407,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
EchoRequest send_request;
// Client sends 3 messages (tags 3, 4 and 5)
for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
- send_request.set_message("Ping " + ToString(tag_idx));
+ send_request.set_message("Ping " + ToString(tag_idx));
cli_stream->Write(send_request, tag(tag_idx));
Verifier()
.Expect(tag_idx, expected_client_cq_result)
@@ -1592,7 +1592,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
// Server sends three messages (tags 3, 4 and 5)
// But if want_done tag is true, we might also see tag 11
for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
- send_response.set_message("Pong " + ToString(tag_idx));
+ send_response.set_message("Pong " + ToString(tag_idx));
srv_stream.Write(send_response, tag(tag_idx));
// Note that we'll add something to the verifier and verify that
// something was seen, but it might be tag 11 and not what we
@@ -1874,8 +1874,8 @@ TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) {
std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/,
bool test_message_size_limit) {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types;
- std::vector<TString> messages;
+ std::vector<TString> credentials_types;
+ std::vector<TString> messages;
auto insec_ok = [] {
// Only allow insecure credentials type when it is registered with the
@@ -1897,20 +1897,20 @@ std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/,
if (test_message_size_limit) {
for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024;
k *= 32) {
- TString big_msg;
+ TString big_msg;
for (size_t i = 0; i < k * 1024; ++i) {
char c = 'a' + (i % 26);
big_msg += c;
}
messages.push_back(big_msg);
}
- if (!BuiltUnderMsan()) {
- // 4MB message processing with SSL is very slow under msan
- // (causes timeouts) and doesn't really increase the signal from tests.
- // Reserve 100 bytes for other fields of the message proto.
- messages.push_back(
- TString(GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH - 100, 'a'));
- }
+ if (!BuiltUnderMsan()) {
+ // 4MB message processing with SSL is very slow under msan
+ // (causes timeouts) and doesn't really increase the signal from tests.
+ // Reserve 100 bytes for other fields of the message proto.
+ messages.push_back(
+ TString(GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH - 100, 'a'));
+ }
}
// TODO (sreek) Renable tests with health check service after the issue
diff --git a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
index bd4de86bd6..e6695982bd 100644
--- a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc
@@ -60,10 +60,10 @@ namespace testing {
namespace {
struct TestScenario {
- TestScenario(const TString& creds_type, const TString& content)
+ TestScenario(const TString& creds_type, const TString& content)
: credentials_type(creds_type), message_content(content) {}
- const TString credentials_type;
- const TString message_content;
+ const TString credentials_type;
+ const TString message_content;
};
class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
@@ -244,16 +244,16 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
private:
struct ServerData {
int port_;
- const TString creds_;
+ const TString creds_;
std::unique_ptr<Server> server_;
TestServiceImpl service_;
std::unique_ptr<std::thread> thread_;
bool server_ready_ = false;
- ServerData(int port, const TString& creds)
+ ServerData(int port, const TString& creds)
: port_(port), creds_(creds) {}
- void Start(const TString& server_host) {
+ void Start(const TString& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_);
std::mutex mu;
std::unique_lock<std::mutex> lock(mu);
@@ -265,7 +265,7 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
gpr_log(GPR_INFO, "server startup complete");
}
- void Serve(const TString& server_host, std::mutex* mu,
+ void Serve(const TString& server_host, std::mutex* mu,
std::condition_variable* cond) {
std::ostringstream server_address;
server_address << server_host << ":" << port_;
@@ -287,17 +287,17 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> {
};
CompletionQueue cq_;
- const TString server_host_;
- const TString interface_;
- const TString ipv4_address_;
+ const TString server_host_;
+ const TString interface_;
+ const TString ipv4_address_;
std::unique_ptr<ServerData> server_;
int port_;
};
std::vector<TestScenario> CreateTestScenarios() {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types;
- std::vector<TString> messages;
+ std::vector<TString> credentials_types;
+ std::vector<TString> messages;
credentials_types.push_back(kInsecureCredentialsType);
auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
@@ -307,7 +307,7 @@ std::vector<TestScenario> CreateTestScenarios() {
messages.push_back("🖖");
for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) {
- TString big_msg;
+ TString big_msg;
for (size_t i = 0; i < k * 1024; ++i) {
char c = 'a' + (i % 26);
big_msg += c;
@@ -489,7 +489,7 @@ TEST_P(CFStreamTest, ConcurrentRpc) {
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
- grpc::testing::TestEnvironment env(argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
gpr_setenv("grpc_cfstream", "1");
const auto result = RUN_ALL_TESTS();
return result;
diff --git a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
index fdc6784fb7..9c723bebb6 100644
--- a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
@@ -118,7 +118,7 @@ class ChannelzServerTest : public ::testing::Test {
// We set up a proxy server with channelz enabled.
proxy_port_ = grpc_pick_unused_port_or_die();
ServerBuilder proxy_builder;
- TString proxy_server_address = "localhost:" + to_string(proxy_port_);
+ TString proxy_server_address = "localhost:" + to_string(proxy_port_);
proxy_builder.AddListeningPort(proxy_server_address,
InsecureServerCredentials());
// forces channelz and channel tracing to be enabled.
@@ -136,7 +136,7 @@ class ChannelzServerTest : public ::testing::Test {
// create a new backend.
backends_[i].port = grpc_pick_unused_port_or_die();
ServerBuilder backend_builder;
- TString backend_server_address =
+ TString backend_server_address =
"localhost:" + to_string(backends_[i].port);
backend_builder.AddListeningPort(backend_server_address,
InsecureServerCredentials());
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
index 467f482d3f..12cb40a953 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
@@ -25,15 +25,15 @@
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
#include <grpcpp/support/client_callback.h>
-#include <gtest/gtest.h>
-
-#include <algorithm>
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-#include <sstream>
-#include <thread>
-
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <sstream>
+#include <thread>
+
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -65,7 +65,7 @@ enum class Protocol { INPROC, TCP };
class TestScenario {
public:
TestScenario(bool serve_callback, Protocol protocol, bool intercept,
- const TString& creds_type)
+ const TString& creds_type)
: callback_server(serve_callback),
protocol(protocol),
use_interceptors(intercept),
@@ -74,7 +74,7 @@ class TestScenario {
bool callback_server;
Protocol protocol;
bool use_interceptors;
- const TString credentials_type;
+ const TString credentials_type;
};
static std::ostream& operator<<(std::ostream& out,
@@ -180,7 +180,7 @@ class ClientCallbackEnd2endTest
}
void SendRpcs(int num_rpcs, bool with_binary_metadata) {
- TString test_string("");
+ TString test_string("");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest request;
EchoResponse response;
@@ -188,12 +188,12 @@ class ClientCallbackEnd2endTest
test_string += "Hello world. ";
request.set_message(test_string);
- TString val;
+ TString val;
if (with_binary_metadata) {
request.mutable_param()->set_echo_metadata(true);
char bytes[8] = {'\0', '\1', '\2', '\3',
'\4', '\5', '\6', static_cast<char>(i)};
- val = TString(bytes, 8);
+ val = TString(bytes, 8);
cli_ctx.AddMetadata("custom-bin", val);
}
@@ -228,8 +228,8 @@ class ClientCallbackEnd2endTest
}
void SendRpcsGeneric(int num_rpcs, bool maybe_except) {
- const TString kMethodName("/grpc.testing.EchoTestService/Echo");
- TString test_string("");
+ const TString kMethodName("/grpc.testing.EchoTestService/Echo");
+ TString test_string("");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest request;
std::unique_ptr<ByteBuffer> send_buf;
@@ -269,17 +269,17 @@ class ClientCallbackEnd2endTest
}
}
- void SendGenericEchoAsBidi(int num_rpcs, int reuses, bool do_writes_done) {
- const TString kMethodName("/grpc.testing.EchoTestService/Echo");
- TString test_string("");
+ void SendGenericEchoAsBidi(int num_rpcs, int reuses, bool do_writes_done) {
+ const TString kMethodName("/grpc.testing.EchoTestService/Echo");
+ TString test_string("");
for (int i = 0; i < num_rpcs; i++) {
test_string += "Hello world. ";
class Client : public grpc::experimental::ClientBidiReactor<ByteBuffer,
ByteBuffer> {
public:
- Client(ClientCallbackEnd2endTest* test, const TString& method_name,
- const TString& test_str, int reuses, bool do_writes_done)
- : reuses_remaining_(reuses), do_writes_done_(do_writes_done) {
+ Client(ClientCallbackEnd2endTest* test, const TString& method_name,
+ const TString& test_str, int reuses, bool do_writes_done)
+ : reuses_remaining_(reuses), do_writes_done_(do_writes_done) {
activate_ = [this, test, method_name, test_str] {
if (reuses_remaining_ > 0) {
cli_ctx_.reset(new ClientContext);
@@ -299,11 +299,11 @@ class ClientCallbackEnd2endTest
};
activate_();
}
- void OnWriteDone(bool /*ok*/) override {
- if (do_writes_done_) {
- StartWritesDone();
- }
- }
+ void OnWriteDone(bool /*ok*/) override {
+ if (do_writes_done_) {
+ StartWritesDone();
+ }
+ }
void OnReadDone(bool /*ok*/) override {
EchoResponse response;
EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response));
@@ -329,11 +329,11 @@ class ClientCallbackEnd2endTest
std::mutex mu_;
std::condition_variable cv_;
bool done_ = false;
- const bool do_writes_done_;
- };
+ const bool do_writes_done_;
+ };
+
+ Client rpc(this, kMethodName, test_string, reuses, do_writes_done);
- Client rpc(this, kMethodName, test_string, reuses, do_writes_done);
-
rpc.Await();
}
}
@@ -355,102 +355,102 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpc) {
SendRpcs(1, false);
}
-TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
+TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
MAYBE_SKIP_TEST;
ResetStub();
-
- EchoRequest request;
- EchoResponse response;
- ClientContext cli_ctx;
- ErrorStatus error_status;
-
- request.set_message("Hello failure");
- error_status.set_code(1); // CANCELLED
- error_status.set_error_message("cancel error message");
- *request.mutable_param()->mutable_expected_error() = error_status;
-
- std::mutex mu;
+
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext cli_ctx;
+ ErrorStatus error_status;
+
+ request.set_message("Hello failure");
+ error_status.set_code(1); // CANCELLED
+ error_status.set_error_message("cancel error message");
+ *request.mutable_param()->mutable_expected_error() = error_status;
+
+ std::mutex mu;
std::condition_variable cv;
bool done = false;
- stub_->experimental_async()->Echo(
- &cli_ctx, &request, &response,
- [&response, &done, &mu, &cv, &error_status](Status s) {
- EXPECT_EQ("", response.message());
- EXPECT_EQ(error_status.code(), s.error_code());
- EXPECT_EQ(error_status.error_message(), s.error_message());
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
-
- std::unique_lock<std::mutex> l(mu);
+ stub_->experimental_async()->Echo(
+ &cli_ctx, &request, &response,
+ [&response, &done, &mu, &cv, &error_status](Status s) {
+ EXPECT_EQ("", response.message());
+ EXPECT_EQ(error_status.code(), s.error_code());
+ EXPECT_EQ(error_status.error_message(), s.error_message());
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+
+ std::unique_lock<std::mutex> l(mu);
while (!done) {
cv.wait(l);
}
}
-TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
- MAYBE_SKIP_TEST;
- ResetStub();
-
- // The request/response state associated with an RPC and the synchronization
- // variables needed to notify its completion.
- struct RpcState {
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- EchoRequest request;
- EchoResponse response;
- ClientContext cli_ctx;
-
- RpcState() = default;
- ~RpcState() {
- // Grab the lock to prevent destruction while another is still holding
- // lock
- std::lock_guard<std::mutex> lock(mu);
- }
- };
- std::vector<RpcState> rpc_state(3);
- for (size_t i = 0; i < rpc_state.size(); i++) {
- TString message = "Hello locked world";
- message += ToString(i);
- rpc_state[i].request.set_message(message);
- }
-
- // Grab a lock and then start an RPC whose callback grabs the same lock and
- // then calls this function to start the next RPC under lock (up to a limit of
- // the size of the rpc_state vector).
- std::function<void(int)> nested_call = [this, &nested_call,
- &rpc_state](int index) {
- std::lock_guard<std::mutex> l(rpc_state[index].mu);
- stub_->experimental_async()->Echo(
- &rpc_state[index].cli_ctx, &rpc_state[index].request,
- &rpc_state[index].response,
- [index, &nested_call, &rpc_state](Status s) {
- std::lock_guard<std::mutex> l1(rpc_state[index].mu);
- EXPECT_TRUE(s.ok());
- rpc_state[index].done = true;
- rpc_state[index].cv.notify_all();
- // Call the next level of nesting if possible
- if (index + 1 < rpc_state.size()) {
- nested_call(index + 1);
- }
- });
- };
-
- nested_call(0);
-
- // Wait for completion notifications from all RPCs. Order doesn't matter.
- for (RpcState& state : rpc_state) {
- std::unique_lock<std::mutex> l(state.mu);
- while (!state.done) {
- state.cv.wait(l);
- }
- EXPECT_EQ(state.request.message(), state.response.message());
- }
-}
-
+TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+
+ // The request/response state associated with an RPC and the synchronization
+ // variables needed to notify its completion.
+ struct RpcState {
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext cli_ctx;
+
+ RpcState() = default;
+ ~RpcState() {
+ // Grab the lock to prevent destruction while another is still holding
+ // lock
+ std::lock_guard<std::mutex> lock(mu);
+ }
+ };
+ std::vector<RpcState> rpc_state(3);
+ for (size_t i = 0; i < rpc_state.size(); i++) {
+ TString message = "Hello locked world";
+ message += ToString(i);
+ rpc_state[i].request.set_message(message);
+ }
+
+ // Grab a lock and then start an RPC whose callback grabs the same lock and
+ // then calls this function to start the next RPC under lock (up to a limit of
+ // the size of the rpc_state vector).
+ std::function<void(int)> nested_call = [this, &nested_call,
+ &rpc_state](int index) {
+ std::lock_guard<std::mutex> l(rpc_state[index].mu);
+ stub_->experimental_async()->Echo(
+ &rpc_state[index].cli_ctx, &rpc_state[index].request,
+ &rpc_state[index].response,
+ [index, &nested_call, &rpc_state](Status s) {
+ std::lock_guard<std::mutex> l1(rpc_state[index].mu);
+ EXPECT_TRUE(s.ok());
+ rpc_state[index].done = true;
+ rpc_state[index].cv.notify_all();
+ // Call the next level of nesting if possible
+ if (index + 1 < rpc_state.size()) {
+ nested_call(index + 1);
+ }
+ });
+ };
+
+ nested_call(0);
+
+ // Wait for completion notifications from all RPCs. Order doesn't matter.
+ for (RpcState& state : rpc_state) {
+ std::unique_lock<std::mutex> l(state.mu);
+ while (!state.done) {
+ state.cv.wait(l);
+ }
+ EXPECT_EQ(state.request.message(), state.response.message());
+ }
+}
+
TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -533,21 +533,21 @@ TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) {
TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) {
MAYBE_SKIP_TEST;
ResetStub();
- SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true);
+ SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true);
}
TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) {
MAYBE_SKIP_TEST;
ResetStub();
- SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true);
+ SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true);
+}
+
+TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false);
}
-TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false);
-}
-
#if GRPC_ALLOW_EXCEPTIONS
TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) {
MAYBE_SKIP_TEST;
@@ -619,7 +619,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) {
ClientContext context;
request.set_message("hello");
context.AddMetadata(kServerTryCancelRequest,
- ToString(CANCEL_BEFORE_PROCESSING));
+ ToString(CANCEL_BEFORE_PROCESSING));
std::mutex mu;
std::condition_variable cv;
@@ -654,14 +654,14 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
: server_try_cancel_(server_try_cancel),
num_msgs_to_send_(num_msgs_to_send),
client_cancel_{client_cancel} {
- TString msg{"Hello server."};
+ TString msg{"Hello server."};
for (int i = 0; i < num_msgs_to_send; i++) {
desired_ += msg;
}
if (server_try_cancel != DO_NOT_CANCEL) {
// Send server_try_cancel value in the client metadata
context_.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
}
context_.set_initial_metadata_corked(true);
stub->experimental_async()->RequestStream(&context_, &response_, this);
@@ -735,7 +735,7 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
const ServerTryCancelRequestPhase server_try_cancel_;
int num_msgs_sent_{0};
const int num_msgs_to_send_;
- TString desired_;
+ TString desired_;
const ClientCancelInfo client_cancel_;
std::mutex mu_;
std::condition_variable cv_;
@@ -860,72 +860,72 @@ TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
}
}
-TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
- MAYBE_SKIP_TEST;
- ResetStub();
- const TString kMethodName("/grpc.testing.EchoTestService/Echo");
- class UnaryClient : public grpc::experimental::ClientUnaryReactor {
- public:
- UnaryClient(grpc::GenericStub* stub, const TString& method_name) {
- cli_ctx_.AddMetadata("key1", "val1");
- cli_ctx_.AddMetadata("key2", "val2");
- request_.mutable_param()->set_echo_metadata_initially(true);
- request_.set_message("Hello metadata");
- send_buf_ = SerializeToByteBuffer(&request_);
-
- stub->experimental().PrepareUnaryCall(&cli_ctx_, method_name,
- send_buf_.get(), &recv_buf_, this);
- StartCall();
- }
- void OnReadInitialMetadataDone(bool ok) override {
- EXPECT_TRUE(ok);
- EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1"));
- EXPECT_EQ(
- "val1",
- ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second));
- EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2"));
- EXPECT_EQ(
- "val2",
- ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second));
- initial_metadata_done_ = true;
- }
- void OnDone(const Status& s) override {
- EXPECT_TRUE(initial_metadata_done_);
- EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size());
- EXPECT_TRUE(s.ok());
- EchoResponse response;
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response));
- EXPECT_EQ(request_.message(), response.message());
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
- EchoRequest request_;
- std::unique_ptr<ByteBuffer> send_buf_;
- ByteBuffer recv_buf_;
- ClientContext cli_ctx_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_{false};
- bool initial_metadata_done_{false};
- };
-
- UnaryClient test{generic_stub_.get(), kMethodName};
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
+TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ const TString kMethodName("/grpc.testing.EchoTestService/Echo");
+ class UnaryClient : public grpc::experimental::ClientUnaryReactor {
+ public:
+ UnaryClient(grpc::GenericStub* stub, const TString& method_name) {
+ cli_ctx_.AddMetadata("key1", "val1");
+ cli_ctx_.AddMetadata("key2", "val2");
+ request_.mutable_param()->set_echo_metadata_initially(true);
+ request_.set_message("Hello metadata");
+ send_buf_ = SerializeToByteBuffer(&request_);
+
+ stub->experimental().PrepareUnaryCall(&cli_ctx_, method_name,
+ send_buf_.get(), &recv_buf_, this);
+ StartCall();
+ }
+ void OnReadInitialMetadataDone(bool ok) override {
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1"));
+ EXPECT_EQ(
+ "val1",
+ ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second));
+ EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2"));
+ EXPECT_EQ(
+ "val2",
+ ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second));
+ initial_metadata_done_ = true;
+ }
+ void OnDone(const Status& s) override {
+ EXPECT_TRUE(initial_metadata_done_);
+ EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size());
+ EXPECT_TRUE(s.ok());
+ EchoResponse response;
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response));
+ EXPECT_EQ(request_.message(), response.message());
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ EchoRequest request_;
+ std::unique_ptr<ByteBuffer> send_buf_;
+ ByteBuffer recv_buf_;
+ ClientContext cli_ctx_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_{false};
+ bool initial_metadata_done_{false};
+ };
+
+ UnaryClient test{generic_stub_.get(), kMethodName};
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
public:
ReadClient(grpc::testing::EchoTestService::Stub* stub,
@@ -935,7 +935,7 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
if (server_try_cancel_ != DO_NOT_CANCEL) {
// Send server_try_cancel value in the client metadata
context_.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
}
request_.set_message("Hello client ");
stub->experimental_async()->ResponseStream(&context_, &request_, this);
@@ -956,7 +956,7 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
} else {
EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
EXPECT_EQ(response_.message(),
- request_.message() + ToString(reads_complete_));
+ request_.message() + ToString(reads_complete_));
reads_complete_++;
if (client_cancel_.cancel &&
reads_complete_ == client_cancel_.ops_before_cancel) {
@@ -1088,20 +1088,20 @@ class BidiClient
public:
BidiClient(grpc::testing::EchoTestService::Stub* stub,
ServerTryCancelRequestPhase server_try_cancel,
- int num_msgs_to_send, bool cork_metadata, bool first_write_async,
- ClientCancelInfo client_cancel = {})
+ int num_msgs_to_send, bool cork_metadata, bool first_write_async,
+ ClientCancelInfo client_cancel = {})
: server_try_cancel_(server_try_cancel),
msgs_to_send_{num_msgs_to_send},
client_cancel_{client_cancel} {
if (server_try_cancel_ != DO_NOT_CANCEL) {
// Send server_try_cancel value in the client metadata
context_.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
}
request_.set_message("Hello fren ");
- context_.set_initial_metadata_corked(cork_metadata);
+ context_.set_initial_metadata_corked(cork_metadata);
stub->experimental_async()->BidiStream(&context_, this);
- MaybeAsyncWrite(first_write_async);
+ MaybeAsyncWrite(first_write_async);
StartRead(&response_);
StartCall();
}
@@ -1122,10 +1122,10 @@ class BidiClient
}
}
void OnWriteDone(bool ok) override {
- if (async_write_thread_.joinable()) {
- async_write_thread_.join();
- RemoveHold();
- }
+ if (async_write_thread_.joinable()) {
+ async_write_thread_.join();
+ RemoveHold();
+ }
if (server_try_cancel_ == DO_NOT_CANCEL) {
EXPECT_TRUE(ok);
} else if (!ok) {
@@ -1190,26 +1190,26 @@ class BidiClient
}
private:
- void MaybeAsyncWrite(bool first_write_async) {
- if (first_write_async) {
- // Make sure that we have a write to issue.
- // TODO(vjpai): Make this work with 0 writes case as well.
- assert(msgs_to_send_ >= 1);
-
- AddHold();
- async_write_thread_ = std::thread([this] {
- std::unique_lock<std::mutex> lock(async_write_thread_mu_);
- async_write_thread_cv_.wait(
- lock, [this] { return async_write_thread_start_; });
- MaybeWrite();
- });
- std::lock_guard<std::mutex> lock(async_write_thread_mu_);
- async_write_thread_start_ = true;
- async_write_thread_cv_.notify_one();
- return;
- }
- MaybeWrite();
- }
+ void MaybeAsyncWrite(bool first_write_async) {
+ if (first_write_async) {
+ // Make sure that we have a write to issue.
+ // TODO(vjpai): Make this work with 0 writes case as well.
+ assert(msgs_to_send_ >= 1);
+
+ AddHold();
+ async_write_thread_ = std::thread([this] {
+ std::unique_lock<std::mutex> lock(async_write_thread_mu_);
+ async_write_thread_cv_.wait(
+ lock, [this] { return async_write_thread_start_; });
+ MaybeWrite();
+ });
+ std::lock_guard<std::mutex> lock(async_write_thread_mu_);
+ async_write_thread_start_ = true;
+ async_write_thread_cv_.notify_one();
+ return;
+ }
+ MaybeWrite();
+ }
void MaybeWrite() {
if (client_cancel_.cancel &&
writes_complete_ == client_cancel_.ops_before_cancel) {
@@ -1231,18 +1231,57 @@ class BidiClient
std::mutex mu_;
std::condition_variable cv_;
bool done_ = false;
- std::thread async_write_thread_;
- bool async_write_thread_start_ = false;
- std::mutex async_write_thread_mu_;
- std::condition_variable async_write_thread_cv_;
+ std::thread async_write_thread_;
+ bool async_write_thread_start_ = false;
+ std::mutex async_write_thread_mu_;
+ std::condition_variable async_write_thread_cv_;
};
TEST_P(ClientCallbackEnd2endTest, BidiStream) {
MAYBE_SKIP_TEST;
ResetStub();
- BidiClient test(stub_.get(), DO_NOT_CANCEL,
- kServerDefaultResponseStreamsToSend,
- /*cork_metadata=*/false, /*first_write_async=*/false);
+ BidiClient test(stub_.get(), DO_NOT_CANCEL,
+ kServerDefaultResponseStreamsToSend,
+ /*cork_metadata=*/false, /*first_write_async=*/false);
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ BidiClient test(stub_.get(), DO_NOT_CANCEL,
+ kServerDefaultResponseStreamsToSend,
+ /*cork_metadata=*/false, /*first_write_async=*/true);
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ BidiClient test(stub_.get(), DO_NOT_CANCEL,
+ kServerDefaultResponseStreamsToSend,
+ /*cork_metadata=*/true, /*first_write_async=*/false);
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ BidiClient test(stub_.get(), DO_NOT_CANCEL,
+ kServerDefaultResponseStreamsToSend,
+ /*cork_metadata=*/true, /*first_write_async=*/true);
test.Await();
// Make sure that the server interceptors were not notified of a cancel
if (GetParam().use_interceptors) {
@@ -1250,52 +1289,13 @@ TEST_P(ClientCallbackEnd2endTest, BidiStream) {
}
}
-TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
- MAYBE_SKIP_TEST;
- ResetStub();
- BidiClient test(stub_.get(), DO_NOT_CANCEL,
- kServerDefaultResponseStreamsToSend,
- /*cork_metadata=*/false, /*first_write_async=*/true);
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
- MAYBE_SKIP_TEST;
- ResetStub();
- BidiClient test(stub_.get(), DO_NOT_CANCEL,
- kServerDefaultResponseStreamsToSend,
- /*cork_metadata=*/true, /*first_write_async=*/false);
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
- MAYBE_SKIP_TEST;
- ResetStub();
- BidiClient test(stub_.get(), DO_NOT_CANCEL,
- kServerDefaultResponseStreamsToSend,
- /*cork_metadata=*/true, /*first_write_async=*/true);
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
MAYBE_SKIP_TEST;
ResetStub();
- BidiClient test(stub_.get(), DO_NOT_CANCEL,
- kServerDefaultResponseStreamsToSend,
- /*cork_metadata=*/false, /*first_write_async=*/false,
- ClientCancelInfo(2));
+ BidiClient test(stub_.get(), DO_NOT_CANCEL,
+ kServerDefaultResponseStreamsToSend,
+ /*cork_metadata=*/false, /*first_write_async=*/false,
+ ClientCancelInfo(2));
test.Await();
// Make sure that the server interceptors were notified of a cancel
if (GetParam().use_interceptors) {
@@ -1307,8 +1307,8 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
MAYBE_SKIP_TEST;
ResetStub();
- BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
- /*cork_metadata=*/false, /*first_write_async=*/false);
+ BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
+ /*cork_metadata=*/false, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
@@ -1321,9 +1321,9 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
MAYBE_SKIP_TEST;
ResetStub();
- BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
- /*num_msgs_to_send=*/10, /*cork_metadata=*/false,
- /*first_write_async=*/false);
+ BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
+ /*num_msgs_to_send=*/10, /*cork_metadata=*/false,
+ /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
@@ -1336,8 +1336,8 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
MAYBE_SKIP_TEST;
ResetStub();
- BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
- /*cork_metadata=*/false, /*first_write_async=*/false);
+ BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
+ /*cork_metadata=*/false, /*first_write_async=*/false);
test.Await();
// Make sure that the server interceptors were notified
if (GetParam().use_interceptors) {
@@ -1452,12 +1452,12 @@ TEST_P(ClientCallbackEnd2endTest,
done_cv_.wait(l);
}
}
- // RemoveHold under the same lock used for OnDone to make sure that we don't
- // call OnDone directly or indirectly from the RemoveHold function.
- void RemoveHoldUnderLock() {
- std::unique_lock<std::mutex> l(mu_);
- RemoveHold();
- }
+ // RemoveHold under the same lock used for OnDone to make sure that we don't
+ // call OnDone directly or indirectly from the RemoveHold function.
+ void RemoveHoldUnderLock() {
+ std::unique_lock<std::mutex> l(mu_);
+ RemoveHold();
+ }
const Status& status() {
std::unique_lock<std::mutex> l(mu_);
return status_;
@@ -1502,7 +1502,7 @@ TEST_P(ClientCallbackEnd2endTest,
++reads_complete;
}
}
- client.RemoveHoldUnderLock();
+ client.RemoveHoldUnderLock();
client.Await();
EXPECT_EQ(kServerDefaultResponseStreamsToSend, reads_complete);
@@ -1516,7 +1516,7 @@ std::vector<TestScenario> CreateTestScenarios(bool test_insecure) {
#endif
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types{
+ std::vector<TString> credentials_types{
GetCredentialsProvider()->GetSecureCredentialsTypeList()};
auto insec_ok = [] {
// Only allow insecure credentials type when it is registered with the
@@ -1556,8 +1556,8 @@ INSTANTIATE_TEST_SUITE_P(ClientCallbackEnd2endTest, ClientCallbackEnd2endTest,
} // namespace grpc
int main(int argc, char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
- grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
grpc_init();
int ret = RUN_ALL_TESTS();
grpc_shutdown();
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
index 429a4283bc..80e1869396 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
@@ -38,7 +38,7 @@ using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using std::chrono::system_clock;
-static TString g_root;
+static TString g_root;
namespace grpc {
namespace testing {
@@ -127,9 +127,9 @@ TEST_F(CrashTest, KillAfterWrite) {
} // namespace grpc
int main(int argc, char** argv) {
- TString me = argv[0];
+ TString me = argv[0];
auto lslash = me.rfind('/');
- if (lslash != TString::npos) {
+ if (lslash != TString::npos) {
g_root = me.substr(0, lslash);
} else {
g_root = ".";
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
index 2962bd63da..2d5be420f2 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
@@ -19,7 +19,7 @@
#include <gflags/gflags.h>
#include <iostream>
#include <memory>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <grpc/support/log.h>
#include <grpcpp/server.h>
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
index 99bb3fb6d9..956876d9f6 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
@@ -43,17 +43,17 @@ namespace grpc {
namespace testing {
namespace {
-enum class RPCType {
- kSyncUnary,
- kSyncClientStreaming,
- kSyncServerStreaming,
- kSyncBidiStreaming,
- kAsyncCQUnary,
- kAsyncCQClientStreaming,
- kAsyncCQServerStreaming,
- kAsyncCQBidiStreaming,
-};
-
+enum class RPCType {
+ kSyncUnary,
+ kSyncClientStreaming,
+ kSyncServerStreaming,
+ kSyncBidiStreaming,
+ kAsyncCQUnary,
+ kAsyncCQClientStreaming,
+ kAsyncCQServerStreaming,
+ kAsyncCQBidiStreaming,
+};
+
/* Hijacks Echo RPC and fills in the expected values */
class HijackingInterceptor : public experimental::Interceptor {
public:
@@ -267,7 +267,7 @@ class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor {
private:
experimental::ClientRpcInfo* info_;
- std::multimap<TString, TString> metadata_map_;
+ std::multimap<TString, TString> metadata_map_;
ClientContext ctx_;
EchoRequest req_;
EchoResponse resp_;
@@ -349,7 +349,7 @@ class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor {
private:
experimental::ClientRpcInfo* info_;
- TString msg;
+ TString msg;
};
class ClientStreamingRpcHijackingInterceptor
@@ -411,7 +411,7 @@ class ServerStreamingRpcHijackingInterceptor
public:
ServerStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
info_ = info;
- got_failed_message_ = false;
+ got_failed_message_ = false;
}
virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
@@ -543,22 +543,22 @@ class LoggingInterceptor : public experimental::Interceptor {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
EchoRequest req;
- auto* send_msg = methods->GetSendMessage();
- if (send_msg == nullptr) {
- // We did not get the non-serialized form of the message. Get the
- // serialized form.
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EchoRequest req;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_EQ(req.message(), "Hello");
- } else {
- EXPECT_EQ(
- static_cast<const EchoRequest*>(send_msg)->message().find("Hello"),
- 0u);
- }
+ auto* send_msg = methods->GetSendMessage();
+ if (send_msg == nullptr) {
+ // We did not get the non-serialized form of the message. Get the
+ // serialized form.
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EchoRequest req;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_EQ(req.message(), "Hello");
+ } else {
+ EXPECT_EQ(
+ static_cast<const EchoRequest*>(send_msg)->message().find("Hello"),
+ 0u);
+ }
auto* buffer = methods->GetSerializedSendMessage();
auto copied_buffer = *buffer;
EXPECT_TRUE(
@@ -606,27 +606,27 @@ class LoggingInterceptor : public experimental::Interceptor {
methods->Proceed();
}
- static void VerifyCall(RPCType type) {
- switch (type) {
- case RPCType::kSyncUnary:
- case RPCType::kAsyncCQUnary:
- VerifyUnaryCall();
- break;
- case RPCType::kSyncClientStreaming:
- case RPCType::kAsyncCQClientStreaming:
- VerifyClientStreamingCall();
- break;
- case RPCType::kSyncServerStreaming:
- case RPCType::kAsyncCQServerStreaming:
- VerifyServerStreamingCall();
- break;
- case RPCType::kSyncBidiStreaming:
- case RPCType::kAsyncCQBidiStreaming:
- VerifyBidiStreamingCall();
- break;
- }
- }
-
+ static void VerifyCall(RPCType type) {
+ switch (type) {
+ case RPCType::kSyncUnary:
+ case RPCType::kAsyncCQUnary:
+ VerifyUnaryCall();
+ break;
+ case RPCType::kSyncClientStreaming:
+ case RPCType::kAsyncCQClientStreaming:
+ VerifyClientStreamingCall();
+ break;
+ case RPCType::kSyncServerStreaming:
+ case RPCType::kAsyncCQServerStreaming:
+ VerifyServerStreamingCall();
+ break;
+ case RPCType::kSyncBidiStreaming:
+ case RPCType::kAsyncCQBidiStreaming:
+ VerifyBidiStreamingCall();
+ break;
+ }
+ }
+
static void VerifyCallCommon() {
EXPECT_TRUE(pre_send_initial_metadata_);
EXPECT_TRUE(pre_send_close_);
@@ -683,78 +683,78 @@ class LoggingInterceptorFactory
}
};
-class TestScenario {
- public:
- explicit TestScenario(const RPCType& type) : type_(type) {}
-
- RPCType type() const { return type_; }
-
- private:
- RPCType type_;
-};
-
-std::vector<TestScenario> CreateTestScenarios() {
- std::vector<TestScenario> scenarios;
- scenarios.emplace_back(RPCType::kSyncUnary);
- scenarios.emplace_back(RPCType::kSyncClientStreaming);
- scenarios.emplace_back(RPCType::kSyncServerStreaming);
- scenarios.emplace_back(RPCType::kSyncBidiStreaming);
- scenarios.emplace_back(RPCType::kAsyncCQUnary);
- scenarios.emplace_back(RPCType::kAsyncCQServerStreaming);
- return scenarios;
-}
-
-class ParameterizedClientInterceptorsEnd2endTest
- : public ::testing::TestWithParam<TestScenario> {
+class TestScenario {
+ public:
+ explicit TestScenario(const RPCType& type) : type_(type) {}
+
+ RPCType type() const { return type_; }
+
+ private:
+ RPCType type_;
+};
+
+std::vector<TestScenario> CreateTestScenarios() {
+ std::vector<TestScenario> scenarios;
+ scenarios.emplace_back(RPCType::kSyncUnary);
+ scenarios.emplace_back(RPCType::kSyncClientStreaming);
+ scenarios.emplace_back(RPCType::kSyncServerStreaming);
+ scenarios.emplace_back(RPCType::kSyncBidiStreaming);
+ scenarios.emplace_back(RPCType::kAsyncCQUnary);
+ scenarios.emplace_back(RPCType::kAsyncCQServerStreaming);
+ return scenarios;
+}
+
+class ParameterizedClientInterceptorsEnd2endTest
+ : public ::testing::TestWithParam<TestScenario> {
protected:
- ParameterizedClientInterceptorsEnd2endTest() {
+ ParameterizedClientInterceptorsEnd2endTest() {
int port = grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
+ server_address_ = "localhost:" + ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
server_ = builder.BuildAndStart();
}
- ~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); }
-
- void SendRPC(const std::shared_ptr<Channel>& channel) {
- switch (GetParam().type()) {
- case RPCType::kSyncUnary:
- MakeCall(channel);
- break;
- case RPCType::kSyncClientStreaming:
- MakeClientStreamingCall(channel);
- break;
- case RPCType::kSyncServerStreaming:
- MakeServerStreamingCall(channel);
- break;
- case RPCType::kSyncBidiStreaming:
- MakeBidiStreamingCall(channel);
- break;
- case RPCType::kAsyncCQUnary:
- MakeAsyncCQCall(channel);
- break;
- case RPCType::kAsyncCQClientStreaming:
- // TODO(yashykt) : Fill this out
- break;
- case RPCType::kAsyncCQServerStreaming:
- MakeAsyncCQServerStreamingCall(channel);
- break;
- case RPCType::kAsyncCQBidiStreaming:
- // TODO(yashykt) : Fill this out
- break;
- }
- }
-
- TString server_address_;
- EchoTestServiceStreamingImpl service_;
+ ~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); }
+
+ void SendRPC(const std::shared_ptr<Channel>& channel) {
+ switch (GetParam().type()) {
+ case RPCType::kSyncUnary:
+ MakeCall(channel);
+ break;
+ case RPCType::kSyncClientStreaming:
+ MakeClientStreamingCall(channel);
+ break;
+ case RPCType::kSyncServerStreaming:
+ MakeServerStreamingCall(channel);
+ break;
+ case RPCType::kSyncBidiStreaming:
+ MakeBidiStreamingCall(channel);
+ break;
+ case RPCType::kAsyncCQUnary:
+ MakeAsyncCQCall(channel);
+ break;
+ case RPCType::kAsyncCQClientStreaming:
+ // TODO(yashykt) : Fill this out
+ break;
+ case RPCType::kAsyncCQServerStreaming:
+ MakeAsyncCQServerStreamingCall(channel);
+ break;
+ case RPCType::kAsyncCQBidiStreaming:
+ // TODO(yashykt) : Fill this out
+ break;
+ }
+ }
+
+ TString server_address_;
+ EchoTestServiceStreamingImpl service_;
std::unique_ptr<Server> server_;
};
-TEST_P(ParameterizedClientInterceptorsEnd2endTest,
- ClientInterceptorLoggingTest) {
+TEST_P(ParameterizedClientInterceptorsEnd2endTest,
+ ClientInterceptorLoggingTest) {
ChannelArguments args;
DummyInterceptor::Reset();
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
@@ -768,36 +768,36 @@ TEST_P(ParameterizedClientInterceptorsEnd2endTest,
}
auto channel = experimental::CreateCustomChannelWithInterceptors(
server_address_, InsecureChannelCredentials(), args, std::move(creators));
- SendRPC(channel);
- LoggingInterceptor::VerifyCall(GetParam().type());
+ SendRPC(channel);
+ LoggingInterceptor::VerifyCall(GetParam().type());
// Make sure all 20 dummy interceptors were run
EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
}
-INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end,
- ParameterizedClientInterceptorsEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios()));
-
-class ClientInterceptorsEnd2endTest
- : public ::testing::TestWithParam<TestScenario> {
- protected:
- ClientInterceptorsEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- ~ClientInterceptorsEnd2endTest() { server_->Shutdown(); }
-
- TString server_address_;
- TestServiceImpl service_;
- std::unique_ptr<Server> server_;
-};
-
+INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end,
+ ParameterizedClientInterceptorsEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios()));
+
+class ClientInterceptorsEnd2endTest
+ : public ::testing::TestWithParam<TestScenario> {
+ protected:
+ ClientInterceptorsEnd2endTest() {
+ int port = grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
+ server_address_ = "localhost:" + ToString(port);
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ ~ClientInterceptorsEnd2endTest() { server_->Shutdown(); }
+
+ TString server_address_;
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
TEST_F(ClientInterceptorsEnd2endTest,
LameChannelClientInterceptorHijackingTest) {
ChannelArguments args;
@@ -878,26 +878,26 @@ TEST_F(ClientInterceptorsEnd2endTest,
EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 12);
}
-class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
- protected:
- ClientInterceptorsCallbackEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- ~ClientInterceptorsCallbackEnd2endTest() { server_->Shutdown(); }
-
- TString server_address_;
- TestServiceImpl service_;
- std::unique_ptr<Server> server_;
-};
-
-TEST_F(ClientInterceptorsCallbackEnd2endTest,
+class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
+ protected:
+ ClientInterceptorsCallbackEnd2endTest() {
+ int port = grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
+ server_address_ = "localhost:" + ToString(port);
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ ~ClientInterceptorsCallbackEnd2endTest() { server_->Shutdown(); }
+
+ TString server_address_;
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_F(ClientInterceptorsCallbackEnd2endTest,
ClientInterceptorLoggingTestWithCallback) {
ChannelArguments args;
DummyInterceptor::Reset();
@@ -918,7 +918,7 @@ TEST_F(ClientInterceptorsCallbackEnd2endTest,
EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
}
-TEST_F(ClientInterceptorsCallbackEnd2endTest,
+TEST_F(ClientInterceptorsCallbackEnd2endTest,
ClientInterceptorFactoryAllowsNullptrReturn) {
ChannelArguments args;
DummyInterceptor::Reset();
@@ -947,7 +947,7 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
int port = grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
+ server_address_ = "localhost:" + ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
server_ = builder.BuildAndStart();
@@ -955,7 +955,7 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
~ClientInterceptorsStreamingEnd2endTest() { server_->Shutdown(); }
- TString server_address_;
+ TString server_address_;
EchoTestServiceStreamingImpl service_;
std::unique_ptr<Server> server_;
};
@@ -1043,21 +1043,21 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) {
EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
}
-TEST_F(ClientInterceptorsStreamingEnd2endTest,
- AsyncCQServerStreamingHijackingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
- new ServerStreamingRpcHijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeAsyncCQServerStreamingCall(channel);
- EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
-}
-
+TEST_F(ClientInterceptorsStreamingEnd2endTest,
+ AsyncCQServerStreamingHijackingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
+ new ServerStreamingRpcHijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeAsyncCQServerStreamingCall(channel);
+ EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
+}
+
TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) {
ChannelArguments args;
DummyInterceptor::Reset();
@@ -1097,7 +1097,7 @@ class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
int port = grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
+ server_address_ = "localhost:" + ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
server_ = builder.BuildAndStart();
@@ -1105,7 +1105,7 @@ class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
~ClientGlobalInterceptorEnd2endTest() { server_->Shutdown(); }
- TString server_address_;
+ TString server_address_;
TestServiceImpl service_;
std::unique_ptr<Server> server_;
};
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
index e33f45920b..fd08dd163d 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
@@ -21,11 +21,11 @@
#include <mutex>
#include <random>
#include <set>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <thread>
-#include "y_absl/strings/str_cat.h"
-
+#include "y_absl/strings/str_cat.h"
+
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
@@ -49,7 +49,7 @@
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/parse_address.h"
+#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
@@ -127,7 +127,7 @@ class MyTestServiceImpl : public TestServiceImpl {
request_count_ = 0;
}
- std::set<TString> clients() {
+ std::set<TString> clients() {
grpc::internal::MutexLock lock(&clients_mu_);
return clients_;
}
@@ -138,7 +138,7 @@ class MyTestServiceImpl : public TestServiceImpl {
}
private:
- void AddClient(const TString& client) {
+ void AddClient(const TString& client) {
grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client);
}
@@ -147,7 +147,7 @@ class MyTestServiceImpl : public TestServiceImpl {
int request_count_ = 0;
const udpa::data::orca::v1::OrcaLoadReport* load_report_ = nullptr;
grpc::internal::Mutex clients_mu_;
- std::set<TString> clients_;
+ std::set<TString> clients_;
};
class FakeResolverResponseGeneratorWrapper {
@@ -157,18 +157,18 @@ class FakeResolverResponseGeneratorWrapper {
grpc_core::FakeResolverResponseGenerator>()) {}
FakeResolverResponseGeneratorWrapper(
- FakeResolverResponseGeneratorWrapper&& other) noexcept {
+ FakeResolverResponseGeneratorWrapper&& other) noexcept {
response_generator_ = std::move(other.response_generator_);
}
- void SetNextResolution(
- const std::vector<int>& ports, const char* service_config_json = nullptr,
- const char* attribute_key = nullptr,
- std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
- nullptr) {
+ void SetNextResolution(
+ const std::vector<int>& ports, const char* service_config_json = nullptr,
+ const char* attribute_key = nullptr,
+ std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
+ nullptr) {
grpc_core::ExecCtx exec_ctx;
- response_generator_->SetResponse(BuildFakeResults(
- ports, service_config_json, attribute_key, std::move(attribute)));
+ response_generator_->SetResponse(BuildFakeResults(
+ ports, service_config_json, attribute_key, std::move(attribute)));
}
void SetNextResolutionUponError(const std::vector<int>& ports) {
@@ -187,30 +187,30 @@ class FakeResolverResponseGeneratorWrapper {
private:
static grpc_core::Resolver::Result BuildFakeResults(
- const std::vector<int>& ports, const char* service_config_json = nullptr,
- const char* attribute_key = nullptr,
- std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
- nullptr) {
+ const std::vector<int>& ports, const char* service_config_json = nullptr,
+ const char* attribute_key = nullptr,
+ std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
+ nullptr) {
grpc_core::Resolver::Result result;
for (const int& port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
+ TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
+ grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
GPR_ASSERT(lb_uri != nullptr);
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
- std::map<const char*,
- std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>>
- attributes;
- if (attribute != nullptr) {
- attributes[attribute_key] = attribute->Copy();
- }
+ std::map<const char*,
+ std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>>
+ attributes;
+ if (attribute != nullptr) {
+ attributes[attribute_key] = attribute->Copy();
+ }
result.addresses.emplace_back(address.addr, address.len,
- nullptr /* args */, std::move(attributes));
+ nullptr /* args */, std::move(attributes));
grpc_uri_destroy(lb_uri);
}
if (service_config_json != nullptr) {
result.service_config = grpc_core::ServiceConfig::Create(
- nullptr, service_config_json, &result.service_config_error);
+ nullptr, service_config_json, &result.service_config_error);
GPR_ASSERT(result.service_config != nullptr);
}
return result;
@@ -287,7 +287,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
std::shared_ptr<Channel> BuildChannel(
- const TString& lb_policy_name,
+ const TString& lb_policy_name,
const FakeResolverResponseGeneratorWrapper& response_generator,
ChannelArguments args = ChannelArguments()) {
if (lb_policy_name.size() > 0) {
@@ -306,13 +306,13 @@ class ClientLbEnd2endTest : public ::testing::Test {
if (local_response) response = new EchoResponse;
EchoRequest request;
request.set_message(kRequestMessage_);
- request.mutable_param()->set_echo_metadata(true);
+ request.mutable_param()->set_echo_metadata(true);
ClientContext context;
context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
if (wait_for_ready) context.set_wait_for_ready(true);
- context.AddMetadata("foo", "1");
- context.AddMetadata("bar", "2");
- context.AddMetadata("baz", "3");
+ context.AddMetadata("foo", "1");
+ context.AddMetadata("bar", "2");
+ context.AddMetadata("baz", "3");
Status status = stub->Echo(&context, request, response);
if (result != nullptr) *result = status;
if (local_response) delete response;
@@ -353,7 +353,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
port_ = port > 0 ? port : 5100; // grpc_pick_unused_port_or_die();
}
- void Start(const TString& server_host) {
+ void Start(const TString& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_);
started_ = true;
grpc::internal::Mutex mu;
@@ -366,7 +366,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
gpr_log(GPR_INFO, "server startup complete");
}
- void Serve(const TString& server_host, grpc::internal::Mutex* mu,
+ void Serve(const TString& server_host, grpc::internal::Mutex* mu,
grpc::internal::CondVar* cond) {
std::ostringstream server_address;
server_address << server_host << ":" << port_;
@@ -388,7 +388,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
started_ = false;
}
- void SetServingStatus(const TString& service, bool serving) {
+ void SetServingStatus(const TString& service, bool serving) {
server_->GetHealthCheckService()->SetServingStatus(service, serving);
}
};
@@ -463,9 +463,9 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
}
- const TString server_host_;
+ const TString server_host_;
std::vector<std::unique_ptr<ServerData>> servers_;
- const TString kRequestMessage_;
+ const TString kRequestMessage_;
std::shared_ptr<ChannelCredentials> creds_;
};
@@ -642,11 +642,11 @@ TEST_F(ClientLbEnd2endTest, PickFirstResetConnectionBackoff) {
channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
// Reset connection backoff.
experimental::ChannelResetConnectionBackoff(channel.get());
- // Wait for connect. Should happen as soon as the client connects to
- // the newly started server, which should be before the initial
- // backoff timeout elapses.
+ // Wait for connect. Should happen as soon as the client connects to
+ // the newly started server, which should be before the initial
+ // backoff timeout elapses.
EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20)));
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20)));
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
@@ -1650,96 +1650,96 @@ TEST_F(ClientLbEnd2endTest, ChannelIdleness) {
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
}
-class ClientLbPickArgsTest : public ClientLbEnd2endTest {
- protected:
- void SetUp() override {
- ClientLbEnd2endTest::SetUp();
- current_test_instance_ = this;
- }
-
- static void SetUpTestCase() {
- grpc_init();
- grpc_core::RegisterTestPickArgsLoadBalancingPolicy(SavePickArgs);
- }
-
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
-
- const std::vector<grpc_core::PickArgsSeen>& args_seen_list() {
- grpc::internal::MutexLock lock(&mu_);
- return args_seen_list_;
- }
-
- private:
- static void SavePickArgs(const grpc_core::PickArgsSeen& args_seen) {
- ClientLbPickArgsTest* self = current_test_instance_;
- grpc::internal::MutexLock lock(&self->mu_);
- self->args_seen_list_.emplace_back(args_seen);
- }
-
- static ClientLbPickArgsTest* current_test_instance_;
- grpc::internal::Mutex mu_;
- std::vector<grpc_core::PickArgsSeen> args_seen_list_;
-};
-
-ClientLbPickArgsTest* ClientLbPickArgsTest::current_test_instance_ = nullptr;
-
-TEST_F(ClientLbPickArgsTest, Basic) {
- const int kNumServers = 1;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("test_pick_args_lb", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION, /*wait_for_ready=*/true);
- // Check LB policy name for the channel.
- EXPECT_EQ("test_pick_args_lb", channel->GetLoadBalancingPolicyName());
- // There will be two entries, one for the pick tried in state
- // CONNECTING and another for the pick tried in state READY.
- EXPECT_THAT(args_seen_list(),
- ::testing::ElementsAre(
- ::testing::AllOf(
- ::testing::Field(&grpc_core::PickArgsSeen::path,
- "/grpc.testing.EchoTestService/Echo"),
- ::testing::Field(&grpc_core::PickArgsSeen::metadata,
- ::testing::UnorderedElementsAre(
- ::testing::Pair("foo", "1"),
- ::testing::Pair("bar", "2"),
- ::testing::Pair("baz", "3")))),
- ::testing::AllOf(
- ::testing::Field(&grpc_core::PickArgsSeen::path,
- "/grpc.testing.EchoTestService/Echo"),
- ::testing::Field(&grpc_core::PickArgsSeen::metadata,
- ::testing::UnorderedElementsAre(
- ::testing::Pair("foo", "1"),
- ::testing::Pair("bar", "2"),
- ::testing::Pair("baz", "3"))))));
-}
-
+class ClientLbPickArgsTest : public ClientLbEnd2endTest {
+ protected:
+ void SetUp() override {
+ ClientLbEnd2endTest::SetUp();
+ current_test_instance_ = this;
+ }
+
+ static void SetUpTestCase() {
+ grpc_init();
+ grpc_core::RegisterTestPickArgsLoadBalancingPolicy(SavePickArgs);
+ }
+
+ static void TearDownTestCase() { grpc_shutdown_blocking(); }
+
+ const std::vector<grpc_core::PickArgsSeen>& args_seen_list() {
+ grpc::internal::MutexLock lock(&mu_);
+ return args_seen_list_;
+ }
+
+ private:
+ static void SavePickArgs(const grpc_core::PickArgsSeen& args_seen) {
+ ClientLbPickArgsTest* self = current_test_instance_;
+ grpc::internal::MutexLock lock(&self->mu_);
+ self->args_seen_list_.emplace_back(args_seen);
+ }
+
+ static ClientLbPickArgsTest* current_test_instance_;
+ grpc::internal::Mutex mu_;
+ std::vector<grpc_core::PickArgsSeen> args_seen_list_;
+};
+
+ClientLbPickArgsTest* ClientLbPickArgsTest::current_test_instance_ = nullptr;
+
+TEST_F(ClientLbPickArgsTest, Basic) {
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("test_pick_args_lb", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION, /*wait_for_ready=*/true);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("test_pick_args_lb", channel->GetLoadBalancingPolicyName());
+ // There will be two entries, one for the pick tried in state
+ // CONNECTING and another for the pick tried in state READY.
+ EXPECT_THAT(args_seen_list(),
+ ::testing::ElementsAre(
+ ::testing::AllOf(
+ ::testing::Field(&grpc_core::PickArgsSeen::path,
+ "/grpc.testing.EchoTestService/Echo"),
+ ::testing::Field(&grpc_core::PickArgsSeen::metadata,
+ ::testing::UnorderedElementsAre(
+ ::testing::Pair("foo", "1"),
+ ::testing::Pair("bar", "2"),
+ ::testing::Pair("baz", "3")))),
+ ::testing::AllOf(
+ ::testing::Field(&grpc_core::PickArgsSeen::path,
+ "/grpc.testing.EchoTestService/Echo"),
+ ::testing::Field(&grpc_core::PickArgsSeen::metadata,
+ ::testing::UnorderedElementsAre(
+ ::testing::Pair("foo", "1"),
+ ::testing::Pair("bar", "2"),
+ ::testing::Pair("baz", "3"))))));
+}
+
class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
protected:
void SetUp() override {
ClientLbEnd2endTest::SetUp();
- current_test_instance_ = this;
+ current_test_instance_ = this;
}
- static void SetUpTestCase() {
- grpc_init();
- grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
- ReportTrailerIntercepted);
- }
-
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
-
+ static void SetUpTestCase() {
+ grpc_init();
+ grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
+ ReportTrailerIntercepted);
+ }
+
+ static void TearDownTestCase() { grpc_shutdown_blocking(); }
+
int trailers_intercepted() {
grpc::internal::MutexLock lock(&mu_);
return trailers_intercepted_;
}
- const grpc_core::MetadataVector& trailing_metadata() {
- grpc::internal::MutexLock lock(&mu_);
- return trailing_metadata_;
- }
-
+ const grpc_core::MetadataVector& trailing_metadata() {
+ grpc::internal::MutexLock lock(&mu_);
+ return trailing_metadata_;
+ }
+
const udpa::data::orca::v1::OrcaLoadReport* backend_load_report() {
grpc::internal::MutexLock lock(&mu_);
return load_report_.get();
@@ -1747,12 +1747,12 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
private:
static void ReportTrailerIntercepted(
- const grpc_core::TrailingMetadataArgsSeen& args_seen) {
- const auto* backend_metric_data = args_seen.backend_metric_data;
- ClientLbInterceptTrailingMetadataTest* self = current_test_instance_;
+ const grpc_core::TrailingMetadataArgsSeen& args_seen) {
+ const auto* backend_metric_data = args_seen.backend_metric_data;
+ ClientLbInterceptTrailingMetadataTest* self = current_test_instance_;
grpc::internal::MutexLock lock(&self->mu_);
self->trailers_intercepted_++;
- self->trailing_metadata_ = args_seen.metadata;
+ self->trailing_metadata_ = args_seen.metadata;
if (backend_metric_data != nullptr) {
self->load_report_.reset(new udpa::data::orca::v1::OrcaLoadReport);
self->load_report_->set_cpu_utilization(
@@ -1761,28 +1761,28 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
backend_metric_data->mem_utilization);
self->load_report_->set_rps(backend_metric_data->requests_per_second);
for (const auto& p : backend_metric_data->request_cost) {
- TString name = TString(p.first);
- (*self->load_report_->mutable_request_cost())[std::move(name)] =
- p.second;
+ TString name = TString(p.first);
+ (*self->load_report_->mutable_request_cost())[std::move(name)] =
+ p.second;
}
for (const auto& p : backend_metric_data->utilization) {
- TString name = TString(p.first);
- (*self->load_report_->mutable_utilization())[std::move(name)] =
- p.second;
+ TString name = TString(p.first);
+ (*self->load_report_->mutable_utilization())[std::move(name)] =
+ p.second;
}
}
}
- static ClientLbInterceptTrailingMetadataTest* current_test_instance_;
+ static ClientLbInterceptTrailingMetadataTest* current_test_instance_;
grpc::internal::Mutex mu_;
int trailers_intercepted_ = 0;
- grpc_core::MetadataVector trailing_metadata_;
+ grpc_core::MetadataVector trailing_metadata_;
std::unique_ptr<udpa::data::orca::v1::OrcaLoadReport> load_report_;
};
-ClientLbInterceptTrailingMetadataTest*
- ClientLbInterceptTrailingMetadataTest::current_test_instance_ = nullptr;
-
+ClientLbInterceptTrailingMetadataTest*
+ ClientLbInterceptTrailingMetadataTest::current_test_instance_ = nullptr;
+
TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) {
const int kNumServers = 1;
const int kNumRpcs = 10;
@@ -1799,13 +1799,13 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) {
EXPECT_EQ("intercept_trailing_metadata_lb",
channel->GetLoadBalancingPolicyName());
EXPECT_EQ(kNumRpcs, trailers_intercepted());
- EXPECT_THAT(trailing_metadata(),
- ::testing::UnorderedElementsAre(
- // TODO(roth): Should grpc-status be visible here?
- ::testing::Pair("grpc-status", "0"),
- ::testing::Pair("user-agent", ::testing::_),
- ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
- ::testing::Pair("baz", "3")));
+ EXPECT_THAT(trailing_metadata(),
+ ::testing::UnorderedElementsAre(
+ // TODO(roth): Should grpc-status be visible here?
+ ::testing::Pair("grpc-status", "0"),
+ ::testing::Pair("user-agent", ::testing::_),
+ ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
+ ::testing::Pair("baz", "3")));
EXPECT_EQ(nullptr, backend_load_report());
}
@@ -1841,13 +1841,13 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesEnabled) {
EXPECT_EQ("intercept_trailing_metadata_lb",
channel->GetLoadBalancingPolicyName());
EXPECT_EQ(kNumRpcs, trailers_intercepted());
- EXPECT_THAT(trailing_metadata(),
- ::testing::UnorderedElementsAre(
- // TODO(roth): Should grpc-status be visible here?
- ::testing::Pair("grpc-status", "0"),
- ::testing::Pair("user-agent", ::testing::_),
- ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
- ::testing::Pair("baz", "3")));
+ EXPECT_THAT(trailing_metadata(),
+ ::testing::UnorderedElementsAre(
+ // TODO(roth): Should grpc-status be visible here?
+ ::testing::Pair("grpc-status", "0"),
+ ::testing::Pair("user-agent", ::testing::_),
+ ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
+ ::testing::Pair("baz", "3")));
EXPECT_EQ(nullptr, backend_load_report());
}
@@ -1901,83 +1901,83 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, BackendMetricData) {
EXPECT_EQ(kNumRpcs, trailers_intercepted());
}
-class ClientLbAddressTest : public ClientLbEnd2endTest {
- protected:
- static const char* kAttributeKey;
-
- class Attribute : public grpc_core::ServerAddress::AttributeInterface {
- public:
- explicit Attribute(const TString& str) : str_(str) {}
-
- std::unique_ptr<AttributeInterface> Copy() const override {
- return y_absl::make_unique<Attribute>(str_);
- }
-
- int Cmp(const AttributeInterface* other) const override {
- return str_.compare(static_cast<const Attribute*>(other)->str_);
- }
-
- TString ToString() const override { return str_; }
-
- private:
- TString str_;
- };
-
- void SetUp() override {
- ClientLbEnd2endTest::SetUp();
- current_test_instance_ = this;
- }
-
- static void SetUpTestCase() {
- grpc_init();
- grpc_core::RegisterAddressTestLoadBalancingPolicy(SaveAddress);
- }
-
- static void TearDownTestCase() { grpc_shutdown_blocking(); }
-
- const std::vector<TString>& addresses_seen() {
- grpc::internal::MutexLock lock(&mu_);
- return addresses_seen_;
- }
-
- private:
- static void SaveAddress(const grpc_core::ServerAddress& address) {
- ClientLbAddressTest* self = current_test_instance_;
- grpc::internal::MutexLock lock(&self->mu_);
- self->addresses_seen_.emplace_back(address.ToString());
- }
-
- static ClientLbAddressTest* current_test_instance_;
- grpc::internal::Mutex mu_;
- std::vector<TString> addresses_seen_;
-};
-
-const char* ClientLbAddressTest::kAttributeKey = "attribute_key";
-
-ClientLbAddressTest* ClientLbAddressTest::current_test_instance_ = nullptr;
-
-TEST_F(ClientLbAddressTest, Basic) {
- const int kNumServers = 1;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("address_test_lb", response_generator);
- auto stub = BuildStub(channel);
- // Addresses returned by the resolver will have attached attributes.
- response_generator.SetNextResolution(GetServersPorts(), nullptr,
- kAttributeKey,
- y_absl::make_unique<Attribute>("foo"));
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- // Check LB policy name for the channel.
- EXPECT_EQ("address_test_lb", channel->GetLoadBalancingPolicyName());
- // Make sure that the attributes wind up on the subchannels.
- std::vector<TString> expected;
- for (const int port : GetServersPorts()) {
- expected.emplace_back(y_absl::StrCat(
- "127.0.0.1:", port, " args={} attributes={", kAttributeKey, "=foo}"));
- }
- EXPECT_EQ(addresses_seen(), expected);
-}
-
+class ClientLbAddressTest : public ClientLbEnd2endTest {
+ protected:
+ static const char* kAttributeKey;
+
+ class Attribute : public grpc_core::ServerAddress::AttributeInterface {
+ public:
+ explicit Attribute(const TString& str) : str_(str) {}
+
+ std::unique_ptr<AttributeInterface> Copy() const override {
+ return y_absl::make_unique<Attribute>(str_);
+ }
+
+ int Cmp(const AttributeInterface* other) const override {
+ return str_.compare(static_cast<const Attribute*>(other)->str_);
+ }
+
+ TString ToString() const override { return str_; }
+
+ private:
+ TString str_;
+ };
+
+ void SetUp() override {
+ ClientLbEnd2endTest::SetUp();
+ current_test_instance_ = this;
+ }
+
+ static void SetUpTestCase() {
+ grpc_init();
+ grpc_core::RegisterAddressTestLoadBalancingPolicy(SaveAddress);
+ }
+
+ static void TearDownTestCase() { grpc_shutdown_blocking(); }
+
+ const std::vector<TString>& addresses_seen() {
+ grpc::internal::MutexLock lock(&mu_);
+ return addresses_seen_;
+ }
+
+ private:
+ static void SaveAddress(const grpc_core::ServerAddress& address) {
+ ClientLbAddressTest* self = current_test_instance_;
+ grpc::internal::MutexLock lock(&self->mu_);
+ self->addresses_seen_.emplace_back(address.ToString());
+ }
+
+ static ClientLbAddressTest* current_test_instance_;
+ grpc::internal::Mutex mu_;
+ std::vector<TString> addresses_seen_;
+};
+
+const char* ClientLbAddressTest::kAttributeKey = "attribute_key";
+
+ClientLbAddressTest* ClientLbAddressTest::current_test_instance_ = nullptr;
+
+TEST_F(ClientLbAddressTest, Basic) {
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("address_test_lb", response_generator);
+ auto stub = BuildStub(channel);
+ // Addresses returned by the resolver will have attached attributes.
+ response_generator.SetNextResolution(GetServersPorts(), nullptr,
+ kAttributeKey,
+ y_absl::make_unique<Attribute>("foo"));
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("address_test_lb", channel->GetLoadBalancingPolicyName());
+ // Make sure that the attributes wind up on the subchannels.
+ std::vector<TString> expected;
+ for (const int port : GetServersPorts()) {
+ expected.emplace_back(y_absl::StrCat(
+ "127.0.0.1:", port, " args={} attributes={", kAttributeKey, "=foo}"));
+ }
+ EXPECT_EQ(addresses_seen(), expected);
+}
+
} // namespace
} // namespace testing
} // namespace grpc
diff --git a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
index c9ed229c1b..5d025ecb94 100644
--- a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc
@@ -58,7 +58,7 @@ class DelegatingChannelTest : public ::testing::Test {
DelegatingChannelTest() {
int port = grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ToString(port);
+ server_address_ = "localhost:" + ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
server_ = builder.BuildAndStart();
@@ -66,7 +66,7 @@ class DelegatingChannelTest : public ::testing::Test {
~DelegatingChannelTest() { server_->Shutdown(); }
- TString server_address_;
+ TString server_address_;
TestServiceImpl service_;
std::unique_ptr<Server> server_;
};
diff --git a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
index c4e1fac870..ad2ddb7e84 100644
--- a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
@@ -31,13 +31,13 @@
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
-#include <grpcpp/support/string_ref.h>
-#include <grpcpp/test/channel_test_peer.h>
+#include <grpcpp/support/string_ref.h>
+#include <grpcpp/test/channel_test_peer.h>
#include <mutex>
#include <thread>
-#include "y_absl/strings/str_format.h"
+#include "y_absl/strings/str_format.h"
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/iomgr/iomgr.h"
@@ -77,71 +77,71 @@ namespace grpc {
namespace testing {
namespace {
-bool CheckIsLocalhost(const TString& addr) {
- const TString kIpv6("ipv6:[::1]:");
- const TString kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:");
- const TString kIpv4("ipv4:127.0.0.1:");
+bool CheckIsLocalhost(const TString& addr) {
+ const TString kIpv6("ipv6:[::1]:");
+ const TString kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:");
+ const TString kIpv4("ipv4:127.0.0.1:");
return addr.substr(0, kIpv4.size()) == kIpv4 ||
addr.substr(0, kIpv4MappedIpv6.size()) == kIpv4MappedIpv6 ||
addr.substr(0, kIpv6.size()) == kIpv6;
}
-const int kClientChannelBackupPollIntervalMs = 200;
-
+const int kClientChannelBackupPollIntervalMs = 200;
+
const char kTestCredsPluginErrorMsg[] = "Could not find plugin metadata.";
-const char kFakeToken[] = "fake_token";
-const char kFakeSelector[] = "fake_selector";
-const char kExpectedFakeCredsDebugString[] =
- "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
- "AuthoritySelector:fake_selector}}";
-
-const char kWrongToken[] = "wrong_token";
-const char kWrongSelector[] = "wrong_selector";
-const char kExpectedWrongCredsDebugString[] =
- "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
- "AuthoritySelector:wrong_selector}}";
-
-const char kFakeToken1[] = "fake_token1";
-const char kFakeSelector1[] = "fake_selector1";
-const char kExpectedFakeCreds1DebugString[] =
- "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
- "AuthoritySelector:fake_selector1}}";
-
-const char kFakeToken2[] = "fake_token2";
-const char kFakeSelector2[] = "fake_selector2";
-const char kExpectedFakeCreds2DebugString[] =
- "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
- "AuthoritySelector:fake_selector2}}";
-
-const char kExpectedAuthMetadataPluginKeyFailureCredsDebugString[] =
- "SecureCallCredentials{TestMetadataCredentials{key:TestPluginMetadata,"
- "value:Does not matter, will fail the key is invalid.}}";
-const char kExpectedAuthMetadataPluginValueFailureCredsDebugString[] =
- "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
- "value:With illegal \n value.}}";
-const char kExpectedAuthMetadataPluginWithDeadlineCredsDebugString[] =
- "SecureCallCredentials{TestMetadataCredentials{key:meta_key,value:Does not "
- "matter}}";
-const char kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString[] =
- "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
- "value:Does not matter, will fail anyway (see 3rd param)}}";
-const char
- kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString
- [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-"
- "metadata,value:Dr Jekyll}}";
-const char
- kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString
- [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-"
- "metadata,value:Mr Hyde}}";
-const char kExpectedBlockingAuthMetadataPluginFailureCredsDebugString[] =
- "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
- "value:Does not matter, will fail anyway (see 3rd param)}}";
-const char kExpectedCompositeCallCredsDebugString[] =
- "SecureCallCredentials{CompositeCallCredentials{TestMetadataCredentials{"
- "key:call-creds-key1,value:call-creds-val1},TestMetadataCredentials{key:"
- "call-creds-key2,value:call-creds-val2}}}";
-
+const char kFakeToken[] = "fake_token";
+const char kFakeSelector[] = "fake_selector";
+const char kExpectedFakeCredsDebugString[] =
+ "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
+ "AuthoritySelector:fake_selector}}";
+
+const char kWrongToken[] = "wrong_token";
+const char kWrongSelector[] = "wrong_selector";
+const char kExpectedWrongCredsDebugString[] =
+ "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
+ "AuthoritySelector:wrong_selector}}";
+
+const char kFakeToken1[] = "fake_token1";
+const char kFakeSelector1[] = "fake_selector1";
+const char kExpectedFakeCreds1DebugString[] =
+ "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
+ "AuthoritySelector:fake_selector1}}";
+
+const char kFakeToken2[] = "fake_token2";
+const char kFakeSelector2[] = "fake_selector2";
+const char kExpectedFakeCreds2DebugString[] =
+ "SecureCallCredentials{GoogleIAMCredentials{Token:present,"
+ "AuthoritySelector:fake_selector2}}";
+
+const char kExpectedAuthMetadataPluginKeyFailureCredsDebugString[] =
+ "SecureCallCredentials{TestMetadataCredentials{key:TestPluginMetadata,"
+ "value:Does not matter, will fail the key is invalid.}}";
+const char kExpectedAuthMetadataPluginValueFailureCredsDebugString[] =
+ "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
+ "value:With illegal \n value.}}";
+const char kExpectedAuthMetadataPluginWithDeadlineCredsDebugString[] =
+ "SecureCallCredentials{TestMetadataCredentials{key:meta_key,value:Does not "
+ "matter}}";
+const char kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString[] =
+ "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
+ "value:Does not matter, will fail anyway (see 3rd param)}}";
+const char
+ kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString
+ [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-"
+ "metadata,value:Dr Jekyll}}";
+const char
+ kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString
+ [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-"
+ "metadata,value:Mr Hyde}}";
+const char kExpectedBlockingAuthMetadataPluginFailureCredsDebugString[] =
+ "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata,"
+ "value:Does not matter, will fail anyway (see 3rd param)}}";
+const char kExpectedCompositeCallCredsDebugString[] =
+ "SecureCallCredentials{CompositeCallCredentials{TestMetadataCredentials{"
+ "key:call-creds-key1,value:call-creds-val1},TestMetadataCredentials{key:"
+ "call-creds-key2,value:call-creds-val2}}}";
+
class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
public:
static const char kGoodMetadataKey[];
@@ -162,7 +162,7 @@ class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
Status GetMetadata(
grpc::string_ref service_url, grpc::string_ref method_name,
const grpc::AuthContext& channel_auth_context,
- std::multimap<TString, TString>* metadata) override {
+ std::multimap<TString, TString>* metadata) override {
if (delay_ms_ != 0) {
gpr_sleep_until(
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
@@ -180,14 +180,14 @@ class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
}
}
- TString DebugString() override {
- return y_absl::StrFormat("TestMetadataCredentials{key:%s,value:%s}",
- metadata_key_.c_str(), metadata_value_.c_str());
- }
-
+ TString DebugString() override {
+ return y_absl::StrFormat("TestMetadataCredentials{key:%s,value:%s}",
+ metadata_key_.c_str(), metadata_value_.c_str());
+ }
+
private:
- TString metadata_key_;
- TString metadata_value_;
+ TString metadata_key_;
+ TString metadata_value_;
bool is_blocking_;
bool is_successful_;
int delay_ms_;
@@ -284,7 +284,7 @@ class TestServiceImplDupPkg
class TestScenario {
public:
TestScenario(bool interceptors, bool proxy, bool inproc_stub,
- const TString& creds_type, bool use_callback_server)
+ const TString& creds_type, bool use_callback_server)
: use_interceptors(interceptors),
use_proxy(proxy),
inproc(inproc_stub),
@@ -294,7 +294,7 @@ class TestScenario {
bool use_interceptors;
bool use_proxy;
bool inproc;
- const TString credentials_type;
+ const TString credentials_type;
bool callback_server;
};
@@ -482,7 +482,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
CallbackTestServiceImpl callback_service_;
TestServiceImpl special_service_;
TestServiceImplDupPkg dup_pkg_service_;
- TString user_agent_prefix_;
+ TString user_agent_prefix_;
int first_picked_port_;
};
@@ -497,7 +497,7 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
if (with_binary_metadata) {
char bytes[8] = {'\0', '\1', '\2', '\3',
'\4', '\5', '\6', static_cast<char>(i)};
- context.AddMetadata("custom-bin", TString(bytes, 8));
+ context.AddMetadata("custom-bin", TString(bytes, 8));
}
context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
Status s = stub->Echo(&context, request, &response);
@@ -534,7 +534,7 @@ class End2endServerTryCancelTest : public End2endTest {
// Send server_try_cancel value in the client metadata
context.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
auto stream = stub_->RequestStream(&context, &response);
@@ -613,7 +613,7 @@ class End2endServerTryCancelTest : public End2endTest {
// Send server_try_cancel in the client metadata
context.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
request.set_message("hello");
auto stream = stub_->ResponseStream(&context, request);
@@ -624,7 +624,7 @@ class End2endServerTryCancelTest : public End2endTest {
break;
}
EXPECT_EQ(response.message(),
- request.message() + ToString(num_msgs_read));
+ request.message() + ToString(num_msgs_read));
num_msgs_read++;
}
gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
@@ -695,14 +695,14 @@ class End2endServerTryCancelTest : public End2endTest {
// Send server_try_cancel in the client metadata
context.AddMetadata(kServerTryCancelRequest,
- ToString(server_try_cancel));
+ ToString(server_try_cancel));
auto stream = stub_->BidiStream(&context);
int num_msgs_read = 0;
int num_msgs_sent = 0;
while (num_msgs_sent < num_messages) {
- request.set_message("hello " + ToString(num_msgs_sent));
+ request.set_message("hello " + ToString(num_msgs_sent));
if (!stream->Write(request)) {
break;
}
@@ -769,7 +769,7 @@ TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
ClientContext context;
context.AddMetadata(kServerTryCancelRequest,
- ToString(CANCEL_BEFORE_PROCESSING));
+ ToString(CANCEL_BEFORE_PROCESSING));
Status s = stub_->Echo(&context, request, &response);
EXPECT_FALSE(s.ok());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
@@ -844,7 +844,7 @@ TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
const auto& trailing_metadata = context.GetServerTrailingMetadata();
auto iter = trailing_metadata.find("user-agent");
EXPECT_TRUE(iter != trailing_metadata.end());
- TString expected_prefix = user_agent_prefix_ + " grpc-c++/";
+ TString expected_prefix = user_agent_prefix_ + " grpc-c++/";
EXPECT_TRUE(iter->second.starts_with(expected_prefix)) << iter->second;
}
@@ -874,19 +874,19 @@ TEST_P(End2endTest, MultipleRpcs) {
}
}
-TEST_P(End2endTest, ManyStubs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ChannelTestPeer peer(channel_.get());
- int registered_calls_pre = peer.registered_calls();
- int registration_attempts_pre = peer.registration_attempts();
- for (int i = 0; i < 1000; ++i) {
- grpc::testing::EchoTestService::NewStub(channel_);
- }
- EXPECT_EQ(peer.registered_calls(), registered_calls_pre);
- EXPECT_GT(peer.registration_attempts(), registration_attempts_pre);
-}
-
+TEST_P(End2endTest, ManyStubs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ChannelTestPeer peer(channel_.get());
+ int registered_calls_pre = peer.registered_calls();
+ int registration_attempts_pre = peer.registration_attempts();
+ for (int i = 0; i < 1000; ++i) {
+ grpc::testing::EchoTestService::NewStub(channel_);
+ }
+ EXPECT_EQ(peer.registered_calls(), registered_calls_pre);
+ EXPECT_GT(peer.registration_attempts(), registration_attempts_pre);
+}
+
TEST_P(End2endTest, EmptyBinaryMetadata) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -918,13 +918,13 @@ TEST_P(End2endTest, ReconnectChannel) {
SendRpc(stub_.get(), 1, false);
RestartServer(std::shared_ptr<AuthMetadataProcessor>());
// It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
- // reconnect the channel. Make it a factor of 5x
- gpr_sleep_until(
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(kClientChannelBackupPollIntervalMs * 5 *
- poller_slowdown_factor *
- grpc_test_slowdown_factor(),
- GPR_TIMESPAN)));
+ // reconnect the channel. Make it a factor of 5x
+ gpr_sleep_until(
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(kClientChannelBackupPollIntervalMs * 5 *
+ poller_slowdown_factor *
+ grpc_test_slowdown_factor(),
+ GPR_TIMESPAN)));
SendRpc(stub_.get(), 1, false);
}
@@ -1023,7 +1023,7 @@ TEST_P(End2endTest, ResponseStream) {
auto stream = stub_->ResponseStream(&context, request);
for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + ToString(i));
+ EXPECT_EQ(response.message(), request.message() + ToString(i));
}
EXPECT_FALSE(stream->Read(&response));
@@ -1043,7 +1043,7 @@ TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
auto stream = stub_->ResponseStream(&context, request);
for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + ToString(i));
+ EXPECT_EQ(response.message(), request.message() + ToString(i));
}
EXPECT_FALSE(stream->Read(&response));
@@ -1081,12 +1081,12 @@ TEST_P(End2endTest, BidiStream) {
EchoRequest request;
EchoResponse response;
ClientContext context;
- TString msg("hello");
+ TString msg("hello");
auto stream = stub_->BidiStream(&context);
for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
- request.set_message(msg + ToString(i));
+ request.set_message(msg + ToString(i));
EXPECT_TRUE(stream->Write(request));
EXPECT_TRUE(stream->Read(&response));
EXPECT_EQ(response.message(), request.message());
@@ -1108,7 +1108,7 @@ TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
ClientContext context;
context.AddMetadata(kServerFinishAfterNReads, "3");
context.set_initial_metadata_corked(true);
- TString msg("hello");
+ TString msg("hello");
auto stream = stub_->BidiStream(&context);
@@ -1144,7 +1144,7 @@ TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
ClientContext context;
context.AddMetadata(kServerFinishAfterNReads, "1");
context.set_initial_metadata_corked(true);
- TString msg("hello");
+ TString msg("hello");
auto stream = stub_->BidiStream(&context);
@@ -1207,34 +1207,34 @@ TEST_P(End2endTest, CancelRpcBeforeStart) {
}
}
-TEST_P(End2endTest, CancelRpcAfterStart) {
+TEST_P(End2endTest, CancelRpcAfterStart) {
MAYBE_SKIP_TEST;
ResetStub();
EchoRequest request;
EchoResponse response;
ClientContext context;
request.set_message("hello");
- request.mutable_param()->set_server_notify_client_when_started(true);
+ request.mutable_param()->set_server_notify_client_when_started(true);
request.mutable_param()->set_skip_cancelled_check(true);
Status s;
std::thread echo_thread([this, &s, &context, &request, &response] {
s = stub_->Echo(&context, request, &response);
EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
});
- if (!GetParam().callback_server) {
- service_.ClientWaitUntilRpcStarted();
- } else {
- callback_service_.ClientWaitUntilRpcStarted();
- }
-
+ if (!GetParam().callback_server) {
+ service_.ClientWaitUntilRpcStarted();
+ } else {
+ callback_service_.ClientWaitUntilRpcStarted();
+ }
+
context.TryCancel();
-
- if (!GetParam().callback_server) {
- service_.SignalServerToContinue();
- } else {
- callback_service_.SignalServerToContinue();
- }
-
+
+ if (!GetParam().callback_server) {
+ service_.SignalServerToContinue();
+ } else {
+ callback_service_.SignalServerToContinue();
+ }
+
echo_thread.join();
EXPECT_EQ("", response.message());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
@@ -1310,7 +1310,7 @@ TEST_P(End2endTest, ClientCancelsBidi) {
EchoRequest request;
EchoResponse response;
ClientContext context;
- TString msg("hello");
+ TString msg("hello");
auto stream = stub_->BidiStream(&context);
@@ -1460,7 +1460,7 @@ TEST_P(End2endTest, BinaryTrailerTest) {
info->add_stack_entries("stack_entry_2");
info->add_stack_entries("stack_entry_3");
info->set_detail("detailed debug info");
- TString expected_string = info->SerializeAsString();
+ TString expected_string = info->SerializeAsString();
request.set_message("Hello");
Status s = stub_->Echo(&context, request, &response);
@@ -1511,12 +1511,12 @@ TEST_P(End2endTest, ExpectErrorTest) {
EXPECT_EQ(iter->error_message(), s.error_message());
EXPECT_EQ(iter->binary_error_details(), s.error_details());
EXPECT_TRUE(context.debug_error_string().find("created") !=
- TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos);
+ TString::npos);
+ EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos);
+ EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos);
EXPECT_TRUE(context.debug_error_string().find("status") !=
- TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos);
+ TString::npos);
+ EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos);
}
}
@@ -1774,7 +1774,7 @@ TEST_P(SecureEnd2endTest, SimpleRpcWithHost) {
bool MetadataContains(
const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- const TString& key, const TString& value) {
+ const TString& key, const TString& value) {
int count = 0;
for (std::multimap<grpc::string_ref, grpc::string_ref>::const_iterator iter =
@@ -1810,7 +1810,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
// Metadata should have been consumed by the processor.
EXPECT_FALSE(MetadataContains(
context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
- TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
+ TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
}
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
@@ -1836,7 +1836,7 @@ TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
EchoResponse response;
ClientContext context;
std::shared_ptr<CallCredentials> creds =
- GoogleIAMCredentials(kFakeToken, kFakeSelector);
+ GoogleIAMCredentials(kFakeToken, kFakeSelector);
context.set_credentials(creds);
request.set_message("Hello");
request.mutable_param()->set_echo_metadata(true);
@@ -1846,12 +1846,12 @@ TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
EXPECT_TRUE(s.ok());
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
- kFakeToken));
+ kFakeToken));
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
- kFakeSelector));
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedFakeCredsDebugString);
+ kFakeSelector));
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedFakeCredsDebugString);
}
class CredentialsInterceptor : public experimental::Interceptor {
@@ -1862,7 +1862,7 @@ class CredentialsInterceptor : public experimental::Interceptor {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
std::shared_ptr<CallCredentials> creds =
- GoogleIAMCredentials(kFakeToken, kFakeSelector);
+ GoogleIAMCredentials(kFakeToken, kFakeSelector);
info_->client_context()->set_credentials(creds);
}
methods->Proceed();
@@ -1902,12 +1902,12 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
EXPECT_TRUE(s.ok());
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
- kFakeToken));
+ kFakeToken));
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
- kFakeSelector));
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedFakeCredsDebugString);
+ kFakeSelector));
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedFakeCredsDebugString);
}
TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
@@ -1924,11 +1924,11 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
EchoResponse response;
ClientContext context;
std::shared_ptr<CallCredentials> creds1 =
- GoogleIAMCredentials(kWrongToken, kWrongSelector);
+ GoogleIAMCredentials(kWrongToken, kWrongSelector);
context.set_credentials(creds1);
EXPECT_EQ(context.credentials(), creds1);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedWrongCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedWrongCredsDebugString);
request.set_message("Hello");
request.mutable_param()->set_echo_metadata(true);
@@ -1937,12 +1937,12 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
EXPECT_TRUE(s.ok());
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
- kFakeToken));
+ kFakeToken));
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
- kFakeSelector));
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedFakeCredsDebugString);
+ kFakeSelector));
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedFakeCredsDebugString);
}
TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
@@ -1952,13 +1952,13 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
EchoResponse response;
ClientContext context;
std::shared_ptr<CallCredentials> creds1 =
- GoogleIAMCredentials(kFakeToken1, kFakeSelector1);
+ GoogleIAMCredentials(kFakeToken1, kFakeSelector1);
context.set_credentials(creds1);
EXPECT_EQ(context.credentials(), creds1);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedFakeCreds1DebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedFakeCreds1DebugString);
std::shared_ptr<CallCredentials> creds2 =
- GoogleIAMCredentials(kFakeToken2, kFakeSelector2);
+ GoogleIAMCredentials(kFakeToken2, kFakeSelector2);
context.set_credentials(creds2);
EXPECT_EQ(context.credentials(), creds2);
request.set_message("Hello");
@@ -1967,18 +1967,18 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
Status s = stub_->Echo(&context, request, &response);
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
- kFakeToken2));
+ kFakeToken2));
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
- kFakeSelector2));
+ kFakeSelector2));
EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
- kFakeToken1));
+ kFakeToken1));
EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
- kFakeSelector1));
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedFakeCreds2DebugString);
+ kFakeSelector1));
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedFakeCreds2DebugString);
EXPECT_EQ(request.message(), response.message());
EXPECT_TRUE(s.ok());
}
@@ -2000,8 +2000,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
Status s = stub_->Echo(&context, request, &response);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedAuthMetadataPluginKeyFailureCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedAuthMetadataPluginKeyFailureCredsDebugString);
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
@@ -2020,8 +2020,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
Status s = stub_->Echo(&context, request, &response);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedAuthMetadataPluginValueFailureCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedAuthMetadataPluginValueFailureCredsDebugString);
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
@@ -2046,8 +2046,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
EXPECT_TRUE(s.error_code() == StatusCode::DEADLINE_EXCEEDED ||
s.error_code() == StatusCode::UNAVAILABLE);
}
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
}
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
@@ -2075,8 +2075,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
s.error_code() == StatusCode::UNAVAILABLE);
}
cancel_thread.join();
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
@@ -2097,10 +2097,10 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(s.error_message(),
- TString("Getting metadata from plugin failed with error: ") +
+ TString("Getting metadata from plugin failed with error: ") +
kTestCredsPluginErrorMsg);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString);
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
@@ -2126,10 +2126,10 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
// Metadata should have been consumed by the processor.
EXPECT_FALSE(MetadataContains(
context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
- TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
- EXPECT_EQ(
- context.credentials()->DebugString(),
- kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString);
+ TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
+ EXPECT_EQ(
+ context.credentials()->DebugString(),
+ kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString);
}
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
@@ -2146,9 +2146,9 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
Status s = stub_->Echo(&context, request, &response);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
- EXPECT_EQ(
- context.credentials()->DebugString(),
- kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString);
+ EXPECT_EQ(
+ context.credentials()->DebugString(),
+ kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString);
}
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
@@ -2169,10 +2169,10 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(s.error_message(),
- TString("Getting metadata from plugin failed with error: ") +
+ TString("Getting metadata from plugin failed with error: ") +
kTestCredsPluginErrorMsg);
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedBlockingAuthMetadataPluginFailureCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedBlockingAuthMetadataPluginFailureCredsDebugString);
}
TEST_P(SecureEnd2endTest, CompositeCallCreds) {
@@ -2204,8 +2204,8 @@ TEST_P(SecureEnd2endTest, CompositeCallCreds) {
kMetadataKey1, kMetadataVal1));
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
kMetadataKey2, kMetadataVal2));
- EXPECT_EQ(context.credentials()->DebugString(),
- kExpectedCompositeCallCredsDebugString);
+ EXPECT_EQ(context.credentials()->DebugString(),
+ kExpectedCompositeCallCredsDebugString);
}
TEST_P(SecureEnd2endTest, ClientAuthContext) {
@@ -2274,10 +2274,10 @@ std::vector<TestScenario> CreateTestScenarios(bool use_proxy,
bool test_inproc,
bool test_callback_server) {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types;
+ std::vector<TString> credentials_types;
- GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms,
- kClientChannelBackupPollIntervalMs);
+ GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms,
+ kClientChannelBackupPollIntervalMs);
#if TARGET_OS_IPHONE
// Workaround Apple CFStream bug
gpr_setenv("grpc_cfstream", "0");
diff --git a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
index 1026b78044..2f26d0716c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
@@ -18,7 +18,7 @@
#include <memory>
#include <mutex>
-#include <thread>
+#include <thread>
#include <grpc/grpc.h>
#include <grpc/support/time.h>
@@ -171,7 +171,7 @@ class FilterEnd2endTest : public ::testing::Test {
void client_fail(int i) { verify_ok(&cli_cq_, i, false); }
void SendRpc(int num_rpcs) {
- const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
+ const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest send_request;
EchoRequest recv_request;
@@ -185,7 +185,7 @@ class FilterEnd2endTest : public ::testing::Test {
// The string needs to be long enough to test heap-based slice.
send_request.set_message("Hello world. Hello world. Hello world.");
- std::thread request_call([this]() { server_ok(4); });
+ std::thread request_call([this]() { server_ok(4); });
std::unique_ptr<GenericClientAsyncReaderWriter> call =
generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
call->StartCall(tag(1));
@@ -202,7 +202,7 @@ class FilterEnd2endTest : public ::testing::Test {
generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
srv_cq_.get(), tag(4));
- request_call.join();
+ request_call.join();
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
ByteBuffer recv_buffer;
@@ -239,7 +239,7 @@ class FilterEnd2endTest : public ::testing::Test {
std::unique_ptr<grpc::GenericStub> generic_stub_;
std::unique_ptr<Server> server_;
AsyncGenericService generic_service_;
- const TString server_host_;
+ const TString server_host_;
std::ostringstream server_address_;
};
@@ -267,7 +267,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) {
EXPECT_EQ(0, GetConnectionCounterValue());
EXPECT_EQ(0, GetCallCounterValue());
- const TString kMethodName(
+ const TString kMethodName(
"/grpc.cpp.test.util.EchoTestService/BidiStream");
EchoRequest send_request;
EchoRequest recv_request;
@@ -280,7 +280,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) {
cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
send_request.set_message("Hello");
- std::thread request_call([this]() { server_ok(2); });
+ std::thread request_call([this]() { server_ok(2); });
std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
cli_stream->StartCall(tag(1));
@@ -289,7 +289,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) {
generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
srv_cq_.get(), tag(2));
- request_call.join();
+ request_call.join();
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
diff --git a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
index b5fe65bc4b..3ee75952c0 100644
--- a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc
@@ -56,10 +56,10 @@ namespace testing {
namespace {
struct TestScenario {
- TestScenario(const TString& creds_type, const TString& content)
+ TestScenario(const TString& creds_type, const TString& content)
: credentials_type(creds_type), message_content(content) {}
- const TString credentials_type;
- const TString message_content;
+ const TString credentials_type;
+ const TString message_content;
};
class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
@@ -191,7 +191,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
}
std::shared_ptr<Channel> BuildChannel(
- const TString& lb_policy_name,
+ const TString& lb_policy_name,
ChannelArguments args = ChannelArguments()) {
if (lb_policy_name.size() > 0) {
args.SetLoadBalancingPolicyName(lb_policy_name);
@@ -213,9 +213,9 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
ClientContext context;
if (timeout_ms > 0) {
context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
- // Allow an RPC to be canceled (for deadline exceeded) after it has
- // reached the server.
- request.mutable_param()->set_skip_cancelled_check(true);
+ // Allow an RPC to be canceled (for deadline exceeded) after it has
+ // reached the server.
+ request.mutable_param()->set_skip_cancelled_check(true);
}
// See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md for
// details of wait-for-ready semantics
@@ -243,16 +243,16 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
struct ServerData {
int port_;
- const TString creds_;
+ const TString creds_;
std::unique_ptr<Server> server_;
TestServiceImpl service_;
std::unique_ptr<std::thread> thread_;
bool server_ready_ = false;
- ServerData(int port, const TString& creds)
+ ServerData(int port, const TString& creds)
: port_(port), creds_(creds) {}
- void Start(const TString& server_host) {
+ void Start(const TString& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_);
std::mutex mu;
std::unique_lock<std::mutex> lock(mu);
@@ -264,7 +264,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
gpr_log(GPR_INFO, "server startup complete");
}
- void Serve(const TString& server_host, std::mutex* mu,
+ void Serve(const TString& server_host, std::mutex* mu,
std::condition_variable* cond) {
std::ostringstream server_address;
server_address << server_host << ":" << port_;
@@ -308,10 +308,10 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
}
private:
- const TString server_host_;
- const TString interface_;
- const TString ipv4_address_;
- const TString netmask_;
+ const TString server_host_;
+ const TString interface_;
+ const TString ipv4_address_;
+ const TString netmask_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::unique_ptr<ServerData> server_;
const int SERVER_PORT = 32750;
@@ -320,8 +320,8 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> {
std::vector<TestScenario> CreateTestScenarios() {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types;
- std::vector<TString> messages;
+ std::vector<TString> credentials_types;
+ std::vector<TString> messages;
credentials_types.push_back(kInsecureCredentialsType);
auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
@@ -331,7 +331,7 @@ std::vector<TestScenario> CreateTestScenarios() {
messages.push_back("🖖");
for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) {
- TString big_msg;
+ TString big_msg;
for (size_t i = 0; i < k * 1024; ++i) {
char c = 'a' + (i % 26);
big_msg += c;
@@ -552,7 +552,7 @@ TEST_P(FlakyNetworkTest, ServerRestartKeepaliveDisabled) {
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
- grpc::testing::TestEnvironment env(argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
auto result = RUN_ALL_TESTS();
return result;
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
index 98ed104446..59eec49fb2 100644
--- a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
@@ -111,7 +111,7 @@ class GenericEnd2endTest : public ::testing::Test {
}
void SendRpc(int num_rpcs, bool check_deadline, gpr_timespec deadline) {
- const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
+ const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest send_request;
EchoRequest recv_request;
@@ -133,14 +133,14 @@ class GenericEnd2endTest : public ::testing::Test {
// Rather than using the original kMethodName, make a short-lived
// copy to also confirm that we don't refer to this object beyond
// the initial call preparation
- const TString* method_name = new TString(kMethodName);
+ const TString* method_name = new TString(kMethodName);
std::unique_ptr<GenericClientAsyncReaderWriter> call =
generic_stub_->PrepareCall(&cli_ctx, *method_name, &cli_cq_);
delete method_name; // Make sure that this is not needed after invocation
- std::thread request_call([this]() { server_ok(4); });
+ std::thread request_call([this]() { server_ok(4); });
call->StartCall(tag(1));
client_ok(1);
std::unique_ptr<ByteBuffer> send_buffer =
@@ -155,7 +155,7 @@ class GenericEnd2endTest : public ::testing::Test {
generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
srv_cq_.get(), tag(4));
- request_call.join();
+ request_call.join();
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
@@ -246,7 +246,7 @@ class GenericEnd2endTest : public ::testing::Test {
std::unique_ptr<grpc::GenericStub> generic_stub_;
std::unique_ptr<Server> server_;
AsyncGenericService generic_service_;
- const TString server_host_;
+ const TString server_host_;
std::ostringstream server_address_;
bool shutting_down_;
bool shut_down_;
@@ -266,7 +266,7 @@ TEST_F(GenericEnd2endTest, SequentialRpcs) {
TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
ResetStub();
const int num_rpcs = 10;
- const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
+ const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest send_request;
EchoRequest recv_request;
@@ -283,7 +283,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
std::unique_ptr<ByteBuffer> cli_send_buffer =
SerializeToByteBuffer(&send_request);
- std::thread request_call([this]() { server_ok(4); });
+ std::thread request_call([this]() { server_ok(4); });
std::unique_ptr<GenericClientAsyncResponseReader> call =
generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName,
*cli_send_buffer.get(), &cli_cq_);
@@ -294,7 +294,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
srv_cq_.get(), tag(4));
- request_call.join();
+ request_call.join();
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
@@ -324,7 +324,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
ResetStub();
- const TString kMethodName(
+ const TString kMethodName(
"/grpc.cpp.test.util.EchoTestService/BidiStream");
EchoRequest send_request;
EchoRequest recv_request;
@@ -337,7 +337,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
send_request.set_message("Hello");
- std::thread request_call([this]() { server_ok(2); });
+ std::thread request_call([this]() { server_ok(2); });
std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
cli_stream->StartCall(tag(1));
@@ -345,7 +345,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
srv_cq_.get(), tag(2));
- request_call.join();
+ request_call.join();
EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
EXPECT_EQ(kMethodName, srv_ctx.method());
diff --git a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
index 82d1cdc4c9..6208dc2535 100644
--- a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
@@ -16,17 +16,17 @@
*
*/
-#include <deque>
+#include <deque>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <thread>
-#include "y_absl/strings/str_cat.h"
-#include "y_absl/strings/str_format.h"
-
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/strings/str_format.h"
+
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -39,16 +39,16 @@
#include <grpcpp/server_builder.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
-#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/parse_address.h"
+#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
-#include "src/core/lib/transport/authority_override.h"
+#include "src/core/lib/transport/authority_override.h"
#include "src/cpp/client/secure_credentials.h"
#include "src/cpp/server/secure_server_credentials.h"
@@ -81,7 +81,7 @@
using std::chrono::system_clock;
-using grpc::lb::v1::LoadBalancer;
+using grpc::lb::v1::LoadBalancer;
using grpc::lb::v1::LoadBalanceRequest;
using grpc::lb::v1::LoadBalanceResponse;
@@ -89,13 +89,13 @@ namespace grpc {
namespace testing {
namespace {
-constexpr char kDefaultServiceConfig[] =
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"grpclb\":{} }\n"
- " ]\n"
- "}";
-
+constexpr char kDefaultServiceConfig[] =
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"grpclb\":{} }\n"
+ " ]\n"
+ "}";
+
template <typename ServiceType>
class CountedService : public ServiceType {
public:
@@ -162,26 +162,26 @@ class BackendServiceImpl : public BackendService {
void Shutdown() {}
- std::set<TString> clients() {
+ std::set<TString> clients() {
grpc::internal::MutexLock lock(&clients_mu_);
return clients_;
}
private:
- void AddClient(const TString& client) {
+ void AddClient(const TString& client) {
grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client);
}
grpc::internal::Mutex mu_;
grpc::internal::Mutex clients_mu_;
- std::set<TString> clients_;
+ std::set<TString> clients_;
};
-TString Ip4ToPackedString(const char* ip_str) {
+TString Ip4ToPackedString(const char* ip_str) {
struct in_addr ip4;
GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1);
- return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
+ return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
}
struct ClientStats {
@@ -189,7 +189,7 @@ struct ClientStats {
size_t num_calls_finished = 0;
size_t num_calls_finished_with_client_failed_to_send = 0;
size_t num_calls_finished_known_received = 0;
- std::map<TString, size_t> drop_token_counts;
+ std::map<TString, size_t> drop_token_counts;
ClientStats& operator+=(const ClientStats& other) {
num_calls_started += other.num_calls_started;
@@ -237,11 +237,11 @@ class BalancerServiceImpl : public BalancerService {
if (!stream->Read(&request)) {
goto done;
- } else {
- if (request.has_initial_request()) {
- grpc::internal::MutexLock lock(&mu_);
- service_names_.push_back(request.initial_request().name());
- }
+ } else {
+ if (request.has_initial_request()) {
+ grpc::internal::MutexLock lock(&mu_);
+ service_names_.push_back(request.initial_request().name());
+ }
}
IncreaseRequestCount();
gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this,
@@ -271,31 +271,31 @@ class BalancerServiceImpl : public BalancerService {
if (client_load_reporting_interval_seconds_ > 0) {
request.Clear();
- while (stream->Read(&request)) {
+ while (stream->Read(&request)) {
gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'",
this, request.DebugString().c_str());
GPR_ASSERT(request.has_client_stats());
- ClientStats load_report;
- load_report.num_calls_started =
+ ClientStats load_report;
+ load_report.num_calls_started =
request.client_stats().num_calls_started();
- load_report.num_calls_finished =
+ load_report.num_calls_finished =
request.client_stats().num_calls_finished();
- load_report.num_calls_finished_with_client_failed_to_send =
+ load_report.num_calls_finished_with_client_failed_to_send =
request.client_stats()
.num_calls_finished_with_client_failed_to_send();
- load_report.num_calls_finished_known_received =
+ load_report.num_calls_finished_known_received =
request.client_stats().num_calls_finished_known_received();
for (const auto& drop_token_count :
request.client_stats().calls_finished_with_drop()) {
- load_report
- .drop_token_counts[drop_token_count.load_balance_token()] =
+ load_report
+ .drop_token_counts[drop_token_count.load_balance_token()] =
drop_token_count.num_calls();
}
- // We need to acquire the lock here in order to prevent the notify_one
- // below from firing before its corresponding wait is executed.
- grpc::internal::MutexLock lock(&mu_);
- load_report_queue_.emplace_back(std::move(load_report));
- if (load_report_cond_ != nullptr) load_report_cond_->Signal();
+ // We need to acquire the lock here in order to prevent the notify_one
+ // below from firing before its corresponding wait is executed.
+ grpc::internal::MutexLock lock(&mu_);
+ load_report_queue_.emplace_back(std::move(load_report));
+ if (load_report_cond_ != nullptr) load_report_cond_->Signal();
}
}
}
@@ -313,7 +313,7 @@ class BalancerServiceImpl : public BalancerService {
grpc::internal::MutexLock lock(&mu_);
serverlist_done_ = false;
responses_and_delays_.clear();
- load_report_queue_.clear();
+ load_report_queue_.clear();
}
void Shutdown() {
@@ -323,7 +323,7 @@ class BalancerServiceImpl : public BalancerService {
static LoadBalanceResponse BuildResponseForBackends(
const std::vector<int>& backend_ports,
- const std::map<TString, size_t>& drop_token_counts) {
+ const std::map<TString, size_t>& drop_token_counts) {
LoadBalanceResponse response;
for (const auto& drop_token_count : drop_token_counts) {
for (size_t i = 0; i < drop_token_count.second; ++i) {
@@ -337,24 +337,24 @@ class BalancerServiceImpl : public BalancerService {
server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
server->set_port(backend_port);
static int token_count = 0;
- server->set_load_balance_token(
- y_absl::StrFormat("token%03d", ++token_count));
+ server->set_load_balance_token(
+ y_absl::StrFormat("token%03d", ++token_count));
}
return response;
}
- ClientStats WaitForLoadReport() {
+ ClientStats WaitForLoadReport() {
grpc::internal::MutexLock lock(&mu_);
- grpc::internal::CondVar cv;
- if (load_report_queue_.empty()) {
- load_report_cond_ = &cv;
- load_report_cond_->WaitUntil(
- &mu_, [this] { return !load_report_queue_.empty(); });
- load_report_cond_ = nullptr;
- }
- ClientStats load_report = std::move(load_report_queue_.front());
- load_report_queue_.pop_front();
- return load_report;
+ grpc::internal::CondVar cv;
+ if (load_report_queue_.empty()) {
+ load_report_cond_ = &cv;
+ load_report_cond_->WaitUntil(
+ &mu_, [this] { return !load_report_queue_.empty(); });
+ load_report_cond_ = nullptr;
+ }
+ ClientStats load_report = std::move(load_report_queue_.front());
+ load_report_queue_.pop_front();
+ return load_report;
}
void NotifyDoneWithServerlists() {
@@ -365,11 +365,11 @@ class BalancerServiceImpl : public BalancerService {
}
}
- std::vector<TString> service_names() {
- grpc::internal::MutexLock lock(&mu_);
- return service_names_;
- }
-
+ std::vector<TString> service_names() {
+ grpc::internal::MutexLock lock(&mu_);
+ return service_names_;
+ }
+
private:
void SendResponse(Stream* stream, const LoadBalanceResponse& response,
int delay_ms) {
@@ -385,13 +385,13 @@ class BalancerServiceImpl : public BalancerService {
const int client_load_reporting_interval_seconds_;
std::vector<ResponseDelayPair> responses_and_delays_;
- std::vector<TString> service_names_;
-
+ std::vector<TString> service_names_;
+
grpc::internal::Mutex mu_;
grpc::internal::CondVar serverlist_cond_;
bool serverlist_done_ = false;
- grpc::internal::CondVar* load_report_cond_ = nullptr;
- std::deque<ClientStats> load_report_queue_;
+ grpc::internal::CondVar* load_report_cond_ = nullptr;
+ std::deque<ClientStats> load_report_queue_;
};
class GrpclbEnd2endTest : public ::testing::Test {
@@ -452,7 +452,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
void ResetStub(int fallback_timeout = 0,
- const TString& expected_targets = "") {
+ const TString& expected_targets = "") {
ChannelArguments args;
if (fallback_timeout > 0) args.SetGrpclbFallbackTimeout(fallback_timeout);
args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
@@ -543,72 +543,72 @@ class GrpclbEnd2endTest : public ::testing::Test {
struct AddressData {
int port;
- TString balancer_name;
+ TString balancer_name;
};
- static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
+ static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
const std::vector<AddressData>& address_data) {
grpc_core::ServerAddressList addresses;
for (const auto& addr : address_data) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
+ TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port);
+ grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
GPR_ASSERT(lb_uri != nullptr);
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
- grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg(
- addr.balancer_name.c_str());
- grpc_channel_args* args =
- grpc_channel_args_copy_and_add(nullptr, &arg, 1);
+ grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg(
+ addr.balancer_name.c_str());
+ grpc_channel_args* args =
+ grpc_channel_args_copy_and_add(nullptr, &arg, 1);
addresses.emplace_back(address.addr, address.len, args);
grpc_uri_destroy(lb_uri);
}
return addresses;
}
- static grpc_core::Resolver::Result MakeResolverResult(
- const std::vector<AddressData>& balancer_address_data,
- const std::vector<AddressData>& backend_address_data = {},
- const char* service_config_json = kDefaultServiceConfig) {
- grpc_core::Resolver::Result result;
- result.addresses =
- CreateLbAddressesFromAddressDataList(backend_address_data);
- grpc_error* error = GRPC_ERROR_NONE;
- result.service_config =
- grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
- GPR_ASSERT(error == GRPC_ERROR_NONE);
- grpc_core::ServerAddressList balancer_addresses =
- CreateLbAddressesFromAddressDataList(balancer_address_data);
- grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses);
- result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
- return result;
- }
-
+ static grpc_core::Resolver::Result MakeResolverResult(
+ const std::vector<AddressData>& balancer_address_data,
+ const std::vector<AddressData>& backend_address_data = {},
+ const char* service_config_json = kDefaultServiceConfig) {
+ grpc_core::Resolver::Result result;
+ result.addresses =
+ CreateLbAddressesFromAddressDataList(backend_address_data);
+ grpc_error* error = GRPC_ERROR_NONE;
+ result.service_config =
+ grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
+ GPR_ASSERT(error == GRPC_ERROR_NONE);
+ grpc_core::ServerAddressList balancer_addresses =
+ CreateLbAddressesFromAddressDataList(balancer_address_data);
+ grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses);
+ result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
+ return result;
+ }
+
void SetNextResolutionAllBalancers(
- const char* service_config_json = kDefaultServiceConfig) {
+ const char* service_config_json = kDefaultServiceConfig) {
std::vector<AddressData> addresses;
for (size_t i = 0; i < balancers_.size(); ++i) {
- addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
- SetNextResolution(addresses, {}, service_config_json);
+ SetNextResolution(addresses, {}, service_config_json);
}
- void SetNextResolution(
- const std::vector<AddressData>& balancer_address_data,
- const std::vector<AddressData>& backend_address_data = {},
- const char* service_config_json = kDefaultServiceConfig) {
+ void SetNextResolution(
+ const std::vector<AddressData>& balancer_address_data,
+ const std::vector<AddressData>& backend_address_data = {},
+ const char* service_config_json = kDefaultServiceConfig) {
grpc_core::ExecCtx exec_ctx;
- grpc_core::Resolver::Result result = MakeResolverResult(
- balancer_address_data, backend_address_data, service_config_json);
+ grpc_core::Resolver::Result result = MakeResolverResult(
+ balancer_address_data, backend_address_data, service_config_json);
response_generator_->SetResponse(std::move(result));
}
void SetNextReresolutionResponse(
- const std::vector<AddressData>& balancer_address_data,
- const std::vector<AddressData>& backend_address_data = {},
- const char* service_config_json = kDefaultServiceConfig) {
+ const std::vector<AddressData>& balancer_address_data,
+ const std::vector<AddressData>& backend_address_data = {},
+ const char* service_config_json = kDefaultServiceConfig) {
grpc_core::ExecCtx exec_ctx;
- grpc_core::Resolver::Result result = MakeResolverResult(
- balancer_address_data, backend_address_data, service_config_json);
+ grpc_core::Resolver::Result result = MakeResolverResult(
+ balancer_address_data, backend_address_data, service_config_json);
response_generator_->SetReresolutionResponse(std::move(result));
}
@@ -629,17 +629,17 @@ class GrpclbEnd2endTest : public ::testing::Test {
}
Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
- bool wait_for_ready = false,
- const Status& expected_status = Status::OK) {
+ bool wait_for_ready = false,
+ const Status& expected_status = Status::OK) {
const bool local_response = (response == nullptr);
if (local_response) response = new EchoResponse;
EchoRequest request;
request.set_message(kRequestMessage_);
- if (!expected_status.ok()) {
- auto* error = request.mutable_param()->mutable_expected_error();
- error->set_code(expected_status.error_code());
- error->set_error_message(expected_status.error_message());
- }
+ if (!expected_status.ok()) {
+ auto* error = request.mutable_param()->mutable_expected_error();
+ error->set_code(expected_status.error_code());
+ error->set_error_message(expected_status.error_message());
+ }
ClientContext context;
context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
if (wait_for_ready) context.set_wait_for_ready(true);
@@ -667,12 +667,12 @@ class GrpclbEnd2endTest : public ::testing::Test {
template <typename T>
struct ServerThread {
template <typename... Args>
- explicit ServerThread(const TString& type, Args&&... args)
+ explicit ServerThread(const TString& type, Args&&... args)
: port_(grpc_pick_unused_port_or_die()),
type_(type),
service_(std::forward<Args>(args)...) {}
- void Start(const TString& server_host) {
+ void Start(const TString& server_host) {
gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
GPR_ASSERT(!running_);
running_ = true;
@@ -688,7 +688,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
}
- void Serve(const TString& server_host, grpc::internal::Mutex* mu,
+ void Serve(const TString& server_host, grpc::internal::Mutex* mu,
grpc::internal::CondVar* cond) {
// We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed.
@@ -715,14 +715,14 @@ class GrpclbEnd2endTest : public ::testing::Test {
}
const int port_;
- TString type_;
+ TString type_;
T service_;
std::unique_ptr<Server> server_;
std::unique_ptr<std::thread> thread_;
bool running_ = false;
};
- const TString server_host_;
+ const TString server_host_;
const size_t num_backends_;
const size_t num_balancers_;
const int client_load_reporting_interval_seconds_;
@@ -732,8 +732,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
response_generator_;
- const TString kRequestMessage_ = "Live long and prosper.";
- const TString kApplicationTargetName_ = "application_target_name";
+ const TString kRequestMessage_ = "Live long and prosper.";
+ const TString kApplicationTargetName_ = "application_target_name";
};
class SingleBalancerTest : public GrpclbEnd2endTest {
@@ -768,22 +768,22 @@ TEST_F(SingleBalancerTest, Vanilla) {
EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
-TEST_F(SingleBalancerTest, ReturnServerStatus) {
- SetNextResolutionAllBalancers();
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- // Send a request that the backend will fail, and make sure we get
- // back the right status.
- Status expected(StatusCode::INVALID_ARGUMENT, "He's dead, Jim!");
- Status actual = SendRpc(/*response=*/nullptr, /*timeout_ms=*/1000,
- /*wait_for_ready=*/false, expected);
- EXPECT_EQ(actual.error_code(), expected.error_code());
- EXPECT_EQ(actual.error_message(), expected.error_message());
-}
-
+TEST_F(SingleBalancerTest, ReturnServerStatus) {
+ SetNextResolutionAllBalancers();
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ // Send a request that the backend will fail, and make sure we get
+ // back the right status.
+ Status expected(StatusCode::INVALID_ARGUMENT, "He's dead, Jim!");
+ Status actual = SendRpc(/*response=*/nullptr, /*timeout_ms=*/1000,
+ /*wait_for_ready=*/false, expected);
+ EXPECT_EQ(actual.error_code(), expected.error_code());
+ EXPECT_EQ(actual.error_message(), expected.error_message());
+}
+
TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
SetNextResolutionAllBalancers(
"{\n"
@@ -809,7 +809,7 @@ TEST_F(SingleBalancerTest,
SelectGrpclbWithMigrationServiceConfigAndNoAddresses) {
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
- SetNextResolution({}, {},
+ SetNextResolution({}, {},
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
@@ -883,7 +883,7 @@ TEST_F(SingleBalancerTest, SwapChildPolicy) {
EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
}
// Send new resolution that removes child policy from service config.
- SetNextResolutionAllBalancers();
+ SetNextResolutionAllBalancers();
WaitForAllBackends();
CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
// Check that every backend saw the same number of requests. This verifies
@@ -924,7 +924,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
TEST_F(SingleBalancerTest, SecureNaming) {
ResetStub(0, kApplicationTargetName_ + ";lb");
- SetNextResolution({AddressData{balancers_[0]->port_, "lb"}});
+ SetNextResolution({AddressData{balancers_[0]->port_, "lb"}});
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -956,7 +956,7 @@ TEST_F(SingleBalancerTest, SecureNamingDeathTest) {
ASSERT_DEATH_IF_SUPPORTED(
{
ResetStub(0, kApplicationTargetName_ + ";lb");
- SetNextResolution({AddressData{balancers_[0]->port_, "woops"}});
+ SetNextResolution({AddressData{balancers_[0]->port_, "woops"}});
channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
},
"");
@@ -1016,13 +1016,13 @@ TEST_F(SingleBalancerTest, Fallback) {
const size_t kNumBackendsInResolution = backends_.size() / 2;
ResetStub(kFallbackTimeoutMs);
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
+ backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
}
- SetNextResolution(balancer_addresses, backend_addresses);
+ SetNextResolution(balancer_addresses, backend_addresses);
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
@@ -1085,13 +1085,13 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3;
ResetStub(kFallbackTimeoutMs);
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
+ backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
}
- SetNextResolution(balancer_addresses, backend_addresses);
+ SetNextResolution(balancer_addresses, backend_addresses);
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
@@ -1121,14 +1121,14 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
EXPECT_EQ(0U, backends_[i]->service_.request_count());
}
- balancer_addresses.clear();
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- backend_addresses.clear();
+ balancer_addresses.clear();
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ backend_addresses.clear();
for (size_t i = kNumBackendsInResolution;
i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
+ backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
}
- SetNextResolution(balancer_addresses, backend_addresses);
+ SetNextResolution(balancer_addresses, backend_addresses);
// Wait until the resolution update has been processed and all the new
// fallback backends are reachable.
@@ -1192,15 +1192,15 @@ TEST_F(SingleBalancerTest,
// First two backends are fallback, last two are pointed to by balancer.
const size_t kNumFallbackBackends = 2;
const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
- std::vector<AddressData> backend_addresses;
+ std::vector<AddressData> backend_addresses;
for (size_t i = 0; i < kNumFallbackBackends; ++i) {
- backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
+ backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
}
- std::vector<AddressData> balancer_addresses;
+ std::vector<AddressData> balancer_addresses;
for (size_t i = 0; i < balancers_.size(); ++i) {
- balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
+ balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
- SetNextResolution(balancer_addresses, backend_addresses);
+ SetNextResolution(balancer_addresses, backend_addresses);
ScheduleResponseForBalancer(0,
BalancerServiceImpl::BuildResponseForBackends(
GetBackendPorts(kNumFallbackBackends), {}),
@@ -1247,15 +1247,15 @@ TEST_F(SingleBalancerTest,
// First two backends are fallback, last two are pointed to by balancer.
const size_t kNumFallbackBackends = 2;
const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
- std::vector<AddressData> backend_addresses;
+ std::vector<AddressData> backend_addresses;
for (size_t i = 0; i < kNumFallbackBackends; ++i) {
- backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
+ backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
}
- std::vector<AddressData> balancer_addresses;
+ std::vector<AddressData> balancer_addresses;
for (size_t i = 0; i < balancers_.size(); ++i) {
- balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
+ balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
- SetNextResolution(balancer_addresses, backend_addresses);
+ SetNextResolution(balancer_addresses, backend_addresses);
ScheduleResponseForBalancer(0,
BalancerServiceImpl::BuildResponseForBackends(
GetBackendPorts(kNumFallbackBackends), {}),
@@ -1299,12 +1299,12 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
// Return an unreachable balancer and one fallback backend.
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(
- AddressData{grpc_pick_unused_port_or_die(), ""});
- std::vector<AddressData> backend_addresses;
- backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
- SetNextResolution(balancer_addresses, backend_addresses);
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(
+ AddressData{grpc_pick_unused_port_or_die(), ""});
+ std::vector<AddressData> backend_addresses;
+ backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
+ SetNextResolution(balancer_addresses, backend_addresses);
// Send RPC with deadline less than the fallback timeout and make sure it
// succeeds.
CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
@@ -1314,12 +1314,12 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
- // Return one balancer and one fallback backend.
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
- backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
- SetNextResolution(balancer_addresses, backend_addresses);
+ // Return one balancer and one fallback backend.
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
+ backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
+ SetNextResolution(balancer_addresses, backend_addresses);
// Balancer drops call without sending a serverlist.
balancers_[0]->service_.NotifyDoneWithServerlists();
// Send RPC with deadline less than the fallback timeout and make sure it
@@ -1328,49 +1328,49 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
/* wait_for_ready */ false);
}
-TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) {
- const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
- // Return one balancer and one fallback backend.
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
- backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
- SetNextResolution(balancer_addresses, backend_addresses);
- // Balancer explicitly tells client to fallback.
- LoadBalanceResponse resp;
- resp.mutable_fallback_response();
- ScheduleResponseForBalancer(0, resp, 0);
- // Send RPC with deadline less than the fallback timeout and make sure it
- // succeeds.
- CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
- /* wait_for_ready */ false);
-}
-
-TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) {
- // Return one balancer and one fallback backend (backend 0).
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
- backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
- SetNextResolution(balancer_addresses, backend_addresses);
- // Balancer initially sends serverlist, then tells client to fall back,
- // then sends the serverlist again.
- // The serverlist points to backend 1.
- LoadBalanceResponse serverlist_resp =
- BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {});
- LoadBalanceResponse fallback_resp;
- fallback_resp.mutable_fallback_response();
- ScheduleResponseForBalancer(0, serverlist_resp, 0);
- ScheduleResponseForBalancer(0, fallback_resp, 100);
- ScheduleResponseForBalancer(0, serverlist_resp, 100);
- // Requests initially go to backend 1, then go to backend 0 in
- // fallback mode, then go back to backend 1 when we exit fallback.
- WaitForBackend(1);
- WaitForBackend(0);
- WaitForBackend(1);
-}
-
+TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) {
+ const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
+ ResetStub(kFallbackTimeoutMs);
+ // Return one balancer and one fallback backend.
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
+ backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
+ SetNextResolution(balancer_addresses, backend_addresses);
+ // Balancer explicitly tells client to fallback.
+ LoadBalanceResponse resp;
+ resp.mutable_fallback_response();
+ ScheduleResponseForBalancer(0, resp, 0);
+ // Send RPC with deadline less than the fallback timeout and make sure it
+ // succeeds.
+ CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
+ /* wait_for_ready */ false);
+}
+
+TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) {
+ // Return one balancer and one fallback backend (backend 0).
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
+ backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
+ SetNextResolution(balancer_addresses, backend_addresses);
+ // Balancer initially sends serverlist, then tells client to fall back,
+ // then sends the serverlist again.
+ // The serverlist points to backend 1.
+ LoadBalanceResponse serverlist_resp =
+ BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {});
+ LoadBalanceResponse fallback_resp;
+ fallback_resp.mutable_fallback_response();
+ ScheduleResponseForBalancer(0, serverlist_resp, 0);
+ ScheduleResponseForBalancer(0, fallback_resp, 100);
+ ScheduleResponseForBalancer(0, serverlist_resp, 100);
+ // Requests initially go to backend 1, then go to backend 0 in
+ // fallback mode, then go back to backend 1 when we exit fallback.
+ WaitForBackend(1);
+ WaitForBackend(0);
+ WaitForBackend(1);
+}
+
TEST_F(SingleBalancerTest, BackendsRestart) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
@@ -1394,27 +1394,27 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}
-TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
- constexpr char kServiceConfigWithTarget[] =
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"grpclb\":{\n"
- " \"serviceName\":\"test_service\"\n"
- " }}\n"
- " ]\n"
- "}";
-
- SetNextResolutionAllBalancers(kServiceConfigWithTarget);
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // Make sure that trying to connect works without a call.
- channel_->GetState(true /* try_to_connect */);
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- EXPECT_EQ(balancers_[0]->service_.service_names().back(), "test_service");
-}
-
+TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
+ constexpr char kServiceConfigWithTarget[] =
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"grpclb\":{\n"
+ " \"serviceName\":\"test_service\"\n"
+ " }}\n"
+ " ]\n"
+ "}";
+
+ SetNextResolutionAllBalancers(kServiceConfigWithTarget);
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // Make sure that trying to connect works without a call.
+ channel_->GetState(true /* try_to_connect */);
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ EXPECT_EQ(balancers_[0]->service_.service_names().back(), "test_service");
+}
+
class UpdatesTest : public GrpclbEnd2endTest {
public:
UpdatesTest() : GrpclbEnd2endTest(4, 3, 0) {}
@@ -1450,7 +1450,7 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
EXPECT_EQ(0U, balancers_[2]->service_.response_count());
std::vector<AddressData> addresses;
- addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolution(addresses);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
@@ -1509,9 +1509,9 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
EXPECT_EQ(0U, balancers_[2]->service_.response_count());
std::vector<AddressData> addresses;
- addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
- addresses.emplace_back(AddressData{balancers_[2]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[2]->port_, ""});
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolution(addresses);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
@@ -1529,8 +1529,8 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
balancers_[0]->service_.NotifyDoneWithServerlists();
addresses.clear();
- addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
SetNextResolution(addresses);
gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
@@ -1550,7 +1550,7 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
std::vector<AddressData> addresses;
- addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
SetNextResolution(addresses);
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
@@ -1590,7 +1590,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
EXPECT_EQ(0U, balancers_[2]->service_.response_count());
addresses.clear();
- addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolution(addresses);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
@@ -1627,20 +1627,20 @@ TEST_F(UpdatesTest, ReresolveDeadBackend) {
ResetStub(500);
// The first resolution contains the addresses of a balancer that never
// responds, and a fallback backend.
- std::vector<AddressData> balancer_addresses;
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- std::vector<AddressData> backend_addresses;
- backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
- SetNextResolution(balancer_addresses, backend_addresses);
+ std::vector<AddressData> balancer_addresses;
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ std::vector<AddressData> backend_addresses;
+ backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
+ SetNextResolution(balancer_addresses, backend_addresses);
// Ask channel to connect to trigger resolver creation.
channel_->GetState(true);
// The re-resolution result will contain the addresses of the same balancer
// and a new fallback backend.
- balancer_addresses.clear();
- balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- backend_addresses.clear();
- backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""});
- SetNextReresolutionResponse(balancer_addresses, backend_addresses);
+ balancer_addresses.clear();
+ balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ backend_addresses.clear();
+ backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""});
+ SetNextReresolutionResponse(balancer_addresses, backend_addresses);
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -1686,20 +1686,20 @@ class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
};
TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
- const std::vector<int> first_backend{GetBackendPorts()[0]};
- const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
-
+ const std::vector<int> first_backend{GetBackendPorts()[0]};
+ const std::vector<int> second_backend{GetBackendPorts()[1]};
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
// Ask channel to connect to trigger resolver creation.
channel_->GetState(true);
std::vector<AddressData> addresses;
- addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
SetNextResolution(addresses);
addresses.clear();
- addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
+ addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
SetNextReresolutionResponse(addresses);
// Start servers and send 10 RPCs per server.
@@ -1869,11 +1869,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
// and sent a single response.
EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- ClientStats client_stats;
- do {
- client_stats += WaitForLoadReports();
- } while (client_stats.num_calls_finished !=
- kNumRpcsPerAddress * num_backends_ + num_ok);
+ ClientStats client_stats;
+ do {
+ client_stats += WaitForLoadReports();
+ } while (client_stats.num_calls_finished !=
+ kNumRpcsPerAddress * num_backends_ + num_ok);
EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
client_stats.num_calls_started);
EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
diff --git a/contrib/libs/grpc/test/cpp/end2end/health/ya.make b/contrib/libs/grpc/test/cpp/end2end/health/ya.make
index ce19862115..7330129b73 100644
--- a/contrib/libs/grpc/test/cpp/end2end/health/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/health/ya.make
@@ -6,7 +6,7 @@ OWNER(
)
ADDINCL(
- ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
${ARCADIA_ROOT}/contrib/libs/grpc
)
diff --git a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
index d39c79afe7..516b3a4c81 100644
--- a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
@@ -59,7 +59,7 @@ class CustomHealthCheckService : public HealthCheckServiceInterface {
: impl_(impl) {
impl_->SetStatus("", HealthCheckResponse::SERVING);
}
- void SetServingStatus(const TString& service_name,
+ void SetServingStatus(const TString& service_name,
bool serving) override {
impl_->SetStatus(service_name, serving ? HealthCheckResponse::SERVING
: HealthCheckResponse::NOT_SERVING);
@@ -130,7 +130,7 @@ class HealthServiceEnd2endTest : public ::testing::Test {
}
// When the expected_status is NOT OK, we do not care about the response.
- void SendHealthCheckRpc(const TString& service_name,
+ void SendHealthCheckRpc(const TString& service_name,
const Status& expected_status) {
EXPECT_FALSE(expected_status.ok());
SendHealthCheckRpc(service_name, expected_status,
@@ -138,7 +138,7 @@ class HealthServiceEnd2endTest : public ::testing::Test {
}
void SendHealthCheckRpc(
- const TString& service_name, const Status& expected_status,
+ const TString& service_name, const Status& expected_status,
HealthCheckResponse::ServingStatus expected_serving_status) {
HealthCheckRequest request;
request.set_service(service_name);
@@ -154,9 +154,9 @@ class HealthServiceEnd2endTest : public ::testing::Test {
void VerifyHealthCheckService() {
HealthCheckServiceInterface* service = server_->GetHealthCheckService();
EXPECT_TRUE(service != nullptr);
- const TString kHealthyService("healthy_service");
- const TString kUnhealthyService("unhealthy_service");
- const TString kNotRegisteredService("not_registered");
+ const TString kHealthyService("healthy_service");
+ const TString kUnhealthyService("unhealthy_service");
+ const TString kNotRegisteredService("not_registered");
service->SetServingStatus(kHealthyService, true);
service->SetServingStatus(kUnhealthyService, false);
@@ -181,7 +181,7 @@ class HealthServiceEnd2endTest : public ::testing::Test {
}
void VerifyHealthCheckServiceStreaming() {
- const TString kServiceName("service_name");
+ const TString kServiceName("service_name");
HealthCheckServiceInterface* service = server_->GetHealthCheckService();
// Start Watch for service.
ClientContext context;
@@ -217,10 +217,10 @@ class HealthServiceEnd2endTest : public ::testing::Test {
void VerifyHealthCheckServiceShutdown() {
HealthCheckServiceInterface* service = server_->GetHealthCheckService();
EXPECT_TRUE(service != nullptr);
- const TString kHealthyService("healthy_service");
- const TString kUnhealthyService("unhealthy_service");
- const TString kNotRegisteredService("not_registered");
- const TString kNewService("add_after_shutdown");
+ const TString kHealthyService("healthy_service");
+ const TString kUnhealthyService("unhealthy_service");
+ const TString kNotRegisteredService("not_registered");
+ const TString kNewService("add_after_shutdown");
service->SetServingStatus(kHealthyService, true);
service->SetServingStatus(kUnhealthyService, false);
@@ -305,7 +305,7 @@ TEST_F(HealthServiceEnd2endTest, DefaultHealthService) {
VerifyHealthCheckServiceStreaming();
// The default service has a size limit of the service name.
- const TString kTooLongServiceName(201, 'x');
+ const TString kTooLongServiceName(201, 'x');
SendHealthCheckRpc(kTooLongServiceName,
Status(StatusCode::INVALID_ARGUMENT, ""));
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
index d9fd316d0f..e4ebee8e93 100644
--- a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
@@ -43,12 +43,12 @@ namespace grpc {
namespace testing {
namespace {
-#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
-using ::grpc::experimental::CallbackGenericService;
-using ::grpc::experimental::GenericCallbackServerContext;
-using ::grpc::experimental::ServerGenericBidiReactor;
-#endif
-
+#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
+using ::grpc::experimental::CallbackGenericService;
+using ::grpc::experimental::GenericCallbackServerContext;
+using ::grpc::experimental::ServerGenericBidiReactor;
+#endif
+
void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
bool VerifyReturnSuccess(CompletionQueue* cq, int i) {
@@ -251,10 +251,10 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
: false;
}
- bool SetUpServer(::grpc::Service* service1, ::grpc::Service* service2,
- AsyncGenericService* generic_service,
- CallbackGenericService* callback_generic_service,
- int max_message_size = 0) {
+ bool SetUpServer(::grpc::Service* service1, ::grpc::Service* service2,
+ AsyncGenericService* generic_service,
+ CallbackGenericService* callback_generic_service,
+ int max_message_size = 0) {
int port = grpc_pick_unused_port_or_die();
server_address_ << "localhost:" << port;
@@ -273,12 +273,12 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
builder.RegisterAsyncGenericService(generic_service);
}
if (callback_generic_service) {
-#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
- builder.RegisterCallbackGenericService(callback_generic_service);
-#else
+#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
+ builder.RegisterCallbackGenericService(callback_generic_service);
+#else
builder.experimental().RegisterCallbackGenericService(
callback_generic_service);
-#endif
+#endif
}
if (max_message_size != 0) {
@@ -354,7 +354,7 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
void SendSimpleClientStreaming() {
EchoRequest send_request;
EchoResponse recv_response;
- TString expected_message;
+ TString expected_message;
ClientContext cli_ctx;
cli_ctx.set_wait_for_ready(true);
send_request.set_message("Hello");
@@ -417,7 +417,7 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
EchoResponse response;
ClientContext context;
context.set_wait_for_ready(true);
- TString msg("hello");
+ TString msg("hello");
auto stream = stub_->BidiStream(&context);
@@ -661,7 +661,7 @@ class SplitResponseStreamDupPkg
gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
GPR_ASSERT(stream->Read(&req));
for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
- resp.set_message(req.message() + ToString(i) + "_dup");
+ resp.set_message(req.message() + ToString(i) + "_dup");
GPR_ASSERT(stream->Write(resp));
}
return Status::OK;
@@ -701,7 +701,7 @@ class FullySplitStreamedDupPkg
gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
GPR_ASSERT(stream->Read(&req));
for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
- resp.set_message(req.message() + ToString(i) + "_dup");
+ resp.set_message(req.message() + ToString(i) + "_dup");
GPR_ASSERT(stream->Write(resp));
}
return Status::OK;
@@ -753,7 +753,7 @@ class FullyStreamedDupPkg : public duplicate::EchoTestService::StreamedService {
gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
GPR_ASSERT(stream->Read(&req));
for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
- resp.set_message(req.message() + ToString(i) + "_dup");
+ resp.set_message(req.message() + ToString(i) + "_dup");
GPR_ASSERT(stream->Write(resp));
}
return Status::OK;
@@ -816,15 +816,15 @@ TEST_F(HybridEnd2endTest, GenericEcho) {
TEST_P(HybridEnd2endTest, CallbackGenericEcho) {
EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service;
- class GenericEchoService : public CallbackGenericService {
+ class GenericEchoService : public CallbackGenericService {
private:
- ServerGenericBidiReactor* CreateReactor(
- GenericCallbackServerContext* context) override {
+ ServerGenericBidiReactor* CreateReactor(
+ GenericCallbackServerContext* context) override {
EXPECT_EQ(context->method(), "/grpc.testing.EchoTestService/Echo");
- gpr_log(GPR_DEBUG, "Constructor of generic service %d",
- static_cast<int>(context->deadline().time_since_epoch().count()));
+ gpr_log(GPR_DEBUG, "Constructor of generic service %d",
+ static_cast<int>(context->deadline().time_since_epoch().count()));
- class Reactor : public ServerGenericBidiReactor {
+ class Reactor : public ServerGenericBidiReactor {
public:
Reactor() { StartRead(&request_); }
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
index 87bfe91f1a..ff88953651 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
@@ -17,7 +17,7 @@
*/
#include "test/cpp/end2end/interceptors_util.h"
-#include <util/string/cast.h>
+#include <util/string/cast.h>
namespace grpc {
namespace testing {
@@ -47,7 +47,7 @@ void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel) {
ctx.AddMetadata("testkey", "testvalue");
req.set_message("Hello");
EchoResponse resp;
- string expected_resp = "";
+ string expected_resp = "";
auto writer = stub->RequestStream(&ctx, &resp);
for (int i = 0; i < kNumStreamingMessages; i++) {
writer->Write(req);
@@ -84,10 +84,10 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
EchoRequest req;
EchoResponse resp;
ctx.AddMetadata("testkey", "testvalue");
- req.mutable_param()->set_echo_metadata(true);
+ req.mutable_param()->set_echo_metadata(true);
auto stream = stub->BidiStream(&ctx);
for (auto i = 0; i < kNumStreamingMessages; i++) {
- req.set_message(TString("Hello") + ::ToString(i));
+ req.set_message(TString("Hello") + ::ToString(i));
stream->Write(req);
stream->Read(&resp);
EXPECT_EQ(req.message(), resp.message());
@@ -97,61 +97,61 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
EXPECT_EQ(s.ok(), true);
}
-void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- CompletionQueue cq;
- EchoRequest send_request;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
-
- send_request.set_message("Hello");
- cli_ctx.AddMetadata("testkey", "testvalue");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub->AsyncEcho(&cli_ctx, send_request, &cq));
- response_reader->Finish(&recv_response, &recv_status, tag(1));
- Verifier().Expect(1, true).Verify(&cq);
- EXPECT_EQ(send_request.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-void MakeAsyncCQClientStreamingCall(
- const std::shared_ptr<Channel>& /*channel*/) {
- // TODO(yashykt) : Fill this out
-}
-
-void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- CompletionQueue cq;
- EchoRequest send_request;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
-
- cli_ctx.AddMetadata("testkey", "testvalue");
- send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub->AsyncResponseStream(&cli_ctx, send_request, &cq, tag(1)));
- Verifier().Expect(1, true).Verify(&cq);
- // Read the expected number of messages
- for (int i = 0; i < kNumStreamingMessages; i++) {
- cli_stream->Read(&recv_response, tag(2));
- Verifier().Expect(2, true).Verify(&cq);
- ASSERT_EQ(recv_response.message(), send_request.message());
- }
- // The next read should fail
- cli_stream->Read(&recv_response, tag(3));
- Verifier().Expect(3, false).Verify(&cq);
- // Get the status
- cli_stream->Finish(&recv_status, tag(4));
- Verifier().Expect(4, true).Verify(&cq);
- EXPECT_TRUE(recv_status.ok());
-}
-
-void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& /*channel*/) {
- // TODO(yashykt) : Fill this out
-}
-
+void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ CompletionQueue cq;
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+
+ send_request.set_message("Hello");
+ cli_ctx.AddMetadata("testkey", "testvalue");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub->AsyncEcho(&cli_ctx, send_request, &cq));
+ response_reader->Finish(&recv_response, &recv_status, tag(1));
+ Verifier().Expect(1, true).Verify(&cq);
+ EXPECT_EQ(send_request.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+void MakeAsyncCQClientStreamingCall(
+ const std::shared_ptr<Channel>& /*channel*/) {
+ // TODO(yashykt) : Fill this out
+}
+
+void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ CompletionQueue cq;
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+
+ cli_ctx.AddMetadata("testkey", "testvalue");
+ send_request.set_message("Hello");
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub->AsyncResponseStream(&cli_ctx, send_request, &cq, tag(1)));
+ Verifier().Expect(1, true).Verify(&cq);
+ // Read the expected number of messages
+ for (int i = 0; i < kNumStreamingMessages; i++) {
+ cli_stream->Read(&recv_response, tag(2));
+ Verifier().Expect(2, true).Verify(&cq);
+ ASSERT_EQ(recv_response.message(), send_request.message());
+ }
+ // The next read should fail
+ cli_stream->Read(&recv_response, tag(3));
+ Verifier().Expect(3, false).Verify(&cq);
+ // Get the status
+ cli_stream->Finish(&recv_status, tag(4));
+ Verifier().Expect(4, true).Verify(&cq);
+ EXPECT_TRUE(recv_status.ok());
+}
+
+void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& /*channel*/) {
+ // TODO(yashykt) : Fill this out
+}
+
void MakeCallbackCall(const std::shared_ptr<Channel>& channel) {
auto stub = grpc::testing::EchoTestService::NewStub(channel);
ClientContext ctx;
@@ -187,7 +187,7 @@ bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
return false;
}
-bool CheckMetadata(const std::multimap<TString, TString>& map,
+bool CheckMetadata(const std::multimap<TString, TString>& map,
const string& key, const string& value) {
for (const auto& pair : map) {
if (pair.first == key.c_str() && pair.second == value.c_str()) {
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
index f332e87762..c95170bbbc 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
@@ -102,16 +102,16 @@ class EchoTestServiceStreamingImpl : public EchoTestService::Service {
public:
~EchoTestServiceStreamingImpl() override {}
- Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- auto client_metadata = context->client_metadata();
- for (const auto& pair : client_metadata) {
- context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
- }
- response->set_message(request->message());
- return Status::OK;
- }
-
+ Status Echo(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ auto client_metadata = context->client_metadata();
+ for (const auto& pair : client_metadata) {
+ context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
+ }
+ response->set_message(request->message());
+ return Status::OK;
+ }
+
Status BidiStream(
ServerContext* context,
grpc::ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
@@ -138,7 +138,7 @@ class EchoTestServiceStreamingImpl : public EchoTestService::Service {
}
EchoRequest req;
- string response_str = "";
+ string response_str = "";
while (reader->Read(&req)) {
response_str += req.message();
}
@@ -172,20 +172,20 @@ void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel);
void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel);
-void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel);
-
-void MakeAsyncCQClientStreamingCall(const std::shared_ptr<Channel>& channel);
-
-void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel);
-
-void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& channel);
-
+void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel);
+
+void MakeAsyncCQClientStreamingCall(const std::shared_ptr<Channel>& channel);
+
+void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel);
+
+void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& channel);
+
void MakeCallbackCall(const std::shared_ptr<Channel>& channel);
bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
const string& key, const string& value);
-bool CheckMetadata(const std::multimap<TString, TString>& map,
+bool CheckMetadata(const std::multimap<TString, TString>& map,
const string& key, const string& value);
std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
diff --git a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
index a6a42d82fc..4bf755206e 100644
--- a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc
@@ -17,7 +17,7 @@
*/
#include <algorithm>
-#include <atomic>
+#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
@@ -94,11 +94,11 @@ enum class Protocol { INPROC, TCP };
class TestScenario {
public:
- TestScenario(Protocol protocol, const TString& creds_type)
+ TestScenario(Protocol protocol, const TString& creds_type)
: protocol(protocol), credentials_type(creds_type) {}
void Log() const;
Protocol protocol;
- const TString credentials_type;
+ const TString credentials_type;
};
static std::ostream& operator<<(std::ostream& out,
@@ -146,13 +146,13 @@ class MessageAllocatorEnd2endTestBase
server_ = builder.BuildAndStart();
}
- void DestroyServer() {
- if (server_) {
- server_->Shutdown();
- server_.reset();
- }
- }
-
+ void DestroyServer() {
+ if (server_) {
+ server_->Shutdown();
+ server_.reset();
+ }
+ }
+
void ResetStub() {
ChannelArguments args;
auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
@@ -172,22 +172,22 @@ class MessageAllocatorEnd2endTestBase
}
void TearDown() override {
- DestroyServer();
+ DestroyServer();
if (picked_port_ > 0) {
grpc_recycle_unused_port(picked_port_);
}
}
void SendRpcs(int num_rpcs) {
- TString test_string("");
+ TString test_string("");
for (int i = 0; i < num_rpcs; i++) {
EchoRequest request;
EchoResponse response;
ClientContext cli_ctx;
- test_string += TString(1024, 'x');
+ test_string += TString(1024, 'x');
request.set_message(test_string);
- TString val;
+ TString val;
cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
std::mutex mu;
@@ -236,8 +236,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase {
class MessageHolderImpl
: public experimental::MessageHolder<EchoRequest, EchoResponse> {
public:
- MessageHolderImpl(std::atomic_int* request_deallocation_count,
- std::atomic_int* messages_deallocation_count)
+ MessageHolderImpl(std::atomic_int* request_deallocation_count,
+ std::atomic_int* messages_deallocation_count)
: request_deallocation_count_(request_deallocation_count),
messages_deallocation_count_(messages_deallocation_count) {
set_request(new EchoRequest);
@@ -262,8 +262,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase {
}
private:
- std::atomic_int* const request_deallocation_count_;
- std::atomic_int* const messages_deallocation_count_;
+ std::atomic_int* const request_deallocation_count_;
+ std::atomic_int* const messages_deallocation_count_;
};
experimental::MessageHolder<EchoRequest, EchoResponse>* AllocateMessages()
override {
@@ -272,8 +272,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase {
&messages_deallocation_count);
}
int allocation_count = 0;
- std::atomic_int request_deallocation_count{0};
- std::atomic_int messages_deallocation_count{0};
+ std::atomic_int request_deallocation_count{0};
+ std::atomic_int messages_deallocation_count{0};
};
};
@@ -284,9 +284,9 @@ TEST_P(SimpleAllocatorTest, SimpleRpc) {
CreateServer(allocator.get());
ResetStub();
SendRpcs(kRpcCount);
- // messages_deallocaton_count is updated in Release after server side OnDone.
- // Destroy server to make sure it has been updated.
- DestroyServer();
+ // messages_deallocaton_count is updated in Release after server side OnDone.
+ // Destroy server to make sure it has been updated.
+ DestroyServer();
EXPECT_EQ(kRpcCount, allocator->allocation_count);
EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count);
EXPECT_EQ(0, allocator->request_deallocation_count);
@@ -309,9 +309,9 @@ TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) {
CreateServer(allocator.get());
ResetStub();
SendRpcs(kRpcCount);
- // messages_deallocaton_count is updated in Release after server side OnDone.
- // Destroy server to make sure it has been updated.
- DestroyServer();
+ // messages_deallocaton_count is updated in Release after server side OnDone.
+ // Destroy server to make sure it has been updated.
+ DestroyServer();
EXPECT_EQ(kRpcCount, allocator->allocation_count);
EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count);
EXPECT_EQ(kRpcCount, allocator->request_deallocation_count);
@@ -336,9 +336,9 @@ TEST_P(SimpleAllocatorTest, RpcWithReleaseRequest) {
CreateServer(allocator.get());
ResetStub();
SendRpcs(kRpcCount);
- // messages_deallocaton_count is updated in Release after server side OnDone.
- // Destroy server to make sure it has been updated.
- DestroyServer();
+ // messages_deallocaton_count is updated in Release after server side OnDone.
+ // Destroy server to make sure it has been updated.
+ DestroyServer();
EXPECT_EQ(kRpcCount, allocator->allocation_count);
EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count);
EXPECT_EQ(0, allocator->request_deallocation_count);
@@ -389,7 +389,7 @@ TEST_P(ArenaAllocatorTest, SimpleRpc) {
std::vector<TestScenario> CreateTestScenarios(bool test_insecure) {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types{
+ std::vector<TString> credentials_types{
GetCredentialsProvider()->GetSecureCredentialsTypeList()};
auto insec_ok = [] {
// Only allow insecure credentials type when it is registered with the
diff --git a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
index da67357f93..a3d61c4e98 100644
--- a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
@@ -42,14 +42,14 @@
#include <iostream>
-using grpc::testing::DefaultReactorTestPeer;
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using grpc::testing::EchoTestService;
-using grpc::testing::MockClientReaderWriter;
-using std::vector;
-using std::chrono::system_clock;
-using ::testing::_;
+using grpc::testing::DefaultReactorTestPeer;
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using grpc::testing::EchoTestService;
+using grpc::testing::MockClientReaderWriter;
+using std::vector;
+using std::chrono::system_clock;
+using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Invoke;
@@ -81,8 +81,8 @@ class FakeClient {
EchoResponse response;
ClientContext context;
- TString msg("hello");
- TString exp(msg);
+ TString msg("hello");
+ TString exp(msg);
std::unique_ptr<ClientWriterInterface<EchoRequest>> cstream =
stub_->RequestStream(&context, &response);
@@ -111,7 +111,7 @@ class FakeClient {
std::unique_ptr<ClientReaderInterface<EchoResponse>> cstream =
stub_->ResponseStream(&context, request);
- TString exp = "";
+ TString exp = "";
EXPECT_TRUE(cstream->Read(&response));
exp.append(response.message() + " ");
@@ -129,7 +129,7 @@ class FakeClient {
EchoRequest request;
EchoResponse response;
ClientContext context;
- TString msg("hello");
+ TString msg("hello");
std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>>
stream = stub_->BidiStream(&context);
@@ -256,7 +256,7 @@ class TestServiceImpl : public EchoTestService::Service {
ServerReader<EchoRequest>* reader,
EchoResponse* response) override {
EchoRequest request;
- TString resp("");
+ TString resp("");
while (reader->Read(&request)) {
gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
resp.append(request.message());
@@ -268,8 +268,8 @@ class TestServiceImpl : public EchoTestService::Service {
Status ResponseStream(ServerContext* /*context*/, const EchoRequest* request,
ServerWriter<EchoResponse>* writer) override {
EchoResponse response;
- vector<TString> tokens = split(request->message());
- for (const TString& token : tokens) {
+ vector<TString> tokens = split(request->message());
+ for (const TString& token : tokens) {
response.set_message(token);
writer->Write(response);
}
@@ -290,9 +290,9 @@ class TestServiceImpl : public EchoTestService::Service {
}
private:
- const vector<TString> split(const TString& input) {
- TString buff("");
- vector<TString> result;
+ const vector<TString> split(const TString& input) {
+ TString buff("");
+ vector<TString> result;
for (auto n : input) {
if (n != ' ') {
diff --git a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
index bb54f17f03..4be070ec71 100644
--- a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
@@ -39,18 +39,18 @@
#ifdef GRPC_POSIX_SOCKET
// Thread-local variable to so that only polls from this test assert
-// non-blocking (not polls from resolver, timer thread, etc), and only when the
-// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for
-// picking a port or other reasons).
-GPR_TLS_DECL(g_is_nonblocking_poll);
+// non-blocking (not polls from resolver, timer thread, etc), and only when the
+// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for
+// picking a port or other reasons).
+GPR_TLS_DECL(g_is_nonblocking_poll);
namespace {
int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
int timeout) {
- // Only assert that this poll should have zero timeout if we're in the
- // middle of a zero-timeout CQ Next.
- if (gpr_tls_get(&g_is_nonblocking_poll)) {
+ // Only assert that this poll should have zero timeout if we're in the
+ // middle of a zero-timeout CQ Next.
+ if (gpr_tls_get(&g_is_nonblocking_poll)) {
GPR_ASSERT(timeout == 0);
}
return poll(pfds, nfds, timeout);
@@ -78,17 +78,17 @@ class NonblockingTest : public ::testing::Test {
}
bool LoopForTag(void** tag, bool* ok) {
- // Temporarily set the thread-local nonblocking poll flag so that the polls
- // caused by this loop are indeed sent by the library with zero timeout.
- intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll);
- gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(true));
+ // Temporarily set the thread-local nonblocking poll flag so that the polls
+ // caused by this loop are indeed sent by the library with zero timeout.
+ intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll);
+ gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(true));
for (;;) {
auto r = cq_->AsyncNext(tag, ok, gpr_time_0(GPR_CLOCK_REALTIME));
if (r == CompletionQueue::SHUTDOWN) {
- gpr_tls_set(&g_is_nonblocking_poll, orig_val);
+ gpr_tls_set(&g_is_nonblocking_poll, orig_val);
return false;
} else if (r == CompletionQueue::GOT_EVENT) {
- gpr_tls_set(&g_is_nonblocking_poll, orig_val);
+ gpr_tls_set(&g_is_nonblocking_poll, orig_val);
return true;
}
}
@@ -198,17 +198,17 @@ int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv);
- gpr_tls_init(&g_is_nonblocking_poll);
-
- // Start the nonblocking poll thread-local variable as false because the
- // thread that issues RPCs starts by picking a port (which has non-zero
- // timeout).
- gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(false));
-
+ gpr_tls_init(&g_is_nonblocking_poll);
+
+ // Start the nonblocking poll thread-local variable as false because the
+ // thread that issues RPCs starts by picking a port (which has non-zero
+ // timeout).
+ gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(false));
+
int ret = RUN_ALL_TESTS();
- gpr_tls_destroy(&g_is_nonblocking_poll);
+ gpr_tls_destroy(&g_is_nonblocking_poll);
return ret;
-#else // GRPC_POSIX_SOCKET
- return 0;
-#endif // GRPC_POSIX_SOCKET
+#else // GRPC_POSIX_SOCKET
+ return 0;
+#endif // GRPC_POSIX_SOCKET
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc
index 4c1c768bea..b69d1dd2be 100644
--- a/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc
@@ -58,7 +58,7 @@ namespace {
class TestScenario {
public:
TestScenario(bool server_port, bool pending_data,
- const TString& creds_type)
+ const TString& creds_type)
: server_has_port(server_port),
queue_pending_data(pending_data),
credentials_type(creds_type) {}
@@ -67,7 +67,7 @@ class TestScenario {
bool server_has_port;
// whether tcp server should read some data before handoff
bool queue_pending_data;
- const TString credentials_type;
+ const TString credentials_type;
};
static std::ostream& operator<<(std::ostream& out,
@@ -115,7 +115,7 @@ class TestTcpServer {
gpr_log(GPR_INFO, "Test TCP server started at %s", address_.c_str());
}
- const TString& address() { return address_; }
+ const TString& address() { return address_; }
void SetAcceptor(
std::unique_ptr<experimental::ExternalConnectionAcceptor> acceptor) {
@@ -156,8 +156,8 @@ class TestTcpServer {
private:
void OnConnect(grpc_endpoint* tcp, grpc_pollset* /*accepting_pollset*/,
grpc_tcp_server_acceptor* acceptor) {
- TString peer(grpc_endpoint_get_peer(tcp));
- gpr_log(GPR_INFO, "Got incoming connection! from %s", peer.c_str());
+ TString peer(grpc_endpoint_get_peer(tcp));
+ gpr_log(GPR_INFO, "Got incoming connection! from %s", peer.c_str());
EXPECT_FALSE(acceptor->external_connection);
listener_fd_ = grpc_tcp_server_port_fd(
acceptor->from_server, acceptor->port_index, acceptor->fd_index);
@@ -194,7 +194,7 @@ class TestTcpServer {
grpc_closure on_fd_released_;
std::thread running_thread_;
int port_ = -1;
- TString address_;
+ TString address_;
std::unique_ptr<experimental::ExternalConnectionAcceptor>
connection_acceptor_;
test_tcp_server tcp_server_;
@@ -309,7 +309,7 @@ static void SendRpc(EchoTestService::Stub* stub, int num_rpcs) {
std::vector<TestScenario> CreateTestScenarios() {
std::vector<TestScenario> scenarios;
- std::vector<TString> credentials_types;
+ std::vector<TString> credentials_types;
#if TARGET_OS_IPHONE
// Workaround Apple CFStream bug
diff --git a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
index db14932d0c..d79b33da70 100644
--- a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
@@ -47,7 +47,7 @@ class ProtoServerReflectionTest : public ::testing::Test {
ref_desc_pool_ = protobuf::DescriptorPool::generated_pool();
ServerBuilder builder;
- TString server_address = "localhost:" + to_string(port_);
+ TString server_address = "localhost:" + to_string(port_);
builder.AddListeningPort(server_address, InsecureServerCredentials());
server_ = builder.BuildAndStart();
}
@@ -67,7 +67,7 @@ class ProtoServerReflectionTest : public ::testing::Test {
return strs.str();
}
- void CompareService(const TString& service) {
+ void CompareService(const TString& service) {
const protobuf::ServiceDescriptor* service_desc =
desc_pool_->FindServiceByName(service);
const protobuf::ServiceDescriptor* ref_service_desc =
@@ -89,7 +89,7 @@ class ProtoServerReflectionTest : public ::testing::Test {
}
}
- void CompareMethod(const TString& method) {
+ void CompareMethod(const TString& method) {
const protobuf::MethodDescriptor* method_desc =
desc_pool_->FindMethodByName(method);
const protobuf::MethodDescriptor* ref_method_desc =
@@ -102,7 +102,7 @@ class ProtoServerReflectionTest : public ::testing::Test {
CompareType(method_desc->output_type()->full_name());
}
- void CompareType(const TString& type) {
+ void CompareType(const TString& type) {
if (known_types_.find(type) != known_types_.end()) {
return;
}
@@ -130,7 +130,7 @@ class ProtoServerReflectionTest : public ::testing::Test {
TEST_F(ProtoServerReflectionTest, CheckResponseWithLocalDescriptorPool) {
ResetStub();
- std::vector<TString> services;
+ std::vector<TString> services;
desc_db_->GetServices(&services);
// The service list has at least one service (reflection servcie).
EXPECT_TRUE(services.size() > 0);
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
index 4adbb0e506..004902cad3 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
@@ -52,7 +52,7 @@ class TestServerBuilderPlugin : public ServerBuilderPlugin {
register_service_ = false;
}
- TString name() override { return PLUGIN_NAME; }
+ TString name() override { return PLUGIN_NAME; }
void InitServer(ServerInitializer* si) override {
init_server_is_called_ = true;
@@ -63,7 +63,7 @@ class TestServerBuilderPlugin : public ServerBuilderPlugin {
void Finish(ServerInitializer* /*si*/) override { finish_is_called_ = true; }
- void ChangeArguments(const TString& /*name*/, void* /*value*/) override {
+ void ChangeArguments(const TString& /*name*/, void* /*value*/) override {
change_arguments_is_called_ = true;
}
@@ -123,10 +123,10 @@ std::unique_ptr<ServerBuilderPlugin> CreateTestServerBuilderPlugin() {
// Force AddServerBuilderPlugin() to be called at static initialization time.
struct StaticTestPluginInitializer {
- StaticTestPluginInitializer() {
- ::grpc::ServerBuilder::InternalAddPluginFactory(
- &CreateTestServerBuilderPlugin);
- }
+ StaticTestPluginInitializer() {
+ ::grpc::ServerBuilder::InternalAddPluginFactory(
+ &CreateTestServerBuilderPlugin);
+ }
} static_plugin_initializer_test_;
// When the param boolean is true, the ServerBuilder plugin will be added at the
@@ -168,7 +168,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
}
void StartServer() {
- TString server_address = "localhost:" + to_string(port_);
+ TString server_address = "localhost:" + to_string(port_);
builder_->AddListeningPort(server_address, InsecureServerCredentials());
// we run some tests without a service, and for those we need to supply a
// frequently polled completion queue
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
index 92e37b1d41..3616d680f9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
@@ -38,7 +38,7 @@ using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using std::chrono::system_clock;
-static TString g_root;
+static TString g_root;
namespace grpc {
namespace testing {
@@ -94,7 +94,7 @@ class CrashTest : public ::testing::Test {
protected:
CrashTest() {}
- std::unique_ptr<Server> CreateServerAndClient(const TString& mode) {
+ std::unique_ptr<Server> CreateServerAndClient(const TString& mode) {
auto port = grpc_pick_unused_port_or_die();
std::ostringstream addr_stream;
addr_stream << "localhost:" << port;
@@ -146,9 +146,9 @@ TEST_F(CrashTest, BidiStream) {
} // namespace grpc
int main(int argc, char** argv) {
- TString me = argv[0];
+ TString me = argv[0];
auto lslash = me.rfind('/');
- if (lslash != TString::npos) {
+ if (lslash != TString::npos) {
g_root = me.substr(0, lslash);
} else {
g_root = ".";
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
index 317283b94b..202fb2836c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
@@ -20,7 +20,7 @@
#include <iostream>
#include <memory>
#include <sstream>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <grpc/support/log.h>
#include <grpcpp/channel.h>
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
index 9dc8230326..0f340516b0 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
@@ -152,7 +152,7 @@ class ServerEarlyReturnTest : public ::testing::Test {
auto stream = stub_->BidiStream(&context);
for (int i = 0; i < 20; i++) {
- request.set_message(TString("hello") + ToString(i));
+ request.set_message(TString("hello") + ToString(i));
bool write_ok = stream->Write(request);
bool read_ok = stream->Read(&response);
if (i < 10) {
@@ -189,7 +189,7 @@ class ServerEarlyReturnTest : public ::testing::Test {
auto stream = stub_->RequestStream(&context, &response);
for (int i = 0; i < 20; i++) {
- request.set_message(TString("hello") + ToString(i));
+ request.set_message(TString("hello") + ToString(i));
bool written = stream->Write(request);
if (i < 10) {
EXPECT_TRUE(written);
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
index ca91767643..161176f141 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
@@ -6,7 +6,7 @@ OWNER(
)
ADDINCL(
- ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
${ARCADIA_ROOT}/contrib/libs/grpc
)
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
index d48f54e175..6d2dc772ef 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
@@ -155,7 +155,7 @@ class SyncSendMessageTester : public experimental::Interceptor {
string old_msg =
static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
EXPECT_EQ(old_msg.find("Hello"), 0u);
- new_msg_.set_message(TString("World" + old_msg).c_str());
+ new_msg_.set_message(TString("World" + old_msg).c_str());
methods->ModifySendMessage(&new_msg_);
}
methods->Proceed();
@@ -183,7 +183,7 @@ class SyncSendMessageVerifier : public experimental::Interceptor {
if (methods->QueryInterceptionHookPoint(
experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
// Make sure that the changes made in SyncSendMessageTester persisted
- string old_msg =
+ string old_msg =
static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
EXPECT_EQ(old_msg.find("World"), 0u);
@@ -217,7 +217,7 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
ctx.AddMetadata("testkey", "testvalue");
auto stream = stub->BidiStream(&ctx);
for (auto i = 0; i < 10; i++) {
- req.set_message("Hello" + ::ToString(i));
+ req.set_message("Hello" + ::ToString(i));
stream->Write(req);
stream->Read(&resp);
EXPECT_EQ(req.message(), resp.message());
@@ -233,7 +233,7 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
int port = 5004; // grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ::ToString(port);
+ server_address_ = "localhost:" + ::ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
@@ -259,7 +259,7 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
builder.experimental().SetInterceptorCreators(std::move(creators));
server_ = builder.BuildAndStart();
}
- TString server_address_;
+ TString server_address_;
TestServiceImpl service_;
std::unique_ptr<Server> server_;
};
@@ -280,7 +280,7 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
int port = 5005; // grpc_pick_unused_port_or_die();
ServerBuilder builder;
- server_address_ = "localhost:" + ::ToString(port);
+ server_address_ = "localhost:" + ::ToString(port);
builder.AddListeningPort(server_address_, InsecureServerCredentials());
builder.RegisterService(&service_);
@@ -303,7 +303,7 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
builder.experimental().SetInterceptorCreators(std::move(creators));
server_ = builder.BuildAndStart();
}
- TString server_address_;
+ TString server_address_;
EchoTestServiceStreamingImpl service_;
std::unique_ptr<Server> server_;
};
@@ -343,7 +343,7 @@ class ServerInterceptorsAsyncEnd2endTest : public ::testing::Test {};
TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
DummyInterceptor::Reset();
int port = 5006; // grpc_pick_unused_port_or_die();
- string server_address = "localhost:" + ::ToString(port);
+ string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
EchoTestService::AsyncService service;
builder.AddListeningPort(server_address, InsecureServerCredentials());
@@ -416,7 +416,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
DummyInterceptor::Reset();
int port = 5007; // grpc_pick_unused_port_or_die();
- string server_address = "localhost:" + ::ToString(port);
+ string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
EchoTestService::AsyncService service;
builder.AddListeningPort(server_address, InsecureServerCredentials());
@@ -499,7 +499,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
DummyInterceptor::Reset();
int port = 5008; // grpc_pick_unused_port_or_die();
- string server_address = "localhost:" + ::ToString(port);
+ string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
AsyncGenericService service;
builder.AddListeningPort(server_address, InsecureServerCredentials());
@@ -521,7 +521,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
grpc::CreateChannel(server_address, InsecureChannelCredentials());
GenericStub generic_stub(channel);
- const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
+ const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
EchoRequest send_request;
EchoRequest recv_request;
EchoResponse send_response;
@@ -536,8 +536,8 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
send_request.set_message("Hello");
cli_ctx.AddMetadata("testkey", "testvalue");
- CompletionQueue* cq = srv_cq.get();
- std::thread request_call([cq]() { Verifier().Expect(4, true).Verify(cq); });
+ CompletionQueue* cq = srv_cq.get();
+ std::thread request_call([cq]() { Verifier().Expect(4, true).Verify(cq); });
std::unique_ptr<GenericClientAsyncReaderWriter> call =
generic_stub.PrepareCall(&cli_ctx, kMethodName, &cli_cq);
call->StartCall(tag(1));
@@ -553,7 +553,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
service.RequestCall(&srv_ctx, &stream, srv_cq.get(), srv_cq.get(), tag(4));
- request_call.join();
+ request_call.join();
EXPECT_EQ(kMethodName, srv_ctx.method());
EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
srv_ctx.AddTrailingMetadata("testkey", "testvalue");
@@ -607,7 +607,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
DummyInterceptor::Reset();
int port = 5009; // grpc_pick_unused_port_or_die();
- string server_address = "localhost:" + ::ToString(port);
+ string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
builder.AddListeningPort(server_address, InsecureServerCredentials());
std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
@@ -659,7 +659,7 @@ class ServerInterceptorsSyncUnimplementedEnd2endTest : public ::testing::Test {
TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
DummyInterceptor::Reset();
int port = 5010; // grpc_pick_unused_port_or_die();
- string server_address = "localhost:" + ::ToString(port);
+ string server_address = "localhost:" + ::ToString(port);
ServerBuilder builder;
TestServiceImpl service;
builder.RegisterService(&service);
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
index d8c1b598e8..13833cf66c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
@@ -33,7 +33,7 @@
#include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
+#include "test/core/util/test_config.h"
namespace grpc {
namespace testing {
@@ -71,7 +71,7 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test {
protected:
void SetUp() override {
server_address_ =
- "localhost:" + ToString(grpc_pick_unused_port_or_die());
+ "localhost:" + ToString(grpc_pick_unused_port_or_die());
server_ =
ServerBuilder()
.AddListeningPort(server_address_, InsecureServerCredentials())
@@ -91,11 +91,11 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test {
server_thread_.join();
}
- void ClientMakeEchoCalls(const TString& lb_id, const TString& lb_tag,
- const TString& message, size_t num_requests) {
+ void ClientMakeEchoCalls(const TString& lb_id, const TString& lb_tag,
+ const TString& message, size_t num_requests) {
auto stub = EchoTestService::NewStub(
grpc::CreateChannel(server_address_, InsecureChannelCredentials()));
- TString lb_token = lb_id + lb_tag;
+ TString lb_token = lb_id + lb_tag;
for (int i = 0; i < num_requests; ++i) {
ClientContext ctx;
if (!lb_token.empty()) ctx.AddMetadata(GRPC_LB_TOKEN_MD_KEY, lb_token);
@@ -114,7 +114,7 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test {
}
}
- TString server_address_;
+ TString server_address_;
std::unique_ptr<Server> server_;
std::thread server_thread_;
EchoTestServiceImpl echo_service_;
@@ -139,7 +139,7 @@ TEST_F(ServerLoadReportingEnd2endTest, BasicReport) {
gpr_log(GPR_INFO, "Initial request sent.");
::grpc::lb::v1::LoadReportResponse response;
stream->Read(&response);
- const TString& lb_id = response.initial_response().load_balancer_id();
+ const TString& lb_id = response.initial_response().load_balancer_id();
gpr_log(GPR_INFO, "Initial response received (lb_id: %s).", lb_id.c_str());
ClientMakeEchoCalls(lb_id, "LB_TAG", kOkMessage, 1);
while (true) {
@@ -186,7 +186,7 @@ TEST_F(ServerLoadReportingEnd2endTest, BasicReport) {
} // namespace grpc
int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
index 391c4b735a..cee33343c1 100644
--- a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc
@@ -21,11 +21,11 @@
#include <mutex>
#include <random>
#include <set>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <thread>
-#include "y_absl/strings/str_cat.h"
-
+#include "y_absl/strings/str_cat.h"
+
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
@@ -48,7 +48,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/parse_address.h"
+#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
@@ -96,13 +96,13 @@ class MyTestServiceImpl : public TestServiceImpl {
request_count_ = 0;
}
- std::set<TString> clients() {
+ std::set<TString> clients() {
grpc::internal::MutexLock lock(&clients_mu_);
return clients_;
}
private:
- void AddClient(const TString& client) {
+ void AddClient(const TString& client) {
grpc::internal::MutexLock lock(&clients_mu_);
clients_.insert(client);
}
@@ -110,7 +110,7 @@ class MyTestServiceImpl : public TestServiceImpl {
grpc::internal::Mutex mu_;
int request_count_;
grpc::internal::Mutex clients_mu_;
- std::set<TString> clients_;
+ std::set<TString> clients_;
};
class ServiceConfigEnd2endTest : public ::testing::Test {
@@ -169,8 +169,8 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
grpc_core::Resolver::Result BuildFakeResults(const std::vector<int>& ports) {
grpc_core::Resolver::Result result;
for (const int& port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
+ TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
+ grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
GPR_ASSERT(lb_uri != nullptr);
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
@@ -190,16 +190,16 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
void SetNextResolutionValidServiceConfig(const std::vector<int>& ports) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result = BuildFakeResults(ports);
- result.service_config = grpc_core::ServiceConfig::Create(
- nullptr, "{}", &result.service_config_error);
+ result.service_config = grpc_core::ServiceConfig::Create(
+ nullptr, "{}", &result.service_config_error);
response_generator_->SetResponse(result);
}
void SetNextResolutionInvalidServiceConfig(const std::vector<int>& ports) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result = BuildFakeResults(ports);
- result.service_config = grpc_core::ServiceConfig::Create(
- nullptr, "{", &result.service_config_error);
+ result.service_config = grpc_core::ServiceConfig::Create(
+ nullptr, "{", &result.service_config_error);
response_generator_->SetResponse(result);
}
@@ -207,8 +207,8 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
const char* svc_cfg) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result = BuildFakeResults(ports);
- result.service_config = grpc_core::ServiceConfig::Create(
- nullptr, svc_cfg, &result.service_config_error);
+ result.service_config = grpc_core::ServiceConfig::Create(
+ nullptr, svc_cfg, &result.service_config_error);
response_generator_->SetResponse(result);
}
@@ -245,9 +245,9 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
std::shared_ptr<Channel> BuildChannelWithInvalidDefaultServiceConfig() {
ChannelArguments args;
- EXPECT_THAT(grpc::experimental::ValidateServiceConfigJSON(
- InvalidDefaultServiceConfig()),
- ::testing::HasSubstr("JSON parse error"));
+ EXPECT_THAT(grpc::experimental::ValidateServiceConfigJSON(
+ InvalidDefaultServiceConfig()),
+ ::testing::HasSubstr("JSON parse error"));
args.SetServiceConfigJSON(InvalidDefaultServiceConfig());
args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
response_generator_.get());
@@ -305,7 +305,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
port_ = port > 0 ? port : grpc_pick_unused_port_or_die();
}
- void Start(const TString& server_host) {
+ void Start(const TString& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_);
started_ = true;
grpc::internal::Mutex mu;
@@ -318,7 +318,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
gpr_log(GPR_INFO, "server startup complete");
}
- void Serve(const TString& server_host, grpc::internal::Mutex* mu,
+ void Serve(const TString& server_host, grpc::internal::Mutex* mu,
grpc::internal::CondVar* cond) {
std::ostringstream server_address;
server_address << server_host << ":" << port_;
@@ -340,7 +340,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
started_ = false;
}
- void SetServingStatus(const TString& service, bool serving) {
+ void SetServingStatus(const TString& service, bool serving) {
server_->GetHealthCheckService()->SetServingStatus(service, serving);
}
};
@@ -422,12 +422,12 @@ class ServiceConfigEnd2endTest : public ::testing::Test {
return "{\"version\": \"invalid_default\"";
}
- const TString server_host_;
+ const TString server_host_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::vector<std::unique_ptr<ServerData>> servers_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
response_generator_;
- const TString kRequestMessage_;
+ const TString kRequestMessage_;
std::shared_ptr<ChannelCredentials> creds_;
};
@@ -437,7 +437,7 @@ TEST_F(ServiceConfigEnd2endTest, NoServiceConfigTest) {
auto stub = BuildStub(channel);
SetNextResolutionNoServiceConfig(GetServersPorts());
CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
+ EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
}
TEST_F(ServiceConfigEnd2endTest, NoServiceConfigWithDefaultConfigTest) {
@@ -480,7 +480,7 @@ TEST_F(ServiceConfigEnd2endTest,
EXPECT_STREQ(ValidServiceConfigV1(), channel->GetServiceConfigJSON().c_str());
SetNextResolutionNoServiceConfig(GetServersPorts());
CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
+ EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
}
TEST_F(ServiceConfigEnd2endTest,
@@ -542,7 +542,7 @@ TEST_F(ServiceConfigEnd2endTest, NoServiceConfigAfterInvalidServiceConfigTest) {
CheckRpcSendFailure(stub);
SetNextResolutionNoServiceConfig(GetServersPorts());
CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
+ EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str());
}
TEST_F(ServiceConfigEnd2endTest,
diff --git a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
index 3b7489da07..3aa7a766c4 100644
--- a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
@@ -68,7 +68,7 @@ class ShutdownTest : public ::testing::TestWithParam<string> {
}
std::unique_ptr<Server> SetUpServer(const int port) {
- TString server_address = "localhost:" + to_string(port);
+ TString server_address = "localhost:" + to_string(port);
ServerBuilder builder;
auto server_creds =
@@ -117,7 +117,7 @@ class ShutdownTest : public ::testing::TestWithParam<string> {
};
std::vector<string> GetAllCredentialsTypeList() {
- std::vector<TString> credentials_types;
+ std::vector<TString> credentials_types;
if (GetCredentialsProvider()->GetChannelCredentials(kInsecureCredentialsType,
nullptr) != nullptr) {
credentials_types.push_back(kInsecureCredentialsType);
@@ -128,7 +128,7 @@ std::vector<string> GetAllCredentialsTypeList() {
}
GPR_ASSERT(!credentials_types.empty());
- TString credentials_type_list("credentials types:");
+ TString credentials_type_list("credentials types:");
for (const string& type : credentials_types) {
credentials_type_list.append(" " + type);
}
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
index 06c6981db7..5b212cba31 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
@@ -64,7 +64,7 @@ Status HealthCheckServiceImpl::Watch(
}
void HealthCheckServiceImpl::SetStatus(
- const TString& service_name,
+ const TString& service_name,
HealthCheckResponse::ServingStatus status) {
std::lock_guard<std::mutex> lock(mu_);
if (shutdown_) {
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
index 24f475dfde..d370e4693a 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
+++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
@@ -39,7 +39,7 @@ class HealthCheckServiceImpl : public health::v1::Health::Service {
Status Watch(ServerContext* context,
const health::v1::HealthCheckRequest* request,
ServerWriter<health::v1::HealthCheckResponse>* writer) override;
- void SetStatus(const TString& service_name,
+ void SetStatus(const TString& service_name,
health::v1::HealthCheckResponse::ServingStatus status);
void SetAll(health::v1::HealthCheckResponse::ServingStatus status);
@@ -48,7 +48,7 @@ class HealthCheckServiceImpl : public health::v1::Health::Service {
private:
std::mutex mu_;
bool shutdown_ = false;
- std::map<const TString, health::v1::HealthCheckResponse::ServingStatus>
+ std::map<const TString, health::v1::HealthCheckResponse::ServingStatus>
status_map_;
};
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
index 62b805632c..078977e824 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
@@ -19,12 +19,12 @@
#include "test/cpp/end2end/test_service_impl.h"
#include <grpc/support/log.h>
-#include <grpcpp/alarm.h>
+#include <grpcpp/alarm.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/server_context.h>
#include <gtest/gtest.h>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <thread>
#include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -34,7 +34,7 @@ using std::chrono::system_clock;
namespace grpc {
namespace testing {
-namespace internal {
+namespace internal {
// When echo_deadline is requested, deadline seen in the ServerContext is set in
// the response in seconds.
@@ -49,9 +49,9 @@ void MaybeEchoDeadline(experimental::ServerContextBase* context,
}
}
-void CheckServerAuthContext(const experimental::ServerContextBase* context,
- const TString& expected_transport_security_type,
- const TString& expected_client_identity) {
+void CheckServerAuthContext(const experimental::ServerContextBase* context,
+ const TString& expected_transport_security_type,
+ const TString& expected_client_identity) {
std::shared_ptr<const AuthContext> auth_ctx = context->auth_context();
std::vector<grpc::string_ref> tst =
auth_ctx->FindPropertyValues("transport_security_type");
@@ -65,7 +65,7 @@ void CheckServerAuthContext(const experimental::ServerContextBase* context,
auto identity = auth_ctx->GetPeerIdentity();
EXPECT_TRUE(auth_ctx->IsPeerAuthenticated());
EXPECT_EQ(1u, identity.size());
- EXPECT_EQ(expected_client_identity.c_str(), ToString(identity[0]));
+ EXPECT_EQ(expected_client_identity.c_str(), ToString(identity[0]));
}
}
@@ -73,7 +73,7 @@ void CheckServerAuthContext(const experimental::ServerContextBase* context,
// key-value pair. Returns -1 if the pair wasn't found.
int MetadataMatchCount(
const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- const TString& key, const TString& value) {
+ const TString& key, const TString& value) {
int count = 0;
for (const auto& metadatum : metadata) {
if (ToString(metadatum.first) == key &&
@@ -118,11 +118,11 @@ void ServerTryCancel(ServerContext* context) {
void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) {
EXPECT_FALSE(context->IsCancelled());
context->TryCancel();
- gpr_log(GPR_INFO,
- "Server called TryCancelNonblocking() to cancel the request");
+ gpr_log(GPR_INFO,
+ "Server called TryCancelNonblocking() to cancel the request");
}
-} // namespace internal
+} // namespace internal
experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
experimental::CallbackServerContext* context, const EchoRequest* request,
@@ -135,38 +135,38 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
: service_(service), ctx_(ctx), req_(request), resp_(response) {
// It should be safe to call IsCancelled here, even though we don't know
// the result. Call it asynchronously to see if we trigger any data races.
- // Join it in OnDone (technically that could be blocking but shouldn't be
- // for very long).
+ // Join it in OnDone (technically that could be blocking but shouldn't be
+ // for very long).
async_cancel_check_ = std::thread([this] { (void)ctx_->IsCancelled(); });
- started_ = true;
-
- if (request->has_param() &&
- request->param().server_notify_client_when_started()) {
- service->signaller_.SignalClientThatRpcStarted();
- // Block on the "wait to continue" decision in a different thread since
- // we can't tie up an EM thread with blocking events. We can join it in
- // OnDone since it would definitely be done by then.
- rpc_wait_thread_ = std::thread([this] {
- service_->signaller_.ServerWaitToContinue();
- StartRpc();
- });
- } else {
- StartRpc();
- }
- }
-
- void StartRpc() {
- if (req_->has_param() && req_->param().server_sleep_us() > 0) {
+ started_ = true;
+
+ if (request->has_param() &&
+ request->param().server_notify_client_when_started()) {
+ service->signaller_.SignalClientThatRpcStarted();
+ // Block on the "wait to continue" decision in a different thread since
+ // we can't tie up an EM thread with blocking events. We can join it in
+ // OnDone since it would definitely be done by then.
+ rpc_wait_thread_ = std::thread([this] {
+ service_->signaller_.ServerWaitToContinue();
+ StartRpc();
+ });
+ } else {
+ StartRpc();
+ }
+ }
+
+ void StartRpc() {
+ if (req_->has_param() && req_->param().server_sleep_us() > 0) {
// Set an alarm for that much time
alarm_.experimental().Set(
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(req_->param().server_sleep_us(),
- GPR_TIMESPAN)),
+ gpr_time_from_micros(req_->param().server_sleep_us(),
+ GPR_TIMESPAN)),
[this](bool ok) { NonDelayed(ok); });
- return;
+ return;
}
- NonDelayed(true);
+ NonDelayed(true);
}
void OnSendInitialMetadataDone(bool ok) override {
EXPECT_TRUE(ok);
@@ -176,25 +176,25 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
EXPECT_TRUE(started_);
EXPECT_TRUE(ctx_->IsCancelled());
on_cancel_invoked_ = true;
- std::lock_guard<std::mutex> l(cancel_mu_);
- cancel_cv_.notify_one();
+ std::lock_guard<std::mutex> l(cancel_mu_);
+ cancel_cv_.notify_one();
}
void OnDone() override {
if (req_->has_param() && req_->param().echo_metadata_initially()) {
EXPECT_TRUE(initial_metadata_sent_);
}
EXPECT_EQ(ctx_->IsCancelled(), on_cancel_invoked_);
- // Validate that finishing with a non-OK status doesn't cause cancellation
- if (req_->has_param() && req_->param().has_expected_error()) {
- EXPECT_FALSE(on_cancel_invoked_);
- }
+ // Validate that finishing with a non-OK status doesn't cause cancellation
+ if (req_->has_param() && req_->param().has_expected_error()) {
+ EXPECT_FALSE(on_cancel_invoked_);
+ }
async_cancel_check_.join();
- if (rpc_wait_thread_.joinable()) {
- rpc_wait_thread_.join();
- }
- if (finish_when_cancelled_.joinable()) {
- finish_when_cancelled_.join();
- }
+ if (rpc_wait_thread_.joinable()) {
+ rpc_wait_thread_.join();
+ }
+ if (finish_when_cancelled_.joinable()) {
+ finish_when_cancelled_.join();
+ }
delete this;
}
@@ -215,7 +215,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
error.error_message(), error.binary_error_details()));
return;
}
- int server_try_cancel = internal::GetIntValueFromMetadata(
+ int server_try_cancel = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, ctx_->client_metadata(), DO_NOT_CANCEL);
if (server_try_cancel != DO_NOT_CANCEL) {
// Since this is a unary RPC, by the time this server handler is called,
@@ -225,11 +225,11 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
EXPECT_FALSE(ctx_->IsCancelled());
ctx_->TryCancel();
gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
- FinishWhenCancelledAsync();
+ FinishWhenCancelledAsync();
return;
}
resp_->set_message(req_->message());
- internal::MaybeEchoDeadline(ctx_, req_, resp_);
+ internal::MaybeEchoDeadline(ctx_, req_, resp_);
if (service_->host_) {
resp_->mutable_param()->set_host(*service_->host_);
}
@@ -238,7 +238,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
std::unique_lock<std::mutex> lock(service_->mu_);
service_->signal_client_ = true;
}
- FinishWhenCancelledAsync();
+ FinishWhenCancelledAsync();
return;
} else if (req_->has_param() && req_->param().server_cancel_after_us()) {
alarm_.experimental().Set(
@@ -272,7 +272,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
// Terminate rpc with error and debug info in trailer.
if (req_->param().debug_info().stack_entries_size() ||
!req_->param().debug_info().detail().empty()) {
- TString serialized_debug_info =
+ TString serialized_debug_info =
req_->param().debug_info().SerializeAsString();
ctx_->AddTrailingMetadata(kDebugInfoTrailerKey,
serialized_debug_info);
@@ -283,25 +283,25 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
if (req_->has_param() &&
(req_->param().expected_client_identity().length() > 0 ||
req_->param().check_auth_context())) {
- internal::CheckServerAuthContext(
- ctx_, req_->param().expected_transport_security_type(),
- req_->param().expected_client_identity());
+ internal::CheckServerAuthContext(
+ ctx_, req_->param().expected_transport_security_type(),
+ req_->param().expected_client_identity());
}
if (req_->has_param() && req_->param().response_message_length() > 0) {
resp_->set_message(
- TString(req_->param().response_message_length(), '\0'));
+ TString(req_->param().response_message_length(), '\0'));
}
if (req_->has_param() && req_->param().echo_peer()) {
resp_->mutable_param()->set_peer(ctx_->peer().c_str());
}
Finish(Status::OK);
}
- void FinishWhenCancelledAsync() {
- finish_when_cancelled_ = std::thread([this] {
- std::unique_lock<std::mutex> l(cancel_mu_);
- cancel_cv_.wait(l, [this] { return ctx_->IsCancelled(); });
+ void FinishWhenCancelledAsync() {
+ finish_when_cancelled_ = std::thread([this] {
+ std::unique_lock<std::mutex> l(cancel_mu_);
+ cancel_cv_.wait(l, [this] { return ctx_->IsCancelled(); });
Finish(Status::CANCELLED);
- });
+ });
}
CallbackTestServiceImpl* const service_;
@@ -309,14 +309,14 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
const EchoRequest* const req_;
EchoResponse* const resp_;
Alarm alarm_;
- std::mutex cancel_mu_;
- std::condition_variable cancel_cv_;
- bool initial_metadata_sent_ = false;
- bool started_ = false;
- bool on_cancel_invoked_ = false;
+ std::mutex cancel_mu_;
+ std::condition_variable cancel_cv_;
+ bool initial_metadata_sent_ = false;
+ bool started_ = false;
+ bool on_cancel_invoked_ = false;
std::thread async_cancel_check_;
- std::thread rpc_wait_thread_;
- std::thread finish_when_cancelled_;
+ std::thread rpc_wait_thread_;
+ std::thread finish_when_cancelled_;
};
return new Reactor(this, context, request, response);
@@ -324,14 +324,14 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
experimental::ServerUnaryReactor*
CallbackTestServiceImpl::CheckClientInitialMetadata(
- experimental::CallbackServerContext* context, const SimpleRequest42*,
- SimpleResponse42*) {
+ experimental::CallbackServerContext* context, const SimpleRequest42*,
+ SimpleResponse42*) {
class Reactor : public ::grpc::experimental::ServerUnaryReactor {
public:
explicit Reactor(experimental::CallbackServerContext* ctx) {
- EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(),
- kCheckClientInitialMetadataKey,
- kCheckClientInitialMetadataVal),
+ EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(),
+ kCheckClientInitialMetadataKey,
+ kCheckClientInitialMetadataVal),
1);
EXPECT_EQ(ctx->client_metadata().count(kCheckClientInitialMetadataKey),
1u);
@@ -354,10 +354,10 @@ CallbackTestServiceImpl::RequestStream(
// is cancelled while the server is reading messages from the client
// CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
// all the messages from the client
- int server_try_cancel = internal::GetIntValueFromMetadata(
+ int server_try_cancel = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancelNonblocking(context);
+ internal::ServerTryCancelNonblocking(context);
// Don't need to provide a reactor since the RPC is canceled
return nullptr;
}
@@ -398,7 +398,7 @@ CallbackTestServiceImpl::RequestStream(
return;
}
if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancelNonblocking(ctx_);
+ internal::ServerTryCancelNonblocking(ctx_);
return;
}
FinishOnce(Status::OK);
@@ -440,10 +440,10 @@ CallbackTestServiceImpl::ResponseStream(
// is cancelled while the server is reading messages from the client
// CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
// all the messages from the client
- int server_try_cancel = internal::GetIntValueFromMetadata(
+ int server_try_cancel = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancelNonblocking(context);
+ internal::ServerTryCancelNonblocking(context);
}
class Reactor
@@ -452,9 +452,9 @@ CallbackTestServiceImpl::ResponseStream(
Reactor(experimental::CallbackServerContext* ctx,
const EchoRequest* request, int server_try_cancel)
: ctx_(ctx), request_(request), server_try_cancel_(server_try_cancel) {
- server_coalescing_api_ = internal::GetIntValueFromMetadata(
+ server_coalescing_api_ = internal::GetIntValueFromMetadata(
kServerUseCoalescingApi, ctx->client_metadata(), 0);
- server_responses_to_send_ = internal::GetIntValueFromMetadata(
+ server_responses_to_send_ = internal::GetIntValueFromMetadata(
kServerResponseStreamsToSend, ctx->client_metadata(),
kServerDefaultResponseStreamsToSend);
if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
@@ -481,7 +481,7 @@ CallbackTestServiceImpl::ResponseStream(
} else if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
// Let OnCancel recover this
} else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancelNonblocking(ctx_);
+ internal::ServerTryCancelNonblocking(ctx_);
} else {
FinishOnce(Status::OK);
}
@@ -498,24 +498,24 @@ CallbackTestServiceImpl::ResponseStream(
void NextWrite() {
response_.set_message(request_->message() +
- ::ToString(num_msgs_sent_));
+ ::ToString(num_msgs_sent_));
if (num_msgs_sent_ == server_responses_to_send_ - 1 &&
server_coalescing_api_ != 0) {
- {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- num_msgs_sent_++;
- StartWriteLast(&response_, WriteOptions());
- }
- }
+ {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ num_msgs_sent_++;
+ StartWriteLast(&response_, WriteOptions());
+ }
+ }
// If we use WriteLast, we shouldn't wait before attempting Finish
FinishOnce(Status::OK);
} else {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- num_msgs_sent_++;
- StartWrite(&response_);
- }
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ num_msgs_sent_++;
+ StartWrite(&response_);
+ }
}
}
experimental::CallbackServerContext* const ctx_;
@@ -547,12 +547,12 @@ CallbackTestServiceImpl::BidiStream(
// is cancelled while the server is reading messages from the client
// CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
// all the messages from the client
- server_try_cancel_ = internal::GetIntValueFromMetadata(
+ server_try_cancel_ = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, ctx->client_metadata(), DO_NOT_CANCEL);
- server_write_last_ = internal::GetIntValueFromMetadata(
- kServerFinishAfterNReads, ctx->client_metadata(), 0);
+ server_write_last_ = internal::GetIntValueFromMetadata(
+ kServerFinishAfterNReads, ctx->client_metadata(), 0);
if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancelNonblocking(ctx);
+ internal::ServerTryCancelNonblocking(ctx);
} else {
if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
ctx->TryCancel();
@@ -561,15 +561,15 @@ CallbackTestServiceImpl::BidiStream(
}
setup_done_ = true;
}
- void OnDone() override {
- {
- // Use the same lock as finish to make sure that OnDone isn't inlined.
- std::lock_guard<std::mutex> l(finish_mu_);
- EXPECT_TRUE(finished_);
- finish_thread_.join();
- }
- delete this;
- }
+ void OnDone() override {
+ {
+ // Use the same lock as finish to make sure that OnDone isn't inlined.
+ std::lock_guard<std::mutex> l(finish_mu_);
+ EXPECT_TRUE(finished_);
+ finish_thread_.join();
+ }
+ delete this;
+ }
void OnCancel() override {
EXPECT_TRUE(setup_done_);
EXPECT_TRUE(ctx_->IsCancelled());
@@ -579,22 +579,22 @@ CallbackTestServiceImpl::BidiStream(
if (ok) {
num_msgs_read_++;
response_.set_message(request_.message());
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- if (num_msgs_read_ == server_write_last_) {
- StartWriteLast(&response_, WriteOptions());
- // If we use WriteLast, we shouldn't wait before attempting Finish
- } else {
- StartWrite(&response_);
- return;
- }
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ if (num_msgs_read_ == server_write_last_) {
+ StartWriteLast(&response_, WriteOptions());
+ // If we use WriteLast, we shouldn't wait before attempting Finish
+ } else {
+ StartWrite(&response_);
+ return;
+ }
}
}
if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
// Let OnCancel handle this
} else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancelNonblocking(ctx_);
+ internal::ServerTryCancelNonblocking(ctx_);
} else {
FinishOnce(Status::OK);
}
@@ -611,11 +611,11 @@ CallbackTestServiceImpl::BidiStream(
std::lock_guard<std::mutex> l(finish_mu_);
if (!finished_) {
finished_ = true;
- // Finish asynchronously to make sure that there are no deadlocks.
- finish_thread_ = std::thread([this, s] {
- std::lock_guard<std::mutex> l(finish_mu_);
- Finish(s);
- });
+ // Finish asynchronously to make sure that there are no deadlocks.
+ finish_thread_ = std::thread([this, s] {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ Finish(s);
+ });
}
}
@@ -628,7 +628,7 @@ CallbackTestServiceImpl::BidiStream(
std::mutex finish_mu_;
bool finished_{false};
bool setup_done_{false};
- std::thread finish_thread_;
+ std::thread finish_thread_;
};
return new Reactor(context);
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
index 83ae90fe22..5f207f1979 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
+++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
@@ -15,31 +15,31 @@
* limitations under the License.
*
*/
-
+
#ifndef GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
#define GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
-#include <condition_variable>
+#include <condition_variable>
#include <memory>
#include <mutex>
#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpcpp/alarm.h>
-#include <grpcpp/security/credentials.h>
+#include <grpc/support/log.h>
+#include <grpcpp/alarm.h>
+#include <grpcpp/security/credentials.h>
#include <grpcpp/server_context.h>
-#include <gtest/gtest.h>
+#include <gtest/gtest.h>
+
+#include <util/generic/string.h>
+#include <thread>
-#include <util/generic/string.h>
-#include <thread>
-
#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/cpp/util/string_ref_helper.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+#include <util/string/cast.h>
+
+using std::chrono::system_clock;
-#include <util/string/cast.h>
-
-using std::chrono::system_clock;
-
namespace grpc {
namespace testing {
@@ -59,406 +59,406 @@ typedef enum {
CANCEL_AFTER_PROCESSING
} ServerTryCancelRequestPhase;
-namespace internal {
-// When echo_deadline is requested, deadline seen in the ServerContext is set in
-// the response in seconds.
-void MaybeEchoDeadline(experimental::ServerContextBase* context,
- const EchoRequest* request, EchoResponse* response);
-
-void CheckServerAuthContext(const experimental::ServerContextBase* context,
- const TString& expected_transport_security_type,
- const TString& expected_client_identity);
-
-// Returns the number of pairs in metadata that exactly match the given
-// key-value pair. Returns -1 if the pair wasn't found.
-int MetadataMatchCount(
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- const TString& key, const TString& value);
-
-int GetIntValueFromMetadataHelper(
- const char* key,
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- int default_value);
-
-int GetIntValueFromMetadata(
- const char* key,
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- int default_value);
-
-void ServerTryCancel(ServerContext* context);
-} // namespace internal
-
-class TestServiceSignaller {
- public:
- void ClientWaitUntilRpcStarted() {
- std::unique_lock<std::mutex> lock(mu_);
- cv_rpc_started_.wait(lock, [this] { return rpc_started_; });
- }
- void ServerWaitToContinue() {
- std::unique_lock<std::mutex> lock(mu_);
- cv_server_continue_.wait(lock, [this] { return server_should_continue_; });
- }
- void SignalClientThatRpcStarted() {
- std::unique_lock<std::mutex> lock(mu_);
- rpc_started_ = true;
- cv_rpc_started_.notify_one();
- }
- void SignalServerToContinue() {
- std::unique_lock<std::mutex> lock(mu_);
- server_should_continue_ = true;
- cv_server_continue_.notify_one();
- }
-
- private:
- std::mutex mu_;
- std::condition_variable cv_rpc_started_;
- bool rpc_started_ /* GUARDED_BY(mu_) */ = false;
- std::condition_variable cv_server_continue_;
- bool server_should_continue_ /* GUARDED_BY(mu_) */ = false;
-};
-
-template <typename RpcService>
-class TestMultipleServiceImpl : public RpcService {
+namespace internal {
+// When echo_deadline is requested, deadline seen in the ServerContext is set in
+// the response in seconds.
+void MaybeEchoDeadline(experimental::ServerContextBase* context,
+ const EchoRequest* request, EchoResponse* response);
+
+void CheckServerAuthContext(const experimental::ServerContextBase* context,
+ const TString& expected_transport_security_type,
+ const TString& expected_client_identity);
+
+// Returns the number of pairs in metadata that exactly match the given
+// key-value pair. Returns -1 if the pair wasn't found.
+int MetadataMatchCount(
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ const TString& key, const TString& value);
+
+int GetIntValueFromMetadataHelper(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value);
+
+int GetIntValueFromMetadata(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value);
+
+void ServerTryCancel(ServerContext* context);
+} // namespace internal
+
+class TestServiceSignaller {
public:
- TestMultipleServiceImpl() : signal_client_(false), host_() {}
- explicit TestMultipleServiceImpl(const TString& host)
- : signal_client_(false), host_(new TString(host)) {}
+ void ClientWaitUntilRpcStarted() {
+ std::unique_lock<std::mutex> lock(mu_);
+ cv_rpc_started_.wait(lock, [this] { return rpc_started_; });
+ }
+ void ServerWaitToContinue() {
+ std::unique_lock<std::mutex> lock(mu_);
+ cv_server_continue_.wait(lock, [this] { return server_should_continue_; });
+ }
+ void SignalClientThatRpcStarted() {
+ std::unique_lock<std::mutex> lock(mu_);
+ rpc_started_ = true;
+ cv_rpc_started_.notify_one();
+ }
+ void SignalServerToContinue() {
+ std::unique_lock<std::mutex> lock(mu_);
+ server_should_continue_ = true;
+ cv_server_continue_.notify_one();
+ }
+
+ private:
+ std::mutex mu_;
+ std::condition_variable cv_rpc_started_;
+ bool rpc_started_ /* GUARDED_BY(mu_) */ = false;
+ std::condition_variable cv_server_continue_;
+ bool server_should_continue_ /* GUARDED_BY(mu_) */ = false;
+};
+
+template <typename RpcService>
+class TestMultipleServiceImpl : public RpcService {
+ public:
+ TestMultipleServiceImpl() : signal_client_(false), host_() {}
+ explicit TestMultipleServiceImpl(const TString& host)
+ : signal_client_(false), host_(new TString(host)) {}
Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- if (request->has_param() &&
- request->param().server_notify_client_when_started()) {
- signaller_.SignalClientThatRpcStarted();
- signaller_.ServerWaitToContinue();
- }
-
- // A bit of sleep to make sure that short deadline tests fail
- if (request->has_param() && request->param().server_sleep_us() > 0) {
- gpr_sleep_until(
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(request->param().server_sleep_us(),
- GPR_TIMESPAN)));
- }
-
- if (request->has_param() && request->param().server_die()) {
- gpr_log(GPR_ERROR, "The request should not reach application handler.");
- GPR_ASSERT(0);
- }
- if (request->has_param() && request->param().has_expected_error()) {
- const auto& error = request->param().expected_error();
- return Status(static_cast<StatusCode>(error.code()),
- error.error_message(), error.binary_error_details());
- }
- int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
- if (server_try_cancel > DO_NOT_CANCEL) {
- // Since this is a unary RPC, by the time this server handler is called,
- // the 'request' message is already read from the client. So the scenarios
- // in server_try_cancel don't make much sense. Just cancel the RPC as long
- // as server_try_cancel is not DO_NOT_CANCEL
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- response->set_message(request->message());
- internal::MaybeEchoDeadline(context, request, response);
- if (host_) {
- response->mutable_param()->set_host(*host_);
- }
- if (request->has_param() && request->param().client_cancel_after_us()) {
- {
- std::unique_lock<std::mutex> lock(mu_);
- signal_client_ = true;
- ++rpcs_waiting_for_client_cancel_;
- }
- while (!context->IsCancelled()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().client_cancel_after_us(),
- GPR_TIMESPAN)));
- }
- {
- std::unique_lock<std::mutex> lock(mu_);
- --rpcs_waiting_for_client_cancel_;
- }
- return Status::CANCELLED;
- } else if (request->has_param() &&
- request->param().server_cancel_after_us()) {
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(request->param().server_cancel_after_us(),
- GPR_TIMESPAN)));
- return Status::CANCELLED;
- } else if (!request->has_param() ||
- !request->param().skip_cancelled_check()) {
- EXPECT_FALSE(context->IsCancelled());
- }
-
- if (request->has_param() && request->param().echo_metadata_initially()) {
- const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata =
- context->client_metadata();
- for (const auto& metadatum : client_metadata) {
- context->AddInitialMetadata(::ToString(metadatum.first),
- ::ToString(metadatum.second));
- }
- }
-
- if (request->has_param() && request->param().echo_metadata()) {
- const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata =
- context->client_metadata();
- for (const auto& metadatum : client_metadata) {
- context->AddTrailingMetadata(::ToString(metadatum.first),
- ::ToString(metadatum.second));
- }
- // Terminate rpc with error and debug info in trailer.
- if (request->param().debug_info().stack_entries_size() ||
- !request->param().debug_info().detail().empty()) {
- TString serialized_debug_info =
- request->param().debug_info().SerializeAsString();
- context->AddTrailingMetadata(kDebugInfoTrailerKey,
- serialized_debug_info);
- return Status::CANCELLED;
- }
- }
- if (request->has_param() &&
- (request->param().expected_client_identity().length() > 0 ||
- request->param().check_auth_context())) {
- internal::CheckServerAuthContext(
- context, request->param().expected_transport_security_type(),
- request->param().expected_client_identity());
- }
- if (request->has_param() &&
- request->param().response_message_length() > 0) {
- response->set_message(
- TString(request->param().response_message_length(), '\0'));
- }
- if (request->has_param() && request->param().echo_peer()) {
- response->mutable_param()->set_peer(context->peer());
- }
- return Status::OK;
- }
-
- Status Echo1(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- return Echo(context, request, response);
- }
-
- Status Echo2(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- return Echo(context, request, response);
- }
-
+ EchoResponse* response) {
+ if (request->has_param() &&
+ request->param().server_notify_client_when_started()) {
+ signaller_.SignalClientThatRpcStarted();
+ signaller_.ServerWaitToContinue();
+ }
+
+ // A bit of sleep to make sure that short deadline tests fail
+ if (request->has_param() && request->param().server_sleep_us() > 0) {
+ gpr_sleep_until(
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(request->param().server_sleep_us(),
+ GPR_TIMESPAN)));
+ }
+
+ if (request->has_param() && request->param().server_die()) {
+ gpr_log(GPR_ERROR, "The request should not reach application handler.");
+ GPR_ASSERT(0);
+ }
+ if (request->has_param() && request->param().has_expected_error()) {
+ const auto& error = request->param().expected_error();
+ return Status(static_cast<StatusCode>(error.code()),
+ error.error_message(), error.binary_error_details());
+ }
+ int server_try_cancel = internal::GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+ if (server_try_cancel > DO_NOT_CANCEL) {
+ // Since this is a unary RPC, by the time this server handler is called,
+ // the 'request' message is already read from the client. So the scenarios
+ // in server_try_cancel don't make much sense. Just cancel the RPC as long
+ // as server_try_cancel is not DO_NOT_CANCEL
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ response->set_message(request->message());
+ internal::MaybeEchoDeadline(context, request, response);
+ if (host_) {
+ response->mutable_param()->set_host(*host_);
+ }
+ if (request->has_param() && request->param().client_cancel_after_us()) {
+ {
+ std::unique_lock<std::mutex> lock(mu_);
+ signal_client_ = true;
+ ++rpcs_waiting_for_client_cancel_;
+ }
+ while (!context->IsCancelled()) {
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(request->param().client_cancel_after_us(),
+ GPR_TIMESPAN)));
+ }
+ {
+ std::unique_lock<std::mutex> lock(mu_);
+ --rpcs_waiting_for_client_cancel_;
+ }
+ return Status::CANCELLED;
+ } else if (request->has_param() &&
+ request->param().server_cancel_after_us()) {
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(request->param().server_cancel_after_us(),
+ GPR_TIMESPAN)));
+ return Status::CANCELLED;
+ } else if (!request->has_param() ||
+ !request->param().skip_cancelled_check()) {
+ EXPECT_FALSE(context->IsCancelled());
+ }
+
+ if (request->has_param() && request->param().echo_metadata_initially()) {
+ const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata =
+ context->client_metadata();
+ for (const auto& metadatum : client_metadata) {
+ context->AddInitialMetadata(::ToString(metadatum.first),
+ ::ToString(metadatum.second));
+ }
+ }
+
+ if (request->has_param() && request->param().echo_metadata()) {
+ const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata =
+ context->client_metadata();
+ for (const auto& metadatum : client_metadata) {
+ context->AddTrailingMetadata(::ToString(metadatum.first),
+ ::ToString(metadatum.second));
+ }
+ // Terminate rpc with error and debug info in trailer.
+ if (request->param().debug_info().stack_entries_size() ||
+ !request->param().debug_info().detail().empty()) {
+ TString serialized_debug_info =
+ request->param().debug_info().SerializeAsString();
+ context->AddTrailingMetadata(kDebugInfoTrailerKey,
+ serialized_debug_info);
+ return Status::CANCELLED;
+ }
+ }
+ if (request->has_param() &&
+ (request->param().expected_client_identity().length() > 0 ||
+ request->param().check_auth_context())) {
+ internal::CheckServerAuthContext(
+ context, request->param().expected_transport_security_type(),
+ request->param().expected_client_identity());
+ }
+ if (request->has_param() &&
+ request->param().response_message_length() > 0) {
+ response->set_message(
+ TString(request->param().response_message_length(), '\0'));
+ }
+ if (request->has_param() && request->param().echo_peer()) {
+ response->mutable_param()->set_peer(context->peer());
+ }
+ return Status::OK;
+ }
+
+ Status Echo1(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) {
+ return Echo(context, request, response);
+ }
+
+ Status Echo2(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) {
+ return Echo(context, request, response);
+ }
+
Status CheckClientInitialMetadata(ServerContext* context,
- const SimpleRequest42* /*request*/,
- SimpleResponse42* /*response*/) {
- EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(),
- kCheckClientInitialMetadataKey,
- kCheckClientInitialMetadataVal),
- 1);
- EXPECT_EQ(1u,
- context->client_metadata().count(kCheckClientInitialMetadataKey));
- return Status::OK;
- }
+ const SimpleRequest42* /*request*/,
+ SimpleResponse42* /*response*/) {
+ EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(),
+ kCheckClientInitialMetadataKey,
+ kCheckClientInitialMetadataVal),
+ 1);
+ EXPECT_EQ(1u,
+ context->client_metadata().count(kCheckClientInitialMetadataKey));
+ return Status::OK;
+ }
// Unimplemented is left unimplemented to test the returned error.
Status RequestStream(ServerContext* context,
ServerReader<EchoRequest>* reader,
- EchoResponse* response) {
- // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
- // the server by calling ServerContext::TryCancel() depending on the value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads
- // any message from the client
- // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
- // reading messages from the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
- // all the messages from the client
- int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
-
- EchoRequest request;
- response->set_message("");
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- std::thread* server_try_cancel_thd = nullptr;
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([context] { internal::ServerTryCancel(context); });
- }
-
- int num_msgs_read = 0;
- while (reader->Read(&request)) {
- response->mutable_message()->append(request.message());
- }
- gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read);
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- return Status::CANCELLED;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- return Status::OK;
- }
-
- // Return 'kNumResponseStreamMsgs' messages.
- // TODO(yangg) make it generic by adding a parameter into EchoRequest
+ EchoResponse* response) {
+ // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
+ // the server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads
+ // any message from the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // reading messages from the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
+ // all the messages from the client
+ int server_try_cancel = internal::GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
+ EchoRequest request;
+ response->set_message("");
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ std::thread* server_try_cancel_thd = nullptr;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([context] { internal::ServerTryCancel(context); });
+ }
+
+ int num_msgs_read = 0;
+ while (reader->Read(&request)) {
+ response->mutable_message()->append(request.message());
+ }
+ gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read);
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ return Status::OK;
+ }
+
+ // Return 'kNumResponseStreamMsgs' messages.
+ // TODO(yangg) make it generic by adding a parameter into EchoRequest
Status ResponseStream(ServerContext* context, const EchoRequest* request,
- ServerWriter<EchoResponse>* writer) {
- // If server_try_cancel is set in the metadata, the RPC is cancelled by the
- // server by calling ServerContext::TryCancel() depending on the value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes
- // any messages to the client
- // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
- // writing messages to the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes
- // all the messages to the client
- int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
-
- int server_coalescing_api = internal::GetIntValueFromMetadata(
- kServerUseCoalescingApi, context->client_metadata(), 0);
-
- int server_responses_to_send = internal::GetIntValueFromMetadata(
- kServerResponseStreamsToSend, context->client_metadata(),
- kServerDefaultResponseStreamsToSend);
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- EchoResponse response;
- std::thread* server_try_cancel_thd = nullptr;
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([context] { internal::ServerTryCancel(context); });
- }
-
- for (int i = 0; i < server_responses_to_send; i++) {
- response.set_message(request->message() + ::ToString(i));
- if (i == server_responses_to_send - 1 && server_coalescing_api != 0) {
- writer->WriteLast(response, WriteOptions());
- } else {
- writer->Write(response);
- }
- }
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- return Status::CANCELLED;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- return Status::OK;
- }
-
- Status BidiStream(ServerContext* context,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) {
- // If server_try_cancel is set in the metadata, the RPC is cancelled by the
- // server by calling ServerContext::TryCancel() depending on the value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/
- // writes any messages from/to the client
- // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
- // reading/writing messages from/to the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server
- // reads/writes all messages from/to the client
- int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
-
- EchoRequest request;
- EchoResponse response;
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- std::thread* server_try_cancel_thd = nullptr;
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([context] { internal::ServerTryCancel(context); });
- }
-
- // kServerFinishAfterNReads suggests after how many reads, the server should
- // write the last message and send status (coalesced using WriteLast)
- int server_write_last = internal::GetIntValueFromMetadata(
- kServerFinishAfterNReads, context->client_metadata(), 0);
-
- int read_counts = 0;
- while (stream->Read(&request)) {
- read_counts++;
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- response.set_message(request.message());
- if (read_counts == server_write_last) {
- stream->WriteLast(response, WriteOptions());
- } else {
- stream->Write(response);
- }
- }
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- return Status::CANCELLED;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- internal::ServerTryCancel(context);
- return Status::CANCELLED;
- }
-
- return Status::OK;
- }
-
- // Unimplemented is left unimplemented to test the returned error.
+ ServerWriter<EchoResponse>* writer) {
+ // If server_try_cancel is set in the metadata, the RPC is cancelled by the
+ // server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes
+ // any messages to the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // writing messages to the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes
+ // all the messages to the client
+ int server_try_cancel = internal::GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
+ int server_coalescing_api = internal::GetIntValueFromMetadata(
+ kServerUseCoalescingApi, context->client_metadata(), 0);
+
+ int server_responses_to_send = internal::GetIntValueFromMetadata(
+ kServerResponseStreamsToSend, context->client_metadata(),
+ kServerDefaultResponseStreamsToSend);
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ EchoResponse response;
+ std::thread* server_try_cancel_thd = nullptr;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([context] { internal::ServerTryCancel(context); });
+ }
+
+ for (int i = 0; i < server_responses_to_send; i++) {
+ response.set_message(request->message() + ::ToString(i));
+ if (i == server_responses_to_send - 1 && server_coalescing_api != 0) {
+ writer->WriteLast(response, WriteOptions());
+ } else {
+ writer->Write(response);
+ }
+ }
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ return Status::OK;
+ }
+
+ Status BidiStream(ServerContext* context,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) {
+ // If server_try_cancel is set in the metadata, the RPC is cancelled by the
+ // server by calling ServerContext::TryCancel() depending on the value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/
+ // writes any messages from/to the client
+ // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is
+ // reading/writing messages from/to the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server
+ // reads/writes all messages from/to the client
+ int server_try_cancel = internal::GetIntValueFromMetadata(
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+
+ EchoRequest request;
+ EchoResponse response;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ std::thread* server_try_cancel_thd = nullptr;
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([context] { internal::ServerTryCancel(context); });
+ }
+
+ // kServerFinishAfterNReads suggests after how many reads, the server should
+ // write the last message and send status (coalesced using WriteLast)
+ int server_write_last = internal::GetIntValueFromMetadata(
+ kServerFinishAfterNReads, context->client_metadata(), 0);
+
+ int read_counts = 0;
+ while (stream->Read(&request)) {
+ read_counts++;
+ gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
+ response.set_message(request.message());
+ if (read_counts == server_write_last) {
+ stream->WriteLast(response, WriteOptions());
+ } else {
+ stream->Write(response);
+ }
+ }
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ return Status::CANCELLED;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ internal::ServerTryCancel(context);
+ return Status::CANCELLED;
+ }
+
+ return Status::OK;
+ }
+
+ // Unimplemented is left unimplemented to test the returned error.
bool signal_client() {
std::unique_lock<std::mutex> lock(mu_);
return signal_client_;
}
- void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
- void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
- uint64_t RpcsWaitingForClientCancel() {
- std::unique_lock<std::mutex> lock(mu_);
- return rpcs_waiting_for_client_cancel_;
- }
+ void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
+ void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
+ uint64_t RpcsWaitingForClientCancel() {
+ std::unique_lock<std::mutex> lock(mu_);
+ return rpcs_waiting_for_client_cancel_;
+ }
private:
bool signal_client_;
std::mutex mu_;
- TestServiceSignaller signaller_;
- std::unique_ptr<TString> host_;
- uint64_t rpcs_waiting_for_client_cancel_ = 0;
+ TestServiceSignaller signaller_;
+ std::unique_ptr<TString> host_;
+ uint64_t rpcs_waiting_for_client_cancel_ = 0;
};
class CallbackTestServiceImpl
: public ::grpc::testing::EchoTestService::ExperimentalCallbackService {
public:
CallbackTestServiceImpl() : signal_client_(false), host_() {}
- explicit CallbackTestServiceImpl(const TString& host)
- : signal_client_(false), host_(new TString(host)) {}
+ explicit CallbackTestServiceImpl(const TString& host)
+ : signal_client_(false), host_(new TString(host)) {}
experimental::ServerUnaryReactor* Echo(
experimental::CallbackServerContext* context, const EchoRequest* request,
EchoResponse* response) override;
experimental::ServerUnaryReactor* CheckClientInitialMetadata(
- experimental::CallbackServerContext* context, const SimpleRequest42*,
- SimpleResponse42*) override;
+ experimental::CallbackServerContext* context, const SimpleRequest42*,
+ SimpleResponse42*) override;
experimental::ServerReadReactor<EchoRequest>* RequestStream(
experimental::CallbackServerContext* context,
@@ -476,19 +476,19 @@ class CallbackTestServiceImpl
std::unique_lock<std::mutex> lock(mu_);
return signal_client_;
}
- void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
- void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
+ void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
+ void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
private:
bool signal_client_;
std::mutex mu_;
- TestServiceSignaller signaller_;
- std::unique_ptr<TString> host_;
+ TestServiceSignaller signaller_;
+ std::unique_ptr<TString> host_;
};
-using TestServiceImpl =
- TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>;
-
+using TestServiceImpl =
+ TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>;
+
} // namespace testing
} // namespace grpc
diff --git a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
index d738569b3e..8acb953729 100644
--- a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
@@ -367,7 +367,7 @@ class AsyncClientEnd2endTest : public ::testing::Test {
for (int i = 0; i < num_rpcs; ++i) {
AsyncClientCall* call = new AsyncClientCall;
EchoRequest request;
- request.set_message(TString("Hello: " + grpc::to_string(i)).c_str());
+ request.set_message(TString("Hello: " + grpc::to_string(i)).c_str());
call->response_reader =
common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
call->response_reader->Finish(&call->response, &call->status,
diff --git a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
index 7ab30f80e2..48b9eace12 100644
--- a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
@@ -40,7 +40,7 @@
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
-static TString g_root;
+static TString g_root;
static gpr_mu g_mu;
extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
@@ -167,12 +167,12 @@ class TimeChangeTest : public ::testing::Test {
const int TIME_OFFSET2 = 5678;
private:
- static TString server_address_;
+ static TString server_address_;
static std::unique_ptr<SubProcess> server_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
};
-TString TimeChangeTest::server_address_;
+TString TimeChangeTest::server_address_;
std::unique_ptr<SubProcess> TimeChangeTest::server_;
// Wall-clock time jumps forward on client before bidi stream is created
@@ -347,11 +347,11 @@ TEST_F(TimeChangeTest, TimeJumpForwardAndBackDuringCall) {
} // namespace grpc
int main(int argc, char** argv) {
- TString me = argv[0];
+ TString me = argv[0];
// get index of last slash in path to test binary
auto lslash = me.rfind('/');
// set g_root = path to directory containing test binary
- if (lslash != TString::npos) {
+ if (lslash != TString::npos) {
g_root = me.substr(0, lslash);
} else {
g_root = ".";
diff --git a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
index db5631b845..603e6186bf 100644
--- a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc
@@ -16,22 +16,22 @@
*
*/
-#include <deque>
+#include <deque>
#include <memory>
#include <mutex>
#include <numeric>
#include <set>
#include <sstream>
-#include <util/generic/string.h>
+#include <util/generic/string.h>
#include <thread>
-#include <vector>
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include "y_absl/strings/str_cat.h"
-#include "y_absl/types/optional.h"
-
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "y_absl/strings/str_cat.h"
+#include "y_absl/types/optional.h"
+
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -45,16 +45,16 @@
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
-#include "src/core/ext/xds/xds_api.h"
-#include "src/core/ext/xds/xds_channel_args.h"
-#include "src/core/ext/xds/xds_client.h"
-#include "src/core/lib/channel/channel_args.h"
+#include "src/core/ext/xds/xds_api.h"
+#include "src/core/ext/xds/xds_channel_args.h"
+#include "src/core/ext/xds/xds_client.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
-#include "src/core/lib/iomgr/parse_address.h"
+#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
@@ -66,19 +66,19 @@
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/eds_for_test.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h"
namespace grpc {
namespace testing {
@@ -86,78 +86,78 @@ namespace {
using std::chrono::system_clock;
-using ::envoy::config::cluster::v3::CircuitBreakers;
-using ::envoy::config::cluster::v3::Cluster;
-using ::envoy::config::cluster::v3::RoutingPriority;
-using ::envoy::config::endpoint::v3::ClusterLoadAssignment;
-using ::envoy::config::endpoint::v3::HealthStatus;
-using ::envoy::config::listener::v3::Listener;
-using ::envoy::config::route::v3::RouteConfiguration;
-using ::envoy::extensions::filters::network::http_connection_manager::v3::
- HttpConnectionManager;
-using ::envoy::type::v3::FractionalPercent;
-
-constexpr char kLdsTypeUrl[] =
- "type.googleapis.com/envoy.config.listener.v3.Listener";
-constexpr char kRdsTypeUrl[] =
- "type.googleapis.com/envoy.config.route.v3.RouteConfiguration";
-constexpr char kCdsTypeUrl[] =
- "type.googleapis.com/envoy.config.cluster.v3.Cluster";
-constexpr char kEdsTypeUrl[] =
- "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment";
-
-constexpr char kLdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Listener";
-constexpr char kRdsV2TypeUrl[] =
- "type.googleapis.com/envoy.api.v2.RouteConfiguration";
-constexpr char kCdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster";
-constexpr char kEdsV2TypeUrl[] =
+using ::envoy::config::cluster::v3::CircuitBreakers;
+using ::envoy::config::cluster::v3::Cluster;
+using ::envoy::config::cluster::v3::RoutingPriority;
+using ::envoy::config::endpoint::v3::ClusterLoadAssignment;
+using ::envoy::config::endpoint::v3::HealthStatus;
+using ::envoy::config::listener::v3::Listener;
+using ::envoy::config::route::v3::RouteConfiguration;
+using ::envoy::extensions::filters::network::http_connection_manager::v3::
+ HttpConnectionManager;
+using ::envoy::type::v3::FractionalPercent;
+
+constexpr char kLdsTypeUrl[] =
+ "type.googleapis.com/envoy.config.listener.v3.Listener";
+constexpr char kRdsTypeUrl[] =
+ "type.googleapis.com/envoy.config.route.v3.RouteConfiguration";
+constexpr char kCdsTypeUrl[] =
+ "type.googleapis.com/envoy.config.cluster.v3.Cluster";
+constexpr char kEdsTypeUrl[] =
+ "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment";
+
+constexpr char kLdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Listener";
+constexpr char kRdsV2TypeUrl[] =
+ "type.googleapis.com/envoy.api.v2.RouteConfiguration";
+constexpr char kCdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster";
+constexpr char kEdsV2TypeUrl[] =
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment";
-
+
constexpr char kDefaultLocalityRegion[] = "xds_default_locality_region";
constexpr char kDefaultLocalityZone[] = "xds_default_locality_zone";
constexpr char kLbDropType[] = "lb";
constexpr char kThrottleDropType[] = "throttle";
-constexpr char kServerName[] = "server.example.com";
-constexpr char kDefaultRouteConfigurationName[] = "route_config_name";
-constexpr char kDefaultClusterName[] = "cluster_name";
-constexpr char kDefaultEdsServiceName[] = "eds_service_name";
+constexpr char kServerName[] = "server.example.com";
+constexpr char kDefaultRouteConfigurationName[] = "route_config_name";
+constexpr char kDefaultClusterName[] = "cluster_name";
+constexpr char kDefaultEdsServiceName[] = "eds_service_name";
constexpr int kDefaultLocalityWeight = 3;
constexpr int kDefaultLocalityPriority = 0;
-constexpr char kRequestMessage[] = "Live long and prosper.";
-constexpr char kDefaultServiceConfig[] =
+constexpr char kRequestMessage[] = "Live long and prosper.";
+constexpr char kDefaultServiceConfig[] =
"{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"does_not_exist\":{} },\n"
- " { \"eds_experimental\":{\n"
- " \"clusterName\": \"server.example.com\",\n"
- " \"lrsLoadReportingServerName\": \"\"\n"
- " } }\n"
- " ]\n"
- "}";
-constexpr char kDefaultServiceConfigWithoutLoadReporting[] =
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"does_not_exist\":{} },\n"
- " { \"eds_experimental\":{\n"
- " \"clusterName\": \"server.example.com\"\n"
- " } }\n"
- " ]\n"
- "}";
-
-constexpr char kBootstrapFileV3[] =
- "{\n"
- " \"xds_servers\": [\n"
- " {\n"
- " \"server_uri\": \"fake:///xds_server\",\n"
- " \"channel_creds\": [\n"
- " {\n"
- " \"type\": \"fake\"\n"
- " }\n"
- " ],\n"
- " \"server_features\": [\"xds_v3\"]\n"
- " }\n"
- " ],\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"does_not_exist\":{} },\n"
+ " { \"eds_experimental\":{\n"
+ " \"clusterName\": \"server.example.com\",\n"
+ " \"lrsLoadReportingServerName\": \"\"\n"
+ " } }\n"
+ " ]\n"
+ "}";
+constexpr char kDefaultServiceConfigWithoutLoadReporting[] =
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"does_not_exist\":{} },\n"
+ " { \"eds_experimental\":{\n"
+ " \"clusterName\": \"server.example.com\"\n"
+ " } }\n"
+ " ]\n"
+ "}";
+
+constexpr char kBootstrapFileV3[] =
+ "{\n"
+ " \"xds_servers\": [\n"
+ " {\n"
+ " \"server_uri\": \"fake:///xds_server\",\n"
+ " \"channel_creds\": [\n"
+ " {\n"
+ " \"type\": \"fake\"\n"
+ " }\n"
+ " ],\n"
+ " \"server_features\": [\"xds_v3\"]\n"
+ " }\n"
+ " ],\n"
" \"node\": {\n"
" \"id\": \"xds_end2end_test\",\n"
" \"cluster\": \"test\",\n"
@@ -172,45 +172,45 @@ constexpr char kBootstrapFileV3[] =
" }\n"
"}\n";
-constexpr char kBootstrapFileV2[] =
+constexpr char kBootstrapFileV2[] =
"{\n"
- " \"xds_servers\": [\n"
- " {\n"
- " \"server_uri\": \"fake:///xds_server\",\n"
- " \"channel_creds\": [\n"
- " {\n"
- " \"type\": \"fake\"\n"
- " }\n"
- " ]\n"
- " }\n"
- " ],\n"
+ " \"xds_servers\": [\n"
+ " {\n"
+ " \"server_uri\": \"fake:///xds_server\",\n"
+ " \"channel_creds\": [\n"
+ " {\n"
+ " \"type\": \"fake\"\n"
+ " }\n"
+ " ]\n"
+ " }\n"
+ " ],\n"
" \"node\": {\n"
- " \"id\": \"xds_end2end_test\",\n"
- " \"cluster\": \"test\",\n"
- " \"metadata\": {\n"
- " \"foo\": \"bar\"\n"
- " },\n"
- " \"locality\": {\n"
- " \"region\": \"corp\",\n"
- " \"zone\": \"svl\",\n"
- " \"subzone\": \"mp3\"\n"
- " }\n"
+ " \"id\": \"xds_end2end_test\",\n"
+ " \"cluster\": \"test\",\n"
+ " \"metadata\": {\n"
+ " \"foo\": \"bar\"\n"
+ " },\n"
+ " \"locality\": {\n"
+ " \"region\": \"corp\",\n"
+ " \"zone\": \"svl\",\n"
+ " \"subzone\": \"mp3\"\n"
+ " }\n"
" }\n"
"}\n";
-char* g_bootstrap_file_v3;
-char* g_bootstrap_file_v2;
+char* g_bootstrap_file_v3;
+char* g_bootstrap_file_v2;
void WriteBootstrapFiles() {
char* bootstrap_file;
- FILE* out = gpr_tmpfile("xds_bootstrap_v3", &bootstrap_file);
- fputs(kBootstrapFileV3, out);
+ FILE* out = gpr_tmpfile("xds_bootstrap_v3", &bootstrap_file);
+ fputs(kBootstrapFileV3, out);
fclose(out);
- g_bootstrap_file_v3 = bootstrap_file;
- out = gpr_tmpfile("xds_bootstrap_v2", &bootstrap_file);
- fputs(kBootstrapFileV2, out);
+ g_bootstrap_file_v3 = bootstrap_file;
+ out = gpr_tmpfile("xds_bootstrap_v2", &bootstrap_file);
+ fputs(kBootstrapFileV2, out);
fclose(out);
- g_bootstrap_file_v2 = bootstrap_file;
+ g_bootstrap_file_v2 = bootstrap_file;
}
// Helper class to minimize the number of unique ports we use for this test.
@@ -260,7 +260,7 @@ class CountedService : public ServiceType {
response_count_ = 0;
}
- private:
+ private:
grpc_core::Mutex mu_;
size_t request_count_ = 0;
size_t response_count_ = 0;
@@ -269,9 +269,9 @@ class CountedService : public ServiceType {
const char g_kCallCredsMdKey[] = "Balancer should not ...";
const char g_kCallCredsMdValue[] = "... receive me";
-template <typename RpcService>
-class BackendServiceImpl
- : public CountedService<TestMultipleServiceImpl<RpcService>> {
+template <typename RpcService>
+class BackendServiceImpl
+ : public CountedService<TestMultipleServiceImpl<RpcService>> {
public:
BackendServiceImpl() {}
@@ -284,50 +284,50 @@ class BackendServiceImpl
if (call_credentials_entry != context->client_metadata().end()) {
EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue);
}
- CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount();
- const auto status =
- TestMultipleServiceImpl<RpcService>::Echo(context, request, response);
- CountedService<
- TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount();
+ CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount();
+ const auto status =
+ TestMultipleServiceImpl<RpcService>::Echo(context, request, response);
+ CountedService<
+ TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount();
AddClient(context->peer());
return status;
}
- Status Echo1(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- return Echo(context, request, response);
- }
-
- Status Echo2(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- return Echo(context, request, response);
- }
-
+ Status Echo1(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ return Echo(context, request, response);
+ }
+
+ Status Echo2(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ return Echo(context, request, response);
+ }
+
void Start() {}
void Shutdown() {}
- std::set<TString> clients() {
+ std::set<TString> clients() {
grpc_core::MutexLock lock(&clients_mu_);
return clients_;
}
private:
- void AddClient(const TString& client) {
+ void AddClient(const TString& client) {
grpc_core::MutexLock lock(&clients_mu_);
clients_.insert(client);
}
grpc_core::Mutex clients_mu_;
- std::set<TString> clients_;
+ std::set<TString> clients_;
};
class ClientStats {
public:
struct LocalityStats {
- LocalityStats() {}
-
+ LocalityStats() {}
+
// Converts from proto message class.
- template <class UpstreamLocalityStats>
+ template <class UpstreamLocalityStats>
LocalityStats(const UpstreamLocalityStats& upstream_locality_stats)
: total_successful_requests(
upstream_locality_stats.total_successful_requests()),
@@ -337,27 +337,27 @@ class ClientStats {
total_issued_requests(
upstream_locality_stats.total_issued_requests()) {}
- LocalityStats& operator+=(const LocalityStats& other) {
- total_successful_requests += other.total_successful_requests;
- total_requests_in_progress += other.total_requests_in_progress;
- total_error_requests += other.total_error_requests;
- total_issued_requests += other.total_issued_requests;
- return *this;
- }
-
- uint64_t total_successful_requests = 0;
- uint64_t total_requests_in_progress = 0;
- uint64_t total_error_requests = 0;
- uint64_t total_issued_requests = 0;
+ LocalityStats& operator+=(const LocalityStats& other) {
+ total_successful_requests += other.total_successful_requests;
+ total_requests_in_progress += other.total_requests_in_progress;
+ total_error_requests += other.total_error_requests;
+ total_issued_requests += other.total_issued_requests;
+ return *this;
+ }
+
+ uint64_t total_successful_requests = 0;
+ uint64_t total_requests_in_progress = 0;
+ uint64_t total_error_requests = 0;
+ uint64_t total_issued_requests = 0;
};
- ClientStats() {}
-
+ ClientStats() {}
+
// Converts from proto message class.
- template <class ClusterStats>
- explicit ClientStats(const ClusterStats& cluster_stats)
- : cluster_name_(cluster_stats.cluster_name()),
- total_dropped_requests_(cluster_stats.total_dropped_requests()) {
+ template <class ClusterStats>
+ explicit ClientStats(const ClusterStats& cluster_stats)
+ : cluster_name_(cluster_stats.cluster_name()),
+ total_dropped_requests_(cluster_stats.total_dropped_requests()) {
for (const auto& input_locality_stats :
cluster_stats.upstream_locality_stats()) {
locality_stats_.emplace(input_locality_stats.locality().sub_zone(),
@@ -370,11 +370,11 @@ class ClientStats {
}
}
- const TString& cluster_name() const { return cluster_name_; }
-
- const std::map<TString, LocalityStats>& locality_stats() const {
- return locality_stats_;
- }
+ const TString& cluster_name() const { return cluster_name_; }
+
+ const std::map<TString, LocalityStats>& locality_stats() const {
+ return locality_stats_;
+ }
uint64_t total_successful_requests() const {
uint64_t sum = 0;
for (auto& p : locality_stats_) {
@@ -403,216 +403,216 @@ class ClientStats {
}
return sum;
}
-
+
uint64_t total_dropped_requests() const { return total_dropped_requests_; }
-
- uint64_t dropped_requests(const TString& category) const {
+
+ uint64_t dropped_requests(const TString& category) const {
auto iter = dropped_requests_.find(category);
GPR_ASSERT(iter != dropped_requests_.end());
return iter->second;
}
- ClientStats& operator+=(const ClientStats& other) {
- for (const auto& p : other.locality_stats_) {
- locality_stats_[p.first] += p.second;
- }
- total_dropped_requests_ += other.total_dropped_requests_;
- for (const auto& p : other.dropped_requests_) {
- dropped_requests_[p.first] += p.second;
- }
- return *this;
- }
-
+ ClientStats& operator+=(const ClientStats& other) {
+ for (const auto& p : other.locality_stats_) {
+ locality_stats_[p.first] += p.second;
+ }
+ total_dropped_requests_ += other.total_dropped_requests_;
+ for (const auto& p : other.dropped_requests_) {
+ dropped_requests_[p.first] += p.second;
+ }
+ return *this;
+ }
+
private:
- TString cluster_name_;
- std::map<TString, LocalityStats> locality_stats_;
- uint64_t total_dropped_requests_ = 0;
- std::map<TString, uint64_t> dropped_requests_;
+ TString cluster_name_;
+ std::map<TString, LocalityStats> locality_stats_;
+ uint64_t total_dropped_requests_ = 0;
+ std::map<TString, uint64_t> dropped_requests_;
};
-class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
+class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
public:
- struct ResponseState {
- enum State { NOT_SENT, SENT, ACKED, NACKED };
- State state = NOT_SENT;
- TString error_message;
- };
-
- struct EdsResourceArgs {
+ struct ResponseState {
+ enum State { NOT_SENT, SENT, ACKED, NACKED };
+ State state = NOT_SENT;
+ TString error_message;
+ };
+
+ struct EdsResourceArgs {
struct Locality {
- Locality(const TString& sub_zone, std::vector<int> ports,
+ Locality(const TString& sub_zone, std::vector<int> ports,
int lb_weight = kDefaultLocalityWeight,
int priority = kDefaultLocalityPriority,
- std::vector<HealthStatus> health_statuses = {})
+ std::vector<HealthStatus> health_statuses = {})
: sub_zone(std::move(sub_zone)),
ports(std::move(ports)),
lb_weight(lb_weight),
priority(priority),
health_statuses(std::move(health_statuses)) {}
- const TString sub_zone;
+ const TString sub_zone;
std::vector<int> ports;
int lb_weight;
int priority;
- std::vector<HealthStatus> health_statuses;
+ std::vector<HealthStatus> health_statuses;
};
- EdsResourceArgs() = default;
- explicit EdsResourceArgs(std::vector<Locality> locality_list)
+ EdsResourceArgs() = default;
+ explicit EdsResourceArgs(std::vector<Locality> locality_list)
: locality_list(std::move(locality_list)) {}
std::vector<Locality> locality_list;
- std::map<TString, uint32_t> drop_categories;
+ std::map<TString, uint32_t> drop_categories;
FractionalPercent::DenominatorType drop_denominator =
FractionalPercent::MILLION;
};
- explicit AdsServiceImpl(bool enable_load_reporting)
- : v2_rpc_service_(this, /*is_v2=*/true),
- v3_rpc_service_(this, /*is_v2=*/false) {
- // Construct RDS response data.
- default_route_config_.set_name(kDefaultRouteConfigurationName);
- auto* virtual_host = default_route_config_.add_virtual_hosts();
- virtual_host->add_domains("*");
- auto* route = virtual_host->add_routes();
- route->mutable_match()->set_prefix("");
- route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRdsResource(default_route_config_);
- // Construct LDS response data (with inlined RDS result).
- default_listener_ = BuildListener(default_route_config_);
- SetLdsResource(default_listener_);
- // Construct CDS response data.
- default_cluster_.set_name(kDefaultClusterName);
- default_cluster_.set_type(Cluster::EDS);
- auto* eds_config = default_cluster_.mutable_eds_cluster_config();
- eds_config->mutable_eds_config()->mutable_ads();
- eds_config->set_service_name(kDefaultEdsServiceName);
- default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN);
- if (enable_load_reporting) {
- default_cluster_.mutable_lrs_server()->mutable_self();
- }
- SetCdsResource(default_cluster_);
- }
-
- bool seen_v2_client() const { return seen_v2_client_; }
- bool seen_v3_client() const { return seen_v3_client_; }
-
- ::envoy::service::discovery::v2::AggregatedDiscoveryService::Service*
- v2_rpc_service() {
- return &v2_rpc_service_;
- }
-
- ::envoy::service::discovery::v3::AggregatedDiscoveryService::Service*
- v3_rpc_service() {
- return &v3_rpc_service_;
- }
-
- Listener default_listener() const { return default_listener_; }
- RouteConfiguration default_route_config() const {
- return default_route_config_;
- }
- Cluster default_cluster() const { return default_cluster_; }
-
- ResponseState lds_response_state() {
- grpc_core::MutexLock lock(&ads_mu_);
- return resource_type_response_state_[kLdsTypeUrl];
- }
-
- ResponseState rds_response_state() {
- grpc_core::MutexLock lock(&ads_mu_);
- return resource_type_response_state_[kRdsTypeUrl];
- }
-
- ResponseState cds_response_state() {
+ explicit AdsServiceImpl(bool enable_load_reporting)
+ : v2_rpc_service_(this, /*is_v2=*/true),
+ v3_rpc_service_(this, /*is_v2=*/false) {
+ // Construct RDS response data.
+ default_route_config_.set_name(kDefaultRouteConfigurationName);
+ auto* virtual_host = default_route_config_.add_virtual_hosts();
+ virtual_host->add_domains("*");
+ auto* route = virtual_host->add_routes();
+ route->mutable_match()->set_prefix("");
+ route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRdsResource(default_route_config_);
+ // Construct LDS response data (with inlined RDS result).
+ default_listener_ = BuildListener(default_route_config_);
+ SetLdsResource(default_listener_);
+ // Construct CDS response data.
+ default_cluster_.set_name(kDefaultClusterName);
+ default_cluster_.set_type(Cluster::EDS);
+ auto* eds_config = default_cluster_.mutable_eds_cluster_config();
+ eds_config->mutable_eds_config()->mutable_ads();
+ eds_config->set_service_name(kDefaultEdsServiceName);
+ default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN);
+ if (enable_load_reporting) {
+ default_cluster_.mutable_lrs_server()->mutable_self();
+ }
+ SetCdsResource(default_cluster_);
+ }
+
+ bool seen_v2_client() const { return seen_v2_client_; }
+ bool seen_v3_client() const { return seen_v3_client_; }
+
+ ::envoy::service::discovery::v2::AggregatedDiscoveryService::Service*
+ v2_rpc_service() {
+ return &v2_rpc_service_;
+ }
+
+ ::envoy::service::discovery::v3::AggregatedDiscoveryService::Service*
+ v3_rpc_service() {
+ return &v3_rpc_service_;
+ }
+
+ Listener default_listener() const { return default_listener_; }
+ RouteConfiguration default_route_config() const {
+ return default_route_config_;
+ }
+ Cluster default_cluster() const { return default_cluster_; }
+
+ ResponseState lds_response_state() {
+ grpc_core::MutexLock lock(&ads_mu_);
+ return resource_type_response_state_[kLdsTypeUrl];
+ }
+
+ ResponseState rds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
- return resource_type_response_state_[kCdsTypeUrl];
- }
-
- ResponseState eds_response_state() {
- grpc_core::MutexLock lock(&ads_mu_);
- return resource_type_response_state_[kEdsTypeUrl];
- }
-
- void SetResourceIgnore(const TString& type_url) {
- grpc_core::MutexLock lock(&ads_mu_);
- resource_types_to_ignore_.emplace(type_url);
- }
-
- void UnsetResource(const TString& type_url, const TString& name) {
- grpc_core::MutexLock lock(&ads_mu_);
- ResourceState& state = resource_map_[type_url][name];
- ++state.version;
- state.resource.reset();
- gpr_log(GPR_INFO, "ADS[%p]: Unsetting %s resource %s to version %u", this,
- type_url.c_str(), name.c_str(), state.version);
- for (SubscriptionState* subscription : state.subscriptions) {
- subscription->update_queue->emplace_back(type_url, name);
- }
- }
-
- void SetResource(google::protobuf::Any resource, const TString& type_url,
- const TString& name) {
- grpc_core::MutexLock lock(&ads_mu_);
- ResourceState& state = resource_map_[type_url][name];
- ++state.version;
- state.resource = std::move(resource);
- gpr_log(GPR_INFO, "ADS[%p]: Updating %s resource %s to version %u", this,
- type_url.c_str(), name.c_str(), state.version);
- for (SubscriptionState* subscription : state.subscriptions) {
- subscription->update_queue->emplace_back(type_url, name);
- }
- }
-
- void SetLdsResource(const Listener& listener) {
- google::protobuf::Any resource;
- resource.PackFrom(listener);
- SetResource(std::move(resource), kLdsTypeUrl, listener.name());
- }
-
- void SetRdsResource(const RouteConfiguration& route) {
- google::protobuf::Any resource;
- resource.PackFrom(route);
- SetResource(std::move(resource), kRdsTypeUrl, route.name());
- }
-
- void SetCdsResource(const Cluster& cluster) {
- google::protobuf::Any resource;
- resource.PackFrom(cluster);
- SetResource(std::move(resource), kCdsTypeUrl, cluster.name());
- }
-
- void SetEdsResource(const ClusterLoadAssignment& assignment) {
- google::protobuf::Any resource;
- resource.PackFrom(assignment);
- SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name());
- }
-
- void SetLdsToUseDynamicRds() {
- auto listener = default_listener_;
- HttpConnectionManager http_connection_manager;
- auto* rds = http_connection_manager.mutable_rds();
- rds->set_route_config_name(kDefaultRouteConfigurationName);
- rds->mutable_config_source()->mutable_ads();
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- SetLdsResource(listener);
- }
-
- static Listener BuildListener(const RouteConfiguration& route_config) {
- HttpConnectionManager http_connection_manager;
- *(http_connection_manager.mutable_route_config()) = route_config;
- Listener listener;
- listener.set_name(kServerName);
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- return listener;
- }
-
- static ClusterLoadAssignment BuildEdsResource(
- const EdsResourceArgs& args,
- const char* eds_service_name = kDefaultEdsServiceName) {
+ return resource_type_response_state_[kRdsTypeUrl];
+ }
+
+ ResponseState cds_response_state() {
+ grpc_core::MutexLock lock(&ads_mu_);
+ return resource_type_response_state_[kCdsTypeUrl];
+ }
+
+ ResponseState eds_response_state() {
+ grpc_core::MutexLock lock(&ads_mu_);
+ return resource_type_response_state_[kEdsTypeUrl];
+ }
+
+ void SetResourceIgnore(const TString& type_url) {
+ grpc_core::MutexLock lock(&ads_mu_);
+ resource_types_to_ignore_.emplace(type_url);
+ }
+
+ void UnsetResource(const TString& type_url, const TString& name) {
+ grpc_core::MutexLock lock(&ads_mu_);
+ ResourceState& state = resource_map_[type_url][name];
+ ++state.version;
+ state.resource.reset();
+ gpr_log(GPR_INFO, "ADS[%p]: Unsetting %s resource %s to version %u", this,
+ type_url.c_str(), name.c_str(), state.version);
+ for (SubscriptionState* subscription : state.subscriptions) {
+ subscription->update_queue->emplace_back(type_url, name);
+ }
+ }
+
+ void SetResource(google::protobuf::Any resource, const TString& type_url,
+ const TString& name) {
+ grpc_core::MutexLock lock(&ads_mu_);
+ ResourceState& state = resource_map_[type_url][name];
+ ++state.version;
+ state.resource = std::move(resource);
+ gpr_log(GPR_INFO, "ADS[%p]: Updating %s resource %s to version %u", this,
+ type_url.c_str(), name.c_str(), state.version);
+ for (SubscriptionState* subscription : state.subscriptions) {
+ subscription->update_queue->emplace_back(type_url, name);
+ }
+ }
+
+ void SetLdsResource(const Listener& listener) {
+ google::protobuf::Any resource;
+ resource.PackFrom(listener);
+ SetResource(std::move(resource), kLdsTypeUrl, listener.name());
+ }
+
+ void SetRdsResource(const RouteConfiguration& route) {
+ google::protobuf::Any resource;
+ resource.PackFrom(route);
+ SetResource(std::move(resource), kRdsTypeUrl, route.name());
+ }
+
+ void SetCdsResource(const Cluster& cluster) {
+ google::protobuf::Any resource;
+ resource.PackFrom(cluster);
+ SetResource(std::move(resource), kCdsTypeUrl, cluster.name());
+ }
+
+ void SetEdsResource(const ClusterLoadAssignment& assignment) {
+ google::protobuf::Any resource;
+ resource.PackFrom(assignment);
+ SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name());
+ }
+
+ void SetLdsToUseDynamicRds() {
+ auto listener = default_listener_;
+ HttpConnectionManager http_connection_manager;
+ auto* rds = http_connection_manager.mutable_rds();
+ rds->set_route_config_name(kDefaultRouteConfigurationName);
+ rds->mutable_config_source()->mutable_ads();
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ SetLdsResource(listener);
+ }
+
+ static Listener BuildListener(const RouteConfiguration& route_config) {
+ HttpConnectionManager http_connection_manager;
+ *(http_connection_manager.mutable_route_config()) = route_config;
+ Listener listener;
+ listener.set_name(kServerName);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ return listener;
+ }
+
+ static ClusterLoadAssignment BuildEdsResource(
+ const EdsResourceArgs& args,
+ const char* eds_service_name = kDefaultEdsServiceName) {
ClusterLoadAssignment assignment;
- assignment.set_cluster_name(eds_service_name);
+ assignment.set_cluster_name(eds_service_name);
for (const auto& locality : args.locality_list) {
auto* endpoints = assignment.add_endpoints();
endpoints->mutable_load_balancing_weight()->set_value(locality.lb_weight);
@@ -624,7 +624,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
const int& port = locality.ports[i];
auto* lb_endpoints = endpoints->add_lb_endpoints();
if (locality.health_statuses.size() > i &&
- locality.health_statuses[i] != HealthStatus::UNKNOWN) {
+ locality.health_statuses[i] != HealthStatus::UNKNOWN) {
lb_endpoints->set_health_status(locality.health_statuses[i]);
}
auto* endpoint = lb_endpoints->mutable_endpoint();
@@ -637,7 +637,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
if (!args.drop_categories.empty()) {
auto* policy = assignment.mutable_policy();
for (const auto& p : args.drop_categories) {
- const TString& name = p.first;
+ const TString& name = p.first;
const uint32_t parts_per_million = p.second;
auto* drop_overload = policy->add_drop_overloads();
drop_overload->set_category(name);
@@ -646,23 +646,23 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
drop_percentage->set_denominator(args.drop_denominator);
}
}
- return assignment;
- }
-
- void Start() {
- grpc_core::MutexLock lock(&ads_mu_);
- ads_done_ = false;
- }
-
- void Shutdown() {
- {
- grpc_core::MutexLock lock(&ads_mu_);
- NotifyDoneWithAdsCallLocked();
- resource_type_response_state_.clear();
- }
- gpr_log(GPR_INFO, "ADS[%p]: shut down", this);
- }
-
+ return assignment;
+ }
+
+ void Start() {
+ grpc_core::MutexLock lock(&ads_mu_);
+ ads_done_ = false;
+ }
+
+ void Shutdown() {
+ {
+ grpc_core::MutexLock lock(&ads_mu_);
+ NotifyDoneWithAdsCallLocked();
+ resource_type_response_state_.clear();
+ }
+ gpr_log(GPR_INFO, "ADS[%p]: shut down", this);
+ }
+
void NotifyDoneWithAdsCall() {
grpc_core::MutexLock lock(&ads_mu_);
NotifyDoneWithAdsCallLocked();
@@ -675,509 +675,509 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
}
}
- std::set<TString> clients() {
- grpc_core::MutexLock lock(&clients_mu_);
- return clients_;
- }
-
+ std::set<TString> clients() {
+ grpc_core::MutexLock lock(&clients_mu_);
+ return clients_;
+ }
+
private:
- // A queue of resource type/name pairs that have changed since the client
- // subscribed to them.
- using UpdateQueue = std::deque<
- std::pair<TString /* type url */, TString /* resource name */>>;
-
- // A struct representing a client's subscription to a particular resource.
- struct SubscriptionState {
- // Version that the client currently knows about.
- int current_version = 0;
- // The queue upon which to place updates when the resource is updated.
- UpdateQueue* update_queue;
- };
-
- // A struct representing the a client's subscription to all the resources.
- using SubscriptionNameMap =
- std::map<TString /* resource_name */, SubscriptionState>;
- using SubscriptionMap =
- std::map<TString /* type_url */, SubscriptionNameMap>;
-
- // A struct representing the current state for a resource:
- // - the version of the resource that is set by the SetResource() methods.
- // - a list of subscriptions interested in this resource.
- struct ResourceState {
- int version = 0;
- y_absl::optional<google::protobuf::Any> resource;
- std::set<SubscriptionState*> subscriptions;
- };
-
- // A struct representing the current state for all resources:
- // LDS, CDS, EDS, and RDS for the class as a whole.
- using ResourceNameMap =
- std::map<TString /* resource_name */, ResourceState>;
- using ResourceMap = std::map<TString /* type_url */, ResourceNameMap>;
-
- template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse>
- class RpcService : public RpcApi::Service {
- public:
- using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>;
-
- RpcService(AdsServiceImpl* parent, bool is_v2)
- : parent_(parent), is_v2_(is_v2) {}
-
- Status StreamAggregatedResources(ServerContext* context,
- Stream* stream) override {
- gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this);
- parent_->AddClient(context->peer());
- if (is_v2_) {
- parent_->seen_v2_client_ = true;
- } else {
- parent_->seen_v3_client_ = true;
- }
- // Resources (type/name pairs) that have changed since the client
- // subscribed to them.
- UpdateQueue update_queue;
- // Resources that the client will be subscribed to keyed by resource type
- // url.
- SubscriptionMap subscription_map;
- [&]() {
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (parent_->ads_done_) return;
- }
- // Balancer shouldn't receive the call credentials metadata.
- EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
- context->client_metadata().end());
- // Current Version map keyed by resource type url.
- std::map<TString, int> resource_type_version;
- // Creating blocking thread to read from stream.
- std::deque<DiscoveryRequest> requests;
- bool stream_closed = false;
- // Take a reference of the AdsServiceImpl object, reference will go
- // out of scope after the reader thread is joined.
- std::shared_ptr<AdsServiceImpl> ads_service_impl =
- parent_->shared_from_this();
- std::thread reader(std::bind(&RpcService::BlockingRead, this, stream,
- &requests, &stream_closed));
- // Main loop to look for requests and updates.
- while (true) {
- // Look for new requests and and decide what to handle.
- y_absl::optional<DiscoveryResponse> response;
- // Boolean to keep track if the loop received any work to do: a
- // request or an update; regardless whether a response was actually
- // sent out.
- bool did_work = false;
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (stream_closed) break;
- if (!requests.empty()) {
- DiscoveryRequest request = std::move(requests.front());
- requests.pop_front();
- did_work = true;
- gpr_log(GPR_INFO,
- "ADS[%p]: Received request for type %s with content %s",
- this, request.type_url().c_str(),
- request.DebugString().c_str());
- const TString v3_resource_type =
- TypeUrlToV3(request.type_url());
- // As long as we are not in shutdown, identify ACK and NACK by
- // looking for version information and comparing it to nonce (this
- // server ensures they are always set to the same in a response.)
- auto it =
- parent_->resource_type_response_state_.find(v3_resource_type);
- if (it != parent_->resource_type_response_state_.end()) {
- if (!request.response_nonce().empty()) {
- it->second.state =
- (!request.version_info().empty() &&
- request.version_info() == request.response_nonce())
- ? ResponseState::ACKED
- : ResponseState::NACKED;
- }
- if (request.has_error_detail()) {
- it->second.error_message = request.error_detail().message();
- }
- }
- // As long as the test did not tell us to ignore this type of
- // request, look at all the resource names.
- if (parent_->resource_types_to_ignore_.find(v3_resource_type) ==
- parent_->resource_types_to_ignore_.end()) {
- auto& subscription_name_map =
- subscription_map[v3_resource_type];
- auto& resource_name_map =
- parent_->resource_map_[v3_resource_type];
- std::set<TString> resources_in_current_request;
- std::set<TString> resources_added_to_response;
- for (const TString& resource_name :
- request.resource_names()) {
- resources_in_current_request.emplace(resource_name);
- auto& subscription_state =
- subscription_name_map[resource_name];
- auto& resource_state = resource_name_map[resource_name];
- // Subscribe if needed.
- parent_->MaybeSubscribe(v3_resource_type, resource_name,
- &subscription_state, &resource_state,
- &update_queue);
- // Send update if needed.
- if (ClientNeedsResourceUpdate(resource_state,
- &subscription_state)) {
- gpr_log(GPR_INFO,
- "ADS[%p]: Sending update for type=%s name=%s "
- "version=%d",
- this, request.type_url().c_str(),
- resource_name.c_str(), resource_state.version);
- resources_added_to_response.emplace(resource_name);
- if (!response.has_value()) response.emplace();
- if (resource_state.resource.has_value()) {
- auto* resource = response->add_resources();
- resource->CopyFrom(resource_state.resource.value());
- if (is_v2_) {
- resource->set_type_url(request.type_url());
- }
- }
- } else {
- gpr_log(GPR_INFO,
- "ADS[%p]: client does not need update for "
- "type=%s name=%s version=%d",
- this, request.type_url().c_str(),
- resource_name.c_str(), resource_state.version);
- }
- }
- // Process unsubscriptions for any resource no longer
- // present in the request's resource list.
- parent_->ProcessUnsubscriptions(
- v3_resource_type, resources_in_current_request,
- &subscription_name_map, &resource_name_map);
- // Send response if needed.
- if (!resources_added_to_response.empty()) {
- CompleteBuildingDiscoveryResponse(
- v3_resource_type, request.type_url(),
- ++resource_type_version[v3_resource_type],
- subscription_name_map, resources_added_to_response,
- &response.value());
- }
- }
- }
- }
- if (response.has_value()) {
- gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
- response->DebugString().c_str());
- stream->Write(response.value());
- }
- response.reset();
- // Look for updates and decide what to handle.
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (!update_queue.empty()) {
- const TString resource_type =
- std::move(update_queue.front().first);
- const TString resource_name =
- std::move(update_queue.front().second);
- update_queue.pop_front();
- const TString v2_resource_type = TypeUrlToV2(resource_type);
- did_work = true;
- gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s",
- this, resource_type.c_str(), resource_name.c_str());
- auto& subscription_name_map = subscription_map[resource_type];
- auto& resource_name_map = parent_->resource_map_[resource_type];
- auto it = subscription_name_map.find(resource_name);
- if (it != subscription_name_map.end()) {
- SubscriptionState& subscription_state = it->second;
- ResourceState& resource_state =
- resource_name_map[resource_name];
- if (ClientNeedsResourceUpdate(resource_state,
- &subscription_state)) {
- gpr_log(
- GPR_INFO,
- "ADS[%p]: Sending update for type=%s name=%s version=%d",
- this, resource_type.c_str(), resource_name.c_str(),
- resource_state.version);
- response.emplace();
- if (resource_state.resource.has_value()) {
- auto* resource = response->add_resources();
- resource->CopyFrom(resource_state.resource.value());
- if (is_v2_) {
- resource->set_type_url(v2_resource_type);
- }
- }
- CompleteBuildingDiscoveryResponse(
- resource_type, v2_resource_type,
- ++resource_type_version[resource_type],
- subscription_name_map, {resource_name},
- &response.value());
- }
- }
- }
- }
- if (response.has_value()) {
- gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
- response->DebugString().c_str());
- stream->Write(response.value());
- }
- // If we didn't find anything to do, delay before the next loop
- // iteration; otherwise, check whether we should exit and then
- // immediately continue.
- gpr_timespec deadline =
- grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10);
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- if (!parent_->ads_cond_.WaitUntil(
- &parent_->ads_mu_, [this] { return parent_->ads_done_; },
- deadline)) {
- break;
- }
- }
- }
- reader.join();
- }();
- // Clean up any subscriptions that were still active when the call
- // finished.
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- for (auto& p : subscription_map) {
- const TString& type_url = p.first;
- SubscriptionNameMap& subscription_name_map = p.second;
- for (auto& q : subscription_name_map) {
- const TString& resource_name = q.first;
- SubscriptionState& subscription_state = q.second;
- ResourceState& resource_state =
- parent_->resource_map_[type_url][resource_name];
- resource_state.subscriptions.erase(&subscription_state);
- }
- }
- }
- gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this);
- parent_->RemoveClient(context->peer());
- return Status::OK;
- }
-
- private:
- static TString TypeUrlToV2(const TString& resource_type) {
- if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl;
- if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl;
- if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl;
- if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl;
- return resource_type;
- }
-
- static TString TypeUrlToV3(const TString& resource_type) {
- if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl;
- if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl;
- if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl;
- if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl;
- return resource_type;
- }
-
- // Starting a thread to do blocking read on the stream until cancel.
- void BlockingRead(Stream* stream, std::deque<DiscoveryRequest>* requests,
- bool* stream_closed) {
- DiscoveryRequest request;
- bool seen_first_request = false;
- while (stream->Read(&request)) {
- if (!seen_first_request) {
- EXPECT_TRUE(request.has_node());
- ASSERT_FALSE(request.node().client_features().empty());
- EXPECT_EQ(request.node().client_features(0),
- "envoy.lb.does_not_support_overprovisioning");
- CheckBuildVersion(request);
- seen_first_request = true;
- }
- {
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- requests->emplace_back(std::move(request));
- }
- }
- gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this);
- grpc_core::MutexLock lock(&parent_->ads_mu_);
- *stream_closed = true;
- }
-
- static void CheckBuildVersion(
- const ::envoy::api::v2::DiscoveryRequest& request) {
- EXPECT_FALSE(request.node().build_version().empty());
- }
-
- static void CheckBuildVersion(
- const ::envoy::service::discovery::v3::DiscoveryRequest& request) {}
-
- // Completing the building a DiscoveryResponse by adding common information
- // for all resources and by adding all subscribed resources for LDS and CDS.
- void CompleteBuildingDiscoveryResponse(
- const TString& resource_type, const TString& v2_resource_type,
- const int version, const SubscriptionNameMap& subscription_name_map,
- const std::set<TString>& resources_added_to_response,
- DiscoveryResponse* response) {
- auto& response_state =
- parent_->resource_type_response_state_[resource_type];
- if (response_state.state == ResponseState::NOT_SENT) {
- response_state.state = ResponseState::SENT;
- }
- response->set_type_url(is_v2_ ? v2_resource_type : resource_type);
- response->set_version_info(y_absl::StrCat(version));
- response->set_nonce(y_absl::StrCat(version));
- if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) {
- // For LDS and CDS we must send back all subscribed resources
- // (even the unchanged ones)
- for (const auto& p : subscription_name_map) {
- const TString& resource_name = p.first;
- if (resources_added_to_response.find(resource_name) ==
- resources_added_to_response.end()) {
- const ResourceState& resource_state =
- parent_->resource_map_[resource_type][resource_name];
- if (resource_state.resource.has_value()) {
- auto* resource = response->add_resources();
- resource->CopyFrom(resource_state.resource.value());
- if (is_v2_) {
- resource->set_type_url(v2_resource_type);
- }
- }
- }
- }
- }
- }
-
- AdsServiceImpl* parent_;
- const bool is_v2_;
- };
-
- // Checks whether the client needs to receive a newer version of
- // the resource. If so, updates subscription_state->current_version and
- // returns true.
- static bool ClientNeedsResourceUpdate(const ResourceState& resource_state,
- SubscriptionState* subscription_state) {
- if (subscription_state->current_version < resource_state.version) {
- subscription_state->current_version = resource_state.version;
- return true;
- }
- return false;
- }
-
- // Subscribes to a resource if not already subscribed:
- // 1. Sets the update_queue field in subscription_state.
- // 2. Adds subscription_state to resource_state->subscriptions.
- void MaybeSubscribe(const TString& resource_type,
- const TString& resource_name,
- SubscriptionState* subscription_state,
- ResourceState* resource_state,
- UpdateQueue* update_queue) {
- // The update_queue will be null if we were not previously subscribed.
- if (subscription_state->update_queue != nullptr) return;
- subscription_state->update_queue = update_queue;
- resource_state->subscriptions.emplace(subscription_state);
- gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p",
- this, resource_type.c_str(), resource_name.c_str(),
- &subscription_state);
- }
-
- // Removes subscriptions for resources no longer present in the
- // current request.
- void ProcessUnsubscriptions(
- const TString& resource_type,
- const std::set<TString>& resources_in_current_request,
- SubscriptionNameMap* subscription_name_map,
- ResourceNameMap* resource_name_map) {
- for (auto it = subscription_name_map->begin();
- it != subscription_name_map->end();) {
- const TString& resource_name = it->first;
- SubscriptionState& subscription_state = it->second;
- if (resources_in_current_request.find(resource_name) !=
- resources_in_current_request.end()) {
- ++it;
- continue;
- }
- gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p",
- this, resource_type.c_str(), resource_name.c_str(),
- &subscription_state);
- auto resource_it = resource_name_map->find(resource_name);
- GPR_ASSERT(resource_it != resource_name_map->end());
- auto& resource_state = resource_it->second;
- resource_state.subscriptions.erase(&subscription_state);
- if (resource_state.subscriptions.empty() &&
- !resource_state.resource.has_value()) {
- resource_name_map->erase(resource_it);
- }
- it = subscription_name_map->erase(it);
- }
- }
-
- void AddClient(const TString& client) {
- grpc_core::MutexLock lock(&clients_mu_);
- clients_.insert(client);
- }
-
- void RemoveClient(const TString& client) {
- grpc_core::MutexLock lock(&clients_mu_);
- clients_.erase(client);
- }
-
- RpcService<::envoy::service::discovery::v2::AggregatedDiscoveryService,
- ::envoy::api::v2::DiscoveryRequest,
- ::envoy::api::v2::DiscoveryResponse>
- v2_rpc_service_;
- RpcService<::envoy::service::discovery::v3::AggregatedDiscoveryService,
- ::envoy::service::discovery::v3::DiscoveryRequest,
- ::envoy::service::discovery::v3::DiscoveryResponse>
- v3_rpc_service_;
-
- std::atomic_bool seen_v2_client_{false};
- std::atomic_bool seen_v3_client_{false};
-
+ // A queue of resource type/name pairs that have changed since the client
+ // subscribed to them.
+ using UpdateQueue = std::deque<
+ std::pair<TString /* type url */, TString /* resource name */>>;
+
+ // A struct representing a client's subscription to a particular resource.
+ struct SubscriptionState {
+ // Version that the client currently knows about.
+ int current_version = 0;
+ // The queue upon which to place updates when the resource is updated.
+ UpdateQueue* update_queue;
+ };
+
+ // A struct representing the a client's subscription to all the resources.
+ using SubscriptionNameMap =
+ std::map<TString /* resource_name */, SubscriptionState>;
+ using SubscriptionMap =
+ std::map<TString /* type_url */, SubscriptionNameMap>;
+
+ // A struct representing the current state for a resource:
+ // - the version of the resource that is set by the SetResource() methods.
+ // - a list of subscriptions interested in this resource.
+ struct ResourceState {
+ int version = 0;
+ y_absl::optional<google::protobuf::Any> resource;
+ std::set<SubscriptionState*> subscriptions;
+ };
+
+ // A struct representing the current state for all resources:
+ // LDS, CDS, EDS, and RDS for the class as a whole.
+ using ResourceNameMap =
+ std::map<TString /* resource_name */, ResourceState>;
+ using ResourceMap = std::map<TString /* type_url */, ResourceNameMap>;
+
+ template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse>
+ class RpcService : public RpcApi::Service {
+ public:
+ using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>;
+
+ RpcService(AdsServiceImpl* parent, bool is_v2)
+ : parent_(parent), is_v2_(is_v2) {}
+
+ Status StreamAggregatedResources(ServerContext* context,
+ Stream* stream) override {
+ gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this);
+ parent_->AddClient(context->peer());
+ if (is_v2_) {
+ parent_->seen_v2_client_ = true;
+ } else {
+ parent_->seen_v3_client_ = true;
+ }
+ // Resources (type/name pairs) that have changed since the client
+ // subscribed to them.
+ UpdateQueue update_queue;
+ // Resources that the client will be subscribed to keyed by resource type
+ // url.
+ SubscriptionMap subscription_map;
+ [&]() {
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (parent_->ads_done_) return;
+ }
+ // Balancer shouldn't receive the call credentials metadata.
+ EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
+ context->client_metadata().end());
+ // Current Version map keyed by resource type url.
+ std::map<TString, int> resource_type_version;
+ // Creating blocking thread to read from stream.
+ std::deque<DiscoveryRequest> requests;
+ bool stream_closed = false;
+ // Take a reference of the AdsServiceImpl object, reference will go
+ // out of scope after the reader thread is joined.
+ std::shared_ptr<AdsServiceImpl> ads_service_impl =
+ parent_->shared_from_this();
+ std::thread reader(std::bind(&RpcService::BlockingRead, this, stream,
+ &requests, &stream_closed));
+ // Main loop to look for requests and updates.
+ while (true) {
+ // Look for new requests and and decide what to handle.
+ y_absl::optional<DiscoveryResponse> response;
+ // Boolean to keep track if the loop received any work to do: a
+ // request or an update; regardless whether a response was actually
+ // sent out.
+ bool did_work = false;
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (stream_closed) break;
+ if (!requests.empty()) {
+ DiscoveryRequest request = std::move(requests.front());
+ requests.pop_front();
+ did_work = true;
+ gpr_log(GPR_INFO,
+ "ADS[%p]: Received request for type %s with content %s",
+ this, request.type_url().c_str(),
+ request.DebugString().c_str());
+ const TString v3_resource_type =
+ TypeUrlToV3(request.type_url());
+ // As long as we are not in shutdown, identify ACK and NACK by
+ // looking for version information and comparing it to nonce (this
+ // server ensures they are always set to the same in a response.)
+ auto it =
+ parent_->resource_type_response_state_.find(v3_resource_type);
+ if (it != parent_->resource_type_response_state_.end()) {
+ if (!request.response_nonce().empty()) {
+ it->second.state =
+ (!request.version_info().empty() &&
+ request.version_info() == request.response_nonce())
+ ? ResponseState::ACKED
+ : ResponseState::NACKED;
+ }
+ if (request.has_error_detail()) {
+ it->second.error_message = request.error_detail().message();
+ }
+ }
+ // As long as the test did not tell us to ignore this type of
+ // request, look at all the resource names.
+ if (parent_->resource_types_to_ignore_.find(v3_resource_type) ==
+ parent_->resource_types_to_ignore_.end()) {
+ auto& subscription_name_map =
+ subscription_map[v3_resource_type];
+ auto& resource_name_map =
+ parent_->resource_map_[v3_resource_type];
+ std::set<TString> resources_in_current_request;
+ std::set<TString> resources_added_to_response;
+ for (const TString& resource_name :
+ request.resource_names()) {
+ resources_in_current_request.emplace(resource_name);
+ auto& subscription_state =
+ subscription_name_map[resource_name];
+ auto& resource_state = resource_name_map[resource_name];
+ // Subscribe if needed.
+ parent_->MaybeSubscribe(v3_resource_type, resource_name,
+ &subscription_state, &resource_state,
+ &update_queue);
+ // Send update if needed.
+ if (ClientNeedsResourceUpdate(resource_state,
+ &subscription_state)) {
+ gpr_log(GPR_INFO,
+ "ADS[%p]: Sending update for type=%s name=%s "
+ "version=%d",
+ this, request.type_url().c_str(),
+ resource_name.c_str(), resource_state.version);
+ resources_added_to_response.emplace(resource_name);
+ if (!response.has_value()) response.emplace();
+ if (resource_state.resource.has_value()) {
+ auto* resource = response->add_resources();
+ resource->CopyFrom(resource_state.resource.value());
+ if (is_v2_) {
+ resource->set_type_url(request.type_url());
+ }
+ }
+ } else {
+ gpr_log(GPR_INFO,
+ "ADS[%p]: client does not need update for "
+ "type=%s name=%s version=%d",
+ this, request.type_url().c_str(),
+ resource_name.c_str(), resource_state.version);
+ }
+ }
+ // Process unsubscriptions for any resource no longer
+ // present in the request's resource list.
+ parent_->ProcessUnsubscriptions(
+ v3_resource_type, resources_in_current_request,
+ &subscription_name_map, &resource_name_map);
+ // Send response if needed.
+ if (!resources_added_to_response.empty()) {
+ CompleteBuildingDiscoveryResponse(
+ v3_resource_type, request.type_url(),
+ ++resource_type_version[v3_resource_type],
+ subscription_name_map, resources_added_to_response,
+ &response.value());
+ }
+ }
+ }
+ }
+ if (response.has_value()) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
+ response->DebugString().c_str());
+ stream->Write(response.value());
+ }
+ response.reset();
+ // Look for updates and decide what to handle.
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (!update_queue.empty()) {
+ const TString resource_type =
+ std::move(update_queue.front().first);
+ const TString resource_name =
+ std::move(update_queue.front().second);
+ update_queue.pop_front();
+ const TString v2_resource_type = TypeUrlToV2(resource_type);
+ did_work = true;
+ gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s",
+ this, resource_type.c_str(), resource_name.c_str());
+ auto& subscription_name_map = subscription_map[resource_type];
+ auto& resource_name_map = parent_->resource_map_[resource_type];
+ auto it = subscription_name_map.find(resource_name);
+ if (it != subscription_name_map.end()) {
+ SubscriptionState& subscription_state = it->second;
+ ResourceState& resource_state =
+ resource_name_map[resource_name];
+ if (ClientNeedsResourceUpdate(resource_state,
+ &subscription_state)) {
+ gpr_log(
+ GPR_INFO,
+ "ADS[%p]: Sending update for type=%s name=%s version=%d",
+ this, resource_type.c_str(), resource_name.c_str(),
+ resource_state.version);
+ response.emplace();
+ if (resource_state.resource.has_value()) {
+ auto* resource = response->add_resources();
+ resource->CopyFrom(resource_state.resource.value());
+ if (is_v2_) {
+ resource->set_type_url(v2_resource_type);
+ }
+ }
+ CompleteBuildingDiscoveryResponse(
+ resource_type, v2_resource_type,
+ ++resource_type_version[resource_type],
+ subscription_name_map, {resource_name},
+ &response.value());
+ }
+ }
+ }
+ }
+ if (response.has_value()) {
+ gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
+ response->DebugString().c_str());
+ stream->Write(response.value());
+ }
+ // If we didn't find anything to do, delay before the next loop
+ // iteration; otherwise, check whether we should exit and then
+ // immediately continue.
+ gpr_timespec deadline =
+ grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10);
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ if (!parent_->ads_cond_.WaitUntil(
+ &parent_->ads_mu_, [this] { return parent_->ads_done_; },
+ deadline)) {
+ break;
+ }
+ }
+ }
+ reader.join();
+ }();
+ // Clean up any subscriptions that were still active when the call
+ // finished.
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ for (auto& p : subscription_map) {
+ const TString& type_url = p.first;
+ SubscriptionNameMap& subscription_name_map = p.second;
+ for (auto& q : subscription_name_map) {
+ const TString& resource_name = q.first;
+ SubscriptionState& subscription_state = q.second;
+ ResourceState& resource_state =
+ parent_->resource_map_[type_url][resource_name];
+ resource_state.subscriptions.erase(&subscription_state);
+ }
+ }
+ }
+ gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this);
+ parent_->RemoveClient(context->peer());
+ return Status::OK;
+ }
+
+ private:
+ static TString TypeUrlToV2(const TString& resource_type) {
+ if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl;
+ if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl;
+ if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl;
+ if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl;
+ return resource_type;
+ }
+
+ static TString TypeUrlToV3(const TString& resource_type) {
+ if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl;
+ if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl;
+ if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl;
+ if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl;
+ return resource_type;
+ }
+
+ // Starting a thread to do blocking read on the stream until cancel.
+ void BlockingRead(Stream* stream, std::deque<DiscoveryRequest>* requests,
+ bool* stream_closed) {
+ DiscoveryRequest request;
+ bool seen_first_request = false;
+ while (stream->Read(&request)) {
+ if (!seen_first_request) {
+ EXPECT_TRUE(request.has_node());
+ ASSERT_FALSE(request.node().client_features().empty());
+ EXPECT_EQ(request.node().client_features(0),
+ "envoy.lb.does_not_support_overprovisioning");
+ CheckBuildVersion(request);
+ seen_first_request = true;
+ }
+ {
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ requests->emplace_back(std::move(request));
+ }
+ }
+ gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this);
+ grpc_core::MutexLock lock(&parent_->ads_mu_);
+ *stream_closed = true;
+ }
+
+ static void CheckBuildVersion(
+ const ::envoy::api::v2::DiscoveryRequest& request) {
+ EXPECT_FALSE(request.node().build_version().empty());
+ }
+
+ static void CheckBuildVersion(
+ const ::envoy::service::discovery::v3::DiscoveryRequest& request) {}
+
+ // Completing the building a DiscoveryResponse by adding common information
+ // for all resources and by adding all subscribed resources for LDS and CDS.
+ void CompleteBuildingDiscoveryResponse(
+ const TString& resource_type, const TString& v2_resource_type,
+ const int version, const SubscriptionNameMap& subscription_name_map,
+ const std::set<TString>& resources_added_to_response,
+ DiscoveryResponse* response) {
+ auto& response_state =
+ parent_->resource_type_response_state_[resource_type];
+ if (response_state.state == ResponseState::NOT_SENT) {
+ response_state.state = ResponseState::SENT;
+ }
+ response->set_type_url(is_v2_ ? v2_resource_type : resource_type);
+ response->set_version_info(y_absl::StrCat(version));
+ response->set_nonce(y_absl::StrCat(version));
+ if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) {
+ // For LDS and CDS we must send back all subscribed resources
+ // (even the unchanged ones)
+ for (const auto& p : subscription_name_map) {
+ const TString& resource_name = p.first;
+ if (resources_added_to_response.find(resource_name) ==
+ resources_added_to_response.end()) {
+ const ResourceState& resource_state =
+ parent_->resource_map_[resource_type][resource_name];
+ if (resource_state.resource.has_value()) {
+ auto* resource = response->add_resources();
+ resource->CopyFrom(resource_state.resource.value());
+ if (is_v2_) {
+ resource->set_type_url(v2_resource_type);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ AdsServiceImpl* parent_;
+ const bool is_v2_;
+ };
+
+ // Checks whether the client needs to receive a newer version of
+ // the resource. If so, updates subscription_state->current_version and
+ // returns true.
+ static bool ClientNeedsResourceUpdate(const ResourceState& resource_state,
+ SubscriptionState* subscription_state) {
+ if (subscription_state->current_version < resource_state.version) {
+ subscription_state->current_version = resource_state.version;
+ return true;
+ }
+ return false;
+ }
+
+ // Subscribes to a resource if not already subscribed:
+ // 1. Sets the update_queue field in subscription_state.
+ // 2. Adds subscription_state to resource_state->subscriptions.
+ void MaybeSubscribe(const TString& resource_type,
+ const TString& resource_name,
+ SubscriptionState* subscription_state,
+ ResourceState* resource_state,
+ UpdateQueue* update_queue) {
+ // The update_queue will be null if we were not previously subscribed.
+ if (subscription_state->update_queue != nullptr) return;
+ subscription_state->update_queue = update_queue;
+ resource_state->subscriptions.emplace(subscription_state);
+ gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p",
+ this, resource_type.c_str(), resource_name.c_str(),
+ &subscription_state);
+ }
+
+ // Removes subscriptions for resources no longer present in the
+ // current request.
+ void ProcessUnsubscriptions(
+ const TString& resource_type,
+ const std::set<TString>& resources_in_current_request,
+ SubscriptionNameMap* subscription_name_map,
+ ResourceNameMap* resource_name_map) {
+ for (auto it = subscription_name_map->begin();
+ it != subscription_name_map->end();) {
+ const TString& resource_name = it->first;
+ SubscriptionState& subscription_state = it->second;
+ if (resources_in_current_request.find(resource_name) !=
+ resources_in_current_request.end()) {
+ ++it;
+ continue;
+ }
+ gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p",
+ this, resource_type.c_str(), resource_name.c_str(),
+ &subscription_state);
+ auto resource_it = resource_name_map->find(resource_name);
+ GPR_ASSERT(resource_it != resource_name_map->end());
+ auto& resource_state = resource_it->second;
+ resource_state.subscriptions.erase(&subscription_state);
+ if (resource_state.subscriptions.empty() &&
+ !resource_state.resource.has_value()) {
+ resource_name_map->erase(resource_it);
+ }
+ it = subscription_name_map->erase(it);
+ }
+ }
+
+ void AddClient(const TString& client) {
+ grpc_core::MutexLock lock(&clients_mu_);
+ clients_.insert(client);
+ }
+
+ void RemoveClient(const TString& client) {
+ grpc_core::MutexLock lock(&clients_mu_);
+ clients_.erase(client);
+ }
+
+ RpcService<::envoy::service::discovery::v2::AggregatedDiscoveryService,
+ ::envoy::api::v2::DiscoveryRequest,
+ ::envoy::api::v2::DiscoveryResponse>
+ v2_rpc_service_;
+ RpcService<::envoy::service::discovery::v3::AggregatedDiscoveryService,
+ ::envoy::service::discovery::v3::DiscoveryRequest,
+ ::envoy::service::discovery::v3::DiscoveryResponse>
+ v3_rpc_service_;
+
+ std::atomic_bool seen_v2_client_{false};
+ std::atomic_bool seen_v3_client_{false};
+
grpc_core::CondVar ads_cond_;
// Protect the members below.
grpc_core::Mutex ads_mu_;
bool ads_done_ = false;
- Listener default_listener_;
- RouteConfiguration default_route_config_;
- Cluster default_cluster_;
- std::map<TString /* type_url */, ResponseState>
- resource_type_response_state_;
- std::set<TString /*resource_type*/> resource_types_to_ignore_;
- // An instance data member containing the current state of all resources.
- // Note that an entry will exist whenever either of the following is true:
- // - The resource exists (i.e., has been created by SetResource() and has not
- // yet been destroyed by UnsetResource()).
- // - There is at least one subscription for the resource.
- ResourceMap resource_map_;
-
- grpc_core::Mutex clients_mu_;
- std::set<TString> clients_;
+ Listener default_listener_;
+ RouteConfiguration default_route_config_;
+ Cluster default_cluster_;
+ std::map<TString /* type_url */, ResponseState>
+ resource_type_response_state_;
+ std::set<TString /*resource_type*/> resource_types_to_ignore_;
+ // An instance data member containing the current state of all resources.
+ // Note that an entry will exist whenever either of the following is true:
+ // - The resource exists (i.e., has been created by SetResource() and has not
+ // yet been destroyed by UnsetResource()).
+ // - There is at least one subscription for the resource.
+ ResourceMap resource_map_;
+
+ grpc_core::Mutex clients_mu_;
+ std::set<TString> clients_;
};
-class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
+class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
public:
explicit LrsServiceImpl(int client_load_reporting_interval_seconds)
- : v2_rpc_service_(this),
- v3_rpc_service_(this),
- client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds),
- cluster_names_({kDefaultClusterName}) {}
-
- ::envoy::service::load_stats::v2::LoadReportingService::Service*
- v2_rpc_service() {
- return &v2_rpc_service_;
- }
-
- ::envoy::service::load_stats::v3::LoadReportingService::Service*
- v3_rpc_service() {
- return &v3_rpc_service_;
- }
-
- size_t request_count() {
- return v2_rpc_service_.request_count() + v3_rpc_service_.request_count();
- }
-
- size_t response_count() {
- return v2_rpc_service_.response_count() + v3_rpc_service_.response_count();
- }
-
- // Must be called before the LRS call is started.
- void set_send_all_clusters(bool send_all_clusters) {
- send_all_clusters_ = send_all_clusters;
- }
- void set_cluster_names(const std::set<TString>& cluster_names) {
- cluster_names_ = cluster_names;
- }
-
+ : v2_rpc_service_(this),
+ v3_rpc_service_(this),
+ client_load_reporting_interval_seconds_(
+ client_load_reporting_interval_seconds),
+ cluster_names_({kDefaultClusterName}) {}
+
+ ::envoy::service::load_stats::v2::LoadReportingService::Service*
+ v2_rpc_service() {
+ return &v2_rpc_service_;
+ }
+
+ ::envoy::service::load_stats::v3::LoadReportingService::Service*
+ v3_rpc_service() {
+ return &v3_rpc_service_;
+ }
+
+ size_t request_count() {
+ return v2_rpc_service_.request_count() + v3_rpc_service_.request_count();
+ }
+
+ size_t response_count() {
+ return v2_rpc_service_.response_count() + v3_rpc_service_.response_count();
+ }
+
+ // Must be called before the LRS call is started.
+ void set_send_all_clusters(bool send_all_clusters) {
+ send_all_clusters_ = send_all_clusters;
+ }
+ void set_cluster_names(const std::set<TString>& cluster_names) {
+ cluster_names_ = cluster_names;
+ }
+
void Start() {
- lrs_done_ = false;
- result_queue_.clear();
+ lrs_done_ = false;
+ result_queue_.clear();
}
void Shutdown() {
@@ -1188,18 +1188,18 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
gpr_log(GPR_INFO, "LRS[%p]: shut down", this);
}
- std::vector<ClientStats> WaitForLoadReport() {
+ std::vector<ClientStats> WaitForLoadReport() {
grpc_core::MutexLock lock(&load_report_mu_);
- grpc_core::CondVar cv;
- if (result_queue_.empty()) {
- load_report_cond_ = &cv;
- load_report_cond_->WaitUntil(&load_report_mu_,
- [this] { return !result_queue_.empty(); });
- load_report_cond_ = nullptr;
- }
- std::vector<ClientStats> result = std::move(result_queue_.front());
- result_queue_.pop_front();
- return result;
+ grpc_core::CondVar cv;
+ if (result_queue_.empty()) {
+ load_report_cond_ = &cv;
+ load_report_cond_->WaitUntil(&load_report_mu_,
+ [this] { return !result_queue_.empty(); });
+ load_report_cond_ = nullptr;
+ }
+ std::vector<ClientStats> result = std::move(result_queue_.front());
+ result_queue_.pop_front();
+ return result;
}
void NotifyDoneWithLrsCall() {
@@ -1207,134 +1207,134 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
NotifyDoneWithLrsCallLocked();
}
- private:
- template <class RpcApi, class LoadStatsRequest, class LoadStatsResponse>
- class RpcService : public CountedService<typename RpcApi::Service> {
- public:
- using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>;
-
- explicit RpcService(LrsServiceImpl* parent) : parent_(parent) {}
-
- Status StreamLoadStats(ServerContext* /*context*/,
- Stream* stream) override {
- gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this);
- EXPECT_GT(parent_->client_load_reporting_interval_seconds_, 0);
- // Take a reference of the LrsServiceImpl object, reference will go
- // out of scope after this method exits.
- std::shared_ptr<LrsServiceImpl> lrs_service_impl =
- parent_->shared_from_this();
- // Read initial request.
- LoadStatsRequest request;
- if (stream->Read(&request)) {
- CountedService<typename RpcApi::Service>::IncreaseRequestCount();
- // Verify client features.
- EXPECT_THAT(
- request.node().client_features(),
- ::testing::Contains("envoy.lrs.supports_send_all_clusters"));
- // Send initial response.
- LoadStatsResponse response;
- if (parent_->send_all_clusters_) {
- response.set_send_all_clusters(true);
- } else {
- for (const TString& cluster_name : parent_->cluster_names_) {
- response.add_clusters(cluster_name);
- }
- }
- response.mutable_load_reporting_interval()->set_seconds(
- parent_->client_load_reporting_interval_seconds_);
- stream->Write(response);
- CountedService<typename RpcApi::Service>::IncreaseResponseCount();
- // Wait for report.
- request.Clear();
- while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s",
- this, request.DebugString().c_str());
- std::vector<ClientStats> stats;
- for (const auto& cluster_stats : request.cluster_stats()) {
- stats.emplace_back(cluster_stats);
- }
- grpc_core::MutexLock lock(&parent_->load_report_mu_);
- parent_->result_queue_.emplace_back(std::move(stats));
- if (parent_->load_report_cond_ != nullptr) {
- parent_->load_report_cond_->Signal();
- }
- }
- // Wait until notified done.
- grpc_core::MutexLock lock(&parent_->lrs_mu_);
- parent_->lrs_cv_.WaitUntil(&parent_->lrs_mu_,
- [this] { return parent_->lrs_done_; });
- }
- gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this);
- return Status::OK;
- }
-
- private:
- LrsServiceImpl* parent_;
- };
-
+ private:
+ template <class RpcApi, class LoadStatsRequest, class LoadStatsResponse>
+ class RpcService : public CountedService<typename RpcApi::Service> {
+ public:
+ using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>;
+
+ explicit RpcService(LrsServiceImpl* parent) : parent_(parent) {}
+
+ Status StreamLoadStats(ServerContext* /*context*/,
+ Stream* stream) override {
+ gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this);
+ EXPECT_GT(parent_->client_load_reporting_interval_seconds_, 0);
+ // Take a reference of the LrsServiceImpl object, reference will go
+ // out of scope after this method exits.
+ std::shared_ptr<LrsServiceImpl> lrs_service_impl =
+ parent_->shared_from_this();
+ // Read initial request.
+ LoadStatsRequest request;
+ if (stream->Read(&request)) {
+ CountedService<typename RpcApi::Service>::IncreaseRequestCount();
+ // Verify client features.
+ EXPECT_THAT(
+ request.node().client_features(),
+ ::testing::Contains("envoy.lrs.supports_send_all_clusters"));
+ // Send initial response.
+ LoadStatsResponse response;
+ if (parent_->send_all_clusters_) {
+ response.set_send_all_clusters(true);
+ } else {
+ for (const TString& cluster_name : parent_->cluster_names_) {
+ response.add_clusters(cluster_name);
+ }
+ }
+ response.mutable_load_reporting_interval()->set_seconds(
+ parent_->client_load_reporting_interval_seconds_);
+ stream->Write(response);
+ CountedService<typename RpcApi::Service>::IncreaseResponseCount();
+ // Wait for report.
+ request.Clear();
+ while (stream->Read(&request)) {
+ gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s",
+ this, request.DebugString().c_str());
+ std::vector<ClientStats> stats;
+ for (const auto& cluster_stats : request.cluster_stats()) {
+ stats.emplace_back(cluster_stats);
+ }
+ grpc_core::MutexLock lock(&parent_->load_report_mu_);
+ parent_->result_queue_.emplace_back(std::move(stats));
+ if (parent_->load_report_cond_ != nullptr) {
+ parent_->load_report_cond_->Signal();
+ }
+ }
+ // Wait until notified done.
+ grpc_core::MutexLock lock(&parent_->lrs_mu_);
+ parent_->lrs_cv_.WaitUntil(&parent_->lrs_mu_,
+ [this] { return parent_->lrs_done_; });
+ }
+ gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this);
+ return Status::OK;
+ }
+
+ private:
+ LrsServiceImpl* parent_;
+ };
+
void NotifyDoneWithLrsCallLocked() {
- if (!lrs_done_) {
- lrs_done_ = true;
+ if (!lrs_done_) {
+ lrs_done_ = true;
lrs_cv_.Broadcast();
}
}
- RpcService<::envoy::service::load_stats::v2::LoadReportingService,
- ::envoy::service::load_stats::v2::LoadStatsRequest,
- ::envoy::service::load_stats::v2::LoadStatsResponse>
- v2_rpc_service_;
- RpcService<::envoy::service::load_stats::v3::LoadReportingService,
- ::envoy::service::load_stats::v3::LoadStatsRequest,
- ::envoy::service::load_stats::v3::LoadStatsResponse>
- v3_rpc_service_;
-
+ RpcService<::envoy::service::load_stats::v2::LoadReportingService,
+ ::envoy::service::load_stats::v2::LoadStatsRequest,
+ ::envoy::service::load_stats::v2::LoadStatsResponse>
+ v2_rpc_service_;
+ RpcService<::envoy::service::load_stats::v3::LoadReportingService,
+ ::envoy::service::load_stats::v3::LoadStatsRequest,
+ ::envoy::service::load_stats::v3::LoadStatsResponse>
+ v3_rpc_service_;
+
const int client_load_reporting_interval_seconds_;
- bool send_all_clusters_ = false;
- std::set<TString> cluster_names_;
+ bool send_all_clusters_ = false;
+ std::set<TString> cluster_names_;
grpc_core::CondVar lrs_cv_;
- grpc_core::Mutex lrs_mu_; // Protects lrs_done_.
- bool lrs_done_ = false;
+ grpc_core::Mutex lrs_mu_; // Protects lrs_done_.
+ bool lrs_done_ = false;
- grpc_core::Mutex load_report_mu_; // Protects the members below.
- grpc_core::CondVar* load_report_cond_ = nullptr;
- std::deque<std::vector<ClientStats>> result_queue_;
+ grpc_core::Mutex load_report_mu_; // Protects the members below.
+ grpc_core::CondVar* load_report_cond_ = nullptr;
+ std::deque<std::vector<ClientStats>> result_queue_;
};
class TestType {
public:
- TestType(bool use_xds_resolver, bool enable_load_reporting,
- bool enable_rds_testing = false, bool use_v2 = false)
+ TestType(bool use_xds_resolver, bool enable_load_reporting,
+ bool enable_rds_testing = false, bool use_v2 = false)
: use_xds_resolver_(use_xds_resolver),
- enable_load_reporting_(enable_load_reporting),
- enable_rds_testing_(enable_rds_testing),
- use_v2_(use_v2) {}
+ enable_load_reporting_(enable_load_reporting),
+ enable_rds_testing_(enable_rds_testing),
+ use_v2_(use_v2) {}
bool use_xds_resolver() const { return use_xds_resolver_; }
bool enable_load_reporting() const { return enable_load_reporting_; }
- bool enable_rds_testing() const { return enable_rds_testing_; }
- bool use_v2() const { return use_v2_; }
+ bool enable_rds_testing() const { return enable_rds_testing_; }
+ bool use_v2() const { return use_v2_; }
- TString AsString() const {
- TString retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver");
- retval += (use_v2_ ? "V2" : "V3");
+ TString AsString() const {
+ TString retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver");
+ retval += (use_v2_ ? "V2" : "V3");
if (enable_load_reporting_) retval += "WithLoadReporting";
- if (enable_rds_testing_) retval += "Rds";
+ if (enable_rds_testing_) retval += "Rds";
return retval;
}
private:
const bool use_xds_resolver_;
const bool enable_load_reporting_;
- const bool enable_rds_testing_;
- const bool use_v2_;
+ const bool enable_rds_testing_;
+ const bool use_v2_;
};
class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
protected:
XdsEnd2endTest(size_t num_backends, size_t num_balancers,
- int client_load_reporting_interval_seconds = 100)
- : num_backends_(num_backends),
+ int client_load_reporting_interval_seconds = 100)
+ : num_backends_(num_backends),
num_balancers_(num_balancers),
client_load_reporting_interval_seconds_(
client_load_reporting_interval_seconds) {}
@@ -1353,70 +1353,70 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
static void TearDownTestCase() { grpc_shutdown(); }
void SetUp() override {
- gpr_setenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT", "true");
- gpr_setenv("GRPC_XDS_BOOTSTRAP",
- GetParam().use_v2() ? g_bootstrap_file_v2 : g_bootstrap_file_v3);
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT", "true");
+ gpr_setenv("GRPC_XDS_BOOTSTRAP",
+ GetParam().use_v2() ? g_bootstrap_file_v2 : g_bootstrap_file_v3);
g_port_saver->Reset();
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
- // Inject xDS channel response generator.
+ // Inject xDS channel response generator.
lb_channel_response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
- xds_channel_args_to_add_.emplace_back(
- grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
- lb_channel_response_generator_.get()));
- if (xds_resource_does_not_exist_timeout_ms_ > 0) {
- xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create(
- const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS),
- xds_resource_does_not_exist_timeout_ms_));
- }
- xds_channel_args_.num_args = xds_channel_args_to_add_.size();
- xds_channel_args_.args = xds_channel_args_to_add_.data();
- grpc_core::internal::SetXdsChannelArgsForTest(&xds_channel_args_);
- // Make sure each test creates a new XdsClient instance rather than
- // reusing the one from the previous test. This avoids spurious failures
- // caused when a load reporting test runs after a non-load reporting test
- // and the XdsClient is still talking to the old LRS server, which fails
- // because it's not expecting the client to connect. It also
- // ensures that each test can independently set the global channel
- // args for the xDS channel.
- grpc_core::internal::UnsetGlobalXdsClientForTest();
+ xds_channel_args_to_add_.emplace_back(
+ grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+ lb_channel_response_generator_.get()));
+ if (xds_resource_does_not_exist_timeout_ms_ > 0) {
+ xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create(
+ const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS),
+ xds_resource_does_not_exist_timeout_ms_));
+ }
+ xds_channel_args_.num_args = xds_channel_args_to_add_.size();
+ xds_channel_args_.args = xds_channel_args_to_add_.data();
+ grpc_core::internal::SetXdsChannelArgsForTest(&xds_channel_args_);
+ // Make sure each test creates a new XdsClient instance rather than
+ // reusing the one from the previous test. This avoids spurious failures
+ // caused when a load reporting test runs after a non-load reporting test
+ // and the XdsClient is still talking to the old LRS server, which fails
+ // because it's not expecting the client to connect. It also
+ // ensures that each test can independently set the global channel
+ // args for the xDS channel.
+ grpc_core::internal::UnsetGlobalXdsClientForTest();
// Start the backends.
for (size_t i = 0; i < num_backends_; ++i) {
backends_.emplace_back(new BackendServerThread);
- backends_.back()->Start();
+ backends_.back()->Start();
}
// Start the load balancers.
for (size_t i = 0; i < num_balancers_; ++i) {
balancers_.emplace_back(
- new BalancerServerThread(GetParam().enable_load_reporting()
- ? client_load_reporting_interval_seconds_
- : 0));
- balancers_.back()->Start();
- if (GetParam().enable_rds_testing()) {
- balancers_[i]->ads_service()->SetLdsToUseDynamicRds();
- }
+ new BalancerServerThread(GetParam().enable_load_reporting()
+ ? client_load_reporting_interval_seconds_
+ : 0));
+ balancers_.back()->Start();
+ if (GetParam().enable_rds_testing()) {
+ balancers_[i]->ads_service()->SetLdsToUseDynamicRds();
+ }
}
ResetStub();
}
- const char* DefaultEdsServiceName() const {
- return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName;
- }
-
+ const char* DefaultEdsServiceName() const {
+ return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName;
+ }
+
void TearDown() override {
ShutdownAllBackends();
for (auto& balancer : balancers_) balancer->Shutdown();
- // Clear global xDS channel args, since they will go out of scope
- // when this test object is destroyed.
- grpc_core::internal::SetXdsChannelArgsForTest(nullptr);
+ // Clear global xDS channel args, since they will go out of scope
+ // when this test object is destroyed.
+ grpc_core::internal::SetXdsChannelArgsForTest(nullptr);
}
void StartAllBackends() {
- for (auto& backend : backends_) backend->Start();
+ for (auto& backend : backends_) backend->Start();
}
- void StartBackend(size_t index) { backends_[index]->Start(); }
+ void StartBackend(size_t index) { backends_[index]->Start(); }
void ShutdownAllBackends() {
for (auto& backend : backends_) backend->Shutdown();
@@ -1424,27 +1424,27 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
- void ResetStub(int failover_timeout = 0) {
- channel_ = CreateChannel(failover_timeout);
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_);
- stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_);
- }
-
- std::shared_ptr<Channel> CreateChannel(
- int failover_timeout = 0, const char* server_name = kServerName) {
+ void ResetStub(int failover_timeout = 0) {
+ channel_ = CreateChannel(failover_timeout);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_);
+ stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_);
+ }
+
+ std::shared_ptr<Channel> CreateChannel(
+ int failover_timeout = 0, const char* server_name = kServerName) {
ChannelArguments args;
if (failover_timeout > 0) {
- args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
+ args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
}
// If the parent channel is using the fake resolver, we inject the
- // response generator here.
- if (!GetParam().use_xds_resolver()) {
- args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
- response_generator_.get());
+ // response generator here.
+ if (!GetParam().use_xds_resolver()) {
+ args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ response_generator_.get());
}
- TString uri = y_absl::StrCat(
- GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name);
+ TString uri = y_absl::StrCat(
+ GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name);
// TODO(dgq): templatize tests to run everything using both secure and
// insecure channel credentials.
grpc_channel_credentials* channel_creds =
@@ -1456,112 +1456,112 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
channel_creds, call_creds, nullptr)));
call_creds->Unref();
channel_creds->Unref();
- return ::grpc::CreateCustomChannel(uri, creds, args);
- }
-
- enum RpcService {
- SERVICE_ECHO,
- SERVICE_ECHO1,
- SERVICE_ECHO2,
- };
-
- enum RpcMethod {
- METHOD_ECHO,
- METHOD_ECHO1,
- METHOD_ECHO2,
- };
-
- struct RpcOptions {
- RpcService service = SERVICE_ECHO;
- RpcMethod method = METHOD_ECHO;
- int timeout_ms = 1000;
- bool wait_for_ready = false;
- bool server_fail = false;
- std::vector<std::pair<TString, TString>> metadata;
-
- RpcOptions() {}
-
- RpcOptions& set_rpc_service(RpcService rpc_service) {
- service = rpc_service;
- return *this;
- }
-
- RpcOptions& set_rpc_method(RpcMethod rpc_method) {
- method = rpc_method;
- return *this;
- }
-
- RpcOptions& set_timeout_ms(int rpc_timeout_ms) {
- timeout_ms = rpc_timeout_ms;
- return *this;
- }
-
- RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) {
- wait_for_ready = rpc_wait_for_ready;
- return *this;
- }
-
- RpcOptions& set_server_fail(bool rpc_server_fail) {
- server_fail = rpc_server_fail;
- return *this;
- }
-
- RpcOptions& set_metadata(
- std::vector<std::pair<TString, TString>> rpc_metadata) {
- metadata = rpc_metadata;
- return *this;
- }
- };
-
- template <typename Stub>
- Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options,
- ClientContext* context, EchoRequest& request,
- EchoResponse* response) {
- switch (rpc_options.method) {
- case METHOD_ECHO:
- return (*stub)->Echo(context, request, response);
- case METHOD_ECHO1:
- return (*stub)->Echo1(context, request, response);
- case METHOD_ECHO2:
- return (*stub)->Echo2(context, request, response);
- }
- }
-
- void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) {
+ return ::grpc::CreateCustomChannel(uri, creds, args);
+ }
+
+ enum RpcService {
+ SERVICE_ECHO,
+ SERVICE_ECHO1,
+ SERVICE_ECHO2,
+ };
+
+ enum RpcMethod {
+ METHOD_ECHO,
+ METHOD_ECHO1,
+ METHOD_ECHO2,
+ };
+
+ struct RpcOptions {
+ RpcService service = SERVICE_ECHO;
+ RpcMethod method = METHOD_ECHO;
+ int timeout_ms = 1000;
+ bool wait_for_ready = false;
+ bool server_fail = false;
+ std::vector<std::pair<TString, TString>> metadata;
+
+ RpcOptions() {}
+
+ RpcOptions& set_rpc_service(RpcService rpc_service) {
+ service = rpc_service;
+ return *this;
+ }
+
+ RpcOptions& set_rpc_method(RpcMethod rpc_method) {
+ method = rpc_method;
+ return *this;
+ }
+
+ RpcOptions& set_timeout_ms(int rpc_timeout_ms) {
+ timeout_ms = rpc_timeout_ms;
+ return *this;
+ }
+
+ RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) {
+ wait_for_ready = rpc_wait_for_ready;
+ return *this;
+ }
+
+ RpcOptions& set_server_fail(bool rpc_server_fail) {
+ server_fail = rpc_server_fail;
+ return *this;
+ }
+
+ RpcOptions& set_metadata(
+ std::vector<std::pair<TString, TString>> rpc_metadata) {
+ metadata = rpc_metadata;
+ return *this;
+ }
+ };
+
+ template <typename Stub>
+ Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options,
+ ClientContext* context, EchoRequest& request,
+ EchoResponse* response) {
+ switch (rpc_options.method) {
+ case METHOD_ECHO:
+ return (*stub)->Echo(context, request, response);
+ case METHOD_ECHO1:
+ return (*stub)->Echo1(context, request, response);
+ case METHOD_ECHO2:
+ return (*stub)->Echo2(context, request, response);
+ }
+ }
+
+ void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) {
if (stop_index == 0) stop_index = backends_.size();
for (size_t i = start_index; i < stop_index; ++i) {
- backends_[i]->backend_service()->ResetCounters();
- backends_[i]->backend_service1()->ResetCounters();
- backends_[i]->backend_service2()->ResetCounters();
+ backends_[i]->backend_service()->ResetCounters();
+ backends_[i]->backend_service1()->ResetCounters();
+ backends_[i]->backend_service2()->ResetCounters();
+ }
+ }
+
+ bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0,
+ const RpcOptions& rpc_options = RpcOptions()) {
+ if (stop_index == 0) stop_index = backends_.size();
+ for (size_t i = start_index; i < stop_index; ++i) {
+ switch (rpc_options.service) {
+ case SERVICE_ECHO:
+ if (backends_[i]->backend_service()->request_count() == 0)
+ return false;
+ break;
+ case SERVICE_ECHO1:
+ if (backends_[i]->backend_service1()->request_count() == 0)
+ return false;
+ break;
+ case SERVICE_ECHO2:
+ if (backends_[i]->backend_service2()->request_count() == 0)
+ return false;
+ break;
+ }
}
- }
-
- bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0,
- const RpcOptions& rpc_options = RpcOptions()) {
- if (stop_index == 0) stop_index = backends_.size();
- for (size_t i = start_index; i < stop_index; ++i) {
- switch (rpc_options.service) {
- case SERVICE_ECHO:
- if (backends_[i]->backend_service()->request_count() == 0)
- return false;
- break;
- case SERVICE_ECHO1:
- if (backends_[i]->backend_service1()->request_count() == 0)
- return false;
- break;
- case SERVICE_ECHO2:
- if (backends_[i]->backend_service2()->request_count() == 0)
- return false;
- break;
- }
- }
return true;
}
void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
- int* num_drops,
- const RpcOptions& rpc_options = RpcOptions()) {
- const Status status = SendRpc(rpc_options);
+ int* num_drops,
+ const RpcOptions& rpc_options = RpcOptions()) {
+ const Status status = SendRpc(rpc_options);
if (status.ok()) {
++*num_ok;
} else {
@@ -1574,37 +1574,37 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
++*num_total;
}
- std::tuple<int, int, int> WaitForAllBackends(
- size_t start_index = 0, size_t stop_index = 0, bool reset_counters = true,
- const RpcOptions& rpc_options = RpcOptions(),
- bool allow_failures = false) {
+ std::tuple<int, int, int> WaitForAllBackends(
+ size_t start_index = 0, size_t stop_index = 0, bool reset_counters = true,
+ const RpcOptions& rpc_options = RpcOptions(),
+ bool allow_failures = false) {
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
int num_total = 0;
- while (!SeenAllBackends(start_index, stop_index, rpc_options)) {
- SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops,
- rpc_options);
+ while (!SeenAllBackends(start_index, stop_index, rpc_options)) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops,
+ rpc_options);
}
- if (reset_counters) ResetBackendCounters();
+ if (reset_counters) ResetBackendCounters();
gpr_log(GPR_INFO,
"Performed %d warm up requests against the backends. "
"%d succeeded, %d failed, %d dropped.",
num_total, num_ok, num_failure, num_drops);
- if (!allow_failures) EXPECT_EQ(num_failure, 0);
+ if (!allow_failures) EXPECT_EQ(num_failure, 0);
return std::make_tuple(num_ok, num_failure, num_drops);
}
- void WaitForBackend(size_t backend_idx, bool reset_counters = true,
- bool require_success = false) {
+ void WaitForBackend(size_t backend_idx, bool reset_counters = true,
+ bool require_success = false) {
gpr_log(GPR_INFO, "========= WAITING FOR BACKEND %lu ==========",
static_cast<unsigned long>(backend_idx));
do {
- Status status = SendRpc();
- if (require_success) {
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- }
+ Status status = SendRpc();
+ if (require_success) {
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ }
} while (backends_[backend_idx]->backend_service()->request_count() == 0);
if (reset_counters) ResetBackendCounters();
gpr_log(GPR_INFO, "========= BACKEND %lu READY ==========",
@@ -1615,8 +1615,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
const std::vector<int>& ports) {
grpc_core::ServerAddressList addresses;
for (int port : ports) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
+ TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
+ grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
GPR_ASSERT(lb_uri != nullptr);
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
@@ -1626,7 +1626,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
return addresses;
}
- void SetNextResolution(const std::vector<int>& ports) {
+ void SetNextResolution(const std::vector<int>& ports) {
if (GetParam().use_xds_resolver()) return; // Not used with xds resolver.
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
@@ -1634,46 +1634,46 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
grpc_error* error = GRPC_ERROR_NONE;
const char* service_config_json =
GetParam().enable_load_reporting()
- ? kDefaultServiceConfig
- : kDefaultServiceConfigWithoutLoadReporting;
+ ? kDefaultServiceConfig
+ : kDefaultServiceConfigWithoutLoadReporting;
result.service_config =
- grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
- ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
- ASSERT_NE(result.service_config.get(), nullptr);
+ grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
+ ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
+ ASSERT_NE(result.service_config.get(), nullptr);
response_generator_->SetResponse(std::move(result));
}
void SetNextResolutionForLbChannelAllBalancers(
const char* service_config_json = nullptr,
- const char* expected_targets = nullptr) {
+ const char* expected_targets = nullptr) {
std::vector<int> ports;
for (size_t i = 0; i < balancers_.size(); ++i) {
ports.emplace_back(balancers_[i]->port());
}
- SetNextResolutionForLbChannel(ports, service_config_json, expected_targets);
+ SetNextResolutionForLbChannel(ports, service_config_json, expected_targets);
}
- void SetNextResolutionForLbChannel(const std::vector<int>& ports,
- const char* service_config_json = nullptr,
- const char* expected_targets = nullptr) {
+ void SetNextResolutionForLbChannel(const std::vector<int>& ports,
+ const char* service_config_json = nullptr,
+ const char* expected_targets = nullptr) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(ports);
if (service_config_json != nullptr) {
grpc_error* error = GRPC_ERROR_NONE;
- result.service_config = grpc_core::ServiceConfig::Create(
- nullptr, service_config_json, &error);
- ASSERT_NE(result.service_config.get(), nullptr);
- ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
+ result.service_config = grpc_core::ServiceConfig::Create(
+ nullptr, service_config_json, &error);
+ ASSERT_NE(result.service_config.get(), nullptr);
+ ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
}
- if (expected_targets != nullptr) {
- grpc_arg expected_targets_arg = grpc_channel_arg_string_create(
- const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS),
- const_cast<char*>(expected_targets));
- result.args =
- grpc_channel_args_copy_and_add(nullptr, &expected_targets_arg, 1);
+ if (expected_targets != nullptr) {
+ grpc_arg expected_targets_arg = grpc_channel_arg_string_create(
+ const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS),
+ const_cast<char*>(expected_targets));
+ result.args =
+ grpc_channel_args_copy_and_add(nullptr, &expected_targets_arg, 1);
}
- lb_channel_response_generator_->SetResponse(std::move(result));
+ lb_channel_response_generator_->SetResponse(std::move(result));
}
void SetNextReresolutionResponse(const std::vector<int>& ports) {
@@ -1693,98 +1693,98 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
return backend_ports;
}
- Status SendRpc(const RpcOptions& rpc_options = RpcOptions(),
- EchoResponse* response = nullptr) {
+ Status SendRpc(const RpcOptions& rpc_options = RpcOptions(),
+ EchoResponse* response = nullptr) {
const bool local_response = (response == nullptr);
if (local_response) response = new EchoResponse;
EchoRequest request;
- ClientContext context;
- for (const auto& metadata : rpc_options.metadata) {
- context.AddMetadata(metadata.first, metadata.second);
- }
- if (rpc_options.timeout_ms != 0) {
- context.set_deadline(
- grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms));
- }
- if (rpc_options.wait_for_ready) context.set_wait_for_ready(true);
- request.set_message(kRequestMessage);
- if (rpc_options.server_fail) {
- request.mutable_param()->mutable_expected_error()->set_code(
- GRPC_STATUS_FAILED_PRECONDITION);
- }
- Status status;
- switch (rpc_options.service) {
- case SERVICE_ECHO:
- status =
- SendRpcMethod(&stub_, rpc_options, &context, request, response);
- break;
- case SERVICE_ECHO1:
- status =
- SendRpcMethod(&stub1_, rpc_options, &context, request, response);
- break;
- case SERVICE_ECHO2:
- status =
- SendRpcMethod(&stub2_, rpc_options, &context, request, response);
- break;
- }
+ ClientContext context;
+ for (const auto& metadata : rpc_options.metadata) {
+ context.AddMetadata(metadata.first, metadata.second);
+ }
+ if (rpc_options.timeout_ms != 0) {
+ context.set_deadline(
+ grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms));
+ }
+ if (rpc_options.wait_for_ready) context.set_wait_for_ready(true);
+ request.set_message(kRequestMessage);
+ if (rpc_options.server_fail) {
+ request.mutable_param()->mutable_expected_error()->set_code(
+ GRPC_STATUS_FAILED_PRECONDITION);
+ }
+ Status status;
+ switch (rpc_options.service) {
+ case SERVICE_ECHO:
+ status =
+ SendRpcMethod(&stub_, rpc_options, &context, request, response);
+ break;
+ case SERVICE_ECHO1:
+ status =
+ SendRpcMethod(&stub1_, rpc_options, &context, request, response);
+ break;
+ case SERVICE_ECHO2:
+ status =
+ SendRpcMethod(&stub2_, rpc_options, &context, request, response);
+ break;
+ }
if (local_response) delete response;
return status;
}
- void CheckRpcSendOk(const size_t times = 1,
- const RpcOptions& rpc_options = RpcOptions()) {
+ void CheckRpcSendOk(const size_t times = 1,
+ const RpcOptions& rpc_options = RpcOptions()) {
for (size_t i = 0; i < times; ++i) {
EchoResponse response;
- const Status status = SendRpc(rpc_options, &response);
+ const Status status = SendRpc(rpc_options, &response);
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
+ }
+ }
+
+ void CheckRpcSendFailure(const size_t times = 1,
+ const RpcOptions& rpc_options = RpcOptions()) {
+ for (size_t i = 0; i < times; ++i) {
+ const Status status = SendRpc(rpc_options);
+ EXPECT_FALSE(status.ok());
}
}
- void CheckRpcSendFailure(const size_t times = 1,
- const RpcOptions& rpc_options = RpcOptions()) {
- for (size_t i = 0; i < times; ++i) {
- const Status status = SendRpc(rpc_options);
- EXPECT_FALSE(status.ok());
- }
- }
-
- void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) {
- if (GetParam().enable_rds_testing()) {
- balancers_[idx]->ads_service()->SetRdsResource(route_config);
- } else {
- balancers_[idx]->ads_service()->SetLdsResource(
- AdsServiceImpl::BuildListener(route_config));
- }
- }
-
- AdsServiceImpl::ResponseState RouteConfigurationResponseState(int idx) const {
- AdsServiceImpl* ads_service = balancers_[idx]->ads_service();
- if (GetParam().enable_rds_testing()) {
- return ads_service->rds_response_state();
- }
- return ads_service->lds_response_state();
- }
-
- public:
- // This method could benefit test subclasses; to make it accessible
- // via bind with a qualified name, it needs to be public.
- void SetEdsResourceWithDelay(size_t i,
- const ClusterLoadAssignment& assignment,
- int delay_ms) {
- GPR_ASSERT(delay_ms > 0);
- gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
- balancers_[i]->ads_service()->SetEdsResource(assignment);
- }
-
- protected:
+ void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) {
+ if (GetParam().enable_rds_testing()) {
+ balancers_[idx]->ads_service()->SetRdsResource(route_config);
+ } else {
+ balancers_[idx]->ads_service()->SetLdsResource(
+ AdsServiceImpl::BuildListener(route_config));
+ }
+ }
+
+ AdsServiceImpl::ResponseState RouteConfigurationResponseState(int idx) const {
+ AdsServiceImpl* ads_service = balancers_[idx]->ads_service();
+ if (GetParam().enable_rds_testing()) {
+ return ads_service->rds_response_state();
+ }
+ return ads_service->lds_response_state();
+ }
+
+ public:
+ // This method could benefit test subclasses; to make it accessible
+ // via bind with a qualified name, it needs to be public.
+ void SetEdsResourceWithDelay(size_t i,
+ const ClusterLoadAssignment& assignment,
+ int delay_ms) {
+ GPR_ASSERT(delay_ms > 0);
+ gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
+ balancers_[i]->ads_service()->SetEdsResource(assignment);
+ }
+
+ protected:
class ServerThread {
public:
ServerThread() : port_(g_port_saver->GetPort()) {}
virtual ~ServerThread(){};
- void Start() {
+ void Start() {
gpr_log(GPR_INFO, "starting %s server on port %d", Type(), port_);
GPR_ASSERT(!running_);
running_ = true;
@@ -1794,18 +1794,18 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
// by ServerThread::Serve from firing before the wait below is hit.
grpc_core::MutexLock lock(&mu);
grpc_core::CondVar cond;
- thread_.reset(
- new std::thread(std::bind(&ServerThread::Serve, this, &mu, &cond)));
+ thread_.reset(
+ new std::thread(std::bind(&ServerThread::Serve, this, &mu, &cond)));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", Type());
}
- void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) {
+ void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) {
// We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed.
grpc_core::MutexLock lock(mu);
std::ostringstream server_address;
- server_address << "localhost:" << port_;
+ server_address << "localhost:" << port_;
ServerBuilder builder;
std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
grpc_fake_transport_security_server_credentials_create()));
@@ -1842,79 +1842,79 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
class BackendServerThread : public ServerThread {
public:
- BackendServiceImpl<::grpc::testing::EchoTestService::Service>*
- backend_service() {
- return &backend_service_;
- }
- BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>*
- backend_service1() {
- return &backend_service1_;
- }
- BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>*
- backend_service2() {
- return &backend_service2_;
- }
+ BackendServiceImpl<::grpc::testing::EchoTestService::Service>*
+ backend_service() {
+ return &backend_service_;
+ }
+ BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>*
+ backend_service1() {
+ return &backend_service1_;
+ }
+ BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>*
+ backend_service2() {
+ return &backend_service2_;
+ }
private:
void RegisterAllServices(ServerBuilder* builder) override {
builder->RegisterService(&backend_service_);
- builder->RegisterService(&backend_service1_);
- builder->RegisterService(&backend_service2_);
+ builder->RegisterService(&backend_service1_);
+ builder->RegisterService(&backend_service2_);
}
- void StartAllServices() override {
- backend_service_.Start();
- backend_service1_.Start();
- backend_service2_.Start();
- }
+ void StartAllServices() override {
+ backend_service_.Start();
+ backend_service1_.Start();
+ backend_service2_.Start();
+ }
- void ShutdownAllServices() override {
- backend_service_.Shutdown();
- backend_service1_.Shutdown();
- backend_service2_.Shutdown();
- }
+ void ShutdownAllServices() override {
+ backend_service_.Shutdown();
+ backend_service1_.Shutdown();
+ backend_service2_.Shutdown();
+ }
const char* Type() override { return "Backend"; }
- BackendServiceImpl<::grpc::testing::EchoTestService::Service>
- backend_service_;
- BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>
- backend_service1_;
- BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>
- backend_service2_;
+ BackendServiceImpl<::grpc::testing::EchoTestService::Service>
+ backend_service_;
+ BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>
+ backend_service1_;
+ BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>
+ backend_service2_;
};
class BalancerServerThread : public ServerThread {
public:
explicit BalancerServerThread(int client_load_reporting_interval = 0)
- : ads_service_(new AdsServiceImpl(client_load_reporting_interval > 0)),
- lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {}
+ : ads_service_(new AdsServiceImpl(client_load_reporting_interval > 0)),
+ lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {}
- AdsServiceImpl* ads_service() { return ads_service_.get(); }
- LrsServiceImpl* lrs_service() { return lrs_service_.get(); }
+ AdsServiceImpl* ads_service() { return ads_service_.get(); }
+ LrsServiceImpl* lrs_service() { return lrs_service_.get(); }
private:
void RegisterAllServices(ServerBuilder* builder) override {
- builder->RegisterService(ads_service_->v2_rpc_service());
- builder->RegisterService(ads_service_->v3_rpc_service());
- builder->RegisterService(lrs_service_->v2_rpc_service());
- builder->RegisterService(lrs_service_->v3_rpc_service());
+ builder->RegisterService(ads_service_->v2_rpc_service());
+ builder->RegisterService(ads_service_->v3_rpc_service());
+ builder->RegisterService(lrs_service_->v2_rpc_service());
+ builder->RegisterService(lrs_service_->v3_rpc_service());
}
void StartAllServices() override {
- ads_service_->Start();
- lrs_service_->Start();
+ ads_service_->Start();
+ lrs_service_->Start();
}
void ShutdownAllServices() override {
- ads_service_->Shutdown();
- lrs_service_->Shutdown();
+ ads_service_->Shutdown();
+ lrs_service_->Shutdown();
}
const char* Type() override { return "Balancer"; }
- std::shared_ptr<AdsServiceImpl> ads_service_;
- std::shared_ptr<LrsServiceImpl> lrs_service_;
+ std::shared_ptr<AdsServiceImpl> ads_service_;
+ std::shared_ptr<LrsServiceImpl> lrs_service_;
};
const size_t num_backends_;
@@ -1922,22 +1922,22 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
const int client_load_reporting_interval_seconds_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_;
- std::unique_ptr<grpc::testing::EchoTest2Service::Stub> stub2_;
+ std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_;
+ std::unique_ptr<grpc::testing::EchoTest2Service::Stub> stub2_;
std::vector<std::unique_ptr<BackendServerThread>> backends_;
std::vector<std::unique_ptr<BalancerServerThread>> balancers_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
response_generator_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
lb_channel_response_generator_;
- int xds_resource_does_not_exist_timeout_ms_ = 0;
- y_absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_;
- grpc_channel_args xds_channel_args_;
+ int xds_resource_does_not_exist_timeout_ms_ = 0;
+ y_absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_;
+ grpc_channel_args xds_channel_args_;
};
class BasicTest : public XdsEnd2endTest {
public:
- BasicTest() : XdsEnd2endTest(4, 1) {}
+ BasicTest() : XdsEnd2endTest(4, 1) {}
};
// Tests that the balancer sends the correct response to the client, and the
@@ -1946,11 +1946,11 @@ TEST_P(BasicTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcsPerAddress = 100;
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -1963,24 +1963,24 @@ TEST_P(BasicTest, Vanilla) {
backends_[i]->backend_service()->request_count());
}
// Check LB policy name for the channel.
- EXPECT_EQ((GetParam().use_xds_resolver() ? "xds_cluster_manager_experimental"
- : "eds_experimental"),
- channel_->GetLoadBalancingPolicyName());
+ EXPECT_EQ((GetParam().use_xds_resolver() ? "xds_cluster_manager_experimental"
+ : "eds_experimental"),
+ channel_->GetLoadBalancingPolicyName());
}
TEST_P(BasicTest, IgnoresUnhealthyEndpoints) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcsPerAddress = 100;
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0",
GetBackendPorts(),
kDefaultLocalityWeight,
kDefaultLocalityPriority,
- {HealthStatus::DRAINING}},
+ {HealthStatus::DRAINING}},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -2001,12 +2001,12 @@ TEST_P(BasicTest, SameBackendListedMultipleTimes) {
SetNextResolutionForLbChannelAllBalancers();
// Same backend listed twice.
std::vector<int> ports(2, backends_[0]->port());
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", ports},
});
const size_t kNumRpcsPerAddress = 10;
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// We need to wait for the backend to come online.
WaitForBackend(0);
// Send kNumRpcsPerAddress RPCs per server.
@@ -2026,24 +2026,24 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) {
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const int kCallDeadlineMs = kServerlistDelayMs * 2;
// First response is an empty serverlist, sent right away.
- AdsServiceImpl::EdsResourceArgs::Locality empty_locality("locality0", {});
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs::Locality empty_locality("locality0", {});
+ AdsServiceImpl::EdsResourceArgs args({
empty_locality,
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Send non-empty serverlist only after kServerlistDelayMs.
- args = AdsServiceImpl::EdsResourceArgs({
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts()},
});
- std::thread delayed_resource_setter(
- std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
- kServerlistDelayMs));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
+ kServerlistDelayMs));
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
- CheckRpcSendOk(
- 1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true));
+ CheckRpcSendOk(
+ 1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true));
const auto ellapsed_ms =
std::chrono::duration_cast<std::chrono::milliseconds>(
system_clock::now() - t0);
@@ -2052,7 +2052,7 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) {
// populated serverlist but under the call's deadline (which is enforced by
// the call's deadline).
EXPECT_GT(ellapsed_ms.count(), kServerlistDelayMs);
- delayed_resource_setter.join();
+ delayed_resource_setter.join();
}
// Tests that RPCs will fail with UNAVAILABLE instead of DEADLINE_EXCEEDED if
@@ -2065,11 +2065,11 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) {
for (size_t i = 0; i < kNumUnreachableServers; ++i) {
ports.push_back(g_port_saver->GetPort());
}
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", ports},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
const Status status = SendRpc();
// The error shouldn't be DEADLINE_EXCEEDED.
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
@@ -2080,2426 +2080,2426 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) {
TEST_P(BasicTest, BackendsRestart) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Stop backends. RPCs should fail.
ShutdownAllBackends();
- // Sending multiple failed requests instead of just one to ensure that the
- // client notices that all backends are down before we restart them. If we
- // didn't do this, then a single RPC could fail here due to the race condition
- // between the LB pick and the GOAWAY from the chosen backend being shut down,
- // which would not actually prove that the client noticed that all of the
- // backends are down. Then, when we send another request below (which we
- // expect to succeed), if the callbacks happen in the wrong order, the same
- // race condition could happen again due to the client not yet having noticed
- // that the backends were all down.
- CheckRpcSendFailure(num_backends_);
+ // Sending multiple failed requests instead of just one to ensure that the
+ // client notices that all backends are down before we restart them. If we
+ // didn't do this, then a single RPC could fail here due to the race condition
+ // between the LB pick and the GOAWAY from the chosen backend being shut down,
+ // which would not actually prove that the client noticed that all of the
+ // backends are down. Then, when we send another request below (which we
+ // expect to succeed), if the callbacks happen in the wrong order, the same
+ // race condition could happen again due to the client not yet having noticed
+ // that the backends were all down.
+ CheckRpcSendFailure(num_backends_);
// Restart all backends. RPCs should start succeeding again.
StartAllBackends();
- CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true));
+ CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true));
+}
+
+TEST_P(BasicTest, IgnoresDuplicateUpdates) {
+ const size_t kNumRpcsPerAddress = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Wait for all backends to come online.
+ WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server, but send an EDS update in
+ // between. If the update is not ignored, this will cause the
+ // round_robin policy to see an update, which will randomly reset its
+ // position in the address list.
+ for (size_t i = 0; i < kNumRpcsPerAddress; ++i) {
+ CheckRpcSendOk(2);
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ CheckRpcSendOk(2);
+ }
+ // Each backend should have gotten the right number of requests.
+ for (size_t i = 1; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress,
+ backends_[i]->backend_service()->request_count());
+ }
+}
+
+using XdsResolverOnlyTest = BasicTest;
+
+// Tests switching over from one cluster to another.
+TEST_P(XdsResolverOnlyTest, ChangeClusters) {
+ const char* kNewClusterName = "new_cluster_name";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // We need to wait for all backends to come online.
+ WaitForAllBackends(0, 2);
+ // Populate new EDS resource.
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ // Populate new CDS resource.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Change RDS resource to point to new cluster.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ new_route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->set_cluster(kNewClusterName);
+ Listener listener =
+ balancers_[0]->ads_service()->BuildListener(new_route_config);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // Wait for all new backends to be used.
+ std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
+ // Make sure no RPCs failed in the transition.
+ EXPECT_EQ(0, std::get<1>(counts));
+}
+
+// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears.
+TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ // Unset CDS resource.
+ balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName);
+ // Wait for RPCs to start failing.
+ do {
+ } while (SendRpc(RpcOptions(), nullptr).ok());
+ // Make sure RPCs are still failing.
+ CheckRpcSendFailure(1000);
+ // Make sure we ACK'ed the update.
+ EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that we restart all xDS requests when we reestablish the ADS call.
+TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
+ balancers_[0]->ads_service()->SetLdsToUseDynamicRds();
+ const char* kNewClusterName = "new_cluster_name";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // We need to wait for all backends to come online.
+ WaitForAllBackends(0, 2);
+ // Now shut down and restart the balancer. When the client
+ // reconnects, it should automatically restart the requests for all
+ // resource types.
+ balancers_[0]->Shutdown();
+ balancers_[0]->Start();
+ // Make sure things are still working.
+ CheckRpcSendOk(100);
+ // Populate new EDS resource.
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ // Populate new CDS resource.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Change RDS resource to point to new cluster.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ new_route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->set_cluster(kNewClusterName);
+ balancers_[0]->ads_service()->SetRdsResource(new_route_config);
+ // Wait for all new backends to be used.
+ std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
+ // Make sure no RPCs failed in the transition.
+ EXPECT_EQ(0, std::get<1>(counts));
+}
+
+TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_match()
+ ->set_prefix("/");
+ balancers_[0]->ads_service()->SetLdsResource(
+ AdsServiceImpl::BuildListener(route_config));
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+}
+
+TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
+ class TestRpc {
+ public:
+ TestRpc() {}
+
+ void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
+ sender_thread_ = std::thread([this, stub]() {
+ EchoResponse response;
+ EchoRequest request;
+ request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
+ request.set_message(kRequestMessage);
+ status_ = stub->Echo(&context_, request, &response);
+ });
+ }
+
+ void CancelRpc() {
+ context_.TryCancel();
+ sender_thread_.join();
+ }
+
+ private:
+ std::thread sender_thread_;
+ ClientContext context_;
+ Status status_;
+ };
+
+ gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true");
+ constexpr size_t kMaxConcurrentRequests = 10;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // Update CDS resource to set max concurrent request.
+ CircuitBreakers circuit_breaks;
+ Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
+ threshold->set_priority(RoutingPriority::DEFAULT);
+ threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Send exactly max_concurrent_requests long RPCs.
+ TestRpc rpcs[kMaxConcurrentRequests];
+ for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
+ rpcs[i].StartRpc(stub_.get());
+ }
+ // Wait for all RPCs to be in flight.
+ while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
+ kMaxConcurrentRequests) {
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
+ }
+ // Sending a RPC now should fail, the error message should tell us
+ // we hit the max concurrent requests limit and got dropped.
+ Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
+ // Cancel one RPC to allow another one through
+ rpcs[0].CancelRpc();
+ status = SendRpc();
+ EXPECT_TRUE(status.ok());
+ for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
+ rpcs[i].CancelRpc();
+ }
+ // Make sure RPCs go to the correct backend:
+ EXPECT_EQ(kMaxConcurrentRequests + 1,
+ backends_[0]->backend_service()->request_count());
+ gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING");
+}
+
+TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
+ class TestRpc {
+ public:
+ TestRpc() {}
+
+ void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
+ sender_thread_ = std::thread([this, stub]() {
+ EchoResponse response;
+ EchoRequest request;
+ request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
+ request.set_message(kRequestMessage);
+ status_ = stub->Echo(&context_, request, &response);
+ });
+ }
+
+ void CancelRpc() {
+ context_.TryCancel();
+ sender_thread_.join();
+ }
+
+ private:
+ std::thread sender_thread_;
+ ClientContext context_;
+ Status status_;
+ };
+
+ constexpr size_t kMaxConcurrentRequests = 10;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // Update CDS resource to set max concurrent request.
+ CircuitBreakers circuit_breaks;
+ Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
+ threshold->set_priority(RoutingPriority::DEFAULT);
+ threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ // Send exactly max_concurrent_requests long RPCs.
+ TestRpc rpcs[kMaxConcurrentRequests];
+ for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
+ rpcs[i].StartRpc(stub_.get());
+ }
+ // Wait for all RPCs to be in flight.
+ while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
+ kMaxConcurrentRequests) {
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
+ }
+ // Sending a RPC now should not fail as circuit breaking is disabled.
+ Status status = SendRpc();
+ EXPECT_TRUE(status.ok());
+ for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
+ rpcs[i].CancelRpc();
+ }
+ // Make sure RPCs go to the correct backend:
+ EXPECT_EQ(kMaxConcurrentRequests + 1,
+ backends_[0]->backend_service()->request_count());
+}
+
+TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) {
+ const char* kNewServerName = "new-server.example.com";
+ Listener listener = balancers_[0]->ads_service()->default_listener();
+ listener.set_name(kNewServerName);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ WaitForAllBackends();
+ // Create second channel and tell it to connect to kNewServerName.
+ auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName);
+ channel2->GetState(/*try_to_connect=*/true);
+ ASSERT_TRUE(
+ channel2->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100)));
+ // Make sure there's only one client connected.
+ EXPECT_EQ(1UL, balancers_[0]->ads_service()->clients().size());
+}
+
+class XdsResolverLoadReportingOnlyTest : public XdsEnd2endTest {
+ public:
+ XdsResolverLoadReportingOnlyTest() : XdsEnd2endTest(4, 1, 3) {}
+};
+
+// Tests load reporting when switching over from one cluster to another.
+TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) {
+ const char* kNewClusterName = "new_cluster_name";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ balancers_[0]->lrs_service()->set_cluster_names(
+ {kDefaultClusterName, kNewClusterName});
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // cluster kDefaultClusterName -> locality0 -> backends 0 and 1
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // cluster kNewClusterName -> locality1 -> backends 2 and 3
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality1", GetBackendPorts(2, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
+ // CDS resource for kNewClusterName.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Wait for all backends to come online.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(0, 2);
+ // The load report received at the balancer should be correct.
+ std::vector<ClientStats> load_report =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ EXPECT_THAT(
+ load_report,
+ ::testing::ElementsAre(::testing::AllOf(
+ ::testing::Property(&ClientStats::cluster_name, kDefaultClusterName),
+ ::testing::Property(
+ &ClientStats::locality_stats,
+ ::testing::ElementsAre(::testing::Pair(
+ "locality0",
+ ::testing::AllOf(
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_successful_requests,
+ num_ok),
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_requests_in_progress,
+ 0UL),
+ ::testing::Field(
+ &ClientStats::LocalityStats::total_error_requests,
+ num_failure),
+ ::testing::Field(
+ &ClientStats::LocalityStats::total_issued_requests,
+ num_failure + num_ok))))),
+ ::testing::Property(&ClientStats::total_dropped_requests,
+ num_drops))));
+ // Change RDS resource to point to new cluster.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ new_route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->set_cluster(kNewClusterName);
+ Listener listener =
+ balancers_[0]->ads_service()->BuildListener(new_route_config);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ // Wait for all new backends to be used.
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4);
+ // The load report received at the balancer should be correct.
+ load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
+ EXPECT_THAT(
+ load_report,
+ ::testing::ElementsAre(
+ ::testing::AllOf(
+ ::testing::Property(&ClientStats::cluster_name,
+ kDefaultClusterName),
+ ::testing::Property(
+ &ClientStats::locality_stats,
+ ::testing::ElementsAre(::testing::Pair(
+ "locality0",
+ ::testing::AllOf(
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_successful_requests,
+ ::testing::Lt(num_ok)),
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_requests_in_progress,
+ 0UL),
+ ::testing::Field(
+ &ClientStats::LocalityStats::total_error_requests,
+ ::testing::Le(num_failure)),
+ ::testing::Field(
+ &ClientStats::LocalityStats::
+ total_issued_requests,
+ ::testing::Le(num_failure + num_ok)))))),
+ ::testing::Property(&ClientStats::total_dropped_requests,
+ num_drops)),
+ ::testing::AllOf(
+ ::testing::Property(&ClientStats::cluster_name, kNewClusterName),
+ ::testing::Property(
+ &ClientStats::locality_stats,
+ ::testing::ElementsAre(::testing::Pair(
+ "locality1",
+ ::testing::AllOf(
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_successful_requests,
+ ::testing::Le(num_ok)),
+ ::testing::Field(&ClientStats::LocalityStats::
+ total_requests_in_progress,
+ 0UL),
+ ::testing::Field(
+ &ClientStats::LocalityStats::total_error_requests,
+ ::testing::Le(num_failure)),
+ ::testing::Field(
+ &ClientStats::LocalityStats::
+ total_issued_requests,
+ ::testing::Le(num_failure + num_ok)))))),
+ ::testing::Property(&ClientStats::total_dropped_requests,
+ num_drops))));
+ int total_ok = 0;
+ int total_failure = 0;
+ for (const ClientStats& client_stats : load_report) {
+ total_ok += client_stats.total_successful_requests();
+ total_failure += client_stats.total_error_requests();
+ }
+ EXPECT_EQ(total_ok, num_ok);
+ EXPECT_EQ(total_failure, num_failure);
+ // The LRS service got a single request, and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
}
-TEST_P(BasicTest, IgnoresDuplicateUpdates) {
- const size_t kNumRpcsPerAddress = 100;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Wait for all backends to come online.
- WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server, but send an EDS update in
- // between. If the update is not ignored, this will cause the
- // round_robin policy to see an update, which will randomly reset its
- // position in the address list.
- for (size_t i = 0; i < kNumRpcsPerAddress; ++i) {
- CheckRpcSendOk(2);
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- CheckRpcSendOk(2);
- }
- // Each backend should have gotten the right number of requests.
- for (size_t i = 1; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress,
- backends_[i]->backend_service()->request_count());
- }
-}
-
-using XdsResolverOnlyTest = BasicTest;
-
-// Tests switching over from one cluster to another.
-TEST_P(XdsResolverOnlyTest, ChangeClusters) {
- const char* kNewClusterName = "new_cluster_name";
- const char* kNewEdsServiceName = "new_eds_service_name";
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // We need to wait for all backends to come online.
- WaitForAllBackends(0, 2);
- // Populate new EDS resource.
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
- // Populate new CDS resource.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- new_route_config.mutable_virtual_hosts(0)
- ->mutable_routes(0)
- ->mutable_route()
- ->set_cluster(kNewClusterName);
- Listener listener =
- balancers_[0]->ads_service()->BuildListener(new_route_config);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- // Wait for all new backends to be used.
- std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
- // Make sure no RPCs failed in the transition.
- EXPECT_EQ(0, std::get<1>(counts));
-}
-
-// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears.
-TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- // Unset CDS resource.
- balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName);
- // Wait for RPCs to start failing.
- do {
- } while (SendRpc(RpcOptions(), nullptr).ok());
- // Make sure RPCs are still failing.
- CheckRpcSendFailure(1000);
- // Make sure we ACK'ed the update.
- EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
- AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that we restart all xDS requests when we reestablish the ADS call.
-TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
- balancers_[0]->ads_service()->SetLdsToUseDynamicRds();
- const char* kNewClusterName = "new_cluster_name";
- const char* kNewEdsServiceName = "new_eds_service_name";
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // We need to wait for all backends to come online.
- WaitForAllBackends(0, 2);
- // Now shut down and restart the balancer. When the client
- // reconnects, it should automatically restart the requests for all
- // resource types.
- balancers_[0]->Shutdown();
- balancers_[0]->Start();
- // Make sure things are still working.
- CheckRpcSendOk(100);
- // Populate new EDS resource.
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
- // Populate new CDS resource.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- new_route_config.mutable_virtual_hosts(0)
- ->mutable_routes(0)
- ->mutable_route()
- ->set_cluster(kNewClusterName);
- balancers_[0]->ads_service()->SetRdsResource(new_route_config);
- // Wait for all new backends to be used.
- std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
- // Make sure no RPCs failed in the transition.
- EXPECT_EQ(0, std::get<1>(counts));
-}
-
-TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- route_config.mutable_virtual_hosts(0)
- ->mutable_routes(0)
- ->mutable_match()
- ->set_prefix("/");
- balancers_[0]->ads_service()->SetLdsResource(
- AdsServiceImpl::BuildListener(route_config));
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // We need to wait for all backends to come online.
- WaitForAllBackends();
-}
-
-TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
- class TestRpc {
- public:
- TestRpc() {}
-
- void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
- sender_thread_ = std::thread([this, stub]() {
- EchoResponse response;
- EchoRequest request;
- request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
- request.set_message(kRequestMessage);
- status_ = stub->Echo(&context_, request, &response);
- });
- }
-
- void CancelRpc() {
- context_.TryCancel();
- sender_thread_.join();
- }
-
- private:
- std::thread sender_thread_;
- ClientContext context_;
- Status status_;
- };
-
- gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true");
- constexpr size_t kMaxConcurrentRequests = 10;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // Update CDS resource to set max concurrent request.
- CircuitBreakers circuit_breaks;
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
- auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
- threshold->set_priority(RoutingPriority::DEFAULT);
- threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- // Send exactly max_concurrent_requests long RPCs.
- TestRpc rpcs[kMaxConcurrentRequests];
- for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
- rpcs[i].StartRpc(stub_.get());
- }
- // Wait for all RPCs to be in flight.
- while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
- kMaxConcurrentRequests) {
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
- }
- // Sending a RPC now should fail, the error message should tell us
- // we hit the max concurrent requests limit and got dropped.
- Status status = SendRpc();
- EXPECT_FALSE(status.ok());
- EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
- // Cancel one RPC to allow another one through
- rpcs[0].CancelRpc();
- status = SendRpc();
- EXPECT_TRUE(status.ok());
- for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
- rpcs[i].CancelRpc();
- }
- // Make sure RPCs go to the correct backend:
- EXPECT_EQ(kMaxConcurrentRequests + 1,
- backends_[0]->backend_service()->request_count());
- gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING");
-}
-
-TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
- class TestRpc {
- public:
- TestRpc() {}
-
- void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
- sender_thread_ = std::thread([this, stub]() {
- EchoResponse response;
- EchoRequest request;
- request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
- request.set_message(kRequestMessage);
- status_ = stub->Echo(&context_, request, &response);
- });
- }
-
- void CancelRpc() {
- context_.TryCancel();
- sender_thread_.join();
- }
-
- private:
- std::thread sender_thread_;
- ClientContext context_;
- Status status_;
- };
-
- constexpr size_t kMaxConcurrentRequests = 10;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // Update CDS resource to set max concurrent request.
- CircuitBreakers circuit_breaks;
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
- auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
- threshold->set_priority(RoutingPriority::DEFAULT);
- threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- // Send exactly max_concurrent_requests long RPCs.
- TestRpc rpcs[kMaxConcurrentRequests];
- for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
- rpcs[i].StartRpc(stub_.get());
- }
- // Wait for all RPCs to be in flight.
- while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
- kMaxConcurrentRequests) {
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
- }
- // Sending a RPC now should not fail as circuit breaking is disabled.
- Status status = SendRpc();
- EXPECT_TRUE(status.ok());
- for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
- rpcs[i].CancelRpc();
- }
- // Make sure RPCs go to the correct backend:
- EXPECT_EQ(kMaxConcurrentRequests + 1,
- backends_[0]->backend_service()->request_count());
-}
-
-TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) {
- const char* kNewServerName = "new-server.example.com";
- Listener listener = balancers_[0]->ads_service()->default_listener();
- listener.set_name(kNewServerName);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- WaitForAllBackends();
- // Create second channel and tell it to connect to kNewServerName.
- auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName);
- channel2->GetState(/*try_to_connect=*/true);
- ASSERT_TRUE(
- channel2->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100)));
- // Make sure there's only one client connected.
- EXPECT_EQ(1UL, balancers_[0]->ads_service()->clients().size());
-}
-
-class XdsResolverLoadReportingOnlyTest : public XdsEnd2endTest {
- public:
- XdsResolverLoadReportingOnlyTest() : XdsEnd2endTest(4, 1, 3) {}
-};
-
-// Tests load reporting when switching over from one cluster to another.
-TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) {
- const char* kNewClusterName = "new_cluster_name";
- const char* kNewEdsServiceName = "new_eds_service_name";
- balancers_[0]->lrs_service()->set_cluster_names(
- {kDefaultClusterName, kNewClusterName});
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // cluster kDefaultClusterName -> locality0 -> backends 0 and 1
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // cluster kNewClusterName -> locality1 -> backends 2 and 3
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality1", GetBackendPorts(2, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName));
- // CDS resource for kNewClusterName.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Wait for all backends to come online.
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(0, 2);
- // The load report received at the balancer should be correct.
- std::vector<ClientStats> load_report =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- EXPECT_THAT(
- load_report,
- ::testing::ElementsAre(::testing::AllOf(
- ::testing::Property(&ClientStats::cluster_name, kDefaultClusterName),
- ::testing::Property(
- &ClientStats::locality_stats,
- ::testing::ElementsAre(::testing::Pair(
- "locality0",
- ::testing::AllOf(
- ::testing::Field(&ClientStats::LocalityStats::
- total_successful_requests,
- num_ok),
- ::testing::Field(&ClientStats::LocalityStats::
- total_requests_in_progress,
- 0UL),
- ::testing::Field(
- &ClientStats::LocalityStats::total_error_requests,
- num_failure),
- ::testing::Field(
- &ClientStats::LocalityStats::total_issued_requests,
- num_failure + num_ok))))),
- ::testing::Property(&ClientStats::total_dropped_requests,
- num_drops))));
- // Change RDS resource to point to new cluster.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- new_route_config.mutable_virtual_hosts(0)
- ->mutable_routes(0)
- ->mutable_route()
- ->set_cluster(kNewClusterName);
- Listener listener =
- balancers_[0]->ads_service()->BuildListener(new_route_config);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- // Wait for all new backends to be used.
- std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4);
- // The load report received at the balancer should be correct.
- load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
- EXPECT_THAT(
- load_report,
- ::testing::ElementsAre(
- ::testing::AllOf(
- ::testing::Property(&ClientStats::cluster_name,
- kDefaultClusterName),
- ::testing::Property(
- &ClientStats::locality_stats,
- ::testing::ElementsAre(::testing::Pair(
- "locality0",
- ::testing::AllOf(
- ::testing::Field(&ClientStats::LocalityStats::
- total_successful_requests,
- ::testing::Lt(num_ok)),
- ::testing::Field(&ClientStats::LocalityStats::
- total_requests_in_progress,
- 0UL),
- ::testing::Field(
- &ClientStats::LocalityStats::total_error_requests,
- ::testing::Le(num_failure)),
- ::testing::Field(
- &ClientStats::LocalityStats::
- total_issued_requests,
- ::testing::Le(num_failure + num_ok)))))),
- ::testing::Property(&ClientStats::total_dropped_requests,
- num_drops)),
- ::testing::AllOf(
- ::testing::Property(&ClientStats::cluster_name, kNewClusterName),
- ::testing::Property(
- &ClientStats::locality_stats,
- ::testing::ElementsAre(::testing::Pair(
- "locality1",
- ::testing::AllOf(
- ::testing::Field(&ClientStats::LocalityStats::
- total_successful_requests,
- ::testing::Le(num_ok)),
- ::testing::Field(&ClientStats::LocalityStats::
- total_requests_in_progress,
- 0UL),
- ::testing::Field(
- &ClientStats::LocalityStats::total_error_requests,
- ::testing::Le(num_failure)),
- ::testing::Field(
- &ClientStats::LocalityStats::
- total_issued_requests,
- ::testing::Le(num_failure + num_ok)))))),
- ::testing::Property(&ClientStats::total_dropped_requests,
- num_drops))));
- int total_ok = 0;
- int total_failure = 0;
- for (const ClientStats& client_stats : load_report) {
- total_ok += client_stats.total_successful_requests();
- total_failure += client_stats.total_error_requests();
- }
- EXPECT_EQ(total_ok, num_ok);
- EXPECT_EQ(total_failure, num_failure);
- // The LRS service got a single request, and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
-}
-
using SecureNamingTest = BasicTest;
// Tests that secure naming check passes if target name is expected.
TEST_P(SecureNamingTest, TargetNameIsExpected) {
SetNextResolution({});
- SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, "xds_server");
- AdsServiceImpl::EdsResourceArgs args({
+ SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, "xds_server");
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- CheckRpcSendOk();
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ CheckRpcSendOk();
}
// Tests that secure naming check fails if target name is unexpected.
TEST_P(SecureNamingTest, TargetNameIsUnexpected) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
- SetNextResolution({});
- SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr,
- "incorrect_server_name");
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ SetNextResolution({});
+ SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr,
+ "incorrect_server_name");
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that we blow up (via abort() from the security connector) when
// the name from the balancer doesn't match expectations.
- ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, "");
+ ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, "");
+}
+
+using LdsTest = BasicTest;
+
+// Tests that LDS client should send a NACK if there is no API listener in the
+// Listener in the LDS response.
+TEST_P(LdsTest, NoApiListener) {
+ auto listener = balancers_[0]->ads_service()->default_listener();
+ listener.clear_api_listener();
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "Listener has no ApiListener.");
+}
+
+// Tests that LDS client should send a NACK if the route_specifier in the
+// http_connection_manager is neither inlined route_config nor RDS.
+TEST_P(LdsTest, WrongRouteSpecifier) {
+ auto listener = balancers_[0]->ads_service()->default_listener();
+ HttpConnectionManager http_connection_manager;
+ http_connection_manager.mutable_scoped_routes();
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "HttpConnectionManager neither has inlined route_config nor RDS.");
+}
+
+// Tests that LDS client should send a NACK if the rds message in the
+// http_connection_manager is missing the config_source field.
+TEST_P(LdsTest, RdsMissingConfigSource) {
+ auto listener = balancers_[0]->ads_service()->default_listener();
+ HttpConnectionManager http_connection_manager;
+ http_connection_manager.mutable_rds()->set_route_config_name(
+ kDefaultRouteConfigurationName);
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "HttpConnectionManager missing config_source for RDS.");
+}
+
+// Tests that LDS client should send a NACK if the rds message in the
+// http_connection_manager has a config_source field that does not specify ADS.
+TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) {
+ auto listener = balancers_[0]->ads_service()->default_listener();
+ HttpConnectionManager http_connection_manager;
+ auto* rds = http_connection_manager.mutable_rds();
+ rds->set_route_config_name(kDefaultRouteConfigurationName);
+ rds->mutable_config_source()->mutable_self();
+ listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
+ http_connection_manager);
+ balancers_[0]->ads_service()->SetLdsResource(listener);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->lds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "HttpConnectionManager ConfigSource for RDS does not specify ADS.");
+}
+
+using LdsRdsTest = BasicTest;
+
+// Tests that LDS client should send an ACK upon correct LDS response (with
+// inlined RDS result).
+TEST_P(LdsRdsTest, Vanilla) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ (void)SendRpc();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+ // Make sure we actually used the RPC service for the right version of xDS.
+ EXPECT_EQ(balancers_[0]->ads_service()->seen_v2_client(),
+ GetParam().use_v2());
+ EXPECT_NE(balancers_[0]->ads_service()->seen_v3_client(),
+ GetParam().use_v2());
+}
+
+// Tests that we go into TRANSIENT_FAILURE if the Listener is removed.
+TEST_P(LdsRdsTest, ListenerRemoved) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ // Unset LDS resource.
+ balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName);
+ // Wait for RPCs to start failing.
+ do {
+ } while (SendRpc(RpcOptions(), nullptr).ok());
+ // Make sure RPCs are still failing.
+ CheckRpcSendFailure(1000);
+ // Make sure we ACK'ed the update.
+ EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client ACKs but fails if matching domain can't be found in
+// the LDS response.
+TEST_P(LdsRdsTest, NoMatchedDomain) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ route_config.mutable_virtual_hosts(0)->clear_domains();
+ route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ // Do a bit of polling, to allow the ACK to get to the ADS server.
+ channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100));
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client should choose the virtual host with matching domain if
+// multiple virtual hosts exist in the LDS response.
+TEST_P(LdsRdsTest, ChooseMatchedDomain) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ *(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0);
+ route_config.mutable_virtual_hosts(0)->clear_domains();
+ route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ (void)SendRpc();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client should choose the last route in the virtual host if
+// multiple routes exist in the LDS response.
+TEST_P(LdsRdsTest, ChooseLastRoute) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ *(route_config.mutable_virtual_hosts(0)->add_routes()) =
+ route_config.virtual_hosts(0).routes(0);
+ route_config.mutable_virtual_hosts(0)
+ ->mutable_routes(0)
+ ->mutable_route()
+ ->mutable_cluster_header();
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ (void)SendRpc();
+ EXPECT_EQ(RouteConfigurationResponseState(0).state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client should send a NACK if route match has a case_sensitive
+// set to false.
+TEST_P(LdsRdsTest, RouteMatchHasCaseSensitiveFalse) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->mutable_case_sensitive()->set_value(false);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "case_sensitive if set must be set to true.");
+}
+
+// Tests that LDS client should ignore route which has query_parameters.
+TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ route1->mutable_match()->add_query_parameters();
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should send a ACK if route match has a prefix
+// that is either empty or a single slash
+TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("");
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("/");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ (void)SendRpc();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client should ignore route which has a path
+// prefix string does not start with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has a prefix
+// string with more than 2 slashes.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has a prefix
+// string "//".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("//");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// but it's empty.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// string does not start with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// string that has too many slashes; for example, ends with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// string that has only 1 slash: missing "/" between service and method.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// string that is missing service.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("//Echo1");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore route which has path
+// string that is missing method.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/");
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Test that LDS client should reject route which has invalid path regex.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "Invalid regex string specified in path matcher.");
+}
+
+// Tests that LDS client should send a NACK if route has an action other than
+// RouteAction in the LDS response.
+TEST_P(LdsRdsTest, RouteHasNoRouteAction) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect();
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "No RouteAction found in route.");
+}
+
+TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) {
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ route1->mutable_route()->set_cluster("");
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "RouteAction cluster contains empty cluster name.");
+}
+
+TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) {
+ const size_t kWeight75 = 75;
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75 + 1);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "RouteAction weighted_cluster has incorrect total weight");
+}
+
+TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) {
+ const size_t kWeight75 = 75;
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name("");
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(
+ response_state.error_message,
+ "RouteAction weighted_cluster cluster contains empty cluster name.");
+}
+
+TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) {
+ const size_t kWeight75 = 75;
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "RouteAction weighted_cluster cluster missing weight");
+}
+
+TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("header1");
+ header_matcher1->mutable_safe_regex_match()->set_regex("a[z-a]");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "Invalid regex string specified in header matcher.");
+}
+
+TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("header1");
+ header_matcher1->mutable_range_match()->set_start(1001);
+ header_matcher1->mutable_range_match()->set_end(1000);
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ SetRouteConfiguration(0, route_config);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "Invalid range header matcher specifier specified: end "
+ "cannot be smaller than start.");
+}
+
+// Tests that LDS client should choose the default route (with no matching
+// specified) after unable to find a match with previous routes.
+TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEcho2Rpcs = 20;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route3->mutable_match()->set_path("/grpc.testing.EchoTest3Service/Echo3");
+ route3->mutable_route()->set_cluster(kDefaultClusterName);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ WaitForAllBackends(0, 2);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_wait_for_ready(true));
+ CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO2)
+ .set_rpc_method(METHOD_ECHO2)
+ .set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ for (size_t i = 0; i < 2; ++i) {
+ EXPECT_EQ(kNumEchoRpcs / 2,
+ backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
+ }
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
+}
+
+// Verifies xDS path-prefix routing: RPCs whose path begins with
+// "/grpc.testing.EchoTest1Service/" or "/grpc.testing.EchoTest2Service/"
+// are steered to dedicated clusters (backends 2 and 3 respectively), while
+// all other RPCs fall through to the default route (backends 0-1).
+TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEcho2Rpcs = 20;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ // args -> backends [0,2) (default cluster), args1 -> backend 2, args2 -> backend 3.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ // Empty prefix matches every path; this is the catch-all default route.
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ // NOTE(review): the counter expectations below assume WaitForAllBackends
+ // resets the backends' request counters before the counted RPCs are sent —
+ // confirm against the helper's implementation.
+ WaitForAllBackends(0, 2);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(
+ kNumEcho1Rpcs,
+ RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
+ CheckRpcSendOk(
+ kNumEcho2Rpcs,
+ RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ // Default-route Echo RPCs round-robin across backends 0-1, half each.
+ for (size_t i = 0; i < 2; ++i) {
+ EXPECT_EQ(kNumEchoRpcs / 2,
+ backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
+ }
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
+}
+
+// Same routing scenario as XdsRoutingPrefixMatching, but the routes use
+// safe_regex path matchers (".*1.*" / ".*2.*") instead of prefix matchers.
+// Echo1 RPCs land on backend 2, Echo2 RPCs on backend 3, everything else
+// on the default cluster (backends 0-1).
+TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEcho1Rpcs = 10;
+ const size_t kNumEcho2Rpcs = 20;
+ const size_t kNumEchoRpcs = 30;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ // Will match "/grpc.testing.EchoTest1Service/"
+ route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ // Will match "/grpc.testing.EchoTest2Service/"
+ route2->mutable_match()->mutable_safe_regex()->set_regex(".*2.*");
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ // Catch-all default route (empty prefix matches every path).
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ // NOTE(review): assumes WaitForAllBackends resets request counters before
+ // the counted RPCs below — confirm against the helper.
+ WaitForAllBackends(0, 2);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
+ CheckRpcSendOk(
+ kNumEcho1Rpcs,
+ RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
+ CheckRpcSendOk(
+ kNumEcho2Rpcs,
+ RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
+ // Make sure RPCs all go to the correct backend.
+ for (size_t i = 0; i < 2; ++i) {
+ EXPECT_EQ(kNumEchoRpcs / 2,
+ backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
+ }
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
+}
+
+// Verifies weighted_clusters routing: Echo1 RPCs are split 75/25 between
+// two clusters (backends 1 and 2), and the observed per-cluster request
+// counts are checked statistically against the configured weights. Plain
+// Echo RPCs take the catch-all default route to backend 0.
+TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ // A large RPC count is needed so the 75/25 split is statistically stable.
+ const size_t kNumEcho1Rpcs = 1000;
+ const size_t kNumEchoRpcs = 10;
+ const size_t kWeight75 = 75;
+ const size_t kWeight25 = 25;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ auto* weighted_cluster2 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster2->set_name(kNewCluster2Name);
+ weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ // total_weight must equal the sum of the per-cluster weights.
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75 + kWeight25);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ WaitForAllBackends(0, 1);
+ WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ const int weight_75_request_count =
+ backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ const int weight_25_request_count =
+ backends_[2]->backend_service1()->request_count();
+ const double kErrorTolerance = 0.2;
+ EXPECT_THAT(weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 + kErrorTolerance))));
+ // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ // test from flaking while debugging potential root cause.
+ // The 25% target gets fewer RPCs, so its relative variance is larger; a
+ // wider tolerance is used for it than for the 75% target.
+ const double kErrorToleranceSmallLoad = 0.3;
+ gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
+ weight_75_request_count, weight_25_request_count);
+ EXPECT_THAT(weight_25_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 + kErrorToleranceSmallLoad))));
+}
+
+// Verifies that a weighted_clusters action works on the catch-all default
+// route (prefix "") itself: all Echo RPCs are split 75/25 between two new
+// clusters (backends 1 and 2); backend 0 (the original default cluster)
+// receives nothing.
+TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ // Large count so the observed 75/25 split is statistically stable.
+ const size_t kNumEchoRpcs = 1000;
+ const size_t kWeight75 = 75;
+ const size_t kWeight25 = 25;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ // Empty prefix: this weighted route IS the default route.
+ route1->mutable_match()->set_prefix("");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ auto* weighted_cluster2 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster2->set_name(kNewCluster2Name);
+ weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75 + kWeight25);
+ SetRouteConfiguration(0, new_route_config);
+ WaitForAllBackends(1, 3);
+ CheckRpcSendOk(kNumEchoRpcs);
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
+ const int weight_75_request_count =
+ backends_[1]->backend_service()->request_count();
+ const int weight_25_request_count =
+ backends_[2]->backend_service()->request_count();
+ const double kErrorTolerance = 0.2;
+ EXPECT_THAT(weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEchoRpcs * kWeight75 / 100 *
+ (1 + kErrorTolerance))));
+ // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ // test from flaking while debugging potential root cause.
+ // Wider tolerance for the lower-traffic 25% target (higher relative variance).
+ const double kErrorToleranceSmallLoad = 0.3;
+ gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
+ weight_75_request_count, weight_25_request_count);
+ EXPECT_THAT(weight_25_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight25 / 100 *
+ (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(kNumEchoRpcs * kWeight25 / 100 *
+ (1 + kErrorToleranceSmallLoad))));
+}
+
+// Verifies that updating the weights of an existing weighted_clusters route
+// takes effect: first a 75/25 Echo1 split across backends 1-2 is validated,
+// then the weights are changed to 50/50 and re-validated. The default route
+// is simultaneously repointed to a third cluster (backend 3) purely as a
+// sentinel that tells us when the client has picked up the new config.
+TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kNewCluster3Name = "new_cluster_3";
+ const char* kNewEdsService3Name = "new_eds_service_name_3";
+ const size_t kNumEcho1Rpcs = 1000;
+ const size_t kNumEchoRpcs = 10;
+ const size_t kWeight75 = 75;
+ const size_t kWeight25 = 25;
+ const size_t kWeight50 = 50;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args3({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster3.set_name(kNewCluster3Name);
+ new_cluster3.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService3Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
+ // Populating Route Configurations.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ auto* weighted_cluster2 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster2->set_name(kNewCluster2Name);
+ weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75 + kWeight25);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ // Phase 1: validate the 75/25 split.
+ WaitForAllBackends(0, 1);
+ WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ const int weight_75_request_count =
+ backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ const int weight_25_request_count =
+ backends_[2]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ const double kErrorTolerance = 0.2;
+ EXPECT_THAT(weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 + kErrorTolerance))));
+ // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ // test from flaking while debugging potential root cause.
+ // Wider tolerance for the lower-traffic 25% target (higher relative variance).
+ const double kErrorToleranceSmallLoad = 0.3;
+ gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
+ weight_75_request_count, weight_25_request_count);
+ EXPECT_THAT(weight_25_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 + kErrorToleranceSmallLoad))));
+ // Change Route Configurations: same clusters different weights.
+ weighted_cluster1->mutable_weight()->set_value(kWeight50);
+ weighted_cluster2->mutable_weight()->set_value(kWeight50);
+ // Change default route to a new cluster to help to identify when new polices
+ // are seen by the client.
+ default_route->mutable_route()->set_cluster(kNewCluster3Name);
+ SetRouteConfiguration(0, new_route_config);
+ ResetBackendCounters();
+ // Phase 2: traffic on backend 3 (new default cluster) proves the updated
+ // config is active; then validate the 50/50 split.
+ WaitForAllBackends(3, 4);
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ const int weight_50_request_count_1 =
+ backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ const int weight_50_request_count_2 =
+ backends_[2]->backend_service1()->request_count();
+ EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ EXPECT_THAT(weight_50_request_count_1,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 + kErrorTolerance))));
+ EXPECT_THAT(weight_50_request_count_2,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 + kErrorTolerance))));
+}
+
+// Verifies that the set of clusters inside a weighted_clusters route can be
+// swapped out by config updates. Three phases:
+//   1) cluster1(75) + default-cluster(25) — backends 1 and 0.
+//   2) cluster1(50) + cluster2(50)        — backends 1 and 2.
+//   3) cluster1(75) + cluster3(25)        — backends 1 and 3.
+// Each phase resets counters, waits for the newly-added cluster to receive
+// traffic (proving the update was applied), then checks the split.
+TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kNewCluster3Name = "new_cluster_3";
+ const char* kNewEdsService3Name = "new_eds_service_name_3";
+ const size_t kNumEcho1Rpcs = 1000;
+ const size_t kNumEchoRpcs = 10;
+ const size_t kWeight75 = 75;
+ const size_t kWeight25 = 25;
+ const size_t kWeight50 = 50;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args3({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster3.set_name(kNewCluster3Name);
+ new_cluster3.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService3Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
+ // Populating Route Configurations.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* weighted_cluster1 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ weighted_cluster1->set_name(kNewCluster1Name);
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ auto* weighted_cluster2 =
+ route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
+ // Phase 1 pairs cluster1 with the default cluster (backend 0).
+ weighted_cluster2->set_name(kDefaultClusterName);
+ weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ route1->mutable_route()
+ ->mutable_weighted_clusters()
+ ->mutable_total_weight()
+ ->set_value(kWeight75 + kWeight25);
+ auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ WaitForAllBackends(0, 1);
+ WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ int weight_25_request_count =
+ backends_[0]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ int weight_75_request_count =
+ backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ const double kErrorTolerance = 0.2;
+ EXPECT_THAT(weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 + kErrorTolerance))));
+ // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ // test from flaking while debugging potential root cause.
+ // Wider tolerance for the lower-traffic 25% target (higher relative variance).
+ const double kErrorToleranceSmallLoad = 0.3;
+ gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
+ weight_75_request_count, weight_25_request_count);
+ EXPECT_THAT(weight_25_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 + kErrorToleranceSmallLoad))));
+ // Change Route Configurations: new set of clusters with different weights.
+ weighted_cluster1->mutable_weight()->set_value(kWeight50);
+ weighted_cluster2->set_name(kNewCluster2Name);
+ weighted_cluster2->mutable_weight()->set_value(kWeight50);
+ SetRouteConfiguration(0, new_route_config);
+ ResetBackendCounters();
+ // Traffic arriving at backend 2 (cluster2) proves the update is active.
+ WaitForAllBackends(2, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ const int weight_50_request_count_1 =
+ backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ const int weight_50_request_count_2 =
+ backends_[2]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
+ EXPECT_THAT(weight_50_request_count_1,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 + kErrorTolerance))));
+ EXPECT_THAT(weight_50_request_count_2,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
+ (1 + kErrorTolerance))));
+ // Change Route Configurations.
+ weighted_cluster1->mutable_weight()->set_value(kWeight75);
+ weighted_cluster2->set_name(kNewCluster3Name);
+ weighted_cluster2->mutable_weight()->set_value(kWeight25);
+ SetRouteConfiguration(0, new_route_config);
+ ResetBackendCounters();
+ // Traffic arriving at backend 3 (cluster3) proves the update is active.
+ WaitForAllBackends(3, 4, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ weight_75_request_count = backends_[1]->backend_service1()->request_count();
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
+ weight_25_request_count = backends_[3]->backend_service1()->request_count();
+ EXPECT_THAT(weight_75_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 - kErrorTolerance)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
+ (1 + kErrorTolerance))));
+ // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
+ // test from flaking while debugging potential root cause.
+ gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
+ weight_75_request_count, weight_25_request_count);
+ EXPECT_THAT(weight_25_request_count,
+ ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 - kErrorToleranceSmallLoad)),
+ ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
+ (1 + kErrorToleranceSmallLoad))));
+}
+
+// Verifies that repointing the default route's cluster via an RDS update
+// moves traffic: first all RPCs go to the default cluster (backend 0), then
+// the default route is switched to a new cluster and all RPCs move to
+// backend 1.
+TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ const size_t kNumEchoRpcs = 5;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Send Route Configuration.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ SetRouteConfiguration(0, new_route_config);
+ WaitForAllBackends(0, 1);
+ CheckRpcSendOk(kNumEchoRpcs);
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ // Change Route Configurations: new default cluster.
+ auto* default_route =
+ new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ default_route->mutable_route()->set_cluster(kNewClusterName);
+ SetRouteConfiguration(0, new_route_config);
+ // WaitForAllBackends doubles as the signal that the update was applied.
+ WaitForAllBackends(1, 2);
+ CheckRpcSendOk(kNumEchoRpcs);
+ // Make sure RPCs all go to the correct backend.
+ EXPECT_EQ(kNumEchoRpcs, backends_[1]->backend_service()->request_count());
+}
+
+TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Bring down the current backend: 0, this will delay route picking time,
+ // resulting in un-committed RPCs.
+ ShutdownBackend(0);
+ // Send a RouteConfiguration with a default route that points to
+ // backend 0.
+ RouteConfiguration new_route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ SetRouteConfiguration(0, new_route_config);
+ // Send exactly one RPC with no deadline and with wait_for_ready=true.
+ // This RPC will not complete until after backend 0 is started.
+ std::thread sending_rpc([this]() {
+ CheckRpcSendOk(1, RpcOptions().set_wait_for_ready(true).set_timeout_ms(0));
+ });
+ // Send a non-wait_for_ready RPC which should fail, this will tell us
+ // that the client has received the update and attempted to connect.
+ const Status status = SendRpc(RpcOptions().set_timeout_ms(0));
+ EXPECT_FALSE(status.ok());
+ // Send a update RouteConfiguration to use backend 1.
+ auto* default_route =
+ new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ default_route->mutable_route()->set_cluster(kNewClusterName);
+ SetRouteConfiguration(0, new_route_config);
+  // Wait for RPCs to go to the new backend (1); this ensures that the client
+  // has processed the update.
+ WaitForAllBackends(1, 2, false, RpcOptions(), true);
+  // Bring up the previous backend (0); this will allow the delayed RPC to
+ // finally call on_call_committed upon completion.
+ StartBackend(0);
+ sending_rpc.join();
+ // Make sure RPCs go to the correct backend:
+ EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(1, backends_[1]->backend_service()->request_count());
+}
+
+TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ const size_t kNumEcho1Rpcs = 100;
+ const size_t kNumEchoRpcs = 5;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("header1");
+ header_matcher1->set_exact_match("POST,PUT,GET");
+ auto* header_matcher2 = route1->mutable_match()->add_headers();
+ header_matcher2->set_name("header2");
+ header_matcher2->mutable_safe_regex_match()->set_regex("[a-z]*");
+ auto* header_matcher3 = route1->mutable_match()->add_headers();
+ header_matcher3->set_name("header3");
+ header_matcher3->mutable_range_match()->set_start(1);
+ header_matcher3->mutable_range_match()->set_end(1000);
+ auto* header_matcher4 = route1->mutable_match()->add_headers();
+ header_matcher4->set_name("header4");
+ header_matcher4->set_present_match(false);
+ auto* header_matcher5 = route1->mutable_match()->add_headers();
+ header_matcher5->set_name("header5");
+ header_matcher5->set_prefix_match("/grpc");
+ auto* header_matcher6 = route1->mutable_match()->add_headers();
+ header_matcher6->set_name("header6");
+ header_matcher6->set_suffix_match(".cc");
+ header_matcher6->set_invert_match(true);
+ route1->mutable_route()->set_cluster(kNewClusterName);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ std::vector<std::pair<TString, TString>> metadata = {
+ {"header1", "POST"}, {"header2", "blah"},
+ {"header3", "1"}, {"header5", "/grpc.testing.EchoTest1Service/"},
+ {"header1", "PUT"}, {"header6", "grpc.java"},
+ {"header1", "GET"},
+ };
+ const auto header_match_rpc_options = RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_metadata(std::move(metadata));
+ // Make sure all backends are up.
+ WaitForAllBackends(0, 1);
+ WaitForAllBackends(1, 2, true, header_match_rpc_options);
+ // Send RPCs.
+ CheckRpcSendOk(kNumEchoRpcs);
+ CheckRpcSendOk(kNumEcho1Rpcs, header_match_rpc_options);
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ const size_t kNumEchoRpcs = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("content-type");
+ header_matcher1->set_exact_match("notapplication/grpc");
+ route1->mutable_route()->set_cluster(kNewClusterName);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ auto* header_matcher2 = default_route->mutable_match()->add_headers();
+ header_matcher2->set_name("content-type");
+ header_matcher2->set_exact_match("application/grpc");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ // Make sure the backend is up.
+ WaitForAllBackends(0, 1);
+ // Send RPCs.
+ CheckRpcSendOk(kNumEchoRpcs);
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const size_t kNumEchoRpcs = 100;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("grpc-foo-bin");
+ header_matcher1->set_present_match(true);
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_prefix("");
+ auto* header_matcher2 = route2->mutable_match()->add_headers();
+ header_matcher2->set_name("grpc-previous-rpc-attempts");
+ header_matcher2->set_present_match(true);
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+  // Send headers that will fail to match any of the routes above.
+ std::vector<std::pair<TString, TString>> metadata = {
+ {"grpc-foo-bin", "grpc-foo-bin"},
+ {"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"},
+ };
+ WaitForAllBackends(0, 1);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
+ // Verify that only the default backend got RPCs since all previous routes
+ // were mismatched.
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ const size_t kNumRpcs = 1000;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()
+ ->mutable_runtime_fraction()
+ ->mutable_default_value()
+ ->set_numerator(25);
+ route1->mutable_route()->set_cluster(kNewClusterName);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ WaitForAllBackends(0, 2);
+ CheckRpcSendOk(kNumRpcs);
+ const int default_backend_count =
+ backends_[0]->backend_service()->request_count();
+ const int matched_backend_count =
+ backends_[1]->backend_service()->request_count();
+ const double kErrorTolerance = 0.2;
+ EXPECT_THAT(default_backend_count,
+ ::testing::AllOf(
+ ::testing::Ge(kNumRpcs * 75 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(kNumRpcs * 75 / 100 * (1 + kErrorTolerance))));
+ EXPECT_THAT(matched_backend_count,
+ ::testing::AllOf(
+ ::testing::Ge(kNumRpcs * 25 / 100 * (1 - kErrorTolerance)),
+ ::testing::Le(kNumRpcs * 25 / 100 * (1 + kErrorTolerance))));
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) {
+ const char* kNewCluster1Name = "new_cluster_1";
+ const char* kNewEdsService1Name = "new_eds_service_name_1";
+ const char* kNewCluster2Name = "new_cluster_2";
+ const char* kNewEdsService2Name = "new_eds_service_name_2";
+ const char* kNewCluster3Name = "new_cluster_3";
+ const char* kNewEdsService3Name = "new_eds_service_name_3";
+ const size_t kNumEcho1Rpcs = 100;
+ const size_t kNumEchoRpcs = 5;
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ AdsServiceImpl::EdsResourceArgs args2({
+ {"locality0", GetBackendPorts(2, 3)},
+ });
+ AdsServiceImpl::EdsResourceArgs args3({
+ {"locality0", GetBackendPorts(3, 4)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
+ // Populate new CDS resources.
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster1.set_name(kNewCluster1Name);
+ new_cluster1.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService1Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+ Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster2.set_name(kNewCluster2Name);
+ new_cluster2.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService2Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+ Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
+ new_cluster3.set_name(kNewCluster3Name);
+ new_cluster3.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsService3Name);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher1 = route1->mutable_match()->add_headers();
+ header_matcher1->set_name("header1");
+ header_matcher1->set_exact_match("POST");
+ route1->mutable_route()->set_cluster(kNewCluster1Name);
+ auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
+ route2->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher2 = route2->mutable_match()->add_headers();
+ header_matcher2->set_name("header2");
+ header_matcher2->mutable_range_match()->set_start(1);
+ header_matcher2->mutable_range_match()->set_end(1000);
+ route2->mutable_route()->set_cluster(kNewCluster2Name);
+ auto route3 = route_config.mutable_virtual_hosts(0)->add_routes();
+ route3->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ auto* header_matcher3 = route3->mutable_match()->add_headers();
+ header_matcher3->set_name("header3");
+ header_matcher3->mutable_safe_regex_match()->set_regex("[a-z]*");
+ route3->mutable_route()->set_cluster(kNewCluster3Name);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+  // Send headers that will fail to match any of the routes above.
+ std::vector<std::pair<TString, TString>> metadata = {
+ {"header1", "POST"},
+ {"header2", "1000"},
+ {"header3", "123"},
+ {"header1", "GET"},
+ };
+ WaitForAllBackends(0, 1);
+ CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
+ CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
+ .set_rpc_service(SERVICE_ECHO1)
+ .set_rpc_method(METHOD_ECHO1)
+ .set_metadata(metadata));
+ // Verify that only the default backend got RPCs since all previous routes
+ // were mismatched.
+ for (size_t i = 1; i < 4; ++i) {
+ EXPECT_EQ(0, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
+ }
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
+ const auto& response_state = RouteConfigurationResponseState(0);
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) {
+ const char* kNewClusterName = "new_cluster";
+ const char* kNewEdsServiceName = "new_eds_service_name";
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ AdsServiceImpl::EdsResourceArgs args1({
+ {"locality0", GetBackendPorts(1, 2)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+ // Populate new CDS resources.
+ Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+ new_cluster.set_name(kNewClusterName);
+ new_cluster.mutable_eds_cluster_config()->set_service_name(
+ kNewEdsServiceName);
+ balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+ // Populating Route Configurations for LDS.
+ RouteConfiguration route_config =
+ balancers_[0]->ads_service()->default_route_config();
+ auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+ route1->mutable_route()->set_cluster(kNewClusterName);
+ auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+ default_route->mutable_match()->set_prefix("");
+ default_route->mutable_route()->set_cluster(kDefaultClusterName);
+ SetRouteConfiguration(0, route_config);
+ // Make sure all backends are up and that requests for each RPC
+ // service go to the right backends.
+ WaitForAllBackends(0, 1, false);
+ WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
+ // Requests for services Echo and Echo2 should have gone to backend 0.
+ EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(1, backends_[0]->backend_service2()->request_count());
+ // Requests for service Echo1 should have gone to backend 1.
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(1, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
+ // Now send an update that changes the first route to match a
+ // different RPC service, and wait for the client to make the change.
+ route1->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
+ SetRouteConfiguration(0, route_config);
+ WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO2));
+ // Now repeat the earlier test, making sure all traffic goes to the
+ // right place.
+ WaitForAllBackends(0, 1, false);
+ WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
+ WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
+ // Requests for services Echo and Echo1 should have gone to backend 0.
+ EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
+ EXPECT_EQ(1, backends_[0]->backend_service1()->request_count());
+ EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
+ // Requests for service Echo2 should have gone to backend 1.
+ EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+ EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
+ EXPECT_EQ(1, backends_[1]->backend_service2()->request_count());
+}
+
+using CdsTest = BasicTest;
+
+// Tests that CDS client should send an ACK upon correct CDS response.
+TEST_P(CdsTest, Vanilla) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ (void)SendRpc();
+ EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
+ AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that CDS client should send a NACK if the cluster type in CDS response
+// is other than EDS.
+TEST_P(CdsTest, WrongClusterType) {
+ auto cluster = balancers_[0]->ads_service()->default_cluster();
+ cluster.set_type(Cluster::STATIC);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "DiscoveryType is not EDS.");
+}
+
+// Tests that CDS client should send a NACK if the eds_config in CDS response is
+// other than ADS.
+TEST_P(CdsTest, WrongEdsConfig) {
+ auto cluster = balancers_[0]->ads_service()->default_cluster();
+ cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self();
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "EDS ConfigSource is not ADS.");
+}
+
+// Tests that CDS client should send a NACK if the lb_policy in CDS response is
+// other than ROUND_ROBIN.
+TEST_P(CdsTest, WrongLbPolicy) {
+ auto cluster = balancers_[0]->ads_service()->default_cluster();
+ cluster.set_lb_policy(Cluster::LEAST_REQUEST);
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "LB policy is not ROUND_ROBIN.");
+}
+
+// Tests that CDS client should send a NACK if the lrs_server in CDS response is
+// other than SELF.
+TEST_P(CdsTest, WrongLrsServer) {
+ auto cluster = balancers_[0]->ads_service()->default_cluster();
+ cluster.mutable_lrs_server()->mutable_ads();
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->cds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message, "LRS ConfigSource is not self.");
+}
+
+using EdsTest = BasicTest;
+
+// Tests that EDS client should send a NACK if the EDS update contains
+// sparse priorities.
+TEST_P(EdsTest, NacksSparsePriorityList) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args));
+ CheckRpcSendFailure();
+ const auto& response_state =
+ balancers_[0]->ads_service()->eds_response_state();
+ EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+ EXPECT_EQ(response_state.error_message,
+ "EDS update includes sparse priority list");
+}
+
+// In most of our tests, we use different names for different resource
+// types, to make sure that there are no cut-and-paste errors in the code
+// that cause us to look at data for the wrong resource type. So we add
+// this test to make sure that the EDS resource name defaults to the
+// cluster name if not specified in the CDS resource.
+TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) {
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, kDefaultClusterName));
+ Cluster cluster = balancers_[0]->ads_service()->default_cluster();
+ cluster.mutable_eds_cluster_config()->clear_service_name();
+ balancers_[0]->ads_service()->SetCdsResource(cluster);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendOk();
+}
+
+class TimeoutTest : public BasicTest {
+ protected:
+ void SetUp() override {
+ xds_resource_does_not_exist_timeout_ms_ = 500;
+ BasicTest::SetUp();
+ }
+};
+
+// Tests that LDS client times out when no response received.
+TEST_P(TimeoutTest, Lds) {
+ balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+}
+
+TEST_P(TimeoutTest, Rds) {
+ balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+}
+
+// Tests that CDS client times out when no response received.
+TEST_P(TimeoutTest, Cds) {
+ balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
+}
+
+TEST_P(TimeoutTest, Eds) {
+ balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl);
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ CheckRpcSendFailure();
}
-using LdsTest = BasicTest;
-
-// Tests that LDS client should send a NACK if there is no API listener in the
-// Listener in the LDS response.
-TEST_P(LdsTest, NoApiListener) {
- auto listener = balancers_[0]->ads_service()->default_listener();
- listener.clear_api_listener();
- balancers_[0]->ads_service()->SetLdsResource(listener);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->lds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "Listener has no ApiListener.");
-}
-
-// Tests that LDS client should send a NACK if the route_specifier in the
-// http_connection_manager is neither inlined route_config nor RDS.
-TEST_P(LdsTest, WrongRouteSpecifier) {
- auto listener = balancers_[0]->ads_service()->default_listener();
- HttpConnectionManager http_connection_manager;
- http_connection_manager.mutable_scoped_routes();
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->lds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager neither has inlined route_config nor RDS.");
-}
-
-// Tests that LDS client should send a NACK if the rds message in the
-// http_connection_manager is missing the config_source field.
-TEST_P(LdsTest, RdsMissingConfigSource) {
- auto listener = balancers_[0]->ads_service()->default_listener();
- HttpConnectionManager http_connection_manager;
- http_connection_manager.mutable_rds()->set_route_config_name(
- kDefaultRouteConfigurationName);
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->lds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager missing config_source for RDS.");
-}
-
-// Tests that LDS client should send a NACK if the rds message in the
-// http_connection_manager has a config_source field that does not specify ADS.
-TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) {
- auto listener = balancers_[0]->ads_service()->default_listener();
- HttpConnectionManager http_connection_manager;
- auto* rds = http_connection_manager.mutable_rds();
- rds->set_route_config_name(kDefaultRouteConfigurationName);
- rds->mutable_config_source()->mutable_self();
- listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
- http_connection_manager);
- balancers_[0]->ads_service()->SetLdsResource(listener);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->lds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "HttpConnectionManager ConfigSource for RDS does not specify ADS.");
-}
-
-using LdsRdsTest = BasicTest;
-
-// Tests that LDS client should send an ACK upon correct LDS response (with
-// inlined RDS result).
-TEST_P(LdsRdsTest, Vanilla) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- (void)SendRpc();
- EXPECT_EQ(RouteConfigurationResponseState(0).state,
- AdsServiceImpl::ResponseState::ACKED);
- // Make sure we actually used the RPC service for the right version of xDS.
- EXPECT_EQ(balancers_[0]->ads_service()->seen_v2_client(),
- GetParam().use_v2());
- EXPECT_NE(balancers_[0]->ads_service()->seen_v3_client(),
- GetParam().use_v2());
-}
-
-// Tests that we go into TRANSIENT_FAILURE if the Listener is removed.
-TEST_P(LdsRdsTest, ListenerRemoved) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- // Unset LDS resource.
- balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName);
- // Wait for RPCs to start failing.
- do {
- } while (SendRpc(RpcOptions(), nullptr).ok());
- // Make sure RPCs are still failing.
- CheckRpcSendFailure(1000);
- // Make sure we ACK'ed the update.
- EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
- AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client ACKs but fails if matching domain can't be found in
-// the LDS response.
-TEST_P(LdsRdsTest, NoMatchedDomain) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- route_config.mutable_virtual_hosts(0)->clear_domains();
- route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- // Do a bit of polling, to allow the ACK to get to the ADS server.
- channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100));
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should choose the virtual host with matching domain if
-// multiple virtual hosts exist in the LDS response.
-TEST_P(LdsRdsTest, ChooseMatchedDomain) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- *(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0);
- route_config.mutable_virtual_hosts(0)->clear_domains();
- route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- (void)SendRpc();
- EXPECT_EQ(RouteConfigurationResponseState(0).state,
- AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should choose the last route in the virtual host if
-// multiple routes exist in the LDS response.
-TEST_P(LdsRdsTest, ChooseLastRoute) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- *(route_config.mutable_virtual_hosts(0)->add_routes()) =
- route_config.virtual_hosts(0).routes(0);
- route_config.mutable_virtual_hosts(0)
- ->mutable_routes(0)
- ->mutable_route()
- ->mutable_cluster_header();
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- (void)SendRpc();
- EXPECT_EQ(RouteConfigurationResponseState(0).state,
- AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should send a NACK if route match has a case_sensitive
-// set to false.
-TEST_P(LdsRdsTest, RouteMatchHasCaseSensitiveFalse) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->mutable_case_sensitive()->set_value(false);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "case_sensitive if set must be set to true.");
-}
-
-// Tests that LDS client should ignore route which has query_parameters.
-TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- route1->mutable_match()->add_query_parameters();
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should send a ACK if route match has a prefix
-// that is either empty or a single slash
-TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("");
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("/");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- (void)SendRpc();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should ignore route which has a path
-// prefix string does not start with "/".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has a prefix
-// string with more than 2 slashes.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has a prefix
-// string "//".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("//");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// but it's empty.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// string does not start with "/".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// string that has too many slashes; for example, ends with "/".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// string that has only 1 slash: missing "/" between service and method.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// string that is missing service.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("//Echo1");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore route which has path
-// string that is missing method.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/");
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Test that LDS client should reject route which has invalid path regex.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) {
- const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid regex string specified in path matcher.");
-}
-
-// Tests that LDS client should send a NACK if route has an action other than
-// RouteAction in the LDS response.
-TEST_P(LdsRdsTest, RouteHasNoRouteAction) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect();
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "No RouteAction found in route.");
-}
-
-TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) {
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- route1->mutable_route()->set_cluster("");
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction cluster contains empty cluster name.");
-}
-
-TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) {
- const size_t kWeight75 = 75;
- const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75 + 1);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction weighted_cluster has incorrect total weight");
-}
-
-TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) {
- const size_t kWeight75 = 75;
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name("");
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(
- response_state.error_message,
- "RouteAction weighted_cluster cluster contains empty cluster name.");
-}
-
-TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) {
- const size_t kWeight75 = 75;
- const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "RouteAction weighted_cluster cluster missing weight");
-}
-
-TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) {
- const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("header1");
- header_matcher1->mutable_safe_regex_match()->set_regex("a[z-a]");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid regex string specified in header matcher.");
-}
-
-TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
- const char* kNewCluster1Name = "new_cluster_1";
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("header1");
- header_matcher1->mutable_range_match()->set_start(1001);
- header_matcher1->mutable_range_match()->set_end(1000);
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- SetRouteConfiguration(0, route_config);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "Invalid range header matcher specifier specified: end "
- "cannot be smaller than start.");
-}
-
-// Tests that LDS client should choose the default route (with no matching
-// specified) after unable to find a match with previous routes.
-TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEcho1Rpcs = 10;
- const size_t kNumEcho2Rpcs = 20;
- const size_t kNumEchoRpcs = 30;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
- route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
- route2->mutable_route()->set_cluster(kNewCluster2Name);
- auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes();
- route3->mutable_match()->set_path("/grpc.testing.EchoTest3Service/Echo3");
- route3->mutable_route()->set_cluster(kDefaultClusterName);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 2);
- CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
- .set_rpc_service(SERVICE_ECHO1)
- .set_rpc_method(METHOD_ECHO1)
- .set_wait_for_ready(true));
- CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions()
- .set_rpc_service(SERVICE_ECHO2)
- .set_rpc_method(METHOD_ECHO2)
- .set_wait_for_ready(true));
- // Make sure RPCs all go to the correct backend.
- for (size_t i = 0; i < 2; ++i) {
- EXPECT_EQ(kNumEchoRpcs / 2,
- backends_[i]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
- }
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
-}
-
-TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEcho1Rpcs = 10;
- const size_t kNumEcho2Rpcs = 20;
- const size_t kNumEchoRpcs = 30;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
- route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
- route2->mutable_route()->set_cluster(kNewCluster2Name);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 2);
- CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
- CheckRpcSendOk(
- kNumEcho1Rpcs,
- RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
- CheckRpcSendOk(
- kNumEcho2Rpcs,
- RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
- // Make sure RPCs all go to the correct backend.
- for (size_t i = 0; i < 2; ++i) {
- EXPECT_EQ(kNumEchoRpcs / 2,
- backends_[i]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
- }
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
-}
-
-TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEcho1Rpcs = 10;
- const size_t kNumEcho2Rpcs = 20;
- const size_t kNumEchoRpcs = 30;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- // Will match "/grpc.testing.EchoTest1Service/"
- route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
- // Will match "/grpc.testing.EchoTest2Service/"
- route2->mutable_match()->mutable_safe_regex()->set_regex(".*2.*");
- route2->mutable_route()->set_cluster(kNewCluster2Name);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 2);
- CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
- CheckRpcSendOk(
- kNumEcho1Rpcs,
- RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
- CheckRpcSendOk(
- kNumEcho2Rpcs,
- RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
- // Make sure RPCs all go to the correct backend.
- for (size_t i = 0; i < 2; ++i) {
- EXPECT_EQ(kNumEchoRpcs / 2,
- backends_[i]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
- }
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
-}
-
-TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEcho1Rpcs = 1000;
- const size_t kNumEchoRpcs = 10;
- const size_t kWeight75 = 75;
- const size_t kWeight25 = 25;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- auto* weighted_cluster2 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster2->set_name(kNewCluster2Name);
- weighted_cluster2->mutable_weight()->set_value(kWeight25);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75 + kWeight25);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 1);
- WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const int weight_75_request_count =
- backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const int weight_25_request_count =
- backends_[2]->backend_service1()->request_count();
- const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
- // test from flaking while debugging potential root cause.
- const double kErrorToleranceSmallLoad = 0.3;
- gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
- weight_75_request_count, weight_25_request_count);
- EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
-}
-
-TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEchoRpcs = 1000;
- const size_t kWeight75 = 75;
- const size_t kWeight25 = 25;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- auto* weighted_cluster2 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster2->set_name(kNewCluster2Name);
- weighted_cluster2->mutable_weight()->set_value(kWeight25);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75 + kWeight25);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(1, 3);
- CheckRpcSendOk(kNumEchoRpcs);
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
- const int weight_75_request_count =
- backends_[1]->backend_service()->request_count();
- const int weight_25_request_count =
- backends_[2]->backend_service()->request_count();
- const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEchoRpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
- // test from flaking while debugging potential root cause.
- const double kErrorToleranceSmallLoad = 0.3;
- gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
- weight_75_request_count, weight_25_request_count);
- EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEchoRpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
-}
-
-TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const char* kNewCluster3Name = "new_cluster_3";
- const char* kNewEdsService3Name = "new_eds_service_name_3";
- const size_t kNumEcho1Rpcs = 1000;
- const size_t kNumEchoRpcs = 10;
- const size_t kWeight75 = 75;
- const size_t kWeight25 = 25;
- const size_t kWeight50 = 50;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args3({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
- new_cluster3.set_name(kNewCluster3Name);
- new_cluster3.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService3Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
- // Populating Route Configurations.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- auto* weighted_cluster2 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster2->set_name(kNewCluster2Name);
- weighted_cluster2->mutable_weight()->set_value(kWeight25);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75 + kWeight25);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 1);
- WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const int weight_75_request_count =
- backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const int weight_25_request_count =
- backends_[2]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
- // test from flaking while debugging potential root cause.
- const double kErrorToleranceSmallLoad = 0.3;
- gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
- weight_75_request_count, weight_25_request_count);
- EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
- // Change Route Configurations: same clusters different weights.
- weighted_cluster1->mutable_weight()->set_value(kWeight50);
- weighted_cluster2->mutable_weight()->set_value(kWeight50);
- // Change default route to a new cluster to help to identify when new polices
- // are seen by the client.
- default_route->mutable_route()->set_cluster(kNewCluster3Name);
- SetRouteConfiguration(0, new_route_config);
- ResetBackendCounters();
- WaitForAllBackends(3, 4);
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const int weight_50_request_count_1 =
- backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const int weight_50_request_count_2 =
- backends_[2]->backend_service1()->request_count();
- EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_THAT(weight_50_request_count_1,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
- EXPECT_THAT(weight_50_request_count_2,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
-}
-
-TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const char* kNewCluster3Name = "new_cluster_3";
- const char* kNewEdsService3Name = "new_eds_service_name_3";
- const size_t kNumEcho1Rpcs = 1000;
- const size_t kNumEchoRpcs = 10;
- const size_t kWeight75 = 75;
- const size_t kWeight25 = 25;
- const size_t kWeight50 = 50;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args3({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
- new_cluster3.set_name(kNewCluster3Name);
- new_cluster3.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService3Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
- // Populating Route Configurations.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* weighted_cluster1 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster1->set_name(kNewCluster1Name);
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- auto* weighted_cluster2 =
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
- weighted_cluster2->set_name(kDefaultClusterName);
- weighted_cluster2->mutable_weight()->set_value(kWeight25);
- route1->mutable_route()
- ->mutable_weighted_clusters()
- ->mutable_total_weight()
- ->set_value(kWeight75 + kWeight25);
- auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 1);
- WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- int weight_25_request_count =
- backends_[0]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- int weight_75_request_count =
- backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- const double kErrorTolerance = 0.2;
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
- // test from flaking while debugging potential root cause.
- const double kErrorToleranceSmallLoad = 0.3;
- gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
- weight_75_request_count, weight_25_request_count);
- EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
- // Change Route Configurations: new set of clusters with different weights.
- weighted_cluster1->mutable_weight()->set_value(kWeight50);
- weighted_cluster2->set_name(kNewCluster2Name);
- weighted_cluster2->mutable_weight()->set_value(kWeight50);
- SetRouteConfiguration(0, new_route_config);
- ResetBackendCounters();
- WaitForAllBackends(2, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const int weight_50_request_count_1 =
- backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const int weight_50_request_count_2 =
- backends_[2]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
- EXPECT_THAT(weight_50_request_count_1,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
- EXPECT_THAT(weight_50_request_count_2,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 *
- (1 + kErrorTolerance))));
- // Change Route Configurations.
- weighted_cluster1->mutable_weight()->set_value(kWeight75);
- weighted_cluster2->set_name(kNewCluster3Name);
- weighted_cluster2->mutable_weight()->set_value(kWeight25);
- SetRouteConfiguration(0, new_route_config);
- ResetBackendCounters();
- WaitForAllBackends(3, 4, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- weight_75_request_count = backends_[1]->backend_service1()->request_count();
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
- weight_25_request_count = backends_[3]->backend_service1()->request_count();
- EXPECT_THAT(weight_75_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 - kErrorTolerance)),
- ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 *
- (1 + kErrorTolerance))));
- // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the
- // test from flaking while debugging potential root cause.
- gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
- weight_75_request_count, weight_25_request_count);
- EXPECT_THAT(weight_25_request_count,
- ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 - kErrorToleranceSmallLoad)),
- ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 *
- (1 + kErrorToleranceSmallLoad))));
-}
-
-TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- const size_t kNumEchoRpcs = 5;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Send Route Configuration.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(0, 1);
- CheckRpcSendOk(kNumEchoRpcs);
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- // Change Route Configurations: new default cluster.
- auto* default_route =
- new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- default_route->mutable_route()->set_cluster(kNewClusterName);
- SetRouteConfiguration(0, new_route_config);
- WaitForAllBackends(1, 2);
- CheckRpcSendOk(kNumEchoRpcs);
- // Make sure RPCs all go to the correct backend.
- EXPECT_EQ(kNumEchoRpcs, backends_[1]->backend_service()->request_count());
-}
-
-TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Bring down the current backend: 0, this will delay route picking time,
- // resulting in un-committed RPCs.
- ShutdownBackend(0);
- // Send a RouteConfiguration with a default route that points to
- // backend 0.
- RouteConfiguration new_route_config =
- balancers_[0]->ads_service()->default_route_config();
- SetRouteConfiguration(0, new_route_config);
- // Send exactly one RPC with no deadline and with wait_for_ready=true.
- // This RPC will not complete until after backend 0 is started.
- std::thread sending_rpc([this]() {
- CheckRpcSendOk(1, RpcOptions().set_wait_for_ready(true).set_timeout_ms(0));
- });
- // Send a non-wait_for_ready RPC which should fail, this will tell us
- // that the client has received the update and attempted to connect.
- const Status status = SendRpc(RpcOptions().set_timeout_ms(0));
- EXPECT_FALSE(status.ok());
- // Send a update RouteConfiguration to use backend 1.
- auto* default_route =
- new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- default_route->mutable_route()->set_cluster(kNewClusterName);
- SetRouteConfiguration(0, new_route_config);
- // Wait for RPCs to go to the new backend: 1, this ensures that the client has
- // processed the update.
- WaitForAllBackends(1, 2, false, RpcOptions(), true);
- // Bring up the previous backend: 0, this will allow the delayed RPC to
- // finally call on_call_committed upon completion.
- StartBackend(0);
- sending_rpc.join();
- // Make sure RPCs go to the correct backend:
- EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(1, backends_[1]->backend_service()->request_count());
-}
-
-TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- const size_t kNumEcho1Rpcs = 100;
- const size_t kNumEchoRpcs = 5;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("header1");
- header_matcher1->set_exact_match("POST,PUT,GET");
- auto* header_matcher2 = route1->mutable_match()->add_headers();
- header_matcher2->set_name("header2");
- header_matcher2->mutable_safe_regex_match()->set_regex("[a-z]*");
- auto* header_matcher3 = route1->mutable_match()->add_headers();
- header_matcher3->set_name("header3");
- header_matcher3->mutable_range_match()->set_start(1);
- header_matcher3->mutable_range_match()->set_end(1000);
- auto* header_matcher4 = route1->mutable_match()->add_headers();
- header_matcher4->set_name("header4");
- header_matcher4->set_present_match(false);
- auto* header_matcher5 = route1->mutable_match()->add_headers();
- header_matcher5->set_name("header5");
- header_matcher5->set_prefix_match("/grpc");
- auto* header_matcher6 = route1->mutable_match()->add_headers();
- header_matcher6->set_name("header6");
- header_matcher6->set_suffix_match(".cc");
- header_matcher6->set_invert_match(true);
- route1->mutable_route()->set_cluster(kNewClusterName);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- std::vector<std::pair<TString, TString>> metadata = {
- {"header1", "POST"}, {"header2", "blah"},
- {"header3", "1"}, {"header5", "/grpc.testing.EchoTest1Service/"},
- {"header1", "PUT"}, {"header6", "grpc.java"},
- {"header1", "GET"},
- };
- const auto header_match_rpc_options = RpcOptions()
- .set_rpc_service(SERVICE_ECHO1)
- .set_rpc_method(METHOD_ECHO1)
- .set_metadata(std::move(metadata));
- // Make sure all backends are up.
- WaitForAllBackends(0, 1);
- WaitForAllBackends(1, 2, true, header_match_rpc_options);
- // Send RPCs.
- CheckRpcSendOk(kNumEchoRpcs);
- CheckRpcSendOk(kNumEcho1Rpcs, header_match_rpc_options);
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- const size_t kNumEchoRpcs = 100;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("content-type");
- header_matcher1->set_exact_match("notapplication/grpc");
- route1->mutable_route()->set_cluster(kNewClusterName);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- auto* header_matcher2 = default_route->mutable_match()->add_headers();
- header_matcher2->set_name("content-type");
- header_matcher2->set_exact_match("application/grpc");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- // Make sure the backend is up.
- WaitForAllBackends(0, 1);
- // Send RPCs.
- CheckRpcSendOk(kNumEchoRpcs);
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const size_t kNumEchoRpcs = 100;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("grpc-foo-bin");
- header_matcher1->set_present_match(true);
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
- route2->mutable_match()->set_prefix("");
- auto* header_matcher2 = route2->mutable_match()->add_headers();
- header_matcher2->set_name("grpc-previous-rpc-attempts");
- header_matcher2->set_present_match(true);
- route2->mutable_route()->set_cluster(kNewCluster2Name);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- // Send headers which will mismatch each route
- std::vector<std::pair<TString, TString>> metadata = {
- {"grpc-foo-bin", "grpc-foo-bin"},
- {"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"},
- };
- WaitForAllBackends(0, 1);
- CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
- // Verify that only the default backend got RPCs since all previous routes
- // were mismatched.
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- const size_t kNumRpcs = 1000;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()
- ->mutable_runtime_fraction()
- ->mutable_default_value()
- ->set_numerator(25);
- route1->mutable_route()->set_cluster(kNewClusterName);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- WaitForAllBackends(0, 2);
- CheckRpcSendOk(kNumRpcs);
- const int default_backend_count =
- backends_[0]->backend_service()->request_count();
- const int matched_backend_count =
- backends_[1]->backend_service()->request_count();
- const double kErrorTolerance = 0.2;
- EXPECT_THAT(default_backend_count,
- ::testing::AllOf(
- ::testing::Ge(kNumRpcs * 75 / 100 * (1 - kErrorTolerance)),
- ::testing::Le(kNumRpcs * 75 / 100 * (1 + kErrorTolerance))));
- EXPECT_THAT(matched_backend_count,
- ::testing::AllOf(
- ::testing::Ge(kNumRpcs * 25 / 100 * (1 - kErrorTolerance)),
- ::testing::Le(kNumRpcs * 25 / 100 * (1 + kErrorTolerance))));
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) {
- const char* kNewCluster1Name = "new_cluster_1";
- const char* kNewEdsService1Name = "new_eds_service_name_1";
- const char* kNewCluster2Name = "new_cluster_2";
- const char* kNewEdsService2Name = "new_eds_service_name_2";
- const char* kNewCluster3Name = "new_cluster_3";
- const char* kNewEdsService3Name = "new_eds_service_name_3";
- const size_t kNumEcho1Rpcs = 100;
- const size_t kNumEchoRpcs = 5;
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- AdsServiceImpl::EdsResourceArgs args2({
- {"locality0", GetBackendPorts(2, 3)},
- });
- AdsServiceImpl::EdsResourceArgs args3({
- {"locality0", GetBackendPorts(3, 4)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name));
- // Populate new CDS resources.
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
- new_cluster1.set_name(kNewCluster1Name);
- new_cluster1.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService1Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
- Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
- new_cluster2.set_name(kNewCluster2Name);
- new_cluster2.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService2Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
- Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster();
- new_cluster3.set_name(kNewCluster3Name);
- new_cluster3.mutable_eds_cluster_config()->set_service_name(
- kNewEdsService3Name);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher1 = route1->mutable_match()->add_headers();
- header_matcher1->set_name("header1");
- header_matcher1->set_exact_match("POST");
- route1->mutable_route()->set_cluster(kNewCluster1Name);
- auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
- route2->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher2 = route2->mutable_match()->add_headers();
- header_matcher2->set_name("header2");
- header_matcher2->mutable_range_match()->set_start(1);
- header_matcher2->mutable_range_match()->set_end(1000);
- route2->mutable_route()->set_cluster(kNewCluster2Name);
- auto route3 = route_config.mutable_virtual_hosts(0)->add_routes();
- route3->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- auto* header_matcher3 = route3->mutable_match()->add_headers();
- header_matcher3->set_name("header3");
- header_matcher3->mutable_safe_regex_match()->set_regex("[a-z]*");
- route3->mutable_route()->set_cluster(kNewCluster3Name);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- // Send headers which will mismatch each route
- std::vector<std::pair<TString, TString>> metadata = {
- {"header1", "POST"},
- {"header2", "1000"},
- {"header3", "123"},
- {"header1", "GET"},
- };
- WaitForAllBackends(0, 1);
- CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
- CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
- .set_rpc_service(SERVICE_ECHO1)
- .set_rpc_method(METHOD_ECHO1)
- .set_metadata(metadata));
- // Verify that only the default backend got RPCs since all previous routes
- // were mismatched.
- for (size_t i = 1; i < 4; ++i) {
- EXPECT_EQ(0, backends_[i]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
- }
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
- const auto& response_state = RouteConfigurationResponseState(0);
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) {
- const char* kNewClusterName = "new_cluster";
- const char* kNewEdsServiceName = "new_eds_service_name";
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // Populate new EDS resources.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- AdsServiceImpl::EdsResourceArgs args1({
- {"locality0", GetBackendPorts(1, 2)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
- // Populate new CDS resources.
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
- new_cluster.set_name(kNewClusterName);
- new_cluster.mutable_eds_cluster_config()->set_service_name(
- kNewEdsServiceName);
- balancers_[0]->ads_service()->SetCdsResource(new_cluster);
- // Populating Route Configurations for LDS.
- RouteConfiguration route_config =
- balancers_[0]->ads_service()->default_route_config();
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
- route1->mutable_route()->set_cluster(kNewClusterName);
- auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
- default_route->mutable_match()->set_prefix("");
- default_route->mutable_route()->set_cluster(kDefaultClusterName);
- SetRouteConfiguration(0, route_config);
- // Make sure all backends are up and that requests for each RPC
- // service go to the right backends.
- WaitForAllBackends(0, 1, false);
- WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
- // Requests for services Echo and Echo2 should have gone to backend 0.
- EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(1, backends_[0]->backend_service2()->request_count());
- // Requests for service Echo1 should have gone to backend 1.
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(1, backends_[1]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
- // Now send an update that changes the first route to match a
- // different RPC service, and wait for the client to make the change.
- route1->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
- SetRouteConfiguration(0, route_config);
- WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO2));
- // Now repeat the earlier test, making sure all traffic goes to the
- // right place.
- WaitForAllBackends(0, 1, false);
- WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
- WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
- // Requests for services Echo and Echo1 should have gone to backend 0.
- EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
- EXPECT_EQ(1, backends_[0]->backend_service1()->request_count());
- EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
- // Requests for service Echo2 should have gone to backend 1.
- EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
- EXPECT_EQ(1, backends_[1]->backend_service2()->request_count());
-}
-
-using CdsTest = BasicTest;
-
-// Tests that CDS client should send an ACK upon correct CDS response.
-TEST_P(CdsTest, Vanilla) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- (void)SendRpc();
- EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
- AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that CDS client should send a NACK if the cluster type in CDS response
-// is other than EDS.
-TEST_P(CdsTest, WrongClusterType) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
- cluster.set_type(Cluster::STATIC);
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->cds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "DiscoveryType is not EDS.");
-}
-
-// Tests that CDS client should send a NACK if the eds_config in CDS response is
-// other than ADS.
-TEST_P(CdsTest, WrongEdsConfig) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
- cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self();
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->cds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "EDS ConfigSource is not ADS.");
-}
-
-// Tests that CDS client should send a NACK if the lb_policy in CDS response is
-// other than ROUND_ROBIN.
-TEST_P(CdsTest, WrongLbPolicy) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
- cluster.set_lb_policy(Cluster::LEAST_REQUEST);
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->cds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "LB policy is not ROUND_ROBIN.");
-}
-
-// Tests that CDS client should send a NACK if the lrs_server in CDS response is
-// other than SELF.
-TEST_P(CdsTest, WrongLrsServer) {
- auto cluster = balancers_[0]->ads_service()->default_cluster();
- cluster.mutable_lrs_server()->mutable_ads();
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->cds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message, "LRS ConfigSource is not self.");
-}
-
-using EdsTest = BasicTest;
-
-// Tests that EDS client should send a NACK if the EDS update contains
-// sparse priorities.
-TEST_P(EdsTest, NacksSparsePriorityList) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args));
- CheckRpcSendFailure();
- const auto& response_state =
- balancers_[0]->ads_service()->eds_response_state();
- EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
- EXPECT_EQ(response_state.error_message,
- "EDS update includes sparse priority list");
-}
-
-// In most of our tests, we use different names for different resource
-// types, to make sure that there are no cut-and-paste errors in the code
-// that cause us to look at data for the wrong resource type. So we add
-// this test to make sure that the EDS resource name defaults to the
-// cluster name if not specified in the CDS resource.
-TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) {
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, kDefaultClusterName));
- Cluster cluster = balancers_[0]->ads_service()->default_cluster();
- cluster.mutable_eds_cluster_config()->clear_service_name();
- balancers_[0]->ads_service()->SetCdsResource(cluster);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendOk();
-}
-
-class TimeoutTest : public BasicTest {
- protected:
- void SetUp() override {
- xds_resource_does_not_exist_timeout_ms_ = 500;
- BasicTest::SetUp();
- }
-};
-
-// Tests that LDS client times out when no response received.
-TEST_P(TimeoutTest, Lds) {
- balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
-}
-
-TEST_P(TimeoutTest, Rds) {
- balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
-}
-
-// Tests that CDS client times out when no response received.
-TEST_P(TimeoutTest, Cds) {
- balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
-}
-
-TEST_P(TimeoutTest, Eds) {
- balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl);
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- CheckRpcSendFailure();
-}
-
using LocalityMapTest = BasicTest;
// Tests that the localities in a locality map are picked according to their
@@ -4516,12 +4516,12 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) {
const double kLocalityWeightRate1 =
static_cast<double>(kLocalityWeight1) / kTotalLocalityWeight;
// ADS response contains 2 localities, each of which contains 1 backend.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kLocalityWeight0},
{"locality1", GetBackendPorts(1, 2), kLocalityWeight1},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for both backends to be ready.
WaitForAllBackends(0, 2);
// Send kNumRpcs RPCs.
@@ -4544,44 +4544,44 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) {
::testing::Le(kLocalityWeightRate1 * (1 + kErrorTolerance))));
}
-// Tests that we correctly handle a locality containing no endpoints.
-TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- const size_t kNumRpcs = 5000;
- // EDS response contains 2 localities, one with no endpoints.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- {"locality1", {}},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Wait for both backends to be ready.
- WaitForAllBackends();
- // Send kNumRpcs RPCs.
- CheckRpcSendOk(kNumRpcs);
- // All traffic should go to the reachable locality.
- EXPECT_EQ(backends_[0]->backend_service()->request_count(),
- kNumRpcs / backends_.size());
- EXPECT_EQ(backends_[1]->backend_service()->request_count(),
- kNumRpcs / backends_.size());
- EXPECT_EQ(backends_[2]->backend_service()->request_count(),
- kNumRpcs / backends_.size());
- EXPECT_EQ(backends_[3]->backend_service()->request_count(),
- kNumRpcs / backends_.size());
-}
-
-// EDS update with no localities.
-TEST_P(LocalityMapTest, NoLocalities) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource({}, DefaultEdsServiceName()));
- Status status = SendRpc();
- EXPECT_FALSE(status.ok());
- EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
-}
-
+// Tests that we correctly handle a locality containing no endpoints.
+TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ const size_t kNumRpcs = 5000;
+ // EDS response contains 2 localities, one with no endpoints.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ {"locality1", {}},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Wait for both backends to be ready.
+ WaitForAllBackends();
+ // Send kNumRpcs RPCs.
+ CheckRpcSendOk(kNumRpcs);
+ // All traffic should go to the reachable locality.
+ EXPECT_EQ(backends_[0]->backend_service()->request_count(),
+ kNumRpcs / backends_.size());
+ EXPECT_EQ(backends_[1]->backend_service()->request_count(),
+ kNumRpcs / backends_.size());
+ EXPECT_EQ(backends_[2]->backend_service()->request_count(),
+ kNumRpcs / backends_.size());
+ EXPECT_EQ(backends_[3]->backend_service()->request_count(),
+ kNumRpcs / backends_.size());
+}
+
+// EDS update with no localities.
+TEST_P(LocalityMapTest, NoLocalities) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource({}, DefaultEdsServiceName()));
+ Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
+}
+
// Tests that the locality map can work properly even when it contains a large
// number of localities.
TEST_P(LocalityMapTest, StressTest) {
@@ -4590,23 +4590,23 @@ TEST_P(LocalityMapTest, StressTest) {
const size_t kNumLocalities = 100;
// The first ADS response contains kNumLocalities localities, each of which
// contains backend 0.
- AdsServiceImpl::EdsResourceArgs args;
+ AdsServiceImpl::EdsResourceArgs args;
for (size_t i = 0; i < kNumLocalities; ++i) {
- TString name = y_absl::StrCat("locality", i);
- AdsServiceImpl::EdsResourceArgs::Locality locality(name,
- {backends_[0]->port()});
+ TString name = y_absl::StrCat("locality", i);
+ AdsServiceImpl::EdsResourceArgs::Locality locality(name,
+ {backends_[0]->port()});
args.locality_list.emplace_back(std::move(locality));
}
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// The second ADS response contains 1 locality, which contains backend 1.
- args = AdsServiceImpl::EdsResourceArgs({
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(1, 2)},
});
- std::thread delayed_resource_setter(
- std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
- 60 * 1000));
+ std::thread delayed_resource_setter(
+ std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()),
+ 60 * 1000));
// Wait until backend 0 is ready, before which kNumLocalities localities are
// received and handled by the xds policy.
WaitForBackend(0, /*reset_counters=*/false);
@@ -4614,7 +4614,7 @@ TEST_P(LocalityMapTest, StressTest) {
// Wait until backend 1 is ready, before which kNumLocalities localities are
// removed by the xds policy.
WaitForBackend(1);
- delayed_resource_setter.join();
+ delayed_resource_setter.join();
}
// Tests that the localities in a locality map are picked correctly after update
@@ -4622,7 +4622,7 @@ TEST_P(LocalityMapTest, StressTest) {
TEST_P(LocalityMapTest, UpdateMap) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- const size_t kNumRpcs = 3000;
+ const size_t kNumRpcs = 3000;
// The locality weight for the first 3 localities.
const std::vector<int> kLocalityWeights0 = {2, 3, 4};
const double kTotalLocalityWeight0 =
@@ -4641,13 +4641,13 @@ TEST_P(LocalityMapTest, UpdateMap) {
for (int weight : kLocalityWeights1) {
locality_weight_rate_1.push_back(weight / kTotalLocalityWeight1);
}
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), 2},
{"locality1", GetBackendPorts(1, 2), 3},
{"locality2", GetBackendPorts(2, 3), 4},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for the first 3 backends to be ready.
WaitForAllBackends(0, 3);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -4664,26 +4664,26 @@ TEST_P(LocalityMapTest, UpdateMap) {
}
const double kErrorTolerance = 0.2;
for (size_t i = 0; i < 3; ++i) {
- gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
- locality_picked_rates[i]);
+ gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
+ locality_picked_rates[i]);
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
::testing::Ge(locality_weight_rate_0[i] * (1 - kErrorTolerance)),
::testing::Le(locality_weight_rate_0[i] * (1 + kErrorTolerance))));
}
- args = AdsServiceImpl::EdsResourceArgs({
- {"locality1", GetBackendPorts(1, 2), 3},
- {"locality2", GetBackendPorts(2, 3), 2},
- {"locality3", GetBackendPorts(3, 4), 6},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
+ {"locality1", GetBackendPorts(1, 2), 3},
+ {"locality2", GetBackendPorts(2, 3), 2},
+ {"locality3", GetBackendPorts(3, 4), 6},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Backend 3 hasn't received any request.
EXPECT_EQ(0U, backends_[3]->backend_service()->request_count());
// Wait until the locality update has been processed, as signaled by backend 3
// receiving a request.
- WaitForAllBackends(3, 4);
+ WaitForAllBackends(3, 4);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
@@ -4699,8 +4699,8 @@ TEST_P(LocalityMapTest, UpdateMap) {
kNumRpcs);
}
for (size_t i = 1; i < 4; ++i) {
- gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
- locality_picked_rates[i]);
+ gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
+ locality_picked_rates[i]);
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
@@ -4709,97 +4709,97 @@ TEST_P(LocalityMapTest, UpdateMap) {
}
}
-// Tests that we don't fail RPCs when replacing all of the localities in
-// a given priority.
-TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1)},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
- {"locality1", GetBackendPorts(1, 2)},
- });
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 5000));
- // Wait for the first backend to be ready.
- WaitForBackend(0);
- // Keep sending RPCs until we switch over to backend 1, which tells us
- // that we received the update. No RPCs should fail during this
- // transition.
- WaitForBackend(1, /*reset_counters=*/true, /*require_success=*/true);
- delayed_resource_setter.join();
-}
-
+// Tests that we don't fail RPCs when replacing all of the localities in
+// a given priority.
+TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1)},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
+ {"locality1", GetBackendPorts(1, 2)},
+ });
+ std::thread delayed_resource_setter(std::bind(
+ &BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 5000));
+ // Wait for the first backend to be ready.
+ WaitForBackend(0);
+ // Keep sending RPCs until we switch over to backend 1, which tells us
+ // that we received the update. No RPCs should fail during this
+ // transition.
+ WaitForBackend(1, /*reset_counters=*/true, /*require_success=*/true);
+ delayed_resource_setter.join();
+}
+
class FailoverTest : public BasicTest {
public:
- void SetUp() override {
- BasicTest::SetUp();
- ResetStub(500);
- }
+ void SetUp() override {
+ BasicTest::SetUp();
+ ResetStub(500);
+ }
};
// Localities with the highest priority are used when multiple priority exist.
TEST_P(FailoverTest, ChooseHighestPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ }
+}
+
+// Does not choose priority with no endpoints.
+TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
+ {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
+ {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
+ {"locality3", {}, kDefaultLocalityWeight, 0},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ WaitForBackend(0, false);
+ for (size_t i = 1; i < 3; ++i) {
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
}
-// Does not choose priority with no endpoints.
-TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
- {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
- {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
- {"locality3", {}, kDefaultLocalityWeight, 0},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- WaitForBackend(0, false);
- for (size_t i = 1; i < 3; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
- }
-}
-
-// Does not choose locality with no endpoints.
-TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", {}, kDefaultLocalityWeight, 0},
- {"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Wait for all backends to be used.
- std::tuple<int, int, int> counts = WaitForAllBackends();
- // Make sure no RPCs failed in the transition.
- EXPECT_EQ(0, std::get<1>(counts));
-}
-
+// Does not choose locality with no endpoints.
+TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", {}, kDefaultLocalityWeight, 0},
+ {"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Wait for all backends to be used.
+ std::tuple<int, int, int> counts = WaitForAllBackends();
+ // Make sure no RPCs failed in the transition.
+ EXPECT_EQ(0, std::get<1>(counts));
+}
+
// If the higher priority localities are not reachable, failover to the highest
// priority among the rest.
TEST_P(FailoverTest, Failover) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
@@ -4807,12 +4807,12 @@ TEST_P(FailoverTest, Failover) {
});
ShutdownBackend(3);
ShutdownBackend(0);
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
}
@@ -4822,7 +4822,7 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 100;
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
@@ -4830,12 +4830,12 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) {
});
ShutdownBackend(3);
ShutdownBackend(0);
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
StartBackend(0);
WaitForBackend(0);
@@ -4848,13 +4848,13 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) {
TEST_P(FailoverTest, UpdateInitialUnavailable) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 2},
@@ -4862,9 +4862,9 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) {
});
ShutdownBackend(0);
ShutdownBackend(1);
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ std::thread delayed_resource_setter(std::bind(
+ &BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(500, GPR_TIMESPAN));
// Send 0.5 second worth of RPCs.
@@ -4874,9 +4874,9 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) {
WaitForBackend(2, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 2) continue;
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
- delayed_resource_setter.join();
+ delayed_resource_setter.join();
}
// Tests that after the localities' priorities are updated, we still choose the
@@ -4885,72 +4885,72 @@ TEST_P(FailoverTest, UpdatePriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 100;
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 2},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 0},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 1},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 3},
});
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ std::thread delayed_resource_setter(std::bind(
+ &BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
- EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
+ EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
WaitForBackend(1);
CheckRpcSendOk(kNumRpcs);
EXPECT_EQ(kNumRpcs, backends_[1]->backend_service()->request_count());
- delayed_resource_setter.join();
+ delayed_resource_setter.join();
+}
+
+// Moves all localities in the current priority to a higher priority.
+TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) {
+ SetNextResolution({});
+ SetNextResolutionForLbChannelAllBalancers();
+ // First update:
+ // - Priority 0 is locality 0, containing backend 0, which is down.
+ // - Priority 1 is locality 1, containing backends 1 and 2, which are up.
+ ShutdownBackend(0);
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
+ {"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Second update:
+ // - Priority 0 contains both localities 0 and 1.
+ // - Priority 1 is not present.
+ // - We add backend 3 to locality 1, just so we have a way to know
+ // when the update has been seen by the client.
+ args = AdsServiceImpl::EdsResourceArgs({
+ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
+ {"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0},
+ });
+ std::thread delayed_resource_setter(std::bind(
+ &BasicTest::SetEdsResourceWithDelay, this, 0,
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
+ // When we get the first update, all backends in priority 0 are down,
+ // so we will create priority 1. Backends 1 and 2 should have traffic,
+ // but backend 3 should not.
+ WaitForAllBackends(1, 3, false);
+ EXPECT_EQ(0UL, backends_[3]->backend_service()->request_count());
+ // When backend 3 gets traffic, we know the second update has been seen.
+ WaitForBackend(3);
+ // The ADS service of balancer 0 got at least 1 response.
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ delayed_resource_setter.join();
}
-// Moves all localities in the current priority to a higher priority.
-TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) {
- SetNextResolution({});
- SetNextResolutionForLbChannelAllBalancers();
- // First update:
- // - Priority 0 is locality 0, containing backend 0, which is down.
- // - Priority 1 is locality 1, containing backends 1 and 2, which are up.
- ShutdownBackend(0);
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
- {"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Second update:
- // - Priority 0 contains both localities 0 and 1.
- // - Priority 1 is not present.
- // - We add backend 3 to locality 1, just so we have a way to know
- // when the update has been seen by the client.
- args = AdsServiceImpl::EdsResourceArgs({
- {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
- {"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0},
- });
- std::thread delayed_resource_setter(std::bind(
- &BasicTest::SetEdsResourceWithDelay, this, 0,
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000));
- // When we get the first update, all backends in priority 0 are down,
- // so we will create priority 1. Backends 1 and 2 should have traffic,
- // but backend 3 should not.
- WaitForAllBackends(1, 3, false);
- EXPECT_EQ(0UL, backends_[3]->backend_service()->request_count());
- // When backend 3 gets traffic, we know the second update has been seen.
- WaitForBackend(3);
- // The ADS service of balancer 0 got at least 1 response.
- EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- delayed_resource_setter.join();
-}
-
using DropTest = BasicTest;
// Tests that RPCs are dropped according to the drop config.
@@ -4965,26 +4965,26 @@ TEST_P(DropTest, Vanilla) {
const double KDropRateForLbAndThrottle =
kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
// The ADS response contains two drop categories.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
@@ -5005,26 +5005,26 @@ TEST_P(DropTest, DropPerHundred) {
const uint32_t kDropPerHundredForLb = 10;
const double kDropRateForLb = kDropPerHundredForLb / 100.0;
// The ADS response contains one drop category.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerHundredForLb}};
args.drop_denominator = FractionalPercent::HUNDRED;
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
@@ -5044,26 +5044,26 @@ TEST_P(DropTest, DropPerTenThousand) {
const uint32_t kDropPerTenThousandForLb = 1000;
const double kDropRateForLb = kDropPerTenThousandForLb / 10000.0;
// The ADS response contains one drop category.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerTenThousandForLb}};
args.drop_denominator = FractionalPercent::TEN_THOUSAND;
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
@@ -5079,7 +5079,7 @@ TEST_P(DropTest, DropPerTenThousand) {
TEST_P(DropTest, Update) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- const size_t kNumRpcs = 3000;
+ const size_t kNumRpcs = 3000;
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 200000;
const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
@@ -5087,43 +5087,43 @@ TEST_P(DropTest, Update) {
const double KDropRateForLbAndThrottle =
kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
// The first ADS response contains one drop category.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// The drop rate should be roughly equal to the expectation.
double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
- gpr_log(GPR_INFO, "First batch drop rate %f", seen_drop_rate);
+ gpr_log(GPR_INFO, "First batch drop rate %f", seen_drop_rate);
const double kErrorTolerance = 0.3;
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
- // The second ADS response contains two drop categories, send an update EDS
- // response.
- args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
- {kThrottleDropType, kDropPerMillionForThrottle}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // The second ADS response contains two drop categories, send an update EDS
+ // response.
+ args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
+ {kThrottleDropType, kDropPerMillionForThrottle}};
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the drop rate increases to the middle of the two configs, which
// implies that the update has been in effect.
const double kDropRateThreshold =
@@ -5131,7 +5131,7 @@ TEST_P(DropTest, Update) {
size_t num_rpcs = kNumRpcs;
while (seen_drop_rate < kDropRateThreshold) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
++num_rpcs;
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
@@ -5139,7 +5139,7 @@ TEST_P(DropTest, Update) {
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
seen_drop_rate = static_cast<double>(num_drops) / num_rpcs;
}
@@ -5148,20 +5148,20 @@ TEST_P(DropTest, Update) {
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// The new drop rate should be roughly equal to the expectation.
seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
- gpr_log(GPR_INFO, "Second batch drop rate %f", seen_drop_rate);
+ gpr_log(GPR_INFO, "Second batch drop rate %f", seen_drop_rate);
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(
@@ -5177,23 +5177,23 @@ TEST_P(DropTest, DropAll) {
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 1000000;
// The ADS response contains two drop categories.
- AdsServiceImpl::EdsResourceArgs args;
+ AdsServiceImpl::EdsResourceArgs args;
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Send kNumRpcs RPCs and all of them are dropped.
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
- EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
- EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
+ const Status status = SendRpc(RpcOptions(), &response);
+ EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}
}
class BalancerUpdateTest : public XdsEnd2endTest {
public:
- BalancerUpdateTest() : XdsEnd2endTest(4, 3) {}
+ BalancerUpdateTest() : XdsEnd2endTest(4, 3) {}
};
// Tests that the old LB call is still used after the balancer address update as
@@ -5201,16 +5201,16 @@ class BalancerUpdateTest : public XdsEnd2endTest {
TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
- balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[1]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
@@ -5219,17 +5219,17 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
- // The ADS service of balancer 0 sent at least 1 response.
- EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[1]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of balancer 0 sent at least 1 response.
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[1]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
@@ -5243,17 +5243,17 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
// The current LB call is still working, so xds continued using it to the
// first balancer, which doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
- // The ADS service of balancer 0 sent at least 1 response.
- EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[1]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of balancer 0 sent at least 1 response.
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[1]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
}
// Tests that the old LB call is still used after multiple balancer address
@@ -5264,16 +5264,16 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
TEST_P(BalancerUpdateTest, Repeated) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
- balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[1]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
@@ -5282,17 +5282,17 @@ TEST_P(BalancerUpdateTest, Repeated) {
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
- // The ADS service of balancer 0 sent at least 1 response.
- EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[1]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of balancer 0 sent at least 1 response.
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[1]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
std::vector<int> ports;
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
@@ -5334,33 +5334,33 @@ TEST_P(BalancerUpdateTest, Repeated) {
TEST_P(BalancerUpdateTest, DeadUpdate) {
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
- balancers_[1]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[1]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
- // The ADS service of balancer 0 sent at least 1 response.
- EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[1]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of balancer 0 sent at least 1 response.
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[1]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
// Kill balancer 0
gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
balancers_[0]->Shutdown();
@@ -5372,19 +5372,19 @@ TEST_P(BalancerUpdateTest, DeadUpdate) {
// All 10 requests should again have gone to the first backend.
EXPECT_EQ(20U, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
- // The ADS service of no balancers sent anything
- EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[0]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[1]->ads_service()->eds_response_state().error_message;
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of no balancers sent anything
+ EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[0]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[1]->ads_service()->eds_response_state().error_message;
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
@@ -5400,17 +5400,17 @@ TEST_P(BalancerUpdateTest, DeadUpdate) {
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
- // The ADS service of balancer 1 sent at least 1 response.
- EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[0]->ads_service()->eds_response_state().error_message;
- EXPECT_GT(balancers_[1]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT);
- EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
- AdsServiceImpl::ResponseState::NOT_SENT)
- << "Error Message:"
- << balancers_[2]->ads_service()->eds_response_state().error_message;
+ // The ADS service of balancer 1 sent at least 1 response.
+ EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[0]->ads_service()->eds_response_state().error_message;
+ EXPECT_GT(balancers_[1]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT);
+ EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
+ AdsServiceImpl::ResponseState::NOT_SENT)
+ << "Error Message:"
+ << balancers_[2]->ads_service()->eds_response_state().error_message;
}
// The re-resolution tests are deferred because they rely on the fallback mode,
@@ -5428,20 +5428,112 @@ class ClientLoadReportingTest : public XdsEnd2endTest {
// Tests that the load report received at the balancer is correct.
TEST_P(ClientLoadReportingTest, Vanilla) {
- if (!GetParam().use_xds_resolver()) {
- balancers_[0]->lrs_service()->set_cluster_names({kServerName});
- }
+ if (!GetParam().use_xds_resolver()) {
+ balancers_[0]->lrs_service()->set_cluster_names({kServerName});
+ }
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
- const size_t kNumRpcsPerAddress = 10;
- const size_t kNumFailuresPerAddress = 3;
+ const size_t kNumRpcsPerAddress = 10;
+ const size_t kNumFailuresPerAddress = 3;
// TODO(juanlishen): Partition the backends after multiple localities is
// tested.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Wait until all backends are ready.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+ CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
+ RpcOptions().set_server_fail(true));
+ // Check that each backend got the right number of requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
+ backends_[i]->backend_service()->request_count());
+ }
+ // The load report received at the balancer should be correct.
+ std::vector<ClientStats> load_report =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ ASSERT_EQ(load_report.size(), 1UL);
+ ClientStats& client_stats = load_report.front();
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
+ client_stats.total_successful_requests());
+ EXPECT_EQ(0U, client_stats.total_requests_in_progress());
+ EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
+ num_ok + num_failure,
+ client_stats.total_issued_requests());
+ EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
+ client_stats.total_error_requests());
+ EXPECT_EQ(0U, client_stats.total_dropped_requests());
+ // The LRS service got a single request, and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
+}
+
+// Tests send_all_clusters.
+TEST_P(ClientLoadReportingTest, SendAllClusters) {
+ balancers_[0]->lrs_service()->set_send_all_clusters(true);
+ SetNextResolution({});
+ SetNextResolutionForLbChannel({balancers_[0]->port()});
+ const size_t kNumRpcsPerAddress = 10;
+ const size_t kNumFailuresPerAddress = 3;
+ // TODO(juanlishen): Partition the backends after multiple localities is
+ // tested.
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ // Wait until all backends are ready.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+ CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
+ RpcOptions().set_server_fail(true));
+ // Check that each backend got the right number of requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
+ backends_[i]->backend_service()->request_count());
+ }
+ // The load report received at the balancer should be correct.
+ std::vector<ClientStats> load_report =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ ASSERT_EQ(load_report.size(), 1UL);
+ ClientStats& client_stats = load_report.front();
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
+ client_stats.total_successful_requests());
+ EXPECT_EQ(0U, client_stats.total_requests_in_progress());
+ EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
+ num_ok + num_failure,
+ client_stats.total_issued_requests());
+ EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
+ client_stats.total_error_requests());
+ EXPECT_EQ(0U, client_stats.total_dropped_requests());
+ // The LRS service got a single request, and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
+}
+
+// Tests that we don't include stats for clusters that are not requested
+// by the LRS server.
+TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
+ balancers_[0]->lrs_service()->set_cluster_names({"bogus"});
+ SetNextResolution({});
+ SetNextResolutionForLbChannel({balancers_[0]->port()});
+ const size_t kNumRpcsPerAddress = 100;
+ AdsServiceImpl::EdsResourceArgs args({
+ {"locality0", GetBackendPorts()},
+ });
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5449,98 +5541,6 @@ TEST_P(ClientLoadReportingTest, Vanilla) {
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
- CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
- RpcOptions().set_server_fail(true));
- // Check that each backend got the right number of requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
- backends_[i]->backend_service()->request_count());
- }
- // The load report received at the balancer should be correct.
- std::vector<ClientStats> load_report =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- ASSERT_EQ(load_report.size(), 1UL);
- ClientStats& client_stats = load_report.front();
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
- client_stats.total_successful_requests());
- EXPECT_EQ(0U, client_stats.total_requests_in_progress());
- EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
- num_ok + num_failure,
- client_stats.total_issued_requests());
- EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
- client_stats.total_error_requests());
- EXPECT_EQ(0U, client_stats.total_dropped_requests());
- // The LRS service got a single request, and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
-}
-
-// Tests send_all_clusters.
-TEST_P(ClientLoadReportingTest, SendAllClusters) {
- balancers_[0]->lrs_service()->set_send_all_clusters(true);
- SetNextResolution({});
- SetNextResolutionForLbChannel({balancers_[0]->port()});
- const size_t kNumRpcsPerAddress = 10;
- const size_t kNumFailuresPerAddress = 3;
- // TODO(juanlishen): Partition the backends after multiple localities is
- // tested.
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Wait until all backends are ready.
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
- CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
- RpcOptions().set_server_fail(true));
- // Check that each backend got the right number of requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
- backends_[i]->backend_service()->request_count());
- }
- // The load report received at the balancer should be correct.
- std::vector<ClientStats> load_report =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- ASSERT_EQ(load_report.size(), 1UL);
- ClientStats& client_stats = load_report.front();
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
- client_stats.total_successful_requests());
- EXPECT_EQ(0U, client_stats.total_requests_in_progress());
- EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
- num_ok + num_failure,
- client_stats.total_issued_requests());
- EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
- client_stats.total_error_requests());
- EXPECT_EQ(0U, client_stats.total_dropped_requests());
- // The LRS service got a single request, and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
-}
-
-// Tests that we don't include stats for clusters that are not requested
-// by the LRS server.
-TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
- balancers_[0]->lrs_service()->set_cluster_names({"bogus"});
- SetNextResolution({});
- SetNextResolutionForLbChannel({balancers_[0]->port()});
- const size_t kNumRpcsPerAddress = 100;
- AdsServiceImpl::EdsResourceArgs args({
- {"locality0", GetBackendPorts()},
- });
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
- // Wait until all backends are ready.
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
@@ -5550,27 +5550,27 @@ TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
// The load report received at the balancer should be correct.
- std::vector<ClientStats> load_report =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- ASSERT_EQ(load_report.size(), 0UL);
+ std::vector<ClientStats> load_report =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ ASSERT_EQ(load_report.size(), 0UL);
}
// Tests that if the balancer restarts, the client load report contains the
// stats before and after the restart correctly.
TEST_P(ClientLoadReportingTest, BalancerRestart) {
- if (!GetParam().use_xds_resolver()) {
- balancers_[0]->lrs_service()->set_cluster_names({kServerName});
- }
+ if (!GetParam().use_xds_resolver()) {
+ balancers_[0]->lrs_service()->set_cluster_names({kServerName});
+ }
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumBackendsFirstPass = backends_.size() / 2;
const size_t kNumBackendsSecondPass =
backends_.size() - kNumBackendsFirstPass;
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, kNumBackendsFirstPass)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends returned by the balancer are ready.
int num_ok = 0;
int num_failure = 0;
@@ -5578,15 +5578,15 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) {
std::tie(num_ok, num_failure, num_drops) =
WaitForAllBackends(/* start_index */ 0,
/* stop_index */ kNumBackendsFirstPass);
- std::vector<ClientStats> load_report =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- ASSERT_EQ(load_report.size(), 1UL);
- ClientStats client_stats = std::move(load_report.front());
+ std::vector<ClientStats> load_report =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ ASSERT_EQ(load_report.size(), 1UL);
+ ClientStats client_stats = std::move(load_report.front());
EXPECT_EQ(static_cast<size_t>(num_ok),
- client_stats.total_successful_requests());
- EXPECT_EQ(0U, client_stats.total_requests_in_progress());
- EXPECT_EQ(0U, client_stats.total_error_requests());
- EXPECT_EQ(0U, client_stats.total_dropped_requests());
+ client_stats.total_successful_requests());
+ EXPECT_EQ(0U, client_stats.total_requests_in_progress());
+ EXPECT_EQ(0U, client_stats.total_error_requests());
+ EXPECT_EQ(0U, client_stats.total_dropped_requests());
// Shut down the balancer.
balancers_[0]->Shutdown();
// We should continue using the last EDS response we received from the
@@ -5603,12 +5603,12 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) {
int num_started = std::get<0>(WaitForAllBackends(
/* start_index */ 0, /* stop_index */ kNumBackendsFirstPass));
// Now restart the balancer, this time pointing to the new backends.
- balancers_[0]->Start();
- args = AdsServiceImpl::EdsResourceArgs({
+ balancers_[0]->Start();
+ args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(kNumBackendsFirstPass)},
});
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for queries to start going to one of the new backends.
// This tells us that we're now using the new serverlist.
std::tie(num_ok, num_failure, num_drops) =
@@ -5618,13 +5618,13 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) {
CheckRpcSendOk(kNumBackendsSecondPass);
num_started += kNumBackendsSecondPass;
// Check client stats.
- load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
- ASSERT_EQ(load_report.size(), 1UL);
- client_stats = std::move(load_report.front());
- EXPECT_EQ(num_started, client_stats.total_successful_requests());
- EXPECT_EQ(0U, client_stats.total_requests_in_progress());
- EXPECT_EQ(0U, client_stats.total_error_requests());
- EXPECT_EQ(0U, client_stats.total_dropped_requests());
+ load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
+ ASSERT_EQ(load_report.size(), 1UL);
+ client_stats = std::move(load_report.front());
+ EXPECT_EQ(num_started, client_stats.total_successful_requests());
+ EXPECT_EQ(0U, client_stats.total_requests_in_progress());
+ EXPECT_EQ(0U, client_stats.total_error_requests());
+ EXPECT_EQ(0U, client_stats.total_dropped_requests());
}
class ClientLoadReportingWithDropTest : public XdsEnd2endTest {
@@ -5634,9 +5634,9 @@ class ClientLoadReportingWithDropTest : public XdsEnd2endTest {
// Tests that the drop stats are correctly reported by client load reporting.
TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
- if (!GetParam().use_xds_resolver()) {
- balancers_[0]->lrs_service()->set_cluster_names({kServerName});
- }
+ if (!GetParam().use_xds_resolver()) {
+ balancers_[0]->lrs_service()->set_cluster_names({kServerName});
+ }
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 3000;
@@ -5647,13 +5647,13 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
const double KDropRateForLbAndThrottle =
kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
// The ADS response contains two drop categories.
- AdsServiceImpl::EdsResourceArgs args({
+ AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
- balancers_[0]->ads_service()->SetEdsResource(
- AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
+ balancers_[0]->ads_service()->SetEdsResource(
+ AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()));
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
@@ -5662,14 +5662,14 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
// Send kNumRpcs RPCs and count the drops.
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
- const Status status = SendRpc(RpcOptions(), &response);
+ const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage);
+ EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
@@ -5681,24 +5681,24 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
// Check client stats.
- const size_t total_rpc = num_warmup + kNumRpcs;
- ClientStats client_stats;
- do {
- std::vector<ClientStats> load_reports =
- balancers_[0]->lrs_service()->WaitForLoadReport();
- for (const auto& load_report : load_reports) {
- client_stats += load_report;
- }
- } while (client_stats.total_issued_requests() +
- client_stats.total_dropped_requests() <
- total_rpc);
- EXPECT_EQ(num_drops, client_stats.total_dropped_requests());
+ const size_t total_rpc = num_warmup + kNumRpcs;
+ ClientStats client_stats;
+ do {
+ std::vector<ClientStats> load_reports =
+ balancers_[0]->lrs_service()->WaitForLoadReport();
+ for (const auto& load_report : load_reports) {
+ client_stats += load_report;
+ }
+ } while (client_stats.total_issued_requests() +
+ client_stats.total_dropped_requests() <
+ total_rpc);
+ EXPECT_EQ(num_drops, client_stats.total_dropped_requests());
EXPECT_THAT(
- client_stats.dropped_requests(kLbDropType),
+ client_stats.dropped_requests(kLbDropType),
::testing::AllOf(
::testing::Ge(total_rpc * kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(total_rpc * kDropRateForLb * (1 + kErrorTolerance))));
- EXPECT_THAT(client_stats.dropped_requests(kThrottleDropType),
+ EXPECT_THAT(client_stats.dropped_requests(kThrottleDropType),
::testing::AllOf(
::testing::Ge(total_rpc * (1 - kDropRateForLb) *
kDropRateForThrottle * (1 - kErrorTolerance)),
@@ -5706,97 +5706,97 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
kDropRateForThrottle * (1 + kErrorTolerance))));
}
-TString TestTypeName(const ::testing::TestParamInfo<TestType>& info) {
+TString TestTypeName(const ::testing::TestParamInfo<TestType>& info) {
return info.param.AsString();
}
-// TestType params:
-// - use_xds_resolver
-// - enable_load_reporting
-// - enable_rds_testing = false
-// - use_v2 = false
-
+// TestType params:
+// - use_xds_resolver
+// - enable_load_reporting
+// - enable_rds_testing = false
+// - use_v2 = false
+
INSTANTIATE_TEST_SUITE_P(XdsTest, BasicTest,
::testing::Values(TestType(false, true),
TestType(false, false),
- TestType(true, false),
+ TestType(true, false),
TestType(true, true)),
&TestTypeName);
-// Run with both fake resolver and xds resolver.
-// Don't run with load reporting or v2 or RDS, since they are irrelevant to
-// the tests.
+// Run with both fake resolver and xds resolver.
+// Don't run with load reporting or v2 or RDS, since they are irrelevant to
+// the tests.
INSTANTIATE_TEST_SUITE_P(XdsTest, SecureNamingTest,
- ::testing::Values(TestType(false, false),
- TestType(true, false)),
+ ::testing::Values(TestType(false, false),
+ TestType(true, false)),
+ &TestTypeName);
+
+// LDS depends on XdsResolver.
+INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest,
+ ::testing::Values(TestType(true, false),
+ TestType(true, true)),
+ &TestTypeName);
+
+// LDS/RDS commmon tests depend on XdsResolver.
+INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest,
+ ::testing::Values(TestType(true, false),
+ TestType(true, true),
+ TestType(true, false, true),
+ TestType(true, true, true),
+ // Also test with xDS v2.
+ TestType(true, true, true, true)),
+ &TestTypeName);
+
+// CDS depends on XdsResolver.
+INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest,
+ ::testing::Values(TestType(true, false),
+ TestType(true, true)),
+ &TestTypeName);
+
+// EDS could be tested with or without XdsResolver, but the tests would
+// be the same either way, so we test it only with XdsResolver.
+INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest,
+ ::testing::Values(TestType(true, false),
+ TestType(true, true)),
+ &TestTypeName);
+
+// Test initial resource timeouts for each resource type.
+// Do this only for XdsResolver with RDS enabled, so that we can test
+// all resource types.
+// Run with V3 only, since the functionality is no different in V2.
+INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest,
+ ::testing::Values(TestType(true, false, true)),
+ &TestTypeName);
+
+// XdsResolverOnlyTest depends on XdsResolver.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest,
+ ::testing::Values(TestType(true, false),
+ TestType(true, true)),
+ &TestTypeName);
+
+// XdsResolverLoadReprtingOnlyTest depends on XdsResolver and load reporting.
+INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest,
+ ::testing::Values(TestType(true, true)),
&TestTypeName);
-// LDS depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-// LDS/RDS commmon tests depend on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true),
- TestType(true, false, true),
- TestType(true, true, true),
- // Also test with xDS v2.
- TestType(true, true, true, true)),
- &TestTypeName);
-
-// CDS depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-// EDS could be tested with or without XdsResolver, but the tests would
-// be the same either way, so we test it only with XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-// Test initial resource timeouts for each resource type.
-// Do this only for XdsResolver with RDS enabled, so that we can test
-// all resource types.
-// Run with V3 only, since the functionality is no different in V2.
-INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest,
- ::testing::Values(TestType(true, false, true)),
- &TestTypeName);
-
-// XdsResolverOnlyTest depends on XdsResolver.
-INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest,
- ::testing::Values(TestType(true, false),
- TestType(true, true)),
- &TestTypeName);
-
-// XdsResolverLoadReprtingOnlyTest depends on XdsResolver and load reporting.
-INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest,
- ::testing::Values(TestType(true, true)),
- &TestTypeName);
-
INSTANTIATE_TEST_SUITE_P(XdsTest, LocalityMapTest,
::testing::Values(TestType(false, true),
TestType(false, false),
- TestType(true, false),
+ TestType(true, false),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, FailoverTest,
::testing::Values(TestType(false, true),
TestType(false, false),
- TestType(true, false),
+ TestType(true, false),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest,
::testing::Values(TestType(false, true),
TestType(false, false),
- TestType(true, false),
+ TestType(true, false),
TestType(true, true)),
&TestTypeName);
diff --git a/contrib/libs/grpc/test/cpp/end2end/ya.make b/contrib/libs/grpc/test/cpp/end2end/ya.make
index d297bbbb27..b9c1dc7fe0 100644
--- a/contrib/libs/grpc/test/cpp/end2end/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/ya.make
@@ -2,10 +2,10 @@ LIBRARY()
LICENSE(Apache-2.0)
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+OWNER(dvshkurko)
-OWNER(dvshkurko)
-
PEERDIR(
contrib/libs/grpc/src/proto/grpc/health/v1
contrib/libs/grpc/src/proto/grpc/testing
@@ -16,10 +16,10 @@ PEERDIR(
contrib/restricted/googletest/googletest
)
-ADDINCL(
- ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
- contrib/libs/grpc
-)
+ADDINCL(
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ contrib/libs/grpc
+)
NO_COMPILER_WARNINGS()