author    arcadia-devtools <arcadia-devtools@yandex-team.ru>  2022-03-15 21:33:41 +0300
committer arcadia-devtools <arcadia-devtools@yandex-team.ru>  2022-03-15 21:33:41 +0300
commit    3dd665b514943f69657b593eb51af90b99b1206b (patch)
tree      0eb633e628bb1fe6c639574b1184d43def7c0a73 /contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
parent    a68afc731202027f105bc5723ee11788017c29e2 (diff)
download  ydb-3dd665b514943f69657b593eb51af90b99b1206b.tar.gz
intermediate changes
ref:953ca886ec160075b38c0f3614de029b423f0a9e
Diffstat (limited to 'contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc')
-rw-r--r--  contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc  213
1 file changed, 105 insertions(+), 108 deletions(-)
diff --git a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
index 6208dc2535..8cd73aa9a9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
@@ -24,6 +24,7 @@
#include <util/generic/string.h>
#include <thread>
+#include "y_absl/memory/memory.h"
#include "y_absl/strings/str_cat.h"
#include "y_absl/strings/str_format.h"
@@ -53,6 +54,7 @@
#include "src/cpp/server/secure_server_credentials.h"
#include "test/core/util/port.h"
+#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
@@ -184,6 +186,12 @@ TString Ip4ToPackedString(const char* ip_str) {
return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
}
+TString Ip6ToPackedString(const char* ip_str) {
+ struct in6_addr ip6;
+ GPR_ASSERT(inet_pton(AF_INET6, ip_str, &ip6) == 1);
+ return TString(reinterpret_cast<const char*>(&ip6), sizeof(ip6));
+}
+
struct ClientStats {
size_t num_calls_started = 0;
size_t num_calls_finished = 0;
@@ -266,7 +274,8 @@ class BalancerServiceImpl : public BalancerService {
}
{
grpc::internal::MutexLock lock(&mu_);
- serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
+ grpc::internal::WaitUntil(&serverlist_cond_, &mu_,
+ [this] { return serverlist_done_; });
}
if (client_load_reporting_interval_seconds_ > 0) {
@@ -321,35 +330,13 @@ class BalancerServiceImpl : public BalancerService {
gpr_log(GPR_INFO, "LB[%p]: shut down", this);
}
- static LoadBalanceResponse BuildResponseForBackends(
- const std::vector<int>& backend_ports,
- const std::map<TString, size_t>& drop_token_counts) {
- LoadBalanceResponse response;
- for (const auto& drop_token_count : drop_token_counts) {
- for (size_t i = 0; i < drop_token_count.second; ++i) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_drop(true);
- server->set_load_balance_token(drop_token_count.first);
- }
- }
- for (const int& backend_port : backend_ports) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
- server->set_port(backend_port);
- static int token_count = 0;
- server->set_load_balance_token(
- y_absl::StrFormat("token%03d", ++token_count));
- }
- return response;
- }
-
ClientStats WaitForLoadReport() {
grpc::internal::MutexLock lock(&mu_);
grpc::internal::CondVar cv;
if (load_report_queue_.empty()) {
load_report_cond_ = &cv;
- load_report_cond_->WaitUntil(
- &mu_, [this] { return !load_report_queue_.empty(); });
+ grpc::internal::WaitUntil(load_report_cond_, &mu_,
+ [this] { return !load_report_queue_.empty(); });
load_report_cond_ = nullptr;
}
ClientStats load_report = std::move(load_report_queue_.front());
@@ -361,7 +348,7 @@ class BalancerServiceImpl : public BalancerService {
grpc::internal::MutexLock lock(&mu_);
if (!serverlist_done_) {
serverlist_done_ = true;
- serverlist_cond_.Broadcast();
+ serverlist_cond_.SignalAll();
}
}
@@ -418,6 +405,11 @@ class GrpclbEnd2endTest : public ::testing::Test {
static void TearDownTestCase() { grpc_shutdown(); }
void SetUp() override {
+ bool localhost_resolves_to_ipv4 = false;
+ bool localhost_resolves_to_ipv6 = false;
+ grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
+ &localhost_resolves_to_ipv6);
+ ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
// Start the backends.
@@ -546,26 +538,26 @@ class GrpclbEnd2endTest : public ::testing::Test {
TString balancer_name;
};
- static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
+ grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
const std::vector<AddressData>& address_data) {
grpc_core::ServerAddressList addresses;
for (const auto& addr : address_data) {
- TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port);
- grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
+ y_absl::StatusOr<grpc_core::URI> lb_uri =
+ grpc_core::URI::Parse(y_absl::StrCat(
+ ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", addr.port));
+ GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg(
addr.balancer_name.c_str());
grpc_channel_args* args =
grpc_channel_args_copy_and_add(nullptr, &arg, 1);
addresses.emplace_back(address.addr, address.len, args);
- grpc_uri_destroy(lb_uri);
}
return addresses;
}
- static grpc_core::Resolver::Result MakeResolverResult(
+ grpc_core::Resolver::Result MakeResolverResult(
const std::vector<AddressData>& balancer_address_data,
const std::vector<AddressData>& backend_address_data = {},
const char* service_config_json = kDefaultServiceConfig) {
@@ -612,8 +604,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
response_generator_->SetReresolutionResponse(std::move(result));
}
- const std::vector<int> GetBackendPorts(size_t start_index = 0,
- size_t stop_index = 0) const {
+ std::vector<int> GetBackendPorts(size_t start_index = 0,
+ size_t stop_index = 0) const {
if (stop_index == 0) stop_index = backends_.size();
std::vector<int> backend_ports;
for (size_t i = start_index; i < stop_index; ++i) {
@@ -628,6 +620,29 @@ class GrpclbEnd2endTest : public ::testing::Test {
balancers_[i]->service_.add_response(response, delay_ms);
}
+ LoadBalanceResponse BuildResponseForBackends(
+ const std::vector<int>& backend_ports,
+ const std::map<TString, size_t>& drop_token_counts) {
+ LoadBalanceResponse response;
+ for (const auto& drop_token_count : drop_token_counts) {
+ for (size_t i = 0; i < drop_token_count.second; ++i) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_drop(true);
+ server->set_load_balance_token(drop_token_count.first);
+ }
+ }
+ for (const int& backend_port : backend_ports) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_ip_address(ipv6_only_ ? Ip6ToPackedString("::1")
+ : Ip4ToPackedString("127.0.0.1"));
+ server->set_port(backend_port);
+ static int token_count = 0;
+ server->set_load_balance_token(
+ y_absl::StrFormat("token%03d", ++token_count));
+ }
+ return response;
+ }
+
Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
bool wait_for_ready = false,
const Status& expected_status = Status::OK) {
@@ -682,8 +697,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
// by ServerThread::Serve from firing before the wait below is hit.
grpc::internal::MutexLock lock(&mu);
grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
+ thread_ = y_absl::make_unique<std::thread>(
+ std::bind(&ServerThread::Serve, this, server_host, &mu, &cond));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
}
@@ -726,6 +741,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
const size_t num_backends_;
const size_t num_balancers_;
const int client_load_reporting_interval_seconds_;
+ bool ipv6_only_ = false;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_;
@@ -745,8 +761,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -771,8 +786,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
TEST_F(SingleBalancerTest, ReturnServerStatus) {
SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// We need to wait for all backends to come online.
WaitForAllBackends();
// Send a request that the backend will fail, and make sure we get
@@ -793,8 +807,7 @@ TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
balancers_[0]->service_.NotifyDoneWithServerlists();
// The balancer got a single request.
@@ -841,8 +854,7 @@ TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const size_t kNumRpcs = num_backends_ * 2;
CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
balancers_[0]->service_.NotifyDoneWithServerlists();
@@ -872,8 +884,7 @@ TEST_F(SingleBalancerTest, SwapChildPolicy) {
" ]\n"
"}");
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const size_t kNumRpcs = num_backends_ * 2;
CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
// Check that all requests went to the first backend. This verifies
@@ -908,8 +919,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
ports.push_back(backends_[0]->port_);
ports.push_back(backends_[0]->port_);
const size_t kNumRpcsPerAddress = 10;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(ports, {}), 0);
// We need to wait for the backend to come online.
WaitForBackend(0);
// Send kNumRpcsPerAddress RPCs per server.
@@ -927,8 +937,7 @@ TEST_F(SingleBalancerTest, SecureNaming) {
SetNextResolution({AddressData{balancers_[0]->port_, "lb"}});
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -970,8 +979,7 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0);
// Send non-empty serverlist only after kServerlistDelayMs
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- kServerlistDelayMs);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), kServerlistDelayMs);
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */);
@@ -997,8 +1005,7 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
for (size_t i = 0; i < kNumUnreachableServers; ++i) {
ports.push_back(grpc_pick_unused_port_or_die());
}
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(ports, {}), 0);
const Status status = SendRpc();
// The error shouldn't be DEADLINE_EXCEEDED.
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
@@ -1027,7 +1034,7 @@ TEST_F(SingleBalancerTest, Fallback) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(kNumBackendsInResolution /* start_index */), {}),
kServerlistDelayMs);
@@ -1096,7 +1103,7 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
// Send non-empty serverlist only after kServerlistDelayMs.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(kNumBackendsInResolution +
kNumBackendsInResolutionUpdate /* start_index */),
{}),
@@ -1201,10 +1208,9 @@ TEST_F(SingleBalancerTest,
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
// Try to connect.
channel_->GetState(true /* try_to_connect */);
WaitForAllBackends(1 /* num_requests_multiple_of */,
@@ -1234,10 +1240,9 @@ TEST_F(SingleBalancerTest,
// Now start the balancer again. This should cause us to exit
// fallback mode.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
WaitForAllBackends(1 /* num_requests_multiple_of */,
kNumFallbackBackends /* start_index */);
}
@@ -1256,10 +1261,9 @@ TEST_F(SingleBalancerTest,
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
}
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
// Try to connect.
channel_->GetState(true /* try_to_connect */);
WaitForAllBackends(1 /* num_requests_multiple_of */,
@@ -1287,10 +1291,9 @@ TEST_F(SingleBalancerTest,
// Now start the balancer again. This should cause us to exit
// fallback mode.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
WaitForAllBackends(1 /* num_requests_multiple_of */,
kNumFallbackBackends /* start_index */);
}
@@ -1358,7 +1361,7 @@ TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) {
// then sends the serverlist again.
// The serverlist points to backend 1.
LoadBalanceResponse serverlist_resp =
- BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {});
+ BuildResponseForBackends({backends_[1]->port_}, {});
LoadBalanceResponse fallback_resp;
fallback_resp.mutable_fallback_response();
ScheduleResponseForBalancer(0, serverlist_resp, 0);
@@ -1375,8 +1378,7 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// Send kNumRpcsPerAddress RPCs per server.
@@ -1406,8 +1408,7 @@ TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
SetNextResolutionAllBalancers(kServiceConfigWithTarget);
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
@@ -1424,10 +1425,10 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
SetNextResolutionAllBalancers();
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Wait until the first backend is ready.
WaitForBackend(0);
@@ -1482,10 +1483,10 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[0]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Wait until the first backend is ready.
WaitForBackend(0);
@@ -1555,10 +1556,10 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -1688,10 +1689,10 @@ class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+ ScheduleResponseForBalancer(0, BuildResponseForBackends(first_backend, {}),
+ 0);
+ ScheduleResponseForBalancer(1, BuildResponseForBackends(second_backend, {}),
+ 0);
// Ask channel to connect to trigger resolver creation.
channel_->GetState(true);
@@ -1767,7 +1768,7 @@ TEST_F(SingleBalancerTest, Drop) {
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(),
{{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
@@ -1806,7 +1807,7 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
{}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
0);
@@ -1818,13 +1819,12 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
TEST_F(SingleBalancerTest, DropAll) {
SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 1;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
{}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),
1000);
@@ -1850,8 +1850,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
+ 0, BuildResponseForBackends(GetBackendPorts(), {}), 0);
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
@@ -1892,8 +1891,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
// Balancer returns backends starting at index 1.
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(0, kNumBackendsFirstPass), {}),
+ BuildResponseForBackends(GetBackendPorts(0, kNumBackendsFirstPass), {}),
0);
// Wait until all backends returned by the balancer are ready.
int num_ok = 0;
@@ -1922,10 +1920,9 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
}
// Now restart the balancer, this time pointing to all backends.
balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendsFirstPass), {}),
- 0);
+ ScheduleResponseForBalancer(
+ 0, BuildResponseForBackends(GetBackendPorts(kNumBackendsFirstPass), {}),
+ 0);
// Wait for queries to start going to one of the new backends.
// This tells us that we're now using the new serverlist.
do {
@@ -1955,7 +1952,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
const int num_total_addresses = num_backends_ + num_of_drop_addresses;
ScheduleResponseForBalancer(
0,
- BalancerServiceImpl::BuildResponseForBackends(
+ BuildResponseForBackends(
GetBackendPorts(),
{{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
{"load_balancing", num_of_drop_by_load_balancing_addresses}}),