author | heretic <heretic@yandex-team.ru> | 2022-02-10 16:45:46 +0300
---|---|---
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:46 +0300
commit | 81eddc8c0b55990194e112b02d127b87d54164a9 |
tree | 9142afc54d335ea52910662635b898e79e192e49 | /contrib/libs/grpc/test
parent | 397cbe258b9e064f49c4ca575279f02f39fef76e |
download | ydb-81eddc8c0b55990194e112b02d127b87d54164a9.tar.gz |
Restoring authorship annotation for <heretic@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/grpc/test')
105 files changed, 8805 insertions, 8805 deletions
diff --git a/contrib/libs/grpc/test/core/util/.yandex_meta/licenses.list.txt b/contrib/libs/grpc/test/core/util/.yandex_meta/licenses.list.txt
index 6e3a10d022..8082dbe9c6 100644
--- a/contrib/libs/grpc/test/core/util/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/grpc/test/core/util/.yandex_meta/licenses.list.txt
@@ -1,68 +1,68 @@
-====================Apache-2.0====================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-
-
-====================Apache-2.0====================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-====================Apache-2.0====================
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
-====================COPYRIGHT====================
- * Copyright 2015 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2015-2016 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2016 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2017 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2018 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2020 gRPC authors.
-
-
-====================COPYRIGHT====================
- * Copyright 2020 the gRPC authors.
+====================Apache-2.0====================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+
+
+====================Apache-2.0====================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+====================Apache-2.0====================
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+====================COPYRIGHT====================
+ * Copyright 2015 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2015-2016 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2016 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2017 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2018 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2020 gRPC authors.
+
+
+====================COPYRIGHT====================
+ * Copyright 2020 the gRPC authors.
diff --git a/contrib/libs/grpc/test/core/util/cmdline.cc b/contrib/libs/grpc/test/core/util/cmdline.cc index 0fd090e8e4..62b47f927a 100644 --- a/contrib/libs/grpc/test/core/util/cmdline.cc +++ b/contrib/libs/grpc/test/core/util/cmdline.cc @@ -22,12 +22,12 @@ #include <stdio.h> #include <string.h> -#include <vector> - -#include "y_absl/strings/str_cat.h" -#include "y_absl/strings/str_format.h" -#include "y_absl/strings/str_join.h" - +#include <vector> + +#include "y_absl/strings/str_cat.h" +#include "y_absl/strings/str_format.h" +#include "y_absl/strings/str_join.h" + #include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/string_util.h> @@ -130,42 +130,42 @@ void gpr_cmdline_on_extra_arg( /* recursively descend argument list, adding the last element to s first - so that arguments are added in the order they were added to the list by api calls */ -static void add_args_to_usage(arg* a, std::vector<TString>* s) { - if (a == nullptr) return; - add_args_to_usage(a->next, s); +static void add_args_to_usage(arg* a, std::vector<TString>* s) { + if (a == nullptr) return; + add_args_to_usage(a->next, s); switch (a->type) { case ARGTYPE_BOOL: - s->push_back(y_absl::StrFormat(" [--%s|--no-%s]", a->name, a->name)); + s->push_back(y_absl::StrFormat(" [--%s|--no-%s]", a->name, a->name)); break; case ARGTYPE_STRING: - s->push_back(y_absl::StrFormat(" [--%s=string]", a->name)); + s->push_back(y_absl::StrFormat(" [--%s=string]", a->name)); break; case ARGTYPE_INT: - s->push_back(y_absl::StrFormat(" [--%s=int]", a->name)); + s->push_back(y_absl::StrFormat(" [--%s=int]", a->name)); break; } } -TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0) { +TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0) { const char* name = strrchr(argv0, '/'); - if (name != nullptr) { + if (name != nullptr) { name++; } else { name = argv0; } - std::vector<TString> s; - s.push_back(y_absl::StrCat("Usage: ", name)); - add_args_to_usage(cl->args, &s); + std::vector<TString> s; + s.push_back(y_absl::StrCat("Usage: ", name)); + add_args_to_usage(cl->args, &s); if (cl->extra_arg) { - s.push_back(y_absl::StrFormat(" [%s...]", cl->extra_arg_name)); + s.push_back(y_absl::StrFormat(" [%s...]", cl->extra_arg_name)); } - s.push_back("\n"); - return y_absl::StrJoin(s, ""); + s.push_back("\n"); + return y_absl::StrJoin(s, ""); } static int print_usage_and_die(gpr_cmdline* cl) { - fprintf(stderr, "%s", gpr_cmdline_usage_string(cl, cl->argv0).c_str()); + fprintf(stderr, "%s", gpr_cmdline_usage_string(cl, cl->argv0).c_str()); if (!cl->survive_failure) { exit(1); } diff --git a/contrib/libs/grpc/test/core/util/cmdline.h b/contrib/libs/grpc/test/core/util/cmdline.h index e808822485..cc75a8974e 100644 --- a/contrib/libs/grpc/test/core/util/cmdline.h +++ b/contrib/libs/grpc/test/core/util/cmdline.h @@ -21,8 +21,8 @@ #include <grpc/support/port_platform.h> -#include <util/generic/string.h> - +#include <util/generic/string.h> + /** Simple command line parser. 
Supports flags that can be specified as -foo, --foo, --no-foo, -no-foo, etc @@ -77,6 +77,6 @@ int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv); /** Destroy the parser */ void gpr_cmdline_destroy(gpr_cmdline* cl); /** Get a string describing usage */ -TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0); +TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0); #endif /* GRPC_TEST_CORE_UTIL_CMDLINE_H */ diff --git a/contrib/libs/grpc/test/core/util/cmdline_test.cc b/contrib/libs/grpc/test/core/util/cmdline_test.cc index 60d2354a0c..b1b7da6b17 100644 --- a/contrib/libs/grpc/test/core/util/cmdline_test.cc +++ b/contrib/libs/grpc/test/core/util/cmdline_test.cc @@ -321,15 +321,15 @@ static void test_usage(void) { gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb, nullptr); - TString usage = gpr_cmdline_usage_string(cl, "test"); - GPR_ASSERT(usage == - "Usage: test [--str=string] [--x=int] " - "[--flag|--no-flag] [file...]\n"); + TString usage = gpr_cmdline_usage_string(cl, "test"); + GPR_ASSERT(usage == + "Usage: test [--str=string] [--x=int] " + "[--flag|--no-flag] [file...]\n"); usage = gpr_cmdline_usage_string(cl, "/foo/test"); - GPR_ASSERT(usage == - "Usage: test [--str=string] [--x=int] " - "[--flag|--no-flag] [file...]\n"); + GPR_ASSERT(usage == + "Usage: test [--str=string] [--x=int] " + "[--flag|--no-flag] [file...]\n"); gpr_cmdline_destroy(cl); } diff --git a/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.cc b/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.cc index a61eee78b4..00d4056ba5 100644 --- a/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.cc +++ b/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.cc @@ -1,118 +1,118 @@ -// Copyright 2020 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include <grpc/support/port_platform.h> - -#include "test/core/util/eval_args_mock_endpoint.h" - -#include <inttypes.h> - -#include <util/generic/string.h> - -#include "y_absl/strings/str_format.h" - -#include <grpc/support/alloc.h> -#include <grpc/support/string_util.h> -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" - -namespace grpc_core { - -class EvalArgsMockEndpoint { - public: - EvalArgsMockEndpoint(y_absl::string_view local_uri, y_absl::string_view peer_uri) - : local_address_(local_uri), peer_(peer_uri) { - base_.vtable = &vtable_; - } - grpc_endpoint* base() const { return const_cast<grpc_endpoint*>(&base_); } - static void Read(grpc_endpoint* ep, grpc_slice_buffer* slices, - grpc_closure* cb, bool unused) {} - static void Write(grpc_endpoint* ep, grpc_slice_buffer* slices, - grpc_closure* cb, void* unused) {} - static void AddToPollset(grpc_endpoint* ep, grpc_pollset* unused) {} - static void AddToPollsetSet(grpc_endpoint* ep, grpc_pollset_set* unused) {} - static void DeleteFromPollsetSet(grpc_endpoint* ep, - grpc_pollset_set* unused) {} - static void Shutdown(grpc_endpoint* ep, grpc_error* why) {} - static void Destroy(grpc_endpoint* ep) { - EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); - delete m; - } - - static y_absl::string_view GetPeer(grpc_endpoint* ep) { - EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); - return m->peer_; - } - - static y_absl::string_view GetLocalAddress(grpc_endpoint* ep) { - EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); - return m->local_address_; - } - - static grpc_resource_user* GetResourceUser(grpc_endpoint* ep) { - return nullptr; - } - - static int GetFd(grpc_endpoint* unused) { return -1; } - static bool CanTrackErr(grpc_endpoint* unused) { return false; } - - private: - static constexpr grpc_endpoint_vtable vtable_ = { - EvalArgsMockEndpoint::Read, - EvalArgsMockEndpoint::Write, - EvalArgsMockEndpoint::AddToPollset, - EvalArgsMockEndpoint::AddToPollsetSet, - EvalArgsMockEndpoint::DeleteFromPollsetSet, - EvalArgsMockEndpoint::Shutdown, - EvalArgsMockEndpoint::Destroy, - EvalArgsMockEndpoint::GetResourceUser, - EvalArgsMockEndpoint::GetPeer, - EvalArgsMockEndpoint::GetLocalAddress, - EvalArgsMockEndpoint::GetFd, - EvalArgsMockEndpoint::CanTrackErr}; - grpc_endpoint base_; - TString local_address_; - TString peer_; -}; - -constexpr grpc_endpoint_vtable EvalArgsMockEndpoint::vtable_; - -namespace { - -TString NameAndPortToURI(const char* addr, const int port) { - grpc_sockaddr_in address; - memset(&address, 0, sizeof(address)); - address.sin_family = AF_INET; - address.sin_port = htons(port); - inet_pton(AF_INET, addr, &address.sin_addr); - grpc_resolved_address resolved; - memset(&resolved, 0, sizeof(resolved)); - memcpy(resolved.addr, &address, sizeof(address)); - resolved.len = sizeof(address); - return grpc_sockaddr_to_uri(&resolved); -} - -} // namespace - -grpc_endpoint* CreateEvalArgsMockEndpoint(const char* local_address, - const int local_port, - const char* peer_address, - const int peer_port) { - EvalArgsMockEndpoint* m = - new EvalArgsMockEndpoint(NameAndPortToURI(local_address, local_port), - NameAndPortToURI(peer_address, peer_port)); - return m->base(); -} - -} // namespace grpc_core +// Copyright 2020 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <grpc/support/port_platform.h> + +#include "test/core/util/eval_args_mock_endpoint.h" + +#include <inttypes.h> + +#include <util/generic/string.h> + +#include "y_absl/strings/str_format.h" + +#include <grpc/support/alloc.h> +#include <grpc/support/string_util.h> +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" + +namespace grpc_core { + +class EvalArgsMockEndpoint { + public: + EvalArgsMockEndpoint(y_absl::string_view local_uri, y_absl::string_view peer_uri) + : local_address_(local_uri), peer_(peer_uri) { + base_.vtable = &vtable_; + } + grpc_endpoint* base() const { return const_cast<grpc_endpoint*>(&base_); } + static void Read(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb, bool unused) {} + static void Write(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb, void* unused) {} + static void AddToPollset(grpc_endpoint* ep, grpc_pollset* unused) {} + static void AddToPollsetSet(grpc_endpoint* ep, grpc_pollset_set* unused) {} + static void DeleteFromPollsetSet(grpc_endpoint* ep, + grpc_pollset_set* unused) {} + static void Shutdown(grpc_endpoint* ep, grpc_error* why) {} + static void Destroy(grpc_endpoint* ep) { + EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); + delete m; + } + + static y_absl::string_view GetPeer(grpc_endpoint* ep) { + EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); + return m->peer_; + } + + static y_absl::string_view GetLocalAddress(grpc_endpoint* ep) { + EvalArgsMockEndpoint* m = reinterpret_cast<EvalArgsMockEndpoint*>(ep); + return m->local_address_; + } + + static grpc_resource_user* GetResourceUser(grpc_endpoint* ep) { + return nullptr; + } + + static int GetFd(grpc_endpoint* unused) { return -1; } + static bool CanTrackErr(grpc_endpoint* unused) { return false; } + + private: + static constexpr grpc_endpoint_vtable vtable_ = { + EvalArgsMockEndpoint::Read, + EvalArgsMockEndpoint::Write, + EvalArgsMockEndpoint::AddToPollset, + EvalArgsMockEndpoint::AddToPollsetSet, + EvalArgsMockEndpoint::DeleteFromPollsetSet, + EvalArgsMockEndpoint::Shutdown, + EvalArgsMockEndpoint::Destroy, + EvalArgsMockEndpoint::GetResourceUser, + EvalArgsMockEndpoint::GetPeer, + EvalArgsMockEndpoint::GetLocalAddress, + EvalArgsMockEndpoint::GetFd, + EvalArgsMockEndpoint::CanTrackErr}; + grpc_endpoint base_; + TString local_address_; + TString peer_; +}; + +constexpr grpc_endpoint_vtable EvalArgsMockEndpoint::vtable_; + +namespace { + +TString NameAndPortToURI(const char* addr, const int port) { + grpc_sockaddr_in address; + memset(&address, 0, sizeof(address)); + address.sin_family = AF_INET; + address.sin_port = htons(port); + inet_pton(AF_INET, addr, &address.sin_addr); + grpc_resolved_address resolved; + memset(&resolved, 0, sizeof(resolved)); + memcpy(resolved.addr, &address, sizeof(address)); + resolved.len = sizeof(address); + return grpc_sockaddr_to_uri(&resolved); +} + +} // namespace + +grpc_endpoint* CreateEvalArgsMockEndpoint(const char* local_address, + const int local_port, + const char* peer_address, + const int peer_port) { + 
EvalArgsMockEndpoint* m = + new EvalArgsMockEndpoint(NameAndPortToURI(local_address, local_port), + NameAndPortToURI(peer_address, peer_port)); + return m->base(); +} + +} // namespace grpc_core diff --git a/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.h b/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.h index 8b4f13dc06..68b32cc891 100644 --- a/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.h +++ b/contrib/libs/grpc/test/core/util/eval_args_mock_endpoint.h @@ -1,31 +1,31 @@ -// Copyright 2020 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H -#define GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H - -#include <grpc/support/port_platform.h> - -#include "src/core/lib/iomgr/endpoint.h" - -namespace grpc_core { - -grpc_endpoint* CreateEvalArgsMockEndpoint(const char* local_address, - const int local_port, - const char* peer_address, - const int peer_port); - -} // namespace grpc_core - -#endif // GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H +// Copyright 2020 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H +#define GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H + +#include <grpc/support/port_platform.h> + +#include "src/core/lib/iomgr/endpoint.h" + +namespace grpc_core { + +grpc_endpoint* CreateEvalArgsMockEndpoint(const char* local_address, + const int local_port, + const char* peer_address, + const int peer_port); + +} // namespace grpc_core + +#endif // GRPC_TEST_CORE_UTIL_EVAL_ARGS_MOCK_ENDPOINT_H diff --git a/contrib/libs/grpc/test/core/util/examine_stack.cc b/contrib/libs/grpc/test/core/util/examine_stack.cc index cc57735de2..4400a343b4 100644 --- a/contrib/libs/grpc/test/core/util/examine_stack.cc +++ b/contrib/libs/grpc/test/core/util/examine_stack.cc @@ -1,102 +1,102 @@ -/* - * - * Copyright 2020 the gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include <grpc/support/port_platform.h> - -#include "test/core/util/examine_stack.h" - -#include <cstdio> -#include <util/generic/string.h> - -#include "y_absl/debugging/stacktrace.h" -#include "y_absl/debugging/symbolize.h" - -namespace { - -static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); - -static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*), - void* writerfn_arg, void* pc, - void* symbolize_pc, int framesize, - const char* const prefix) { - char tmp[1024]; - const char* symbol = "(unknown)"; - if (y_absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { - symbol = tmp; - } - char buf[1024]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, - kPrintfPointerFieldWidth, pc, symbol); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, - kPrintfPointerFieldWidth, pc, framesize, symbol); - } - writerfn(buf, writerfn_arg); -} - -static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*), - void* writerfn_arg, void* pc, int framesize, - const char* const prefix) { - char buf[100]; - if (framesize <= 0) { - snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, - kPrintfPointerFieldWidth, pc); - } else { - snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, - kPrintfPointerFieldWidth, pc, framesize); - } - writerfn(buf, writerfn_arg); -} - -static void DumpStackTrace(void* const stack[], int frame_sizes[], int depth, - bool symbolize_stacktrace, - void (*writerfn)(const char*, void*), - void* writerfn_arg) { - for (int i = 0; i < depth; i++) { - if (symbolize_stacktrace) { - DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i], - reinterpret_cast<char*>(stack[i]) - 1, - frame_sizes[i], " "); - } else { - DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i], - " "); - } - } -} - -static void DebugWriteToString(const char* data, void* str) { - reinterpret_cast<TString*>(str)->append(data); -} - -} // namespace - -namespace grpc_core { - -TString CurrentStackTrace() { - TString result = "Stack trace:\n"; - constexpr int kNumStackFrames = 32; - void* stack[kNumStackFrames]; - int frame_sizes[kNumStackFrames]; - int depth = y_absl::GetStackFrames(stack, frame_sizes, kNumStackFrames, 1); - DumpStackTrace(stack, frame_sizes, depth, true, DebugWriteToString, - (void*)&result); - return result; -} - -} // namespace grpc_core +/* + * + * Copyright 2020 the gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include <grpc/support/port_platform.h> + +#include "test/core/util/examine_stack.h" + +#include <cstdio> +#include <util/generic/string.h> + +#include "y_absl/debugging/stacktrace.h" +#include "y_absl/debugging/symbolize.h" + +namespace { + +static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*); + +static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*), + void* writerfn_arg, void* pc, + void* symbolize_pc, int framesize, + const char* const prefix) { + char tmp[1024]; + const char* symbol = "(unknown)"; + if (y_absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) { + symbol = tmp; + } + char buf[1024]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix, + kPrintfPointerFieldWidth, pc, symbol); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix, + kPrintfPointerFieldWidth, pc, framesize, symbol); + } + writerfn(buf, writerfn_arg); +} + +static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*), + void* writerfn_arg, void* pc, int framesize, + const char* const prefix) { + char buf[100]; + if (framesize <= 0) { + snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix, + kPrintfPointerFieldWidth, pc); + } else { + snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix, + kPrintfPointerFieldWidth, pc, framesize); + } + writerfn(buf, writerfn_arg); +} + +static void DumpStackTrace(void* const stack[], int frame_sizes[], int depth, + bool symbolize_stacktrace, + void (*writerfn)(const char*, void*), + void* writerfn_arg) { + for (int i = 0; i < depth; i++) { + if (symbolize_stacktrace) { + DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i], + reinterpret_cast<char*>(stack[i]) - 1, + frame_sizes[i], " "); + } else { + DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i], + " "); + } + } +} + +static void DebugWriteToString(const char* data, void* str) { + reinterpret_cast<TString*>(str)->append(data); +} + +} // namespace + +namespace grpc_core { + +TString CurrentStackTrace() { + TString result = "Stack trace:\n"; + constexpr int kNumStackFrames = 32; + void* stack[kNumStackFrames]; + int frame_sizes[kNumStackFrames]; + int depth = y_absl::GetStackFrames(stack, frame_sizes, kNumStackFrames, 1); + DumpStackTrace(stack, frame_sizes, depth, true, DebugWriteToString, + (void*)&result); + return result; +} + +} // namespace grpc_core diff --git a/contrib/libs/grpc/test/core/util/examine_stack.h b/contrib/libs/grpc/test/core/util/examine_stack.h index 784365d13a..1eaf95792d 100644 --- a/contrib/libs/grpc/test/core/util/examine_stack.h +++ b/contrib/libs/grpc/test/core/util/examine_stack.h @@ -1,34 +1,34 @@ -/* - * - * Copyright 2020 the gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H -#define GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H - -#include <grpc/support/port_platform.h> - -#include <util/generic/string.h> - -namespace grpc_core { - -// Return the current stack trace as a string (on multiple lines, beginning with -// "Stack trace:\n") -TString CurrentStackTrace(); - -} // namespace grpc_core - -#endif /* GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H */ +/* + * + * Copyright 2020 the gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H +#define GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H + +#include <grpc/support/port_platform.h> + +#include <util/generic/string.h> + +namespace grpc_core { + +// Return the current stack trace as a string (on multiple lines, beginning with +// "Stack trace:\n") +TString CurrentStackTrace(); + +} // namespace grpc_core + +#endif /* GRPC_TEST_CORE_UTIL_EXAMINE_STACK_H */ diff --git a/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc b/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc index f228d2d778..99ab45120d 100644 --- a/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc +++ b/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc @@ -26,8 +26,8 @@ #include <stdio.h> #include <sys/types.h> -#include <grpc/grpc.h> - +#include <grpc/grpc.h> + #include "src/core/lib/gpr/env.h" #include "src/core/lib/iomgr/load_file.h" #include "test/core/util/test_config.h" @@ -47,35 +47,35 @@ using namespace gflags; DEFINE_string(file, "", "Use this file as test data"); DEFINE_string(directory, "", "Use this directory as test data"); -class FuzzerCorpusTest : public ::testing::TestWithParam<TString> {}; +class FuzzerCorpusTest : public ::testing::TestWithParam<TString> {}; TEST_P(FuzzerCorpusTest, RunOneExample) { - // Need to call grpc_init() here to use a slice, but need to shut it - // down before calling LLVMFuzzerTestOneInput(), because most - // implementations of that function will initialize and shutdown gRPC - // internally. - grpc_init(); + // Need to call grpc_init() here to use a slice, but need to shut it + // down before calling LLVMFuzzerTestOneInput(), because most + // implementations of that function will initialize and shutdown gRPC + // internally. 
+ grpc_init(); gpr_log(GPR_DEBUG, "Example file: %s", GetParam().c_str()); grpc_slice buffer; squelch = false; leak_check = false; GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", grpc_load_file(GetParam().c_str(), 0, &buffer))); - size_t length = GRPC_SLICE_LENGTH(buffer); - void* data = gpr_malloc(length); - memcpy(data, GPR_SLICE_START_PTR(buffer), length); + size_t length = GRPC_SLICE_LENGTH(buffer); + void* data = gpr_malloc(length); + memcpy(data, GPR_SLICE_START_PTR(buffer), length); grpc_slice_unref(buffer); - grpc_shutdown_blocking(); - LLVMFuzzerTestOneInput(static_cast<uint8_t*>(data), length); - gpr_free(data); + grpc_shutdown_blocking(); + LLVMFuzzerTestOneInput(static_cast<uint8_t*>(data), length); + gpr_free(data); } class ExampleGenerator - : public ::testing::internal::ParamGeneratorInterface<TString> { + : public ::testing::internal::ParamGeneratorInterface<TString> { public: - virtual ::testing::internal::ParamIteratorInterface<TString>* Begin() + virtual ::testing::internal::ParamIteratorInterface<TString>* Begin() const; - virtual ::testing::internal::ParamIteratorInterface<TString>* End() const; + virtual ::testing::internal::ParamIteratorInterface<TString>* End() const; private: void Materialize() const { @@ -83,21 +83,21 @@ class ExampleGenerator if (!FLAGS_file.empty()) examples_.push_back(FLAGS_file); if (!FLAGS_directory.empty()) { char* test_srcdir = gpr_getenv("TEST_SRCDIR"); - gpr_log(GPR_DEBUG, "test_srcdir=\"%s\"", test_srcdir); - TString directory = FLAGS_directory; + gpr_log(GPR_DEBUG, "test_srcdir=\"%s\"", test_srcdir); + TString directory = FLAGS_directory; if (test_srcdir != nullptr) { - directory = - test_srcdir + TString("/com_github_grpc_grpc/") + directory; + directory = + test_srcdir + TString("/com_github_grpc_grpc/") + directory; } - gpr_log(GPR_DEBUG, "Using corpus directory: %s", directory.c_str()); + gpr_log(GPR_DEBUG, "Using corpus directory: %s", directory.c_str()); DIR* dp; struct dirent* ep; - dp = opendir(directory.c_str()); + dp = opendir(directory.c_str()); if (dp != nullptr) { while ((ep = readdir(dp)) != nullptr) { - if (strcmp(ep->d_name, ".") != 0 && strcmp(ep->d_name, "..") != 0) { - examples_.push_back(directory + "/" + ep->d_name); + if (strcmp(ep->d_name, ".") != 0 && strcmp(ep->d_name, "..") != 0) { + examples_.push_back(directory + "/" + ep->d_name); } } @@ -109,28 +109,28 @@ class ExampleGenerator gpr_free(test_srcdir); } } - // Make sure we don't succeed without doing anything, which caused - // us to be blind to our fuzzers not running for 9 months. - GPR_ASSERT(!examples_.empty()); + // Make sure we don't succeed without doing anything, which caused + // us to be blind to our fuzzers not running for 9 months. 
+ GPR_ASSERT(!examples_.empty()); } - mutable std::vector<TString> examples_; + mutable std::vector<TString> examples_; }; class ExampleIterator - : public ::testing::internal::ParamIteratorInterface<TString> { + : public ::testing::internal::ParamIteratorInterface<TString> { public: ExampleIterator(const ExampleGenerator& base_, - std::vector<TString>::const_iterator begin) + std::vector<TString>::const_iterator begin) : base_(base_), begin_(begin), current_(begin) {} virtual const ExampleGenerator* BaseGenerator() const { return &base_; } virtual void Advance() { current_++; } virtual ExampleIterator* Clone() const { return new ExampleIterator(*this); } - virtual const TString* Current() const { return &*current_; } + virtual const TString* Current() const { return &*current_; } - virtual bool Equals(const ParamIteratorInterface<TString>& other) const { + virtual bool Equals(const ParamIteratorInterface<TString>& other) const { return &base_ == other.BaseGenerator() && current_ == dynamic_cast<const ExampleIterator*>(&other)->current_; } @@ -140,17 +140,17 @@ class ExampleIterator : base_(other.base_), begin_(other.begin_), current_(other.current_) {} const ExampleGenerator& base_; - const std::vector<TString>::const_iterator begin_; - std::vector<TString>::const_iterator current_; + const std::vector<TString>::const_iterator begin_; + std::vector<TString>::const_iterator current_; }; -::testing::internal::ParamIteratorInterface<TString>* +::testing::internal::ParamIteratorInterface<TString>* ExampleGenerator::Begin() const { Materialize(); return new ExampleIterator(*this, examples_.begin()); } -::testing::internal::ParamIteratorInterface<TString>* +::testing::internal::ParamIteratorInterface<TString>* ExampleGenerator::End() const { Materialize(); return new ExampleIterator(*this, examples_.end()); @@ -158,11 +158,11 @@ ExampleGenerator::End() const { INSTANTIATE_TEST_SUITE_P( CorpusExamples, FuzzerCorpusTest, - ::testing::internal::ParamGenerator<TString>(new ExampleGenerator)); + ::testing::internal::ParamGenerator<TString>(new ExampleGenerator)); int main(int argc, char** argv) { grpc::testing::TestEnvironment env(argc, argv); - grpc::testing::InitTest(&argc, &argv, true); + grpc::testing::InitTest(&argc, &argv, true); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl b/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl index df2a4aa22f..99594b29e1 100644 --- a/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl +++ b/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl @@ -14,12 +14,12 @@ load("//bazel:grpc_build_system.bzl", "grpc_cc_test") -def grpc_fuzzer(name, corpus, srcs = [], deps = [], data = [], size = "large", **kwargs): +def grpc_fuzzer(name, corpus, srcs = [], deps = [], data = [], size = "large", **kwargs): grpc_cc_test( name = name, srcs = srcs, deps = deps + ["//test/core/util:fuzzer_corpus_test"], - data = data + native.glob([corpus + "/**"]), + data = data + native.glob([corpus + "/**"]), external_deps = [ "gtest", ], diff --git a/contrib/libs/grpc/test/core/util/mock_endpoint.cc b/contrib/libs/grpc/test/core/util/mock_endpoint.cc index ad00b6baf5..3c2d6a3f8b 100644 --- a/contrib/libs/grpc/test/core/util/mock_endpoint.cc +++ b/contrib/libs/grpc/test/core/util/mock_endpoint.cc @@ -22,12 +22,12 @@ headers. 
Therefore, sockaddr.h must always be included first */ #include "src/core/lib/iomgr/sockaddr.h" -#include <inttypes.h> - -#include <util/generic/string.h> - -#include "y_absl/strings/str_format.h" - +#include <inttypes.h> + +#include <util/generic/string.h> + +#include "y_absl/strings/str_format.h" + #include "test/core/util/mock_endpoint.h" #include <grpc/support/alloc.h> @@ -98,14 +98,14 @@ static void me_destroy(grpc_endpoint* ep) { gpr_free(m); } -static y_absl::string_view me_get_peer(grpc_endpoint* /*ep*/) { - return "fake:mock_endpoint"; +static y_absl::string_view me_get_peer(grpc_endpoint* /*ep*/) { + return "fake:mock_endpoint"; +} + +static y_absl::string_view me_get_local_address(grpc_endpoint* /*ep*/) { + return "fake:mock_endpoint"; } -static y_absl::string_view me_get_local_address(grpc_endpoint* /*ep*/) { - return "fake:mock_endpoint"; -} - static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) { mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep); return m->resource_user; @@ -124,7 +124,7 @@ static const grpc_endpoint_vtable vtable = {me_read, me_destroy, me_get_resource_user, me_get_peer, - me_get_local_address, + me_get_local_address, me_get_fd, me_can_track_err}; @@ -132,8 +132,8 @@ grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice), grpc_resource_quota* resource_quota) { mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m))); m->base.vtable = &vtable; - TString name = y_absl::StrFormat("mock_endpoint_%" PRIxPTR, (intptr_t)m); - m->resource_user = grpc_resource_user_create(resource_quota, name.c_str()); + TString name = y_absl::StrFormat("mock_endpoint_%" PRIxPTR, (intptr_t)m); + m->resource_user = grpc_resource_user_create(resource_quota, name.c_str()); grpc_slice_buffer_init(&m->read_buffer); gpr_mu_init(&m->mu); m->on_write = on_write; diff --git a/contrib/libs/grpc/test/core/util/passthru_endpoint.cc b/contrib/libs/grpc/test/core/util/passthru_endpoint.cc index d009195b0a..a5baafad02 100644 --- a/contrib/libs/grpc/test/core/util/passthru_endpoint.cc +++ b/contrib/libs/grpc/test/core/util/passthru_endpoint.cc @@ -27,10 +27,10 @@ #include <inttypes.h> #include <string.h> -#include <util/generic/string.h> - -#include "y_absl/strings/str_format.h" - +#include <util/generic/string.h> + +#include "y_absl/strings/str_format.h" + #include <grpc/support/alloc.h> #include <grpc/support/string_util.h> #include "src/core/lib/iomgr/sockaddr.h" @@ -152,20 +152,20 @@ static void me_destroy(grpc_endpoint* ep) { } } -static y_absl::string_view me_get_peer(grpc_endpoint* ep) { +static y_absl::string_view me_get_peer(grpc_endpoint* ep) { + passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent; + return (reinterpret_cast<half*>(ep)) == &p->client + ? "fake:mock_client_endpoint" + : "fake:mock_server_endpoint"; +} + +static y_absl::string_view me_get_local_address(grpc_endpoint* ep) { passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent; return (reinterpret_cast<half*>(ep)) == &p->client - ? "fake:mock_client_endpoint" - : "fake:mock_server_endpoint"; + ? "fake:mock_client_endpoint" + : "fake:mock_server_endpoint"; } -static y_absl::string_view me_get_local_address(grpc_endpoint* ep) { - passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent; - return (reinterpret_cast<half*>(ep)) == &p->client - ? 
"fake:mock_client_endpoint" - : "fake:mock_server_endpoint"; -} - static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; } static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; } @@ -185,7 +185,7 @@ static const grpc_endpoint_vtable vtable = { me_destroy, me_get_resource_user, me_get_peer, - me_get_local_address, + me_get_local_address, me_get_fd, me_can_track_err, }; @@ -197,9 +197,9 @@ static void half_init(half* m, passthru_endpoint* parent, m->parent = parent; grpc_slice_buffer_init(&m->read_buffer); m->on_read = nullptr; - TString name = y_absl::StrFormat("passthru_endpoint_%s_%" PRIxPTR, - half_name, (intptr_t)parent); - m->resource_user = grpc_resource_user_create(resource_quota, name.c_str()); + TString name = y_absl::StrFormat("passthru_endpoint_%s_%" PRIxPTR, + half_name, (intptr_t)parent); + m->resource_user = grpc_resource_user_create(resource_quota, name.c_str()); } void grpc_passthru_endpoint_create(grpc_endpoint** client, diff --git a/contrib/libs/grpc/test/core/util/port.cc b/contrib/libs/grpc/test/core/util/port.cc index 41732fb381..5a34b6026f 100644 --- a/contrib/libs/grpc/test/core/util/port.cc +++ b/contrib/libs/grpc/test/core/util/port.cc @@ -38,14 +38,14 @@ static int* chosen_ports = nullptr; static size_t num_chosen_ports = 0; -static grpc_core::Mutex* g_default_port_picker_mu; -static gpr_once g_default_port_picker_init = GPR_ONCE_INIT; - -static void init_default_port_picker() { - g_default_port_picker_mu = new grpc_core::Mutex(); -} - -static int free_chosen_port_locked(int port) { +static grpc_core::Mutex* g_default_port_picker_mu; +static gpr_once g_default_port_picker_init = GPR_ONCE_INIT; + +static void init_default_port_picker() { + g_default_port_picker_mu = new grpc_core::Mutex(); +} + +static int free_chosen_port_locked(int port) { size_t i; int found = 0; size_t found_at = 0; @@ -67,7 +67,7 @@ static int free_chosen_port_locked(int port) { } static void free_chosen_ports(void) { - grpc_core::MutexLock lock(g_default_port_picker_mu); + grpc_core::MutexLock lock(g_default_port_picker_mu); size_t i; grpc_init(); for (i = 0; i < num_chosen_ports; i++) { @@ -77,7 +77,7 @@ static void free_chosen_ports(void) { gpr_free(chosen_ports); } -static void chose_port_locked(int port) { +static void chose_port_locked(int port) { if (chosen_ports == nullptr) { atexit(free_chosen_ports); } @@ -88,11 +88,11 @@ static void chose_port_locked(int port) { } static int grpc_pick_unused_port_impl(void) { - gpr_once_init(&g_default_port_picker_init, init_default_port_picker); - grpc_core::MutexLock lock(g_default_port_picker_mu); + gpr_once_init(&g_default_port_picker_init, init_default_port_picker); + grpc_core::MutexLock lock(g_default_port_picker_mu); int port = grpc_pick_port_using_server(); if (port != 0) { - chose_port_locked(port); + chose_port_locked(port); } return port; @@ -112,9 +112,9 @@ static int grpc_pick_unused_port_or_die_impl(void) { } static void grpc_recycle_unused_port_impl(int port) { - gpr_once_init(&g_default_port_picker_init, init_default_port_picker); - grpc_core::MutexLock lock(g_default_port_picker_mu); - GPR_ASSERT(free_chosen_port_locked(port)); + gpr_once_init(&g_default_port_picker_init, init_default_port_picker); + grpc_core::MutexLock lock(g_default_port_picker_mu); + GPR_ASSERT(free_chosen_port_locked(port)); } static grpc_pick_port_functions g_pick_port_functions = { diff --git a/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc b/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc index 
82d6b825d1..5e043e7f97 100644 --- a/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc +++ b/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc @@ -43,7 +43,7 @@ static int get_random_port_offset() { static int s_initial_offset = get_random_port_offset(); static gpr_atm s_pick_counter = 0; -static int grpc_pick_unused_port_or_die_impl(void) { +static int grpc_pick_unused_port_or_die_impl(void) { int orig_counter_val = static_cast<int>(gpr_atm_full_fetch_add(&s_pick_counter, 1)); GPR_ASSERT(orig_counter_val < (MAX_PORT - MIN_PORT + 1)); @@ -51,19 +51,19 @@ static int grpc_pick_unused_port_or_die_impl(void) { (s_initial_offset + orig_counter_val) % (MAX_PORT - MIN_PORT + 1); } -int grpc_pick_unused_port_or_die(void) { - while (true) { - int port = grpc_pick_unused_port_or_die_impl(); - // 5985 cannot be bound on Windows RBE and results in - // WSA_ERROR 10013: "An attempt was made to access a socket in a way - // forbidden by its access permissions." - if (port == 5985) { - continue; - } - return port; - } -} - +int grpc_pick_unused_port_or_die(void) { + while (true) { + int port = grpc_pick_unused_port_or_die_impl(); + // 5985 cannot be bound on Windows RBE and results in + // WSA_ERROR 10013: "An attempt was made to access a socket in a way + // forbidden by its access permissions." + if (port == 5985) { + continue; + } + return port; + } +} + void grpc_recycle_unused_port(int port) { (void)port; } #endif /* GRPC_PORT_ISOLATED_RUNTIME */ diff --git a/contrib/libs/grpc/test/core/util/port_server_client.cc b/contrib/libs/grpc/test/core/util/port_server_client.cc index 9101131441..973068dcda 100644 --- a/contrib/libs/grpc/test/core/util/port_server_client.cc +++ b/contrib/libs/grpc/test/core/util/port_server_client.cc @@ -66,54 +66,54 @@ void grpc_free_port_using_server(int port) { grpc_closure* shutdown_closure; grpc_init(); - { - grpc_core::ExecCtx exec_ctx; + { + grpc_core::ExecCtx exec_ctx; - pr = {}; - memset(&req, 0, sizeof(req)); - rsp = {}; + pr = {}; + memset(&req, 0, sizeof(req)); + rsp = {}; - grpc_pollset* pollset = - static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); - grpc_pollset_init(pollset, &pr.mu); - pr.pops = grpc_polling_entity_create_from_pollset(pollset); - shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops, - grpc_schedule_on_exec_ctx); + grpc_pollset* pollset = + static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); + grpc_pollset_init(pollset, &pr.mu); + pr.pops = grpc_polling_entity_create_from_pollset(pollset); + shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops, + grpc_schedule_on_exec_ctx); - req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS); - gpr_asprintf(&path, "/drop/%d", port); - req.http.path = path; + req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS); + gpr_asprintf(&path, "/drop/%d", port); + req.http.path = path; - grpc_httpcli_context_init(&context); - grpc_resource_quota* resource_quota = - grpc_resource_quota_create("port_server_client/free"); - grpc_httpcli_get(&context, &pr.pops, resource_quota, &req, - grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC, - GRPC_CLOSURE_CREATE(freed_port_from_server, &pr, - grpc_schedule_on_exec_ctx), - &rsp); - grpc_resource_quota_unref_internal(resource_quota); - grpc_core::ExecCtx::Get()->Flush(); - gpr_mu_lock(pr.mu); - while (!pr.done) { - grpc_pollset_worker* worker = nullptr; - if (!GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work( - grpc_polling_entity_pollset(&pr.pops), &worker, - 
grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) { - pr.done = 1; - } + grpc_httpcli_context_init(&context); + grpc_resource_quota* resource_quota = + grpc_resource_quota_create("port_server_client/free"); + grpc_httpcli_get(&context, &pr.pops, resource_quota, &req, + grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC, + GRPC_CLOSURE_CREATE(freed_port_from_server, &pr, + grpc_schedule_on_exec_ctx), + &rsp); + grpc_resource_quota_unref_internal(resource_quota); + grpc_core::ExecCtx::Get()->Flush(); + gpr_mu_lock(pr.mu); + while (!pr.done) { + grpc_pollset_worker* worker = nullptr; + if (!GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work( + grpc_polling_entity_pollset(&pr.pops), &worker, + grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) { + pr.done = 1; + } } - gpr_mu_unlock(pr.mu); + gpr_mu_unlock(pr.mu); - grpc_httpcli_context_destroy(&context); - grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops), - shutdown_closure); + grpc_httpcli_context_destroy(&context); + grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops), + shutdown_closure); - gpr_free(path); - grpc_http_response_destroy(&rsp); - } + gpr_free(path); + grpc_http_response_destroy(&rsp); + } grpc_shutdown(); } diff --git a/contrib/libs/grpc/test/core/util/reconnect_server.cc b/contrib/libs/grpc/test/core/util/reconnect_server.cc index 0509ac26c7..070ab3fa0a 100644 --- a/contrib/libs/grpc/test/core/util/reconnect_server.cc +++ b/contrib/libs/grpc/test/core/util/reconnect_server.cc @@ -21,12 +21,12 @@ #include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> -#include <grpc/support/string_util.h> +#include <grpc/support/string_util.h> #include <grpc/support/sync.h> #include <grpc/support/time.h> #include <string.h> -#include "y_absl/strings/string_view.h" +#include "y_absl/strings/string_view.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/tcp_server.h" @@ -61,8 +61,8 @@ static void on_connect(void* arg, grpc_endpoint* tcp, grpc_pollset* /*accepting_pollset*/, grpc_tcp_server_acceptor* acceptor) { gpr_free(acceptor); - y_absl::string_view peer; - int last_colon; + y_absl::string_view peer; + int last_colon; reconnect_server* server = static_cast<reconnect_server*>(arg); gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); timestamp_list* new_tail; @@ -70,16 +70,16 @@ static void on_connect(void* arg, grpc_endpoint* tcp, grpc_endpoint_shutdown(tcp, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected")); grpc_endpoint_destroy(tcp); - last_colon = peer.rfind(':'); - if (server->peer == nullptr) { - server->peer = new TString(peer); - } else { - if (last_colon == TString::npos) { - gpr_log(GPR_ERROR, "peer does not contain a ':'"); - } else if (peer.compare(0, static_cast<size_t>(last_colon), - *server->peer) != 0) { - gpr_log(GPR_ERROR, "mismatched peer! %s vs %s", server->peer->c_str(), - TString(peer).c_str()); + last_colon = peer.rfind(':'); + if (server->peer == nullptr) { + server->peer = new TString(peer); + } else { + if (last_colon == TString::npos) { + gpr_log(GPR_ERROR, "peer does not contain a ':'"); + } else if (peer.compare(0, static_cast<size_t>(last_colon), + *server->peer) != 0) { + gpr_log(GPR_ERROR, "mismatched peer! 
%s vs %s", server->peer->c_str(), + TString(peer).c_str()); } } new_tail = static_cast<timestamp_list*>(gpr_malloc(sizeof(timestamp_list))); @@ -119,7 +119,7 @@ void reconnect_server_clear_timestamps(reconnect_server* server) { server->head = new_head; } server->tail = nullptr; - delete server->peer; + delete server->peer; server->peer = nullptr; } diff --git a/contrib/libs/grpc/test/core/util/reconnect_server.h b/contrib/libs/grpc/test/core/util/reconnect_server.h index 4572bf3726..5fd03821f7 100644 --- a/contrib/libs/grpc/test/core/util/reconnect_server.h +++ b/contrib/libs/grpc/test/core/util/reconnect_server.h @@ -32,7 +32,7 @@ typedef struct reconnect_server { test_tcp_server tcp_server; timestamp_list* head; timestamp_list* tail; - TString* peer; + TString* peer; int max_reconnect_backoff_ms; } reconnect_server; diff --git a/contrib/libs/grpc/test/core/util/test_config.cc b/contrib/libs/grpc/test/core/util/test_config.cc index 5facb59ed0..9e57a486b2 100644 --- a/contrib/libs/grpc/test/core/util/test_config.cc +++ b/contrib/libs/grpc/test/core/util/test_config.cc @@ -18,7 +18,7 @@ #include "test/core/util/test_config.h" -#include <grpc/impl/codegen/gpr_types.h> +#include <grpc/impl/codegen/gpr_types.h> #include <inttypes.h> #include <signal.h> #include <stdbool.h> @@ -26,7 +26,7 @@ #include <stdlib.h> #include <string.h> -#include <grpc/grpc.h> +#include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> @@ -34,9 +34,9 @@ #include "src/core/lib/gpr/useful.h" #include "src/core/lib/surface/init.h" -#include "y_absl/debugging/failure_signal_handler.h" -#include "y_absl/debugging/symbolize.h" - +#include "y_absl/debugging/failure_signal_handler.h" +#include "y_absl/debugging/symbolize.h" + int64_t g_fixture_slowdown_factor = 1; int64_t g_poller_slowdown_factor = 1; @@ -64,7 +64,7 @@ static unsigned seed(void) { return (unsigned)_getpid(); } #pragma comment(lib, "dbghelp.lib") #endif -static void print_stack_from_context(HANDLE thread, CONTEXT c) { +static void print_stack_from_context(HANDLE thread, CONTEXT c) { STACKFRAME s; // in/out stackframe memset(&s, 0, sizeof(s)); DWORD imageType; @@ -106,45 +106,45 @@ static void print_stack_from_context(HANDLE thread, CONTEXT c) { symbol->MaxNameLen = 255; symbol->SizeOfStruct = sizeof(SYMBOL_INFOW); - const unsigned short MAX_CALLERS_SHOWN = - 8192; // avoid flooding the stderr if stacktrace is way too long - for (int frame = 0; frame < MAX_CALLERS_SHOWN && - StackWalk(imageType, process, thread, &s, &c, 0, - SymFunctionTableAccess, SymGetModuleBase, 0); - frame++) { - PWSTR symbol_name = L"<<no symbol>>"; - DWORD64 symbol_address = 0; - if (SymFromAddrW(process, (DWORD64)(s.AddrPC.Offset), 0, symbol)) { - symbol_name = symbol->Name; - symbol_address = (DWORD64)symbol->Address; - } - - PWSTR file_name = L"<<no line info>>"; - int line_number = 0; - IMAGEHLP_LINE64 line; - line.SizeOfStruct = sizeof(IMAGEHLP_LINE64); - DWORD displacement = 0; - if (SymGetLineFromAddrW64(process, (DWORD64)(s.AddrPC.Offset), - &displacement, &line)) { - file_name = line.FileName; - line_number = (int)line.LineNumber; - } - - fwprintf(stderr, L"*** %d: %016I64X %ls - %016I64X (%ls:%d)\n", frame, - (DWORD64)(s.AddrPC.Offset), symbol_name, symbol_address, file_name, - line_number); + const unsigned short MAX_CALLERS_SHOWN = + 8192; // avoid flooding the stderr if stacktrace is way too long + for (int frame = 0; frame < MAX_CALLERS_SHOWN && + StackWalk(imageType, process, thread, &s, &c, 0, + SymFunctionTableAccess, SymGetModuleBase, 0); + 
frame++) { + PWSTR symbol_name = L"<<no symbol>>"; + DWORD64 symbol_address = 0; + if (SymFromAddrW(process, (DWORD64)(s.AddrPC.Offset), 0, symbol)) { + symbol_name = symbol->Name; + symbol_address = (DWORD64)symbol->Address; + } + + PWSTR file_name = L"<<no line info>>"; + int line_number = 0; + IMAGEHLP_LINE64 line; + line.SizeOfStruct = sizeof(IMAGEHLP_LINE64); + DWORD displacement = 0; + if (SymGetLineFromAddrW64(process, (DWORD64)(s.AddrPC.Offset), + &displacement, &line)) { + file_name = line.FileName; + line_number = (int)line.LineNumber; + } + + fwprintf(stderr, L"*** %d: %016I64X %ls - %016I64X (%ls:%d)\n", frame, + (DWORD64)(s.AddrPC.Offset), symbol_name, symbol_address, file_name, + line_number); fflush(stderr); } free(symbol); } -static void print_current_stack() { - CONTEXT context; - RtlCaptureContext(&context); - print_stack_from_context(GetCurrentThread(), context); -} - +static void print_current_stack() { + CONTEXT context; + RtlCaptureContext(&context); + print_stack_from_context(GetCurrentThread(), context); +} + static LONG crash_handler(struct _EXCEPTION_POINTERS* ex_info) { fprintf(stderr, "Exception handler called, dumping information\n"); bool try_to_print_stack = true; @@ -158,7 +158,7 @@ static LONG crash_handler(struct _EXCEPTION_POINTERS* ex_info) { exrec = exrec->ExceptionRecord; } if (try_to_print_stack) { - print_stack_from_context(GetCurrentThread(), *ex_info->ContextRecord); + print_stack_from_context(GetCurrentThread(), *ex_info->ContextRecord); } if (IsDebuggerPresent()) { __debugbreak(); @@ -371,17 +371,17 @@ gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms) { GPR_TIMESPAN)); } -void grpc_test_init(int argc, char** argv) { -#if GPR_WINDOWS - // Windows cannot use y_absl::InitializeSymbolizer until it fixes mysterious - // SymInitialize failure using Bazel RBE on Windows - // https://github.com/grpc/grpc/issues/24178 +void grpc_test_init(int argc, char** argv) { +#if GPR_WINDOWS + // Windows cannot use y_absl::InitializeSymbolizer until it fixes mysterious + // SymInitialize failure using Bazel RBE on Windows + // https://github.com/grpc/grpc/issues/24178 install_crash_handler(); -#else - y_absl::InitializeSymbolizer(argv[0]); - y_absl::FailureSignalHandlerOptions options; - y_absl::InstallFailureSignalHandler(options); -#endif +#else + y_absl::InitializeSymbolizer(argv[0]); + y_absl::FailureSignalHandlerOptions options; + y_absl::InstallFailureSignalHandler(options); +#endif gpr_log(GPR_DEBUG, "test slowdown factor: sanitizer=%" PRId64 ", fixture=%" PRId64 ", poller=%" PRId64 ", total=%" PRId64, @@ -392,19 +392,19 @@ void grpc_test_init(int argc, char** argv) { srand(seed()); } -bool grpc_wait_until_shutdown(int64_t time_s) { - gpr_timespec deadline = grpc_timeout_seconds_to_deadline(time_s); - while (grpc_is_initialized()) { - grpc_maybe_wait_for_async_shutdown(); - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(1, GPR_TIMESPAN))); - if (gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) > 0) { - return false; - } - } - return true; -} - +bool grpc_wait_until_shutdown(int64_t time_s) { + gpr_timespec deadline = grpc_timeout_seconds_to_deadline(time_s); + while (grpc_is_initialized()) { + grpc_maybe_wait_for_async_shutdown(); + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(1, GPR_TIMESPAN))); + if (gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) > 0) { + return false; + } + } + return true; +} + namespace grpc { namespace testing { @@ -412,32 +412,32 @@ 
TestEnvironment::TestEnvironment(int argc, char** argv) { grpc_test_init(argc, argv); } -TestEnvironment::~TestEnvironment() { - // This will wait until gRPC shutdown has actually happened to make sure - // no gRPC resources (such as thread) are active. (timeout = 10s) - if (!grpc_wait_until_shutdown(10)) { - gpr_log(GPR_ERROR, "Timeout in waiting for gRPC shutdown"); - } - if (BuiltUnderMsan()) { - // This is a workaround for MSAN. MSAN doesn't like having shutdown thread - // running. Although the code above waits until shutdown is done, chances - // are that thread itself is still alive. To workaround this problem, this - // is going to wait for 0.5 sec to give a chance to the shutdown thread to - // exit. https://github.com/grpc/grpc/issues/23695 - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(500, GPR_TIMESPAN))); - } - gpr_log(GPR_INFO, "TestEnvironment ends"); -} - -TestGrpcScope::TestGrpcScope() { grpc_init(); } - -TestGrpcScope::~TestGrpcScope() { - grpc_shutdown(); - if (!grpc_wait_until_shutdown(10)) { - gpr_log(GPR_ERROR, "Timeout in waiting for gRPC shutdown"); - } -} - +TestEnvironment::~TestEnvironment() { + // This will wait until gRPC shutdown has actually happened to make sure + // no gRPC resources (such as thread) are active. (timeout = 10s) + if (!grpc_wait_until_shutdown(10)) { + gpr_log(GPR_ERROR, "Timeout in waiting for gRPC shutdown"); + } + if (BuiltUnderMsan()) { + // This is a workaround for MSAN. MSAN doesn't like having shutdown thread + // running. Although the code above waits until shutdown is done, chances + // are that thread itself is still alive. To workaround this problem, this + // is going to wait for 0.5 sec to give a chance to the shutdown thread to + // exit. https://github.com/grpc/grpc/issues/23695 + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(500, GPR_TIMESPAN))); + } + gpr_log(GPR_INFO, "TestEnvironment ends"); +} + +TestGrpcScope::TestGrpcScope() { grpc_init(); } + +TestGrpcScope::~TestGrpcScope() { + grpc_shutdown(); + if (!grpc_wait_until_shutdown(10)) { + gpr_log(GPR_ERROR, "Timeout in waiting for gRPC shutdown"); + } +} + } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/core/util/test_config.h b/contrib/libs/grpc/test/core/util/test_config.h index 3c5ad5dfe1..6ac43de266 100644 --- a/contrib/libs/grpc/test/core/util/test_config.h +++ b/contrib/libs/grpc/test/core/util/test_config.h @@ -37,25 +37,25 @@ gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms); #define GRPC_TEST_PICK_PORT #endif -// Returns whether this is built under ThreadSanitizer -bool BuiltUnderTsan(); - -// Returns whether this is built under AddressSanitizer -bool BuiltUnderAsan(); - -// Returns whether this is built under MemorySanitizer -bool BuiltUnderMsan(); - -// Returns whether this is built under UndefinedBehaviorSanitizer -bool BuiltUnderUbsan(); - +// Returns whether this is built under ThreadSanitizer +bool BuiltUnderTsan(); + +// Returns whether this is built under AddressSanitizer +bool BuiltUnderAsan(); + +// Returns whether this is built under MemorySanitizer +bool BuiltUnderMsan(); + +// Returns whether this is built under UndefinedBehaviorSanitizer +bool BuiltUnderUbsan(); + // Prefer TestEnvironment below. void grpc_test_init(int argc, char** argv); -// Wait until gRPC is fully shut down. -// Returns if grpc is shutdown -bool grpc_wait_until_shutdown(int64_t time_s); - +// Wait until gRPC is fully shut down. 
+// Returns if grpc is shutdown +bool grpc_wait_until_shutdown(int64_t time_s); + namespace grpc { namespace testing { @@ -67,15 +67,15 @@ class TestEnvironment { ~TestEnvironment(); }; -// A TestGrpcScope makes sure that -// - when it's created, gRPC will be initialized -// - when it's destroyed, gRPC will shutdown and it waits until shutdown -class TestGrpcScope { - public: - TestGrpcScope(); - ~TestGrpcScope(); -}; - +// A TestGrpcScope makes sure that +// - when it's created, gRPC will be initialized +// - when it's destroyed, gRPC will shutdown and it waits until shutdown +class TestGrpcScope { + public: + TestGrpcScope(); + ~TestGrpcScope(); +}; + } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/core/util/test_lb_policies.cc b/contrib/libs/grpc/test/core/util/test_lb_policies.cc index 8e3084bd09..c1c1e9c56a 100644 --- a/contrib/libs/grpc/test/core/util/test_lb_policies.cc +++ b/contrib/libs/grpc/test/core/util/test_lb_policies.cc @@ -18,7 +18,7 @@ #include "test/core/util/test_lb_policies.h" -#include <util/generic/string.h> +#include <util/generic/string.h> #include <grpc/support/log.h> @@ -50,10 +50,10 @@ class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy { public: ForwardingLoadBalancingPolicy( std::unique_ptr<ChannelControlHelper> delegating_helper, Args args, - const TString& delegate_policy_name, intptr_t initial_refcount = 1) + const TString& delegate_policy_name, intptr_t initial_refcount = 1) : LoadBalancingPolicy(std::move(args), initial_refcount) { Args delegate_args; - delegate_args.work_serializer = work_serializer(); + delegate_args.work_serializer = work_serializer(); delegate_args.channel_control_helper = std::move(delegating_helper); delegate_args.args = args.args; delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( @@ -79,118 +79,118 @@ class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy { }; // -// CopyMetadataToVector() -// - -MetadataVector CopyMetadataToVector( - LoadBalancingPolicy::MetadataInterface* metadata) { - MetadataVector result; - for (const auto& p : *metadata) { - result.push_back({TString(p.first), TString(p.second)}); - } - return result; -} - -// -// TestPickArgsLb -// - -constexpr char kTestPickArgsLbPolicyName[] = "test_pick_args_lb"; - -class TestPickArgsLb : public ForwardingLoadBalancingPolicy { - public: - TestPickArgsLb(Args args, TestPickArgsCallback cb) - : ForwardingLoadBalancingPolicy( - y_absl::make_unique<Helper>(RefCountedPtr<TestPickArgsLb>(this), cb), - std::move(args), - /*delegate_lb_policy_name=*/"pick_first", - /*initial_refcount=*/2) {} - - ~TestPickArgsLb() override = default; - - const char* name() const override { return kTestPickArgsLbPolicyName; } - - private: - class Picker : public SubchannelPicker { - public: - Picker(std::unique_ptr<SubchannelPicker> delegate_picker, - TestPickArgsCallback cb) - : delegate_picker_(std::move(delegate_picker)), cb_(std::move(cb)) {} - - PickResult Pick(PickArgs args) override { - // Report args seen. - PickArgsSeen args_seen; - args_seen.path = TString(args.path); - args_seen.metadata = CopyMetadataToVector(args.initial_metadata); - cb_(args_seen); - // Do pick. 
- return delegate_picker_->Pick(args); - } - - private: - std::unique_ptr<SubchannelPicker> delegate_picker_; - TestPickArgsCallback cb_; - }; - - class Helper : public ChannelControlHelper { - public: - Helper(RefCountedPtr<TestPickArgsLb> parent, TestPickArgsCallback cb) - : parent_(std::move(parent)), cb_(std::move(cb)) {} - - RefCountedPtr<SubchannelInterface> CreateSubchannel( - ServerAddress address, const grpc_channel_args& args) override { - return parent_->channel_control_helper()->CreateSubchannel( - std::move(address), args); - } - - void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, - std::unique_ptr<SubchannelPicker> picker) override { - parent_->channel_control_helper()->UpdateState( - state, status, y_absl::make_unique<Picker>(std::move(picker), cb_)); - } - - void RequestReresolution() override { - parent_->channel_control_helper()->RequestReresolution(); - } - - void AddTraceEvent(TraceSeverity severity, - y_absl::string_view message) override { - parent_->channel_control_helper()->AddTraceEvent(severity, message); - } - - private: - RefCountedPtr<TestPickArgsLb> parent_; - TestPickArgsCallback cb_; - }; -}; - -class TestPickArgsLbConfig : public LoadBalancingPolicy::Config { - public: - const char* name() const override { return kTestPickArgsLbPolicyName; } -}; - -class TestPickArgsLbFactory : public LoadBalancingPolicyFactory { - public: - explicit TestPickArgsLbFactory(TestPickArgsCallback cb) - : cb_(std::move(cb)) {} - - OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( - LoadBalancingPolicy::Args args) const override { - return MakeOrphanable<TestPickArgsLb>(std::move(args), cb_); - } - - const char* name() const override { return kTestPickArgsLbPolicyName; } - - RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig( - const Json& /*json*/, grpc_error** /*error*/) const override { - return MakeRefCounted<TestPickArgsLbConfig>(); - } - - private: - TestPickArgsCallback cb_; -}; - -// +// CopyMetadataToVector() +// + +MetadataVector CopyMetadataToVector( + LoadBalancingPolicy::MetadataInterface* metadata) { + MetadataVector result; + for (const auto& p : *metadata) { + result.push_back({TString(p.first), TString(p.second)}); + } + return result; +} + +// +// TestPickArgsLb +// + +constexpr char kTestPickArgsLbPolicyName[] = "test_pick_args_lb"; + +class TestPickArgsLb : public ForwardingLoadBalancingPolicy { + public: + TestPickArgsLb(Args args, TestPickArgsCallback cb) + : ForwardingLoadBalancingPolicy( + y_absl::make_unique<Helper>(RefCountedPtr<TestPickArgsLb>(this), cb), + std::move(args), + /*delegate_lb_policy_name=*/"pick_first", + /*initial_refcount=*/2) {} + + ~TestPickArgsLb() override = default; + + const char* name() const override { return kTestPickArgsLbPolicyName; } + + private: + class Picker : public SubchannelPicker { + public: + Picker(std::unique_ptr<SubchannelPicker> delegate_picker, + TestPickArgsCallback cb) + : delegate_picker_(std::move(delegate_picker)), cb_(std::move(cb)) {} + + PickResult Pick(PickArgs args) override { + // Report args seen. + PickArgsSeen args_seen; + args_seen.path = TString(args.path); + args_seen.metadata = CopyMetadataToVector(args.initial_metadata); + cb_(args_seen); + // Do pick. 
+ return delegate_picker_->Pick(args); + } + + private: + std::unique_ptr<SubchannelPicker> delegate_picker_; + TestPickArgsCallback cb_; + }; + + class Helper : public ChannelControlHelper { + public: + Helper(RefCountedPtr<TestPickArgsLb> parent, TestPickArgsCallback cb) + : parent_(std::move(parent)), cb_(std::move(cb)) {} + + RefCountedPtr<SubchannelInterface> CreateSubchannel( + ServerAddress address, const grpc_channel_args& args) override { + return parent_->channel_control_helper()->CreateSubchannel( + std::move(address), args); + } + + void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, + std::unique_ptr<SubchannelPicker> picker) override { + parent_->channel_control_helper()->UpdateState( + state, status, y_absl::make_unique<Picker>(std::move(picker), cb_)); + } + + void RequestReresolution() override { + parent_->channel_control_helper()->RequestReresolution(); + } + + void AddTraceEvent(TraceSeverity severity, + y_absl::string_view message) override { + parent_->channel_control_helper()->AddTraceEvent(severity, message); + } + + private: + RefCountedPtr<TestPickArgsLb> parent_; + TestPickArgsCallback cb_; + }; +}; + +class TestPickArgsLbConfig : public LoadBalancingPolicy::Config { + public: + const char* name() const override { return kTestPickArgsLbPolicyName; } +}; + +class TestPickArgsLbFactory : public LoadBalancingPolicyFactory { + public: + explicit TestPickArgsLbFactory(TestPickArgsCallback cb) + : cb_(std::move(cb)) {} + + OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable<TestPickArgsLb>(std::move(args), cb_); + } + + const char* name() const override { return kTestPickArgsLbPolicyName; } + + RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig( + const Json& /*json*/, grpc_error** /*error*/) const override { + return MakeRefCounted<TestPickArgsLbConfig>(); + } + + private: + TestPickArgsCallback cb_; +}; + +// // InterceptRecvTrailingMetadataLoadBalancingPolicy // @@ -201,12 +201,12 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy : public ForwardingLoadBalancingPolicy { public: InterceptRecvTrailingMetadataLoadBalancingPolicy( - Args args, InterceptRecvTrailingMetadataCallback cb) + Args args, InterceptRecvTrailingMetadataCallback cb) : ForwardingLoadBalancingPolicy( - y_absl::make_unique<Helper>( + y_absl::make_unique<Helper>( RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy>( this), - std::move(cb)), + std::move(cb)), std::move(args), /*delegate_lb_policy_name=*/"pick_first", /*initial_refcount=*/2) {} @@ -220,9 +220,9 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy private: class Picker : public SubchannelPicker { public: - Picker(std::unique_ptr<SubchannelPicker> delegate_picker, - InterceptRecvTrailingMetadataCallback cb) - : delegate_picker_(std::move(delegate_picker)), cb_(std::move(cb)) {} + Picker(std::unique_ptr<SubchannelPicker> delegate_picker, + InterceptRecvTrailingMetadataCallback cb) + : delegate_picker_(std::move(delegate_picker)), cb_(std::move(cb)) {} PickResult Pick(PickArgs args) override { // Do pick. 
@@ -231,7 +231,7 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy if (result.type == PickResult::PICK_COMPLETE && result.subchannel != nullptr) { new (args.call_state->Alloc(sizeof(TrailingMetadataHandler))) - TrailingMetadataHandler(&result, cb_); + TrailingMetadataHandler(&result, cb_); } return result; } @@ -245,27 +245,27 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy public: Helper( RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent, - InterceptRecvTrailingMetadataCallback cb) - : parent_(std::move(parent)), cb_(std::move(cb)) {} + InterceptRecvTrailingMetadataCallback cb) + : parent_(std::move(parent)), cb_(std::move(cb)) {} RefCountedPtr<SubchannelInterface> CreateSubchannel( - ServerAddress address, const grpc_channel_args& args) override { - return parent_->channel_control_helper()->CreateSubchannel( - std::move(address), args); + ServerAddress address, const grpc_channel_args& args) override { + return parent_->channel_control_helper()->CreateSubchannel( + std::move(address), args); } - void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, + void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, std::unique_ptr<SubchannelPicker> picker) override { parent_->channel_control_helper()->UpdateState( - state, status, y_absl::make_unique<Picker>(std::move(picker), cb_)); + state, status, y_absl::make_unique<Picker>(std::move(picker), cb_)); } void RequestReresolution() override { parent_->channel_control_helper()->RequestReresolution(); } - void AddTraceEvent(TraceSeverity severity, - y_absl::string_view message) override { + void AddTraceEvent(TraceSeverity severity, + y_absl::string_view message) override { parent_->channel_control_helper()->AddTraceEvent(severity, message); } @@ -277,8 +277,8 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy class TrailingMetadataHandler { public: TrailingMetadataHandler(PickResult* result, - InterceptRecvTrailingMetadataCallback cb) - : cb_(std::move(cb)) { + InterceptRecvTrailingMetadataCallback cb) + : cb_(std::move(cb)) { result->recv_trailing_metadata_ready = [this](grpc_error* error, MetadataInterface* metadata, CallState* call_state) { @@ -290,11 +290,11 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy void RecordRecvTrailingMetadata(grpc_error* /*error*/, MetadataInterface* recv_trailing_metadata, CallState* call_state) { - TrailingMetadataArgsSeen args_seen; - args_seen.backend_metric_data = call_state->GetBackendMetricData(); + TrailingMetadataArgsSeen args_seen; + args_seen.backend_metric_data = call_state->GetBackendMetricData(); GPR_ASSERT(recv_trailing_metadata != nullptr); - args_seen.metadata = CopyMetadataToVector(recv_trailing_metadata); - cb_(args_seen); + args_seen.metadata = CopyMetadataToVector(recv_trailing_metadata); + cb_(args_seen); this->~TrailingMetadataHandler(); } @@ -302,22 +302,22 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy }; }; -class InterceptTrailingConfig : public LoadBalancingPolicy::Config { - public: - const char* name() const override { - return kInterceptRecvTrailingMetadataLbPolicyName; - } -}; - +class InterceptTrailingConfig : public LoadBalancingPolicy::Config { + public: + const char* name() const override { + return kInterceptRecvTrailingMetadataLbPolicyName; + } +}; + class InterceptTrailingFactory : public LoadBalancingPolicyFactory { public: - explicit InterceptTrailingFactory(InterceptRecvTrailingMetadataCallback cb) - : cb_(std::move(cb)) {} + explicit 
InterceptTrailingFactory(InterceptRecvTrailingMetadataCallback cb) + : cb_(std::move(cb)) {} OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( LoadBalancingPolicy::Args args) const override { return MakeOrphanable<InterceptRecvTrailingMetadataLoadBalancingPolicy>( - std::move(args), cb_); + std::move(args), cb_); } const char* name() const override { @@ -325,111 +325,111 @@ class InterceptTrailingFactory : public LoadBalancingPolicyFactory { } RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig( - const Json& /*json*/, grpc_error** /*error*/) const override { - return MakeRefCounted<InterceptTrailingConfig>(); + const Json& /*json*/, grpc_error** /*error*/) const override { + return MakeRefCounted<InterceptTrailingConfig>(); } private: InterceptRecvTrailingMetadataCallback cb_; }; -// -// AddressTestLoadBalancingPolicy -// - -constexpr char kAddressTestLbPolicyName[] = "address_test_lb"; - -class AddressTestLoadBalancingPolicy : public ForwardingLoadBalancingPolicy { - public: - AddressTestLoadBalancingPolicy(Args args, AddressTestCallback cb) - : ForwardingLoadBalancingPolicy( - y_absl::make_unique<Helper>( - RefCountedPtr<AddressTestLoadBalancingPolicy>(this), - std::move(cb)), - std::move(args), - /*delegate_lb_policy_name=*/"pick_first", - /*initial_refcount=*/2) {} - - ~AddressTestLoadBalancingPolicy() override = default; - - const char* name() const override { return kAddressTestLbPolicyName; } - - private: - class Helper : public ChannelControlHelper { - public: - Helper(RefCountedPtr<AddressTestLoadBalancingPolicy> parent, - AddressTestCallback cb) - : parent_(std::move(parent)), cb_(std::move(cb)) {} - - RefCountedPtr<SubchannelInterface> CreateSubchannel( - ServerAddress address, const grpc_channel_args& args) override { - cb_(address); - return parent_->channel_control_helper()->CreateSubchannel( - std::move(address), args); - } - - void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, - std::unique_ptr<SubchannelPicker> picker) override { - parent_->channel_control_helper()->UpdateState(state, status, - std::move(picker)); - } - - void RequestReresolution() override { - parent_->channel_control_helper()->RequestReresolution(); - } - - void AddTraceEvent(TraceSeverity severity, - y_absl::string_view message) override { - parent_->channel_control_helper()->AddTraceEvent(severity, message); - } - - private: - RefCountedPtr<AddressTestLoadBalancingPolicy> parent_; - AddressTestCallback cb_; - }; -}; - -class AddressTestConfig : public LoadBalancingPolicy::Config { - public: - const char* name() const override { return kAddressTestLbPolicyName; } -}; - -class AddressTestFactory : public LoadBalancingPolicyFactory { - public: - explicit AddressTestFactory(AddressTestCallback cb) : cb_(std::move(cb)) {} - - OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( - LoadBalancingPolicy::Args args) const override { - return MakeOrphanable<AddressTestLoadBalancingPolicy>(std::move(args), cb_); - } - - const char* name() const override { return kAddressTestLbPolicyName; } - - RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig( - const Json& /*json*/, grpc_error** /*error*/) const override { - return MakeRefCounted<AddressTestConfig>(); - } - - private: - AddressTestCallback cb_; -}; - +// +// AddressTestLoadBalancingPolicy +// + +constexpr char kAddressTestLbPolicyName[] = "address_test_lb"; + +class AddressTestLoadBalancingPolicy : public ForwardingLoadBalancingPolicy { + public: + 
AddressTestLoadBalancingPolicy(Args args, AddressTestCallback cb) + : ForwardingLoadBalancingPolicy( + y_absl::make_unique<Helper>( + RefCountedPtr<AddressTestLoadBalancingPolicy>(this), + std::move(cb)), + std::move(args), + /*delegate_lb_policy_name=*/"pick_first", + /*initial_refcount=*/2) {} + + ~AddressTestLoadBalancingPolicy() override = default; + + const char* name() const override { return kAddressTestLbPolicyName; } + + private: + class Helper : public ChannelControlHelper { + public: + Helper(RefCountedPtr<AddressTestLoadBalancingPolicy> parent, + AddressTestCallback cb) + : parent_(std::move(parent)), cb_(std::move(cb)) {} + + RefCountedPtr<SubchannelInterface> CreateSubchannel( + ServerAddress address, const grpc_channel_args& args) override { + cb_(address); + return parent_->channel_control_helper()->CreateSubchannel( + std::move(address), args); + } + + void UpdateState(grpc_connectivity_state state, const y_absl::Status& status, + std::unique_ptr<SubchannelPicker> picker) override { + parent_->channel_control_helper()->UpdateState(state, status, + std::move(picker)); + } + + void RequestReresolution() override { + parent_->channel_control_helper()->RequestReresolution(); + } + + void AddTraceEvent(TraceSeverity severity, + y_absl::string_view message) override { + parent_->channel_control_helper()->AddTraceEvent(severity, message); + } + + private: + RefCountedPtr<AddressTestLoadBalancingPolicy> parent_; + AddressTestCallback cb_; + }; +}; + +class AddressTestConfig : public LoadBalancingPolicy::Config { + public: + const char* name() const override { return kAddressTestLbPolicyName; } +}; + +class AddressTestFactory : public LoadBalancingPolicyFactory { + public: + explicit AddressTestFactory(AddressTestCallback cb) : cb_(std::move(cb)) {} + + OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable<AddressTestLoadBalancingPolicy>(std::move(args), cb_); + } + + const char* name() const override { return kAddressTestLbPolicyName; } + + RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig( + const Json& /*json*/, grpc_error** /*error*/) const override { + return MakeRefCounted<AddressTestConfig>(); + } + + private: + AddressTestCallback cb_; +}; + } // namespace -void RegisterTestPickArgsLoadBalancingPolicy(TestPickArgsCallback cb) { - LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( - y_absl::make_unique<TestPickArgsLbFactory>(std::move(cb))); -} - +void RegisterTestPickArgsLoadBalancingPolicy(TestPickArgsCallback cb) { + LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( + y_absl::make_unique<TestPickArgsLbFactory>(std::move(cb))); +} + void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy( - InterceptRecvTrailingMetadataCallback cb) { + InterceptRecvTrailingMetadataCallback cb) { + LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( + y_absl::make_unique<InterceptTrailingFactory>(std::move(cb))); +} + +void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb) { LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( - y_absl::make_unique<InterceptTrailingFactory>(std::move(cb))); + y_absl::make_unique<AddressTestFactory>(std::move(cb))); } -void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb) { - LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( - y_absl::make_unique<AddressTestFactory>(std::move(cb))); -} - } // namespace grpc_core 
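The test_lb_policies.cc portion above ends with the registration helpers, which is all a test has to call to make these policies visible to the client channel. A minimal usage sketch for "test_pick_args_lb" follows (illustrative only, not part of this diff; the service-config selection string, the insecure channel, the helper names, and the grpc_init() ordering are assumptions):

// Sketch: register the test policy, then select it by name via service config.
// Assumes grpc_init() has already run so the LB policy registry exists.
#include <atomic>
#include <memory>
#include <util/generic/string.h>
#include <grpc/grpc.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
#include "test/core/util/test_lb_policies.h"

static std::atomic<int> g_picks_seen{0};

void RegisterPickArgsPolicyForTest() {
  grpc_core::RegisterTestPickArgsLoadBalancingPolicy(
      [](const grpc_core::PickArgsSeen& args_seen) {
        // Invoked on every subchannel pick with the path and initial
        // metadata the picker observed; here we only count the picks.
        g_picks_seen.fetch_add(1);
      });
}

std::shared_ptr<grpc::Channel> MakeChannelUsingTestPolicy(const TString& target) {
  grpc::ChannelArguments args;
  // Select the policy registered above by the name it registers under.
  args.SetServiceConfigJSON(
      "{\"loadBalancingConfig\": [{\"test_pick_args_lb\": {}}]}");
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}

A test would then issue RPCs on the returned channel and assert on what the callback recorded (the real callbacks also expose the per-pick path and metadata, as seen in the PickArgsSeen struct above).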
diff --git a/contrib/libs/grpc/test/core/util/test_lb_policies.h b/contrib/libs/grpc/test/core/util/test_lb_policies.h index 4d4b405084..9e14c707a3 100644 --- a/contrib/libs/grpc/test/core/util/test_lb_policies.h +++ b/contrib/libs/grpc/test/core/util/test_lb_policies.h @@ -23,38 +23,38 @@ namespace grpc_core { -using MetadataVector = std::vector<std::pair<TString, TString>>; - -struct PickArgsSeen { - TString path; - MetadataVector metadata; -}; - -using TestPickArgsCallback = std::function<void(const PickArgsSeen&)>; - -// Registers an LB policy called "test_pick_args_lb" that checks the args -// passed to SubchannelPicker::Pick(). -void RegisterTestPickArgsLoadBalancingPolicy(TestPickArgsCallback cb); - -struct TrailingMetadataArgsSeen { - const LoadBalancingPolicy::BackendMetricData* backend_metric_data; - MetadataVector metadata; -}; - -using InterceptRecvTrailingMetadataCallback = - std::function<void(const TrailingMetadataArgsSeen&)>; - +using MetadataVector = std::vector<std::pair<TString, TString>>; + +struct PickArgsSeen { + TString path; + MetadataVector metadata; +}; + +using TestPickArgsCallback = std::function<void(const PickArgsSeen&)>; + +// Registers an LB policy called "test_pick_args_lb" that checks the args +// passed to SubchannelPicker::Pick(). +void RegisterTestPickArgsLoadBalancingPolicy(TestPickArgsCallback cb); + +struct TrailingMetadataArgsSeen { + const LoadBalancingPolicy::BackendMetricData* backend_metric_data; + MetadataVector metadata; +}; + +using InterceptRecvTrailingMetadataCallback = + std::function<void(const TrailingMetadataArgsSeen&)>; + // Registers an LB policy called "intercept_trailing_metadata_lb" that -// invokes cb when trailing metadata is received for each call. +// invokes cb when trailing metadata is received for each call. void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy( - InterceptRecvTrailingMetadataCallback cb); - -using AddressTestCallback = std::function<void(const ServerAddress&)>; - -// Registers an LB policy called "address_test_lb" that invokes cb for each -// address used to create a subchannel. -void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb); - + InterceptRecvTrailingMetadataCallback cb); + +using AddressTestCallback = std::function<void(const ServerAddress&)>; + +// Registers an LB policy called "address_test_lb" that invokes cb for each +// address used to create a subchannel. 
+void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb); + } // namespace grpc_core #endif // GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H diff --git a/contrib/libs/grpc/test/core/util/test_tcp_server.cc b/contrib/libs/grpc/test/core/util/test_tcp_server.cc index 7d6e1f47c3..9dd1e2af36 100644 --- a/contrib/libs/grpc/test/core/util/test_tcp_server.cc +++ b/contrib/libs/grpc/test/core/util/test_tcp_server.cc @@ -36,7 +36,7 @@ static void on_server_destroyed(void* data, grpc_error* /*error*/) { test_tcp_server* server = static_cast<test_tcp_server*>(data); - server->shutdown = true; + server->shutdown = true; } void test_tcp_server_init(test_tcp_server* server, @@ -44,11 +44,11 @@ void test_tcp_server_init(test_tcp_server* server, grpc_init(); GRPC_CLOSURE_INIT(&server->shutdown_complete, on_server_destroyed, server, grpc_schedule_on_exec_ctx); - - grpc_pollset* pollset = - static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); - grpc_pollset_init(pollset, &server->mu); - server->pollset.push_back(pollset); + + grpc_pollset* pollset = + static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); + grpc_pollset_init(pollset, &server->mu); + server->pollset.push_back(pollset); server->on_connect = on_connect; server->cb_data = user_data; } @@ -72,7 +72,7 @@ void test_tcp_server_start(test_tcp_server* server, int port) { GPR_ASSERT(error == GRPC_ERROR_NONE); GPR_ASSERT(port_added == port); - grpc_tcp_server_start(server->tcp_server, &server->pollset, + grpc_tcp_server_start(server->tcp_server, &server->pollset, server->on_connect, server->cb_data); gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port); } @@ -84,7 +84,7 @@ void test_tcp_server_poll(test_tcp_server* server, int milliseconds) { grpc_timeout_milliseconds_to_deadline(milliseconds)); gpr_mu_lock(server->mu); GRPC_LOG_IF_ERROR("pollset_work", - grpc_pollset_work(server->pollset[0], &worker, deadline)); + grpc_pollset_work(server->pollset[0], &worker, deadline)); gpr_mu_unlock(server->mu); } @@ -107,10 +107,10 @@ void test_tcp_server_destroy(test_tcp_server* server) { gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) { test_tcp_server_poll(server, 1000); } - grpc_pollset_shutdown(server->pollset[0], - GRPC_CLOSURE_CREATE(finish_pollset, server->pollset[0], + grpc_pollset_shutdown(server->pollset[0], + GRPC_CLOSURE_CREATE(finish_pollset, server->pollset[0], grpc_schedule_on_exec_ctx)); grpc_core::ExecCtx::Get()->Flush(); - gpr_free(server->pollset[0]); + gpr_free(server->pollset[0]); grpc_shutdown(); } diff --git a/contrib/libs/grpc/test/core/util/test_tcp_server.h b/contrib/libs/grpc/test/core/util/test_tcp_server.h index e45fd2f16d..8765ea9a22 100644 --- a/contrib/libs/grpc/test/core/util/test_tcp_server.h +++ b/contrib/libs/grpc/test/core/util/test_tcp_server.h @@ -19,24 +19,24 @@ #ifndef GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H #define GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H -#include <vector> - +#include <vector> + #include <grpc/support/sync.h> #include "src/core/lib/iomgr/tcp_server.h" -// test_tcp_server should be stack-allocated or new'ed, never gpr_malloc'ed -// since it contains C++ objects. -struct test_tcp_server { - grpc_tcp_server* tcp_server = nullptr; +// test_tcp_server should be stack-allocated or new'ed, never gpr_malloc'ed +// since it contains C++ objects. +struct test_tcp_server { + grpc_tcp_server* tcp_server = nullptr; grpc_closure shutdown_complete; - bool shutdown = false; - // mu is filled in by grpc_pollset_init and controls the pollset. 
- // TODO: Switch this to a Mutex once pollset_init can provide a Mutex + bool shutdown = false; + // mu is filled in by grpc_pollset_init and controls the pollset. + // TODO: Switch this to a Mutex once pollset_init can provide a Mutex gpr_mu* mu; - std::vector<grpc_pollset*> pollset; + std::vector<grpc_pollset*> pollset; grpc_tcp_server_cb on_connect; void* cb_data; -}; +}; void test_tcp_server_init(test_tcp_server* server, grpc_tcp_server_cb on_connect, void* user_data); diff --git a/contrib/libs/grpc/test/core/util/trickle_endpoint.cc b/contrib/libs/grpc/test/core/util/trickle_endpoint.cc index b02f671220..28ffb0e3e7 100644 --- a/contrib/libs/grpc/test/core/util/trickle_endpoint.cc +++ b/contrib/libs/grpc/test/core/util/trickle_endpoint.cc @@ -122,16 +122,16 @@ static grpc_resource_user* te_get_resource_user(grpc_endpoint* ep) { return grpc_endpoint_get_resource_user(te->wrapped); } -static y_absl::string_view te_get_peer(grpc_endpoint* ep) { +static y_absl::string_view te_get_peer(grpc_endpoint* ep) { trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep); return grpc_endpoint_get_peer(te->wrapped); } -static y_absl::string_view te_get_local_address(grpc_endpoint* ep) { - trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep); - return grpc_endpoint_get_local_address(te->wrapped); -} - +static y_absl::string_view te_get_local_address(grpc_endpoint* ep) { + trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep); + return grpc_endpoint_get_local_address(te->wrapped); +} + static int te_get_fd(grpc_endpoint* ep) { trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep); return grpc_endpoint_get_fd(te->wrapped); @@ -156,7 +156,7 @@ static const grpc_endpoint_vtable vtable = {te_read, te_destroy, te_get_resource_user, te_get_peer, - te_get_local_address, + te_get_local_address, te_get_fd, te_can_track_err}; diff --git a/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt b/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt index 863ce9297f..ca233b8a28 100644 --- a/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt +++ b/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt @@ -14,8 +14,8 @@ nonnull-attribute:gsec_test_random_encrypt_decrypt nonnull-attribute:gsec_test_multiple_random_encrypt_decrypt nonnull-attribute:gsec_test_copy nonnull-attribute:gsec_test_encrypt_decrypt_test_vector -alignment:y_absl::little_endian::Store64 -alignment:y_absl::little_endian::Load64 +alignment:y_absl::little_endian::Store64 +alignment:y_absl::little_endian::Load64 float-divide-by-zero:grpc::testing::postprocess_scenario_result enum:grpc_op_string signed-integer-overflow:chrono diff --git a/contrib/libs/grpc/test/core/util/ya.make b/contrib/libs/grpc/test/core/util/ya.make index 93c48e528a..fbaad80cad 100644 --- a/contrib/libs/grpc/test/core/util/ya.make +++ b/contrib/libs/grpc/test/core/util/ya.make @@ -2,19 +2,19 @@ LIBRARY() LICENSE(Apache-2.0) -LICENSE_TEXTS(.yandex_meta/licenses.list.txt) +LICENSE_TEXTS(.yandex_meta/licenses.list.txt) + +OWNER(dvshkurko) -OWNER(dvshkurko) - PEERDIR( contrib/libs/grpc - contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler + contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler ) -ADDINCL( - ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc - contrib/libs/grpc -) +ADDINCL( + ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc + contrib/libs/grpc +) NO_COMPILER_WARNINGS() diff --git a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt 
b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt index ff3e2fd911..a07ea0849d 100644 --- a/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt +++ b/contrib/libs/grpc/test/cpp/end2end/.yandex_meta/licenses.list.txt @@ -1,36 +1,36 @@ -====================Apache-2.0==================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - - -====================COPYRIGHT==================== - * Copyright 2015 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2016 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2017 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2018 gRPC authors. - - -====================COPYRIGHT==================== -# Copyright 2019 gRPC authors. - - -====================COPYRIGHT==================== -// Copyright 2019 The gRPC Authors +====================Apache-2.0==================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + +====================COPYRIGHT==================== + * Copyright 2015 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2016 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2017 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2018 gRPC authors. + + +====================COPYRIGHT==================== +# Copyright 2019 gRPC authors. 
+ + +====================COPYRIGHT==================== +// Copyright 2019 The gRPC Authors diff --git a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc index 20f2946565..45df8718f9 100644 --- a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc @@ -120,7 +120,7 @@ class Verifier { while (!expectations_.empty()) { Next(cq, ignore_ok); } - maybe_expectations_.clear(); + maybe_expectations_.clear(); } // This version of Verify stops after a certain deadline @@ -140,7 +140,7 @@ class Verifier { GotTag(got_tag, ok, false); } } - maybe_expectations_.clear(); + maybe_expectations_.clear(); } // This version of Verify stops after a certain deadline, and uses the @@ -163,7 +163,7 @@ class Verifier { GotTag(got_tag, ok, false); } } - maybe_expectations_.clear(); + maybe_expectations_.clear(); } private: @@ -184,7 +184,7 @@ class Verifier { if (!ignore_ok) { EXPECT_EQ(it2->second.ok, ok); } - maybe_expectations_.erase(it2); + maybe_expectations_.erase(it2); } else { gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag); abort(); @@ -224,8 +224,8 @@ class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption { class TestScenario { public: - TestScenario(bool inproc_stub, const TString& creds_type, bool hcs, - const TString& content) + TestScenario(bool inproc_stub, const TString& creds_type, bool hcs, + const TString& content) : inproc(inproc_stub), health_check_service(hcs), credentials_type(creds_type), @@ -233,8 +233,8 @@ class TestScenario { void Log() const; bool inproc; bool health_check_service; - const TString credentials_type; - const TString message_content; + const TString credentials_type; + const TString message_content; }; static std::ostream& operator<<(std::ostream& out, @@ -355,52 +355,52 @@ TEST_P(AsyncEnd2endTest, SimpleRpc) { SendRpc(1); } -TEST_P(AsyncEnd2endTest, SimpleRpcWithExpectedError) { - ResetStub(); - - EchoRequest send_request; - EchoRequest recv_request; - EchoResponse send_response; - EchoResponse recv_response; - Status recv_status; - - ClientContext cli_ctx; - ServerContext srv_ctx; - grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); - ErrorStatus error_status; - - send_request.set_message(GetParam().message_content); - error_status.set_code(1); // CANCELLED - error_status.set_error_message("cancel error message"); - *send_request.mutable_param()->mutable_expected_error() = error_status; - - std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - - srv_ctx.AsyncNotifyWhenDone(tag(5)); - service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); - - response_reader->Finish(&recv_response, &recv_status, tag(4)); - - Verifier().Expect(2, true).Verify(cq_.get()); - EXPECT_EQ(send_request.message(), recv_request.message()); - - send_response.set_message(recv_request.message()); - response_writer.Finish( - send_response, - Status( - static_cast<StatusCode>(recv_request.param().expected_error().code()), - recv_request.param().expected_error().error_message()), - tag(3)); - Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get()); - - EXPECT_EQ(recv_response.message(), ""); - EXPECT_EQ(recv_status.error_code(), error_status.code()); - EXPECT_EQ(recv_status.error_message(), error_status.error_message()); - EXPECT_FALSE(srv_ctx.IsCancelled()); -} - +TEST_P(AsyncEnd2endTest, 
SimpleRpcWithExpectedError) { + ResetStub(); + + EchoRequest send_request; + EchoRequest recv_request; + EchoResponse send_response; + EchoResponse recv_response; + Status recv_status; + + ClientContext cli_ctx; + ServerContext srv_ctx; + grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); + ErrorStatus error_status; + + send_request.set_message(GetParam().message_content); + error_status.set_code(1); // CANCELLED + error_status.set_error_message("cancel error message"); + *send_request.mutable_param()->mutable_expected_error() = error_status; + + std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); + + srv_ctx.AsyncNotifyWhenDone(tag(5)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); + + response_reader->Finish(&recv_response, &recv_status, tag(4)); + + Verifier().Expect(2, true).Verify(cq_.get()); + EXPECT_EQ(send_request.message(), recv_request.message()); + + send_response.set_message(recv_request.message()); + response_writer.Finish( + send_response, + Status( + static_cast<StatusCode>(recv_request.param().expected_error().code()), + recv_request.param().expected_error().error_message()), + tag(3)); + Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get()); + + EXPECT_EQ(recv_response.message(), ""); + EXPECT_EQ(recv_status.error_code(), error_status.code()); + EXPECT_EQ(recv_status.error_message(), error_status.error_message()); + EXPECT_FALSE(srv_ctx.IsCancelled()); +} + TEST_P(AsyncEnd2endTest, SequentialRpcs) { ResetStub(); SendRpc(10); @@ -931,9 +931,9 @@ TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) { grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); send_request.set_message(GetParam().message_content); - std::pair<TString, TString> meta1("key1", "val1"); - std::pair<TString, TString> meta2("key2", "val2"); - std::pair<TString, TString> meta3("g.r.d-bin", "xyz"); + std::pair<TString, TString> meta1("key1", "val1"); + std::pair<TString, TString> meta2("key2", "val2"); + std::pair<TString, TString> meta3("g.r.d-bin", "xyz"); cli_ctx.AddMetadata(meta1.first, meta1.second); cli_ctx.AddMetadata(meta2.first, meta2.second); cli_ctx.AddMetadata(meta3.first, meta3.second); @@ -977,8 +977,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) { grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); send_request.set_message(GetParam().message_content); - std::pair<TString, TString> meta1("key1", "val1"); - std::pair<TString, TString> meta2("key2", "val2"); + std::pair<TString, TString> meta1("key1", "val1"); + std::pair<TString, TString> meta2("key2", "val2"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); @@ -1020,8 +1020,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreaming) { ServerContext srv_ctx; ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx); - std::pair<::TString, ::TString> meta1("key1", "val1"); - std::pair<::TString, ::TString> meta2("key2", "val2"); + std::pair<::TString, ::TString> meta1("key1", "val1"); + std::pair<::TString, ::TString> meta2("key2", "val2"); std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream( stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); @@ -1075,8 +1075,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreamingImplicit) { ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx); 
send_request.set_message(GetParam().message_content); - std::pair<::TString, ::TString> meta1("key1", "val1"); - std::pair<::TString, ::TString> meta2("key2", "val2"); + std::pair<::TString, ::TString> meta1("key1", "val1"); + std::pair<::TString, ::TString> meta2("key2", "val2"); std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream( stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); @@ -1130,8 +1130,8 @@ TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) { grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); send_request.set_message(GetParam().message_content); - std::pair<TString, TString> meta1("key1", "val1"); - std::pair<TString, TString> meta2("key2", "val2"); + std::pair<TString, TString> meta1("key1", "val1"); + std::pair<TString, TString> meta2("key2", "val2"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); @@ -1175,19 +1175,19 @@ TEST_P(AsyncEnd2endTest, MetadataRpc) { grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx); send_request.set_message(GetParam().message_content); - std::pair<TString, TString> meta1("key1", "val1"); - std::pair<TString, TString> meta2( + std::pair<TString, TString> meta1("key1", "val1"); + std::pair<TString, TString> meta2( "key2-bin", - TString("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13)); - std::pair<TString, TString> meta3("key3", "val3"); - std::pair<TString, TString> meta6( + TString("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13)); + std::pair<TString, TString> meta3("key3", "val3"); + std::pair<TString, TString> meta6( "key4-bin", - TString("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d", - 14)); - std::pair<TString, TString> meta5("key5", "val5"); - std::pair<TString, TString> meta4( + TString("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d", + 14)); + std::pair<TString, TString> meta5("key5", "val5"); + std::pair<TString, TString> meta4( "key6-bin", - TString( + TString( "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15)); cli_ctx.AddMetadata(meta1.first, meta1.second); @@ -1407,7 +1407,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest { EchoRequest send_request; // Client sends 3 messages (tags 3, 4 and 5) for (int tag_idx = 3; tag_idx <= 5; tag_idx++) { - send_request.set_message("Ping " + ToString(tag_idx)); + send_request.set_message("Ping " + ToString(tag_idx)); cli_stream->Write(send_request, tag(tag_idx)); Verifier() .Expect(tag_idx, expected_client_cq_result) @@ -1592,7 +1592,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest { // Server sends three messages (tags 3, 4 and 5) // But if want_done tag is true, we might also see tag 11 for (int tag_idx = 3; tag_idx <= 5; tag_idx++) { - send_response.set_message("Pong " + ToString(tag_idx)); + send_response.set_message("Pong " + ToString(tag_idx)); srv_stream.Write(send_response, tag(tag_idx)); // Note that we'll add something to the verifier and verify that // something was seen, but it might be tag 11 and not what we @@ -1874,8 +1874,8 @@ TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) { std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/, bool test_message_size_limit) { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types; - std::vector<TString> messages; + std::vector<TString> credentials_types; + std::vector<TString> messages; auto insec_ok = [] { // Only allow insecure 
credentials type when it is registered with the @@ -1897,20 +1897,20 @@ std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/, if (test_message_size_limit) { for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) { - TString big_msg; + TString big_msg; for (size_t i = 0; i < k * 1024; ++i) { char c = 'a' + (i % 26); big_msg += c; } messages.push_back(big_msg); } - if (!BuiltUnderMsan()) { - // 4MB message processing with SSL is very slow under msan - // (causes timeouts) and doesn't really increase the signal from tests. - // Reserve 100 bytes for other fields of the message proto. - messages.push_back( - TString(GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH - 100, 'a')); - } + if (!BuiltUnderMsan()) { + // 4MB message processing with SSL is very slow under msan + // (causes timeouts) and doesn't really increase the signal from tests. + // Reserve 100 bytes for other fields of the message proto. + messages.push_back( + TString(GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH - 100, 'a')); + } } // TODO (sreek) Renable tests with health check service after the issue diff --git a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc index bd4de86bd6..e6695982bd 100644 --- a/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/cfstream_test.cc @@ -60,10 +60,10 @@ namespace testing { namespace { struct TestScenario { - TestScenario(const TString& creds_type, const TString& content) + TestScenario(const TString& creds_type, const TString& content) : credentials_type(creds_type), message_content(content) {} - const TString credentials_type; - const TString message_content; + const TString credentials_type; + const TString message_content; }; class CFStreamTest : public ::testing::TestWithParam<TestScenario> { @@ -244,16 +244,16 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> { private: struct ServerData { int port_; - const TString creds_; + const TString creds_; std::unique_ptr<Server> server_; TestServiceImpl service_; std::unique_ptr<std::thread> thread_; bool server_ready_ = false; - ServerData(int port, const TString& creds) + ServerData(int port, const TString& creds) : port_(port), creds_(creds) {} - void Start(const TString& server_host) { + void Start(const TString& server_host) { gpr_log(GPR_INFO, "starting server on port %d", port_); std::mutex mu; std::unique_lock<std::mutex> lock(mu); @@ -265,7 +265,7 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> { gpr_log(GPR_INFO, "server startup complete"); } - void Serve(const TString& server_host, std::mutex* mu, + void Serve(const TString& server_host, std::mutex* mu, std::condition_variable* cond) { std::ostringstream server_address; server_address << server_host << ":" << port_; @@ -287,17 +287,17 @@ class CFStreamTest : public ::testing::TestWithParam<TestScenario> { }; CompletionQueue cq_; - const TString server_host_; - const TString interface_; - const TString ipv4_address_; + const TString server_host_; + const TString interface_; + const TString ipv4_address_; std::unique_ptr<ServerData> server_; int port_; }; std::vector<TestScenario> CreateTestScenarios() { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types; - std::vector<TString> messages; + std::vector<TString> credentials_types; + std::vector<TString> messages; credentials_types.push_back(kInsecureCredentialsType); auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList(); @@ -307,7 +307,7 
@@ std::vector<TestScenario> CreateTestScenarios() { messages.push_back("🖖"); for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) { - TString big_msg; + TString big_msg; for (size_t i = 0; i < k * 1024; ++i) { char c = 'a' + (i % 26); big_msg += c; @@ -489,7 +489,7 @@ TEST_P(CFStreamTest, ConcurrentRpc) { int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); gpr_setenv("grpc_cfstream", "1"); const auto result = RUN_ALL_TESTS(); return result; diff --git a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc index fdc6784fb7..9c723bebb6 100644 --- a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc @@ -118,7 +118,7 @@ class ChannelzServerTest : public ::testing::Test { // We set up a proxy server with channelz enabled. proxy_port_ = grpc_pick_unused_port_or_die(); ServerBuilder proxy_builder; - TString proxy_server_address = "localhost:" + to_string(proxy_port_); + TString proxy_server_address = "localhost:" + to_string(proxy_port_); proxy_builder.AddListeningPort(proxy_server_address, InsecureServerCredentials()); // forces channelz and channel tracing to be enabled. @@ -136,7 +136,7 @@ class ChannelzServerTest : public ::testing::Test { // create a new backend. backends_[i].port = grpc_pick_unused_port_or_die(); ServerBuilder backend_builder; - TString backend_server_address = + TString backend_server_address = "localhost:" + to_string(backends_[i].port); backend_builder.AddListeningPort(backend_server_address, InsecureServerCredentials()); diff --git a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc index 467f482d3f..12cb40a953 100644 --- a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc @@ -25,15 +25,15 @@ #include <grpcpp/server_builder.h> #include <grpcpp/server_context.h> #include <grpcpp/support/client_callback.h> -#include <gtest/gtest.h> - -#include <algorithm> -#include <condition_variable> -#include <functional> -#include <mutex> -#include <sstream> -#include <thread> - +#include <gtest/gtest.h> + +#include <algorithm> +#include <condition_variable> +#include <functional> +#include <mutex> +#include <sstream> +#include <thread> + #include "src/core/lib/gpr/env.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" @@ -65,7 +65,7 @@ enum class Protocol { INPROC, TCP }; class TestScenario { public: TestScenario(bool serve_callback, Protocol protocol, bool intercept, - const TString& creds_type) + const TString& creds_type) : callback_server(serve_callback), protocol(protocol), use_interceptors(intercept), @@ -74,7 +74,7 @@ class TestScenario { bool callback_server; Protocol protocol; bool use_interceptors; - const TString credentials_type; + const TString credentials_type; }; static std::ostream& operator<<(std::ostream& out, @@ -180,7 +180,7 @@ class ClientCallbackEnd2endTest } void SendRpcs(int num_rpcs, bool with_binary_metadata) { - TString test_string(""); + TString test_string(""); for (int i = 0; i < num_rpcs; i++) { EchoRequest request; EchoResponse response; @@ -188,12 +188,12 @@ class ClientCallbackEnd2endTest test_string += "Hello world. 
"; request.set_message(test_string); - TString val; + TString val; if (with_binary_metadata) { request.mutable_param()->set_echo_metadata(true); char bytes[8] = {'\0', '\1', '\2', '\3', '\4', '\5', '\6', static_cast<char>(i)}; - val = TString(bytes, 8); + val = TString(bytes, 8); cli_ctx.AddMetadata("custom-bin", val); } @@ -228,8 +228,8 @@ class ClientCallbackEnd2endTest } void SendRpcsGeneric(int num_rpcs, bool maybe_except) { - const TString kMethodName("/grpc.testing.EchoTestService/Echo"); - TString test_string(""); + const TString kMethodName("/grpc.testing.EchoTestService/Echo"); + TString test_string(""); for (int i = 0; i < num_rpcs; i++) { EchoRequest request; std::unique_ptr<ByteBuffer> send_buf; @@ -269,17 +269,17 @@ class ClientCallbackEnd2endTest } } - void SendGenericEchoAsBidi(int num_rpcs, int reuses, bool do_writes_done) { - const TString kMethodName("/grpc.testing.EchoTestService/Echo"); - TString test_string(""); + void SendGenericEchoAsBidi(int num_rpcs, int reuses, bool do_writes_done) { + const TString kMethodName("/grpc.testing.EchoTestService/Echo"); + TString test_string(""); for (int i = 0; i < num_rpcs; i++) { test_string += "Hello world. "; class Client : public grpc::experimental::ClientBidiReactor<ByteBuffer, ByteBuffer> { public: - Client(ClientCallbackEnd2endTest* test, const TString& method_name, - const TString& test_str, int reuses, bool do_writes_done) - : reuses_remaining_(reuses), do_writes_done_(do_writes_done) { + Client(ClientCallbackEnd2endTest* test, const TString& method_name, + const TString& test_str, int reuses, bool do_writes_done) + : reuses_remaining_(reuses), do_writes_done_(do_writes_done) { activate_ = [this, test, method_name, test_str] { if (reuses_remaining_ > 0) { cli_ctx_.reset(new ClientContext); @@ -299,11 +299,11 @@ class ClientCallbackEnd2endTest }; activate_(); } - void OnWriteDone(bool /*ok*/) override { - if (do_writes_done_) { - StartWritesDone(); - } - } + void OnWriteDone(bool /*ok*/) override { + if (do_writes_done_) { + StartWritesDone(); + } + } void OnReadDone(bool /*ok*/) override { EchoResponse response; EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response)); @@ -329,11 +329,11 @@ class ClientCallbackEnd2endTest std::mutex mu_; std::condition_variable cv_; bool done_ = false; - const bool do_writes_done_; - }; + const bool do_writes_done_; + }; + + Client rpc(this, kMethodName, test_string, reuses, do_writes_done); - Client rpc(this, kMethodName, test_string, reuses, do_writes_done); - rpc.Await(); } } @@ -355,102 +355,102 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpc) { SendRpcs(1, false); } -TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) { +TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) { MAYBE_SKIP_TEST; ResetStub(); - - EchoRequest request; - EchoResponse response; - ClientContext cli_ctx; - ErrorStatus error_status; - - request.set_message("Hello failure"); - error_status.set_code(1); // CANCELLED - error_status.set_error_message("cancel error message"); - *request.mutable_param()->mutable_expected_error() = error_status; - - std::mutex mu; + + EchoRequest request; + EchoResponse response; + ClientContext cli_ctx; + ErrorStatus error_status; + + request.set_message("Hello failure"); + error_status.set_code(1); // CANCELLED + error_status.set_error_message("cancel error message"); + *request.mutable_param()->mutable_expected_error() = error_status; + + std::mutex mu; std::condition_variable cv; bool done = false; - stub_->experimental_async()->Echo( - &cli_ctx, &request, &response, - 
[&response, &done, &mu, &cv, &error_status](Status s) { - EXPECT_EQ("", response.message()); - EXPECT_EQ(error_status.code(), s.error_code()); - EXPECT_EQ(error_status.error_message(), s.error_message()); - std::lock_guard<std::mutex> l(mu); - done = true; - cv.notify_one(); - }); - - std::unique_lock<std::mutex> l(mu); + stub_->experimental_async()->Echo( + &cli_ctx, &request, &response, + [&response, &done, &mu, &cv, &error_status](Status s) { + EXPECT_EQ("", response.message()); + EXPECT_EQ(error_status.code(), s.error_code()); + EXPECT_EQ(error_status.error_message(), s.error_message()); + std::lock_guard<std::mutex> l(mu); + done = true; + cv.notify_one(); + }); + + std::unique_lock<std::mutex> l(mu); while (!done) { cv.wait(l); } } -TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) { - MAYBE_SKIP_TEST; - ResetStub(); - - // The request/response state associated with an RPC and the synchronization - // variables needed to notify its completion. - struct RpcState { - std::mutex mu; - std::condition_variable cv; - bool done = false; - EchoRequest request; - EchoResponse response; - ClientContext cli_ctx; - - RpcState() = default; - ~RpcState() { - // Grab the lock to prevent destruction while another is still holding - // lock - std::lock_guard<std::mutex> lock(mu); - } - }; - std::vector<RpcState> rpc_state(3); - for (size_t i = 0; i < rpc_state.size(); i++) { - TString message = "Hello locked world"; - message += ToString(i); - rpc_state[i].request.set_message(message); - } - - // Grab a lock and then start an RPC whose callback grabs the same lock and - // then calls this function to start the next RPC under lock (up to a limit of - // the size of the rpc_state vector). - std::function<void(int)> nested_call = [this, &nested_call, - &rpc_state](int index) { - std::lock_guard<std::mutex> l(rpc_state[index].mu); - stub_->experimental_async()->Echo( - &rpc_state[index].cli_ctx, &rpc_state[index].request, - &rpc_state[index].response, - [index, &nested_call, &rpc_state](Status s) { - std::lock_guard<std::mutex> l1(rpc_state[index].mu); - EXPECT_TRUE(s.ok()); - rpc_state[index].done = true; - rpc_state[index].cv.notify_all(); - // Call the next level of nesting if possible - if (index + 1 < rpc_state.size()) { - nested_call(index + 1); - } - }); - }; - - nested_call(0); - - // Wait for completion notifications from all RPCs. Order doesn't matter. - for (RpcState& state : rpc_state) { - std::unique_lock<std::mutex> l(state.mu); - while (!state.done) { - state.cv.wait(l); - } - EXPECT_EQ(state.request.message(), state.response.message()); - } -} - +TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) { + MAYBE_SKIP_TEST; + ResetStub(); + + // The request/response state associated with an RPC and the synchronization + // variables needed to notify its completion. 
+ struct RpcState { + std::mutex mu; + std::condition_variable cv; + bool done = false; + EchoRequest request; + EchoResponse response; + ClientContext cli_ctx; + + RpcState() = default; + ~RpcState() { + // Grab the lock to prevent destruction while another is still holding + // lock + std::lock_guard<std::mutex> lock(mu); + } + }; + std::vector<RpcState> rpc_state(3); + for (size_t i = 0; i < rpc_state.size(); i++) { + TString message = "Hello locked world"; + message += ToString(i); + rpc_state[i].request.set_message(message); + } + + // Grab a lock and then start an RPC whose callback grabs the same lock and + // then calls this function to start the next RPC under lock (up to a limit of + // the size of the rpc_state vector). + std::function<void(int)> nested_call = [this, &nested_call, + &rpc_state](int index) { + std::lock_guard<std::mutex> l(rpc_state[index].mu); + stub_->experimental_async()->Echo( + &rpc_state[index].cli_ctx, &rpc_state[index].request, + &rpc_state[index].response, + [index, &nested_call, &rpc_state](Status s) { + std::lock_guard<std::mutex> l1(rpc_state[index].mu); + EXPECT_TRUE(s.ok()); + rpc_state[index].done = true; + rpc_state[index].cv.notify_all(); + // Call the next level of nesting if possible + if (index + 1 < rpc_state.size()) { + nested_call(index + 1); + } + }); + }; + + nested_call(0); + + // Wait for completion notifications from all RPCs. Order doesn't matter. + for (RpcState& state : rpc_state) { + std::unique_lock<std::mutex> l(state.mu); + while (!state.done) { + state.cv.wait(l); + } + EXPECT_EQ(state.request.message(), state.response.message()); + } +} + TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) { MAYBE_SKIP_TEST; ResetStub(); @@ -533,21 +533,21 @@ TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) { TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) { MAYBE_SKIP_TEST; ResetStub(); - SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true); + SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true); } TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) { MAYBE_SKIP_TEST; ResetStub(); - SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true); + SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true); +} + +TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) { + MAYBE_SKIP_TEST; + ResetStub(); + SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false); } -TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) { - MAYBE_SKIP_TEST; - ResetStub(); - SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false); -} - #if GRPC_ALLOW_EXCEPTIONS TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) { MAYBE_SKIP_TEST; @@ -619,7 +619,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) { ClientContext context; request.set_message("hello"); context.AddMetadata(kServerTryCancelRequest, - ToString(CANCEL_BEFORE_PROCESSING)); + ToString(CANCEL_BEFORE_PROCESSING)); std::mutex mu; std::condition_variable cv; @@ -654,14 +654,14 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> { : server_try_cancel_(server_try_cancel), num_msgs_to_send_(num_msgs_to_send), client_cancel_{client_cancel} { - TString msg{"Hello server."}; + TString msg{"Hello server."}; for (int i = 0; i < num_msgs_to_send; i++) { desired_ += msg; } if (server_try_cancel != DO_NOT_CANCEL) { // Send server_try_cancel value in the client metadata context_.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + ToString(server_try_cancel)); } context_.set_initial_metadata_corked(true); 
stub->experimental_async()->RequestStream(&context_, &response_, this); @@ -735,7 +735,7 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> { const ServerTryCancelRequestPhase server_try_cancel_; int num_msgs_sent_{0}; const int num_msgs_to_send_; - TString desired_; + TString desired_; const ClientCancelInfo client_cancel_; std::mutex mu_; std::condition_variable cv_; @@ -860,72 +860,72 @@ TEST_P(ClientCallbackEnd2endTest, UnaryReactor) { } } -TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) { - MAYBE_SKIP_TEST; - ResetStub(); - const TString kMethodName("/grpc.testing.EchoTestService/Echo"); - class UnaryClient : public grpc::experimental::ClientUnaryReactor { - public: - UnaryClient(grpc::GenericStub* stub, const TString& method_name) { - cli_ctx_.AddMetadata("key1", "val1"); - cli_ctx_.AddMetadata("key2", "val2"); - request_.mutable_param()->set_echo_metadata_initially(true); - request_.set_message("Hello metadata"); - send_buf_ = SerializeToByteBuffer(&request_); - - stub->experimental().PrepareUnaryCall(&cli_ctx_, method_name, - send_buf_.get(), &recv_buf_, this); - StartCall(); - } - void OnReadInitialMetadataDone(bool ok) override { - EXPECT_TRUE(ok); - EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1")); - EXPECT_EQ( - "val1", - ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second)); - EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2")); - EXPECT_EQ( - "val2", - ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second)); - initial_metadata_done_ = true; - } - void OnDone(const Status& s) override { - EXPECT_TRUE(initial_metadata_done_); - EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size()); - EXPECT_TRUE(s.ok()); - EchoResponse response; - EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response)); - EXPECT_EQ(request_.message(), response.message()); - std::unique_lock<std::mutex> l(mu_); - done_ = true; - cv_.notify_one(); - } - void Await() { - std::unique_lock<std::mutex> l(mu_); - while (!done_) { - cv_.wait(l); - } - } - - private: - EchoRequest request_; - std::unique_ptr<ByteBuffer> send_buf_; - ByteBuffer recv_buf_; - ClientContext cli_ctx_; - std::mutex mu_; - std::condition_variable cv_; - bool done_{false}; - bool initial_metadata_done_{false}; - }; - - UnaryClient test{generic_stub_.get(), kMethodName}; - test.Await(); - // Make sure that the server interceptors were not notified of a cancel - if (GetParam().use_interceptors) { - EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); - } -} - +TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) { + MAYBE_SKIP_TEST; + ResetStub(); + const TString kMethodName("/grpc.testing.EchoTestService/Echo"); + class UnaryClient : public grpc::experimental::ClientUnaryReactor { + public: + UnaryClient(grpc::GenericStub* stub, const TString& method_name) { + cli_ctx_.AddMetadata("key1", "val1"); + cli_ctx_.AddMetadata("key2", "val2"); + request_.mutable_param()->set_echo_metadata_initially(true); + request_.set_message("Hello metadata"); + send_buf_ = SerializeToByteBuffer(&request_); + + stub->experimental().PrepareUnaryCall(&cli_ctx_, method_name, + send_buf_.get(), &recv_buf_, this); + StartCall(); + } + void OnReadInitialMetadataDone(bool ok) override { + EXPECT_TRUE(ok); + EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1")); + EXPECT_EQ( + "val1", + ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second)); + EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2")); + EXPECT_EQ( + "val2", + 
ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second)); + initial_metadata_done_ = true; + } + void OnDone(const Status& s) override { + EXPECT_TRUE(initial_metadata_done_); + EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size()); + EXPECT_TRUE(s.ok()); + EchoResponse response; + EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response)); + EXPECT_EQ(request_.message(), response.message()); + std::unique_lock<std::mutex> l(mu_); + done_ = true; + cv_.notify_one(); + } + void Await() { + std::unique_lock<std::mutex> l(mu_); + while (!done_) { + cv_.wait(l); + } + } + + private: + EchoRequest request_; + std::unique_ptr<ByteBuffer> send_buf_; + ByteBuffer recv_buf_; + ClientContext cli_ctx_; + std::mutex mu_; + std::condition_variable cv_; + bool done_{false}; + bool initial_metadata_done_{false}; + }; + + UnaryClient test{generic_stub_.get(), kMethodName}; + test.Await(); + // Make sure that the server interceptors were not notified of a cancel + if (GetParam().use_interceptors) { + EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); + } +} + class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> { public: ReadClient(grpc::testing::EchoTestService::Stub* stub, @@ -935,7 +935,7 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> { if (server_try_cancel_ != DO_NOT_CANCEL) { // Send server_try_cancel value in the client metadata context_.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + ToString(server_try_cancel)); } request_.set_message("Hello client "); stub->experimental_async()->ResponseStream(&context_, &request_, this); @@ -956,7 +956,7 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> { } else { EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend); EXPECT_EQ(response_.message(), - request_.message() + ToString(reads_complete_)); + request_.message() + ToString(reads_complete_)); reads_complete_++; if (client_cancel_.cancel && reads_complete_ == client_cancel_.ops_before_cancel) { @@ -1088,20 +1088,20 @@ class BidiClient public: BidiClient(grpc::testing::EchoTestService::Stub* stub, ServerTryCancelRequestPhase server_try_cancel, - int num_msgs_to_send, bool cork_metadata, bool first_write_async, - ClientCancelInfo client_cancel = {}) + int num_msgs_to_send, bool cork_metadata, bool first_write_async, + ClientCancelInfo client_cancel = {}) : server_try_cancel_(server_try_cancel), msgs_to_send_{num_msgs_to_send}, client_cancel_{client_cancel} { if (server_try_cancel_ != DO_NOT_CANCEL) { // Send server_try_cancel value in the client metadata context_.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + ToString(server_try_cancel)); } request_.set_message("Hello fren "); - context_.set_initial_metadata_corked(cork_metadata); + context_.set_initial_metadata_corked(cork_metadata); stub->experimental_async()->BidiStream(&context_, this); - MaybeAsyncWrite(first_write_async); + MaybeAsyncWrite(first_write_async); StartRead(&response_); StartCall(); } @@ -1122,10 +1122,10 @@ class BidiClient } } void OnWriteDone(bool ok) override { - if (async_write_thread_.joinable()) { - async_write_thread_.join(); - RemoveHold(); - } + if (async_write_thread_.joinable()) { + async_write_thread_.join(); + RemoveHold(); + } if (server_try_cancel_ == DO_NOT_CANCEL) { EXPECT_TRUE(ok); } else if (!ok) { @@ -1190,26 +1190,26 @@ class BidiClient } private: - void MaybeAsyncWrite(bool first_write_async) { - if (first_write_async) { - // Make sure that we have a write to 
issue. - // TODO(vjpai): Make this work with 0 writes case as well. - assert(msgs_to_send_ >= 1); - - AddHold(); - async_write_thread_ = std::thread([this] { - std::unique_lock<std::mutex> lock(async_write_thread_mu_); - async_write_thread_cv_.wait( - lock, [this] { return async_write_thread_start_; }); - MaybeWrite(); - }); - std::lock_guard<std::mutex> lock(async_write_thread_mu_); - async_write_thread_start_ = true; - async_write_thread_cv_.notify_one(); - return; - } - MaybeWrite(); - } + void MaybeAsyncWrite(bool first_write_async) { + if (first_write_async) { + // Make sure that we have a write to issue. + // TODO(vjpai): Make this work with 0 writes case as well. + assert(msgs_to_send_ >= 1); + + AddHold(); + async_write_thread_ = std::thread([this] { + std::unique_lock<std::mutex> lock(async_write_thread_mu_); + async_write_thread_cv_.wait( + lock, [this] { return async_write_thread_start_; }); + MaybeWrite(); + }); + std::lock_guard<std::mutex> lock(async_write_thread_mu_); + async_write_thread_start_ = true; + async_write_thread_cv_.notify_one(); + return; + } + MaybeWrite(); + } void MaybeWrite() { if (client_cancel_.cancel && writes_complete_ == client_cancel_.ops_before_cancel) { @@ -1231,18 +1231,57 @@ class BidiClient std::mutex mu_; std::condition_variable cv_; bool done_ = false; - std::thread async_write_thread_; - bool async_write_thread_start_ = false; - std::mutex async_write_thread_mu_; - std::condition_variable async_write_thread_cv_; + std::thread async_write_thread_; + bool async_write_thread_start_ = false; + std::mutex async_write_thread_mu_; + std::condition_variable async_write_thread_cv_; }; TEST_P(ClientCallbackEnd2endTest, BidiStream) { MAYBE_SKIP_TEST; ResetStub(); - BidiClient test(stub_.get(), DO_NOT_CANCEL, - kServerDefaultResponseStreamsToSend, - /*cork_metadata=*/false, /*first_write_async=*/false); + BidiClient test(stub_.get(), DO_NOT_CANCEL, + kServerDefaultResponseStreamsToSend, + /*cork_metadata=*/false, /*first_write_async=*/false); + test.Await(); + // Make sure that the server interceptors were not notified of a cancel + if (GetParam().use_interceptors) { + EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); + } +} + +TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) { + MAYBE_SKIP_TEST; + ResetStub(); + BidiClient test(stub_.get(), DO_NOT_CANCEL, + kServerDefaultResponseStreamsToSend, + /*cork_metadata=*/false, /*first_write_async=*/true); + test.Await(); + // Make sure that the server interceptors were not notified of a cancel + if (GetParam().use_interceptors) { + EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); + } +} + +TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) { + MAYBE_SKIP_TEST; + ResetStub(); + BidiClient test(stub_.get(), DO_NOT_CANCEL, + kServerDefaultResponseStreamsToSend, + /*cork_metadata=*/true, /*first_write_async=*/false); + test.Await(); + // Make sure that the server interceptors were not notified of a cancel + if (GetParam().use_interceptors) { + EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); + } +} + +TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) { + MAYBE_SKIP_TEST; + ResetStub(); + BidiClient test(stub_.get(), DO_NOT_CANCEL, + kServerDefaultResponseStreamsToSend, + /*cork_metadata=*/true, /*first_write_async=*/true); test.Await(); // Make sure that the server interceptors were not notified of a cancel if (GetParam().use_interceptors) { @@ -1250,52 +1289,13 @@ TEST_P(ClientCallbackEnd2endTest, BidiStream) { } } -TEST_P(ClientCallbackEnd2endTest, 
BidiStreamFirstWriteAsync) { - MAYBE_SKIP_TEST; - ResetStub(); - BidiClient test(stub_.get(), DO_NOT_CANCEL, - kServerDefaultResponseStreamsToSend, - /*cork_metadata=*/false, /*first_write_async=*/true); - test.Await(); - // Make sure that the server interceptors were not notified of a cancel - if (GetParam().use_interceptors) { - EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); - } -} - -TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) { - MAYBE_SKIP_TEST; - ResetStub(); - BidiClient test(stub_.get(), DO_NOT_CANCEL, - kServerDefaultResponseStreamsToSend, - /*cork_metadata=*/true, /*first_write_async=*/false); - test.Await(); - // Make sure that the server interceptors were not notified of a cancel - if (GetParam().use_interceptors) { - EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); - } -} - -TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) { - MAYBE_SKIP_TEST; - ResetStub(); - BidiClient test(stub_.get(), DO_NOT_CANCEL, - kServerDefaultResponseStreamsToSend, - /*cork_metadata=*/true, /*first_write_async=*/true); - test.Await(); - // Make sure that the server interceptors were not notified of a cancel - if (GetParam().use_interceptors) { - EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel()); - } -} - TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) { MAYBE_SKIP_TEST; ResetStub(); - BidiClient test(stub_.get(), DO_NOT_CANCEL, - kServerDefaultResponseStreamsToSend, - /*cork_metadata=*/false, /*first_write_async=*/false, - ClientCancelInfo(2)); + BidiClient test(stub_.get(), DO_NOT_CANCEL, + kServerDefaultResponseStreamsToSend, + /*cork_metadata=*/false, /*first_write_async=*/false, + ClientCancelInfo(2)); test.Await(); // Make sure that the server interceptors were notified of a cancel if (GetParam().use_interceptors) { @@ -1307,8 +1307,8 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) { TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) { MAYBE_SKIP_TEST; ResetStub(); - BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2, - /*cork_metadata=*/false, /*first_write_async=*/false); + BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2, + /*cork_metadata=*/false, /*first_write_async=*/false); test.Await(); // Make sure that the server interceptors were notified if (GetParam().use_interceptors) { @@ -1321,9 +1321,9 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) { TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) { MAYBE_SKIP_TEST; ResetStub(); - BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING, - /*num_msgs_to_send=*/10, /*cork_metadata=*/false, - /*first_write_async=*/false); + BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING, + /*num_msgs_to_send=*/10, /*cork_metadata=*/false, + /*first_write_async=*/false); test.Await(); // Make sure that the server interceptors were notified if (GetParam().use_interceptors) { @@ -1336,8 +1336,8 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) { TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) { MAYBE_SKIP_TEST; ResetStub(); - BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5, - /*cork_metadata=*/false, /*first_write_async=*/false); + BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5, + /*cork_metadata=*/false, /*first_write_async=*/false); test.Await(); // Make sure that the server interceptors were notified if (GetParam().use_interceptors) { @@ -1452,12 +1452,12 @@ TEST_P(ClientCallbackEnd2endTest, 
done_cv_.wait(l); } } - // RemoveHold under the same lock used for OnDone to make sure that we don't - // call OnDone directly or indirectly from the RemoveHold function. - void RemoveHoldUnderLock() { - std::unique_lock<std::mutex> l(mu_); - RemoveHold(); - } + // RemoveHold under the same lock used for OnDone to make sure that we don't + // call OnDone directly or indirectly from the RemoveHold function. + void RemoveHoldUnderLock() { + std::unique_lock<std::mutex> l(mu_); + RemoveHold(); + } const Status& status() { std::unique_lock<std::mutex> l(mu_); return status_; @@ -1502,7 +1502,7 @@ TEST_P(ClientCallbackEnd2endTest, ++reads_complete; } } - client.RemoveHoldUnderLock(); + client.RemoveHoldUnderLock(); client.Await(); EXPECT_EQ(kServerDefaultResponseStreamsToSend, reads_complete); @@ -1516,7 +1516,7 @@ std::vector<TestScenario> CreateTestScenarios(bool test_insecure) { #endif std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types{ + std::vector<TString> credentials_types{ GetCredentialsProvider()->GetSecureCredentialsTypeList()}; auto insec_ok = [] { // Only allow insecure credentials type when it is registered with the @@ -1556,8 +1556,8 @@ INSTANTIATE_TEST_SUITE_P(ClientCallbackEnd2endTest, ClientCallbackEnd2endTest, } // namespace grpc int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - grpc::testing::TestEnvironment env(argc, argv); + ::testing::InitGoogleTest(&argc, argv); + grpc::testing::TestEnvironment env(argc, argv); grpc_init(); int ret = RUN_ALL_TESTS(); grpc_shutdown(); diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc index 429a4283bc..80e1869396 100644 --- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc @@ -38,7 +38,7 @@ using grpc::testing::EchoRequest; using grpc::testing::EchoResponse; using std::chrono::system_clock; -static TString g_root; +static TString g_root; namespace grpc { namespace testing { @@ -127,9 +127,9 @@ TEST_F(CrashTest, KillAfterWrite) { } // namespace grpc int main(int argc, char** argv) { - TString me = argv[0]; + TString me = argv[0]; auto lslash = me.rfind('/'); - if (lslash != TString::npos) { + if (lslash != TString::npos) { g_root = me.substr(0, lslash); } else { g_root = "."; diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc index 2962bd63da..2d5be420f2 100644 --- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc +++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc @@ -19,7 +19,7 @@ #include <gflags/gflags.h> #include <iostream> #include <memory> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <grpc/support/log.h> #include <grpcpp/server.h> diff --git a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc index 99bb3fb6d9..956876d9f6 100644 --- a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc @@ -43,17 +43,17 @@ namespace grpc { namespace testing { namespace { -enum class RPCType { - kSyncUnary, - kSyncClientStreaming, - kSyncServerStreaming, - kSyncBidiStreaming, - kAsyncCQUnary, - kAsyncCQClientStreaming, - kAsyncCQServerStreaming, - kAsyncCQBidiStreaming, -}; - +enum class RPCType { + kSyncUnary, + 
kSyncClientStreaming, + kSyncServerStreaming, + kSyncBidiStreaming, + kAsyncCQUnary, + kAsyncCQClientStreaming, + kAsyncCQServerStreaming, + kAsyncCQBidiStreaming, +}; + /* Hijacks Echo RPC and fills in the expected values */ class HijackingInterceptor : public experimental::Interceptor { public: @@ -267,7 +267,7 @@ class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor { private: experimental::ClientRpcInfo* info_; - std::multimap<TString, TString> metadata_map_; + std::multimap<TString, TString> metadata_map_; ClientContext ctx_; EchoRequest req_; EchoResponse resp_; @@ -349,7 +349,7 @@ class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor { private: experimental::ClientRpcInfo* info_; - TString msg; + TString msg; }; class ClientStreamingRpcHijackingInterceptor @@ -411,7 +411,7 @@ class ServerStreamingRpcHijackingInterceptor public: ServerStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) { info_ = info; - got_failed_message_ = false; + got_failed_message_ = false; } virtual void Intercept(experimental::InterceptorBatchMethods* methods) { @@ -543,22 +543,22 @@ class LoggingInterceptor : public experimental::Interceptor { if (methods->QueryInterceptionHookPoint( experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) { EchoRequest req; - auto* send_msg = methods->GetSendMessage(); - if (send_msg == nullptr) { - // We did not get the non-serialized form of the message. Get the - // serialized form. - auto* buffer = methods->GetSerializedSendMessage(); - auto copied_buffer = *buffer; - EchoRequest req; - EXPECT_TRUE( - SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req) - .ok()); - EXPECT_EQ(req.message(), "Hello"); - } else { - EXPECT_EQ( - static_cast<const EchoRequest*>(send_msg)->message().find("Hello"), - 0u); - } + auto* send_msg = methods->GetSendMessage(); + if (send_msg == nullptr) { + // We did not get the non-serialized form of the message. Get the + // serialized form. 
+ auto* buffer = methods->GetSerializedSendMessage(); + auto copied_buffer = *buffer; + EchoRequest req; + EXPECT_TRUE( + SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req) + .ok()); + EXPECT_EQ(req.message(), "Hello"); + } else { + EXPECT_EQ( + static_cast<const EchoRequest*>(send_msg)->message().find("Hello"), + 0u); + } auto* buffer = methods->GetSerializedSendMessage(); auto copied_buffer = *buffer; EXPECT_TRUE( @@ -606,27 +606,27 @@ class LoggingInterceptor : public experimental::Interceptor { methods->Proceed(); } - static void VerifyCall(RPCType type) { - switch (type) { - case RPCType::kSyncUnary: - case RPCType::kAsyncCQUnary: - VerifyUnaryCall(); - break; - case RPCType::kSyncClientStreaming: - case RPCType::kAsyncCQClientStreaming: - VerifyClientStreamingCall(); - break; - case RPCType::kSyncServerStreaming: - case RPCType::kAsyncCQServerStreaming: - VerifyServerStreamingCall(); - break; - case RPCType::kSyncBidiStreaming: - case RPCType::kAsyncCQBidiStreaming: - VerifyBidiStreamingCall(); - break; - } - } - + static void VerifyCall(RPCType type) { + switch (type) { + case RPCType::kSyncUnary: + case RPCType::kAsyncCQUnary: + VerifyUnaryCall(); + break; + case RPCType::kSyncClientStreaming: + case RPCType::kAsyncCQClientStreaming: + VerifyClientStreamingCall(); + break; + case RPCType::kSyncServerStreaming: + case RPCType::kAsyncCQServerStreaming: + VerifyServerStreamingCall(); + break; + case RPCType::kSyncBidiStreaming: + case RPCType::kAsyncCQBidiStreaming: + VerifyBidiStreamingCall(); + break; + } + } + static void VerifyCallCommon() { EXPECT_TRUE(pre_send_initial_metadata_); EXPECT_TRUE(pre_send_close_); @@ -683,78 +683,78 @@ class LoggingInterceptorFactory } }; -class TestScenario { - public: - explicit TestScenario(const RPCType& type) : type_(type) {} - - RPCType type() const { return type_; } - - private: - RPCType type_; -}; - -std::vector<TestScenario> CreateTestScenarios() { - std::vector<TestScenario> scenarios; - scenarios.emplace_back(RPCType::kSyncUnary); - scenarios.emplace_back(RPCType::kSyncClientStreaming); - scenarios.emplace_back(RPCType::kSyncServerStreaming); - scenarios.emplace_back(RPCType::kSyncBidiStreaming); - scenarios.emplace_back(RPCType::kAsyncCQUnary); - scenarios.emplace_back(RPCType::kAsyncCQServerStreaming); - return scenarios; -} - -class ParameterizedClientInterceptorsEnd2endTest - : public ::testing::TestWithParam<TestScenario> { +class TestScenario { + public: + explicit TestScenario(const RPCType& type) : type_(type) {} + + RPCType type() const { return type_; } + + private: + RPCType type_; +}; + +std::vector<TestScenario> CreateTestScenarios() { + std::vector<TestScenario> scenarios; + scenarios.emplace_back(RPCType::kSyncUnary); + scenarios.emplace_back(RPCType::kSyncClientStreaming); + scenarios.emplace_back(RPCType::kSyncServerStreaming); + scenarios.emplace_back(RPCType::kSyncBidiStreaming); + scenarios.emplace_back(RPCType::kAsyncCQUnary); + scenarios.emplace_back(RPCType::kAsyncCQServerStreaming); + return scenarios; +} + +class ParameterizedClientInterceptorsEnd2endTest + : public ::testing::TestWithParam<TestScenario> { protected: - ParameterizedClientInterceptorsEnd2endTest() { + ParameterizedClientInterceptorsEnd2endTest() { int port = grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); + server_address_ = "localhost:" + ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); server_ = 
builder.BuildAndStart(); } - ~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); } - - void SendRPC(const std::shared_ptr<Channel>& channel) { - switch (GetParam().type()) { - case RPCType::kSyncUnary: - MakeCall(channel); - break; - case RPCType::kSyncClientStreaming: - MakeClientStreamingCall(channel); - break; - case RPCType::kSyncServerStreaming: - MakeServerStreamingCall(channel); - break; - case RPCType::kSyncBidiStreaming: - MakeBidiStreamingCall(channel); - break; - case RPCType::kAsyncCQUnary: - MakeAsyncCQCall(channel); - break; - case RPCType::kAsyncCQClientStreaming: - // TODO(yashykt) : Fill this out - break; - case RPCType::kAsyncCQServerStreaming: - MakeAsyncCQServerStreamingCall(channel); - break; - case RPCType::kAsyncCQBidiStreaming: - // TODO(yashykt) : Fill this out - break; - } - } - - TString server_address_; - EchoTestServiceStreamingImpl service_; + ~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); } + + void SendRPC(const std::shared_ptr<Channel>& channel) { + switch (GetParam().type()) { + case RPCType::kSyncUnary: + MakeCall(channel); + break; + case RPCType::kSyncClientStreaming: + MakeClientStreamingCall(channel); + break; + case RPCType::kSyncServerStreaming: + MakeServerStreamingCall(channel); + break; + case RPCType::kSyncBidiStreaming: + MakeBidiStreamingCall(channel); + break; + case RPCType::kAsyncCQUnary: + MakeAsyncCQCall(channel); + break; + case RPCType::kAsyncCQClientStreaming: + // TODO(yashykt) : Fill this out + break; + case RPCType::kAsyncCQServerStreaming: + MakeAsyncCQServerStreamingCall(channel); + break; + case RPCType::kAsyncCQBidiStreaming: + // TODO(yashykt) : Fill this out + break; + } + } + + TString server_address_; + EchoTestServiceStreamingImpl service_; std::unique_ptr<Server> server_; }; -TEST_P(ParameterizedClientInterceptorsEnd2endTest, - ClientInterceptorLoggingTest) { +TEST_P(ParameterizedClientInterceptorsEnd2endTest, + ClientInterceptorLoggingTest) { ChannelArguments args; DummyInterceptor::Reset(); std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> @@ -768,36 +768,36 @@ TEST_P(ParameterizedClientInterceptorsEnd2endTest, } auto channel = experimental::CreateCustomChannelWithInterceptors( server_address_, InsecureChannelCredentials(), args, std::move(creators)); - SendRPC(channel); - LoggingInterceptor::VerifyCall(GetParam().type()); + SendRPC(channel); + LoggingInterceptor::VerifyCall(GetParam().type()); // Make sure all 20 dummy interceptors were run EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20); } -INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end, - ParameterizedClientInterceptorsEnd2endTest, - ::testing::ValuesIn(CreateTestScenarios())); - -class ClientInterceptorsEnd2endTest - : public ::testing::TestWithParam<TestScenario> { - protected: - ClientInterceptorsEnd2endTest() { - int port = grpc_pick_unused_port_or_die(); - - ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); - builder.AddListeningPort(server_address_, InsecureServerCredentials()); - builder.RegisterService(&service_); - server_ = builder.BuildAndStart(); - } - - ~ClientInterceptorsEnd2endTest() { server_->Shutdown(); } - - TString server_address_; - TestServiceImpl service_; - std::unique_ptr<Server> server_; -}; - +INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end, + ParameterizedClientInterceptorsEnd2endTest, + ::testing::ValuesIn(CreateTestScenarios())); + +class ClientInterceptorsEnd2endTest + : public 
::testing::TestWithParam<TestScenario> { + protected: + ClientInterceptorsEnd2endTest() { + int port = grpc_pick_unused_port_or_die(); + + ServerBuilder builder; + server_address_ = "localhost:" + ToString(port); + builder.AddListeningPort(server_address_, InsecureServerCredentials()); + builder.RegisterService(&service_); + server_ = builder.BuildAndStart(); + } + + ~ClientInterceptorsEnd2endTest() { server_->Shutdown(); } + + TString server_address_; + TestServiceImpl service_; + std::unique_ptr<Server> server_; +}; + TEST_F(ClientInterceptorsEnd2endTest, LameChannelClientInterceptorHijackingTest) { ChannelArguments args; @@ -878,26 +878,26 @@ TEST_F(ClientInterceptorsEnd2endTest, EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 12); } -class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test { - protected: - ClientInterceptorsCallbackEnd2endTest() { - int port = grpc_pick_unused_port_or_die(); - - ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); - builder.AddListeningPort(server_address_, InsecureServerCredentials()); - builder.RegisterService(&service_); - server_ = builder.BuildAndStart(); - } - - ~ClientInterceptorsCallbackEnd2endTest() { server_->Shutdown(); } - - TString server_address_; - TestServiceImpl service_; - std::unique_ptr<Server> server_; -}; - -TEST_F(ClientInterceptorsCallbackEnd2endTest, +class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test { + protected: + ClientInterceptorsCallbackEnd2endTest() { + int port = grpc_pick_unused_port_or_die(); + + ServerBuilder builder; + server_address_ = "localhost:" + ToString(port); + builder.AddListeningPort(server_address_, InsecureServerCredentials()); + builder.RegisterService(&service_); + server_ = builder.BuildAndStart(); + } + + ~ClientInterceptorsCallbackEnd2endTest() { server_->Shutdown(); } + + TString server_address_; + TestServiceImpl service_; + std::unique_ptr<Server> server_; +}; + +TEST_F(ClientInterceptorsCallbackEnd2endTest, ClientInterceptorLoggingTestWithCallback) { ChannelArguments args; DummyInterceptor::Reset(); @@ -918,7 +918,7 @@ TEST_F(ClientInterceptorsCallbackEnd2endTest, EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20); } -TEST_F(ClientInterceptorsCallbackEnd2endTest, +TEST_F(ClientInterceptorsCallbackEnd2endTest, ClientInterceptorFactoryAllowsNullptrReturn) { ChannelArguments args; DummyInterceptor::Reset(); @@ -947,7 +947,7 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test { int port = grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); + server_address_ = "localhost:" + ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); server_ = builder.BuildAndStart(); @@ -955,7 +955,7 @@ class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test { ~ClientInterceptorsStreamingEnd2endTest() { server_->Shutdown(); } - TString server_address_; + TString server_address_; EchoTestServiceStreamingImpl service_; std::unique_ptr<Server> server_; }; @@ -1043,21 +1043,21 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) { EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage()); } -TEST_F(ClientInterceptorsStreamingEnd2endTest, - AsyncCQServerStreamingHijackingTest) { - ChannelArguments args; - DummyInterceptor::Reset(); - std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> - creators; - creators.push_back( - 
std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>( - new ServerStreamingRpcHijackingInterceptorFactory())); - auto channel = experimental::CreateCustomChannelWithInterceptors( - server_address_, InsecureChannelCredentials(), args, std::move(creators)); - MakeAsyncCQServerStreamingCall(channel); - EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage()); -} - +TEST_F(ClientInterceptorsStreamingEnd2endTest, + AsyncCQServerStreamingHijackingTest) { + ChannelArguments args; + DummyInterceptor::Reset(); + std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> + creators; + creators.push_back( + std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>( + new ServerStreamingRpcHijackingInterceptorFactory())); + auto channel = experimental::CreateCustomChannelWithInterceptors( + server_address_, InsecureChannelCredentials(), args, std::move(creators)); + MakeAsyncCQServerStreamingCall(channel); + EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage()); +} + TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) { ChannelArguments args; DummyInterceptor::Reset(); @@ -1097,7 +1097,7 @@ class ClientGlobalInterceptorEnd2endTest : public ::testing::Test { int port = grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); + server_address_ = "localhost:" + ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); server_ = builder.BuildAndStart(); @@ -1105,7 +1105,7 @@ class ClientGlobalInterceptorEnd2endTest : public ::testing::Test { ~ClientGlobalInterceptorEnd2endTest() { server_->Shutdown(); } - TString server_address_; + TString server_address_; TestServiceImpl service_; std::unique_ptr<Server> server_; }; diff --git a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc index e33f45920b..fd08dd163d 100644 --- a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc @@ -21,11 +21,11 @@ #include <mutex> #include <random> #include <set> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <thread> -#include "y_absl/strings/str_cat.h" - +#include "y_absl/strings/str_cat.h" + #include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/atm.h> @@ -49,7 +49,7 @@ #include "src/core/lib/gpr/env.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" -#include "src/core/lib/iomgr/parse_address.h" +#include "src/core/lib/iomgr/parse_address.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" #include "src/cpp/client/secure_credentials.h" @@ -127,7 +127,7 @@ class MyTestServiceImpl : public TestServiceImpl { request_count_ = 0; } - std::set<TString> clients() { + std::set<TString> clients() { grpc::internal::MutexLock lock(&clients_mu_); return clients_; } @@ -138,7 +138,7 @@ class MyTestServiceImpl : public TestServiceImpl { } private: - void AddClient(const TString& client) { + void AddClient(const TString& client) { grpc::internal::MutexLock lock(&clients_mu_); clients_.insert(client); } @@ -147,7 +147,7 @@ class MyTestServiceImpl : public TestServiceImpl { int request_count_ = 0; const udpa::data::orca::v1::OrcaLoadReport* load_report_ = nullptr; grpc::internal::Mutex clients_mu_; - std::set<TString> clients_; + 
std::set<TString> clients_; }; class FakeResolverResponseGeneratorWrapper { @@ -157,18 +157,18 @@ class FakeResolverResponseGeneratorWrapper { grpc_core::FakeResolverResponseGenerator>()) {} FakeResolverResponseGeneratorWrapper( - FakeResolverResponseGeneratorWrapper&& other) noexcept { + FakeResolverResponseGeneratorWrapper&& other) noexcept { response_generator_ = std::move(other.response_generator_); } - void SetNextResolution( - const std::vector<int>& ports, const char* service_config_json = nullptr, - const char* attribute_key = nullptr, - std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute = - nullptr) { + void SetNextResolution( + const std::vector<int>& ports, const char* service_config_json = nullptr, + const char* attribute_key = nullptr, + std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute = + nullptr) { grpc_core::ExecCtx exec_ctx; - response_generator_->SetResponse(BuildFakeResults( - ports, service_config_json, attribute_key, std::move(attribute))); + response_generator_->SetResponse(BuildFakeResults( + ports, service_config_json, attribute_key, std::move(attribute))); } void SetNextResolutionUponError(const std::vector<int>& ports) { @@ -187,30 +187,30 @@ class FakeResolverResponseGeneratorWrapper { private: static grpc_core::Resolver::Result BuildFakeResults( - const std::vector<int>& ports, const char* service_config_json = nullptr, - const char* attribute_key = nullptr, - std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute = - nullptr) { + const std::vector<int>& ports, const char* service_config_json = nullptr, + const char* attribute_key = nullptr, + std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute = + nullptr) { grpc_core::Resolver::Result result; for (const int& port : ports) { - TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); - grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); + TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); + grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); - std::map<const char*, - std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>> - attributes; - if (attribute != nullptr) { - attributes[attribute_key] = attribute->Copy(); - } + std::map<const char*, + std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>> + attributes; + if (attribute != nullptr) { + attributes[attribute_key] = attribute->Copy(); + } result.addresses.emplace_back(address.addr, address.len, - nullptr /* args */, std::move(attributes)); + nullptr /* args */, std::move(attributes)); grpc_uri_destroy(lb_uri); } if (service_config_json != nullptr) { result.service_config = grpc_core::ServiceConfig::Create( - nullptr, service_config_json, &result.service_config_error); + nullptr, service_config_json, &result.service_config_error); GPR_ASSERT(result.service_config != nullptr); } return result; @@ -287,7 +287,7 @@ class ClientLbEnd2endTest : public ::testing::Test { } std::shared_ptr<Channel> BuildChannel( - const TString& lb_policy_name, + const TString& lb_policy_name, const FakeResolverResponseGeneratorWrapper& response_generator, ChannelArguments args = ChannelArguments()) { if (lb_policy_name.size() > 0) { @@ -306,13 +306,13 @@ class ClientLbEnd2endTest : public ::testing::Test { if (local_response) response = new EchoResponse; EchoRequest request; request.set_message(kRequestMessage_); - 
request.mutable_param()->set_echo_metadata(true); + request.mutable_param()->set_echo_metadata(true); ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); - context.AddMetadata("foo", "1"); - context.AddMetadata("bar", "2"); - context.AddMetadata("baz", "3"); + context.AddMetadata("foo", "1"); + context.AddMetadata("bar", "2"); + context.AddMetadata("baz", "3"); Status status = stub->Echo(&context, request, response); if (result != nullptr) *result = status; if (local_response) delete response; @@ -353,7 +353,7 @@ class ClientLbEnd2endTest : public ::testing::Test { port_ = port > 0 ? port : 5100; // grpc_pick_unused_port_or_die(); } - void Start(const TString& server_host) { + void Start(const TString& server_host) { gpr_log(GPR_INFO, "starting server on port %d", port_); started_ = true; grpc::internal::Mutex mu; @@ -366,7 +366,7 @@ class ClientLbEnd2endTest : public ::testing::Test { gpr_log(GPR_INFO, "server startup complete"); } - void Serve(const TString& server_host, grpc::internal::Mutex* mu, + void Serve(const TString& server_host, grpc::internal::Mutex* mu, grpc::internal::CondVar* cond) { std::ostringstream server_address; server_address << server_host << ":" << port_; @@ -388,7 +388,7 @@ class ClientLbEnd2endTest : public ::testing::Test { started_ = false; } - void SetServingStatus(const TString& service, bool serving) { + void SetServingStatus(const TString& service, bool serving) { server_->GetHealthCheckService()->SetServingStatus(service, serving); } }; @@ -463,9 +463,9 @@ class ClientLbEnd2endTest : public ::testing::Test { } } - const TString server_host_; + const TString server_host_; std::vector<std::unique_ptr<ServerData>> servers_; - const TString kRequestMessage_; + const TString kRequestMessage_; std::shared_ptr<ChannelCredentials> creds_; }; @@ -642,11 +642,11 @@ TEST_F(ClientLbEnd2endTest, PickFirstResetConnectionBackoff) { channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10))); // Reset connection backoff. experimental::ChannelResetConnectionBackoff(channel.get()); - // Wait for connect. Should happen as soon as the client connects to - // the newly started server, which should be before the initial - // backoff timeout elapses. + // Wait for connect. Should happen as soon as the client connects to + // the newly started server, which should be before the initial + // backoff timeout elapses. 
EXPECT_TRUE( - channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20))); + channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20))); const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC); const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0)); gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms); @@ -1650,96 +1650,96 @@ TEST_F(ClientLbEnd2endTest, ChannelIdleness) { EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); } -class ClientLbPickArgsTest : public ClientLbEnd2endTest { - protected: - void SetUp() override { - ClientLbEnd2endTest::SetUp(); - current_test_instance_ = this; - } - - static void SetUpTestCase() { - grpc_init(); - grpc_core::RegisterTestPickArgsLoadBalancingPolicy(SavePickArgs); - } - - static void TearDownTestCase() { grpc_shutdown_blocking(); } - - const std::vector<grpc_core::PickArgsSeen>& args_seen_list() { - grpc::internal::MutexLock lock(&mu_); - return args_seen_list_; - } - - private: - static void SavePickArgs(const grpc_core::PickArgsSeen& args_seen) { - ClientLbPickArgsTest* self = current_test_instance_; - grpc::internal::MutexLock lock(&self->mu_); - self->args_seen_list_.emplace_back(args_seen); - } - - static ClientLbPickArgsTest* current_test_instance_; - grpc::internal::Mutex mu_; - std::vector<grpc_core::PickArgsSeen> args_seen_list_; -}; - -ClientLbPickArgsTest* ClientLbPickArgsTest::current_test_instance_ = nullptr; - -TEST_F(ClientLbPickArgsTest, Basic) { - const int kNumServers = 1; - StartServers(kNumServers); - auto response_generator = BuildResolverResponseGenerator(); - auto channel = BuildChannel("test_pick_args_lb", response_generator); - auto stub = BuildStub(channel); - response_generator.SetNextResolution(GetServersPorts()); - CheckRpcSendOk(stub, DEBUG_LOCATION, /*wait_for_ready=*/true); - // Check LB policy name for the channel. - EXPECT_EQ("test_pick_args_lb", channel->GetLoadBalancingPolicyName()); - // There will be two entries, one for the pick tried in state - // CONNECTING and another for the pick tried in state READY. 
- EXPECT_THAT(args_seen_list(), - ::testing::ElementsAre( - ::testing::AllOf( - ::testing::Field(&grpc_core::PickArgsSeen::path, - "/grpc.testing.EchoTestService/Echo"), - ::testing::Field(&grpc_core::PickArgsSeen::metadata, - ::testing::UnorderedElementsAre( - ::testing::Pair("foo", "1"), - ::testing::Pair("bar", "2"), - ::testing::Pair("baz", "3")))), - ::testing::AllOf( - ::testing::Field(&grpc_core::PickArgsSeen::path, - "/grpc.testing.EchoTestService/Echo"), - ::testing::Field(&grpc_core::PickArgsSeen::metadata, - ::testing::UnorderedElementsAre( - ::testing::Pair("foo", "1"), - ::testing::Pair("bar", "2"), - ::testing::Pair("baz", "3")))))); -} - +class ClientLbPickArgsTest : public ClientLbEnd2endTest { + protected: + void SetUp() override { + ClientLbEnd2endTest::SetUp(); + current_test_instance_ = this; + } + + static void SetUpTestCase() { + grpc_init(); + grpc_core::RegisterTestPickArgsLoadBalancingPolicy(SavePickArgs); + } + + static void TearDownTestCase() { grpc_shutdown_blocking(); } + + const std::vector<grpc_core::PickArgsSeen>& args_seen_list() { + grpc::internal::MutexLock lock(&mu_); + return args_seen_list_; + } + + private: + static void SavePickArgs(const grpc_core::PickArgsSeen& args_seen) { + ClientLbPickArgsTest* self = current_test_instance_; + grpc::internal::MutexLock lock(&self->mu_); + self->args_seen_list_.emplace_back(args_seen); + } + + static ClientLbPickArgsTest* current_test_instance_; + grpc::internal::Mutex mu_; + std::vector<grpc_core::PickArgsSeen> args_seen_list_; +}; + +ClientLbPickArgsTest* ClientLbPickArgsTest::current_test_instance_ = nullptr; + +TEST_F(ClientLbPickArgsTest, Basic) { + const int kNumServers = 1; + StartServers(kNumServers); + auto response_generator = BuildResolverResponseGenerator(); + auto channel = BuildChannel("test_pick_args_lb", response_generator); + auto stub = BuildStub(channel); + response_generator.SetNextResolution(GetServersPorts()); + CheckRpcSendOk(stub, DEBUG_LOCATION, /*wait_for_ready=*/true); + // Check LB policy name for the channel. + EXPECT_EQ("test_pick_args_lb", channel->GetLoadBalancingPolicyName()); + // There will be two entries, one for the pick tried in state + // CONNECTING and another for the pick tried in state READY. 
+ EXPECT_THAT(args_seen_list(), + ::testing::ElementsAre( + ::testing::AllOf( + ::testing::Field(&grpc_core::PickArgsSeen::path, + "/grpc.testing.EchoTestService/Echo"), + ::testing::Field(&grpc_core::PickArgsSeen::metadata, + ::testing::UnorderedElementsAre( + ::testing::Pair("foo", "1"), + ::testing::Pair("bar", "2"), + ::testing::Pair("baz", "3")))), + ::testing::AllOf( + ::testing::Field(&grpc_core::PickArgsSeen::path, + "/grpc.testing.EchoTestService/Echo"), + ::testing::Field(&grpc_core::PickArgsSeen::metadata, + ::testing::UnorderedElementsAre( + ::testing::Pair("foo", "1"), + ::testing::Pair("bar", "2"), + ::testing::Pair("baz", "3")))))); +} + class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { protected: void SetUp() override { ClientLbEnd2endTest::SetUp(); - current_test_instance_ = this; + current_test_instance_ = this; } - static void SetUpTestCase() { - grpc_init(); - grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy( - ReportTrailerIntercepted); - } - - static void TearDownTestCase() { grpc_shutdown_blocking(); } - + static void SetUpTestCase() { + grpc_init(); + grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy( + ReportTrailerIntercepted); + } + + static void TearDownTestCase() { grpc_shutdown_blocking(); } + int trailers_intercepted() { grpc::internal::MutexLock lock(&mu_); return trailers_intercepted_; } - const grpc_core::MetadataVector& trailing_metadata() { - grpc::internal::MutexLock lock(&mu_); - return trailing_metadata_; - } - + const grpc_core::MetadataVector& trailing_metadata() { + grpc::internal::MutexLock lock(&mu_); + return trailing_metadata_; + } + const udpa::data::orca::v1::OrcaLoadReport* backend_load_report() { grpc::internal::MutexLock lock(&mu_); return load_report_.get(); @@ -1747,12 +1747,12 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { private: static void ReportTrailerIntercepted( - const grpc_core::TrailingMetadataArgsSeen& args_seen) { - const auto* backend_metric_data = args_seen.backend_metric_data; - ClientLbInterceptTrailingMetadataTest* self = current_test_instance_; + const grpc_core::TrailingMetadataArgsSeen& args_seen) { + const auto* backend_metric_data = args_seen.backend_metric_data; + ClientLbInterceptTrailingMetadataTest* self = current_test_instance_; grpc::internal::MutexLock lock(&self->mu_); self->trailers_intercepted_++; - self->trailing_metadata_ = args_seen.metadata; + self->trailing_metadata_ = args_seen.metadata; if (backend_metric_data != nullptr) { self->load_report_.reset(new udpa::data::orca::v1::OrcaLoadReport); self->load_report_->set_cpu_utilization( @@ -1761,28 +1761,28 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest { backend_metric_data->mem_utilization); self->load_report_->set_rps(backend_metric_data->requests_per_second); for (const auto& p : backend_metric_data->request_cost) { - TString name = TString(p.first); - (*self->load_report_->mutable_request_cost())[std::move(name)] = - p.second; + TString name = TString(p.first); + (*self->load_report_->mutable_request_cost())[std::move(name)] = + p.second; } for (const auto& p : backend_metric_data->utilization) { - TString name = TString(p.first); - (*self->load_report_->mutable_utilization())[std::move(name)] = - p.second; + TString name = TString(p.first); + (*self->load_report_->mutable_utilization())[std::move(name)] = + p.second; } } } - static ClientLbInterceptTrailingMetadataTest* current_test_instance_; + static 
ClientLbInterceptTrailingMetadataTest* current_test_instance_; grpc::internal::Mutex mu_; int trailers_intercepted_ = 0; - grpc_core::MetadataVector trailing_metadata_; + grpc_core::MetadataVector trailing_metadata_; std::unique_ptr<udpa::data::orca::v1::OrcaLoadReport> load_report_; }; -ClientLbInterceptTrailingMetadataTest* - ClientLbInterceptTrailingMetadataTest::current_test_instance_ = nullptr; - +ClientLbInterceptTrailingMetadataTest* + ClientLbInterceptTrailingMetadataTest::current_test_instance_ = nullptr; + TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) { const int kNumServers = 1; const int kNumRpcs = 10; @@ -1799,13 +1799,13 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) { EXPECT_EQ("intercept_trailing_metadata_lb", channel->GetLoadBalancingPolicyName()); EXPECT_EQ(kNumRpcs, trailers_intercepted()); - EXPECT_THAT(trailing_metadata(), - ::testing::UnorderedElementsAre( - // TODO(roth): Should grpc-status be visible here? - ::testing::Pair("grpc-status", "0"), - ::testing::Pair("user-agent", ::testing::_), - ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"), - ::testing::Pair("baz", "3"))); + EXPECT_THAT(trailing_metadata(), + ::testing::UnorderedElementsAre( + // TODO(roth): Should grpc-status be visible here? + ::testing::Pair("grpc-status", "0"), + ::testing::Pair("user-agent", ::testing::_), + ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"), + ::testing::Pair("baz", "3"))); EXPECT_EQ(nullptr, backend_load_report()); } @@ -1841,13 +1841,13 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesEnabled) { EXPECT_EQ("intercept_trailing_metadata_lb", channel->GetLoadBalancingPolicyName()); EXPECT_EQ(kNumRpcs, trailers_intercepted()); - EXPECT_THAT(trailing_metadata(), - ::testing::UnorderedElementsAre( - // TODO(roth): Should grpc-status be visible here? - ::testing::Pair("grpc-status", "0"), - ::testing::Pair("user-agent", ::testing::_), - ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"), - ::testing::Pair("baz", "3"))); + EXPECT_THAT(trailing_metadata(), + ::testing::UnorderedElementsAre( + // TODO(roth): Should grpc-status be visible here? 
+ ::testing::Pair("grpc-status", "0"), + ::testing::Pair("user-agent", ::testing::_), + ::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"), + ::testing::Pair("baz", "3"))); EXPECT_EQ(nullptr, backend_load_report()); } @@ -1901,83 +1901,83 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, BackendMetricData) { EXPECT_EQ(kNumRpcs, trailers_intercepted()); } -class ClientLbAddressTest : public ClientLbEnd2endTest { - protected: - static const char* kAttributeKey; - - class Attribute : public grpc_core::ServerAddress::AttributeInterface { - public: - explicit Attribute(const TString& str) : str_(str) {} - - std::unique_ptr<AttributeInterface> Copy() const override { - return y_absl::make_unique<Attribute>(str_); - } - - int Cmp(const AttributeInterface* other) const override { - return str_.compare(static_cast<const Attribute*>(other)->str_); - } - - TString ToString() const override { return str_; } - - private: - TString str_; - }; - - void SetUp() override { - ClientLbEnd2endTest::SetUp(); - current_test_instance_ = this; - } - - static void SetUpTestCase() { - grpc_init(); - grpc_core::RegisterAddressTestLoadBalancingPolicy(SaveAddress); - } - - static void TearDownTestCase() { grpc_shutdown_blocking(); } - - const std::vector<TString>& addresses_seen() { - grpc::internal::MutexLock lock(&mu_); - return addresses_seen_; - } - - private: - static void SaveAddress(const grpc_core::ServerAddress& address) { - ClientLbAddressTest* self = current_test_instance_; - grpc::internal::MutexLock lock(&self->mu_); - self->addresses_seen_.emplace_back(address.ToString()); - } - - static ClientLbAddressTest* current_test_instance_; - grpc::internal::Mutex mu_; - std::vector<TString> addresses_seen_; -}; - -const char* ClientLbAddressTest::kAttributeKey = "attribute_key"; - -ClientLbAddressTest* ClientLbAddressTest::current_test_instance_ = nullptr; - -TEST_F(ClientLbAddressTest, Basic) { - const int kNumServers = 1; - StartServers(kNumServers); - auto response_generator = BuildResolverResponseGenerator(); - auto channel = BuildChannel("address_test_lb", response_generator); - auto stub = BuildStub(channel); - // Addresses returned by the resolver will have attached attributes. - response_generator.SetNextResolution(GetServersPorts(), nullptr, - kAttributeKey, - y_absl::make_unique<Attribute>("foo")); - CheckRpcSendOk(stub, DEBUG_LOCATION); - // Check LB policy name for the channel. - EXPECT_EQ("address_test_lb", channel->GetLoadBalancingPolicyName()); - // Make sure that the attributes wind up on the subchannels. 
- std::vector<TString> expected; - for (const int port : GetServersPorts()) { - expected.emplace_back(y_absl::StrCat( - "127.0.0.1:", port, " args={} attributes={", kAttributeKey, "=foo}")); - } - EXPECT_EQ(addresses_seen(), expected); -} - +class ClientLbAddressTest : public ClientLbEnd2endTest { + protected: + static const char* kAttributeKey; + + class Attribute : public grpc_core::ServerAddress::AttributeInterface { + public: + explicit Attribute(const TString& str) : str_(str) {} + + std::unique_ptr<AttributeInterface> Copy() const override { + return y_absl::make_unique<Attribute>(str_); + } + + int Cmp(const AttributeInterface* other) const override { + return str_.compare(static_cast<const Attribute*>(other)->str_); + } + + TString ToString() const override { return str_; } + + private: + TString str_; + }; + + void SetUp() override { + ClientLbEnd2endTest::SetUp(); + current_test_instance_ = this; + } + + static void SetUpTestCase() { + grpc_init(); + grpc_core::RegisterAddressTestLoadBalancingPolicy(SaveAddress); + } + + static void TearDownTestCase() { grpc_shutdown_blocking(); } + + const std::vector<TString>& addresses_seen() { + grpc::internal::MutexLock lock(&mu_); + return addresses_seen_; + } + + private: + static void SaveAddress(const grpc_core::ServerAddress& address) { + ClientLbAddressTest* self = current_test_instance_; + grpc::internal::MutexLock lock(&self->mu_); + self->addresses_seen_.emplace_back(address.ToString()); + } + + static ClientLbAddressTest* current_test_instance_; + grpc::internal::Mutex mu_; + std::vector<TString> addresses_seen_; +}; + +const char* ClientLbAddressTest::kAttributeKey = "attribute_key"; + +ClientLbAddressTest* ClientLbAddressTest::current_test_instance_ = nullptr; + +TEST_F(ClientLbAddressTest, Basic) { + const int kNumServers = 1; + StartServers(kNumServers); + auto response_generator = BuildResolverResponseGenerator(); + auto channel = BuildChannel("address_test_lb", response_generator); + auto stub = BuildStub(channel); + // Addresses returned by the resolver will have attached attributes. + response_generator.SetNextResolution(GetServersPorts(), nullptr, + kAttributeKey, + y_absl::make_unique<Attribute>("foo")); + CheckRpcSendOk(stub, DEBUG_LOCATION); + // Check LB policy name for the channel. + EXPECT_EQ("address_test_lb", channel->GetLoadBalancingPolicyName()); + // Make sure that the attributes wind up on the subchannels. 
+ std::vector<TString> expected; + for (const int port : GetServersPorts()) { + expected.emplace_back(y_absl::StrCat( + "127.0.0.1:", port, " args={} attributes={", kAttributeKey, "=foo}")); + } + EXPECT_EQ(addresses_seen(), expected); +} + } // namespace } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc index c9ed229c1b..5d025ecb94 100644 --- a/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/delegating_channel_test.cc @@ -58,7 +58,7 @@ class DelegatingChannelTest : public ::testing::Test { DelegatingChannelTest() { int port = grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ToString(port); + server_address_ = "localhost:" + ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); server_ = builder.BuildAndStart(); @@ -66,7 +66,7 @@ class DelegatingChannelTest : public ::testing::Test { ~DelegatingChannelTest() { server_->Shutdown(); } - TString server_address_; + TString server_address_; TestServiceImpl service_; std::unique_ptr<Server> server_; }; diff --git a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc index c4e1fac870..ad2ddb7e84 100644 --- a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc @@ -31,13 +31,13 @@ #include <grpcpp/server.h> #include <grpcpp/server_builder.h> #include <grpcpp/server_context.h> -#include <grpcpp/support/string_ref.h> -#include <grpcpp/test/channel_test_peer.h> +#include <grpcpp/support/string_ref.h> +#include <grpcpp/test/channel_test_peer.h> #include <mutex> #include <thread> -#include "y_absl/strings/str_format.h" +#include "y_absl/strings/str_format.h" #include "src/core/ext/filters/client_channel/backup_poller.h" #include "src/core/lib/gpr/env.h" #include "src/core/lib/iomgr/iomgr.h" @@ -77,71 +77,71 @@ namespace grpc { namespace testing { namespace { -bool CheckIsLocalhost(const TString& addr) { - const TString kIpv6("ipv6:[::1]:"); - const TString kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:"); - const TString kIpv4("ipv4:127.0.0.1:"); +bool CheckIsLocalhost(const TString& addr) { + const TString kIpv6("ipv6:[::1]:"); + const TString kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:"); + const TString kIpv4("ipv4:127.0.0.1:"); return addr.substr(0, kIpv4.size()) == kIpv4 || addr.substr(0, kIpv4MappedIpv6.size()) == kIpv4MappedIpv6 || addr.substr(0, kIpv6.size()) == kIpv6; } -const int kClientChannelBackupPollIntervalMs = 200; - +const int kClientChannelBackupPollIntervalMs = 200; + const char kTestCredsPluginErrorMsg[] = "Could not find plugin metadata."; -const char kFakeToken[] = "fake_token"; -const char kFakeSelector[] = "fake_selector"; -const char kExpectedFakeCredsDebugString[] = - "SecureCallCredentials{GoogleIAMCredentials{Token:present," - "AuthoritySelector:fake_selector}}"; - -const char kWrongToken[] = "wrong_token"; -const char kWrongSelector[] = "wrong_selector"; -const char kExpectedWrongCredsDebugString[] = - "SecureCallCredentials{GoogleIAMCredentials{Token:present," - "AuthoritySelector:wrong_selector}}"; - -const char kFakeToken1[] = "fake_token1"; -const char kFakeSelector1[] = "fake_selector1"; -const char kExpectedFakeCreds1DebugString[] = - "SecureCallCredentials{GoogleIAMCredentials{Token:present," - 
"AuthoritySelector:fake_selector1}}"; - -const char kFakeToken2[] = "fake_token2"; -const char kFakeSelector2[] = "fake_selector2"; -const char kExpectedFakeCreds2DebugString[] = - "SecureCallCredentials{GoogleIAMCredentials{Token:present," - "AuthoritySelector:fake_selector2}}"; - -const char kExpectedAuthMetadataPluginKeyFailureCredsDebugString[] = - "SecureCallCredentials{TestMetadataCredentials{key:TestPluginMetadata," - "value:Does not matter, will fail the key is invalid.}}"; -const char kExpectedAuthMetadataPluginValueFailureCredsDebugString[] = - "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," - "value:With illegal \n value.}}"; -const char kExpectedAuthMetadataPluginWithDeadlineCredsDebugString[] = - "SecureCallCredentials{TestMetadataCredentials{key:meta_key,value:Does not " - "matter}}"; -const char kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString[] = - "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," - "value:Does not matter, will fail anyway (see 3rd param)}}"; -const char - kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString - [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-" - "metadata,value:Dr Jekyll}}"; -const char - kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString - [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-" - "metadata,value:Mr Hyde}}"; -const char kExpectedBlockingAuthMetadataPluginFailureCredsDebugString[] = - "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," - "value:Does not matter, will fail anyway (see 3rd param)}}"; -const char kExpectedCompositeCallCredsDebugString[] = - "SecureCallCredentials{CompositeCallCredentials{TestMetadataCredentials{" - "key:call-creds-key1,value:call-creds-val1},TestMetadataCredentials{key:" - "call-creds-key2,value:call-creds-val2}}}"; - +const char kFakeToken[] = "fake_token"; +const char kFakeSelector[] = "fake_selector"; +const char kExpectedFakeCredsDebugString[] = + "SecureCallCredentials{GoogleIAMCredentials{Token:present," + "AuthoritySelector:fake_selector}}"; + +const char kWrongToken[] = "wrong_token"; +const char kWrongSelector[] = "wrong_selector"; +const char kExpectedWrongCredsDebugString[] = + "SecureCallCredentials{GoogleIAMCredentials{Token:present," + "AuthoritySelector:wrong_selector}}"; + +const char kFakeToken1[] = "fake_token1"; +const char kFakeSelector1[] = "fake_selector1"; +const char kExpectedFakeCreds1DebugString[] = + "SecureCallCredentials{GoogleIAMCredentials{Token:present," + "AuthoritySelector:fake_selector1}}"; + +const char kFakeToken2[] = "fake_token2"; +const char kFakeSelector2[] = "fake_selector2"; +const char kExpectedFakeCreds2DebugString[] = + "SecureCallCredentials{GoogleIAMCredentials{Token:present," + "AuthoritySelector:fake_selector2}}"; + +const char kExpectedAuthMetadataPluginKeyFailureCredsDebugString[] = + "SecureCallCredentials{TestMetadataCredentials{key:TestPluginMetadata," + "value:Does not matter, will fail the key is invalid.}}"; +const char kExpectedAuthMetadataPluginValueFailureCredsDebugString[] = + "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," + "value:With illegal \n value.}}"; +const char kExpectedAuthMetadataPluginWithDeadlineCredsDebugString[] = + "SecureCallCredentials{TestMetadataCredentials{key:meta_key,value:Does not " + "matter}}"; +const char kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString[] = + 
"SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," + "value:Does not matter, will fail anyway (see 3rd param)}}"; +const char + kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString + [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-" + "metadata,value:Dr Jekyll}}"; +const char + kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString + [] = "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-" + "metadata,value:Mr Hyde}}"; +const char kExpectedBlockingAuthMetadataPluginFailureCredsDebugString[] = + "SecureCallCredentials{TestMetadataCredentials{key:test-plugin-metadata," + "value:Does not matter, will fail anyway (see 3rd param)}}"; +const char kExpectedCompositeCallCredsDebugString[] = + "SecureCallCredentials{CompositeCallCredentials{TestMetadataCredentials{" + "key:call-creds-key1,value:call-creds-val1},TestMetadataCredentials{key:" + "call-creds-key2,value:call-creds-val2}}}"; + class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin { public: static const char kGoodMetadataKey[]; @@ -162,7 +162,7 @@ class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin { Status GetMetadata( grpc::string_ref service_url, grpc::string_ref method_name, const grpc::AuthContext& channel_auth_context, - std::multimap<TString, TString>* metadata) override { + std::multimap<TString, TString>* metadata) override { if (delay_ms_ != 0) { gpr_sleep_until( gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), @@ -180,14 +180,14 @@ class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin { } } - TString DebugString() override { - return y_absl::StrFormat("TestMetadataCredentials{key:%s,value:%s}", - metadata_key_.c_str(), metadata_value_.c_str()); - } - + TString DebugString() override { + return y_absl::StrFormat("TestMetadataCredentials{key:%s,value:%s}", + metadata_key_.c_str(), metadata_value_.c_str()); + } + private: - TString metadata_key_; - TString metadata_value_; + TString metadata_key_; + TString metadata_value_; bool is_blocking_; bool is_successful_; int delay_ms_; @@ -284,7 +284,7 @@ class TestServiceImplDupPkg class TestScenario { public: TestScenario(bool interceptors, bool proxy, bool inproc_stub, - const TString& creds_type, bool use_callback_server) + const TString& creds_type, bool use_callback_server) : use_interceptors(interceptors), use_proxy(proxy), inproc(inproc_stub), @@ -294,7 +294,7 @@ class TestScenario { bool use_interceptors; bool use_proxy; bool inproc; - const TString credentials_type; + const TString credentials_type; bool callback_server; }; @@ -482,7 +482,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> { CallbackTestServiceImpl callback_service_; TestServiceImpl special_service_; TestServiceImplDupPkg dup_pkg_service_; - TString user_agent_prefix_; + TString user_agent_prefix_; int first_picked_port_; }; @@ -497,7 +497,7 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs, if (with_binary_metadata) { char bytes[8] = {'\0', '\1', '\2', '\3', '\4', '\5', '\6', static_cast<char>(i)}; - context.AddMetadata("custom-bin", TString(bytes, 8)); + context.AddMetadata("custom-bin", TString(bytes, 8)); } context.set_compression_algorithm(GRPC_COMPRESS_GZIP); Status s = stub->Echo(&context, request, &response); @@ -534,7 +534,7 @@ class End2endServerTryCancelTest : public End2endTest { // Send server_try_cancel value in the client metadata context.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + 
ToString(server_try_cancel)); auto stream = stub_->RequestStream(&context, &response); @@ -613,7 +613,7 @@ class End2endServerTryCancelTest : public End2endTest { // Send server_try_cancel in the client metadata context.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + ToString(server_try_cancel)); request.set_message("hello"); auto stream = stub_->ResponseStream(&context, request); @@ -624,7 +624,7 @@ class End2endServerTryCancelTest : public End2endTest { break; } EXPECT_EQ(response.message(), - request.message() + ToString(num_msgs_read)); + request.message() + ToString(num_msgs_read)); num_msgs_read++; } gpr_log(GPR_INFO, "Read %d messages", num_msgs_read); @@ -695,14 +695,14 @@ class End2endServerTryCancelTest : public End2endTest { // Send server_try_cancel in the client metadata context.AddMetadata(kServerTryCancelRequest, - ToString(server_try_cancel)); + ToString(server_try_cancel)); auto stream = stub_->BidiStream(&context); int num_msgs_read = 0; int num_msgs_sent = 0; while (num_msgs_sent < num_messages) { - request.set_message("hello " + ToString(num_msgs_sent)); + request.set_message("hello " + ToString(num_msgs_sent)); if (!stream->Write(request)) { break; } @@ -769,7 +769,7 @@ TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) { ClientContext context; context.AddMetadata(kServerTryCancelRequest, - ToString(CANCEL_BEFORE_PROCESSING)); + ToString(CANCEL_BEFORE_PROCESSING)); Status s = stub_->Echo(&context, request, &response); EXPECT_FALSE(s.ok()); EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code()); @@ -844,7 +844,7 @@ TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) { const auto& trailing_metadata = context.GetServerTrailingMetadata(); auto iter = trailing_metadata.find("user-agent"); EXPECT_TRUE(iter != trailing_metadata.end()); - TString expected_prefix = user_agent_prefix_ + " grpc-c++/"; + TString expected_prefix = user_agent_prefix_ + " grpc-c++/"; EXPECT_TRUE(iter->second.starts_with(expected_prefix)) << iter->second; } @@ -874,19 +874,19 @@ TEST_P(End2endTest, MultipleRpcs) { } } -TEST_P(End2endTest, ManyStubs) { - MAYBE_SKIP_TEST; - ResetStub(); - ChannelTestPeer peer(channel_.get()); - int registered_calls_pre = peer.registered_calls(); - int registration_attempts_pre = peer.registration_attempts(); - for (int i = 0; i < 1000; ++i) { - grpc::testing::EchoTestService::NewStub(channel_); - } - EXPECT_EQ(peer.registered_calls(), registered_calls_pre); - EXPECT_GT(peer.registration_attempts(), registration_attempts_pre); -} - +TEST_P(End2endTest, ManyStubs) { + MAYBE_SKIP_TEST; + ResetStub(); + ChannelTestPeer peer(channel_.get()); + int registered_calls_pre = peer.registered_calls(); + int registration_attempts_pre = peer.registration_attempts(); + for (int i = 0; i < 1000; ++i) { + grpc::testing::EchoTestService::NewStub(channel_); + } + EXPECT_EQ(peer.registered_calls(), registered_calls_pre); + EXPECT_GT(peer.registration_attempts(), registration_attempts_pre); +} + TEST_P(End2endTest, EmptyBinaryMetadata) { MAYBE_SKIP_TEST; ResetStub(); @@ -918,13 +918,13 @@ TEST_P(End2endTest, ReconnectChannel) { SendRpc(stub_.get(), 1, false); RestartServer(std::shared_ptr<AuthMetadataProcessor>()); // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to - // reconnect the channel. 
Make it a factor of 5x - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(kClientChannelBackupPollIntervalMs * 5 * - poller_slowdown_factor * - grpc_test_slowdown_factor(), - GPR_TIMESPAN))); + // reconnect the channel. Make it a factor of 5x + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(kClientChannelBackupPollIntervalMs * 5 * + poller_slowdown_factor * + grpc_test_slowdown_factor(), + GPR_TIMESPAN))); SendRpc(stub_.get(), 1, false); } @@ -1023,7 +1023,7 @@ TEST_P(End2endTest, ResponseStream) { auto stream = stub_->ResponseStream(&context, request); for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) { EXPECT_TRUE(stream->Read(&response)); - EXPECT_EQ(response.message(), request.message() + ToString(i)); + EXPECT_EQ(response.message(), request.message() + ToString(i)); } EXPECT_FALSE(stream->Read(&response)); @@ -1043,7 +1043,7 @@ TEST_P(End2endTest, ResponseStreamWithCoalescingApi) { auto stream = stub_->ResponseStream(&context, request); for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) { EXPECT_TRUE(stream->Read(&response)); - EXPECT_EQ(response.message(), request.message() + ToString(i)); + EXPECT_EQ(response.message(), request.message() + ToString(i)); } EXPECT_FALSE(stream->Read(&response)); @@ -1081,12 +1081,12 @@ TEST_P(End2endTest, BidiStream) { EchoRequest request; EchoResponse response; ClientContext context; - TString msg("hello"); + TString msg("hello"); auto stream = stub_->BidiStream(&context); for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) { - request.set_message(msg + ToString(i)); + request.set_message(msg + ToString(i)); EXPECT_TRUE(stream->Write(request)); EXPECT_TRUE(stream->Read(&response)); EXPECT_EQ(response.message(), request.message()); @@ -1108,7 +1108,7 @@ TEST_P(End2endTest, BidiStreamWithCoalescingApi) { ClientContext context; context.AddMetadata(kServerFinishAfterNReads, "3"); context.set_initial_metadata_corked(true); - TString msg("hello"); + TString msg("hello"); auto stream = stub_->BidiStream(&context); @@ -1144,7 +1144,7 @@ TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) { ClientContext context; context.AddMetadata(kServerFinishAfterNReads, "1"); context.set_initial_metadata_corked(true); - TString msg("hello"); + TString msg("hello"); auto stream = stub_->BidiStream(&context); @@ -1207,34 +1207,34 @@ TEST_P(End2endTest, CancelRpcBeforeStart) { } } -TEST_P(End2endTest, CancelRpcAfterStart) { +TEST_P(End2endTest, CancelRpcAfterStart) { MAYBE_SKIP_TEST; ResetStub(); EchoRequest request; EchoResponse response; ClientContext context; request.set_message("hello"); - request.mutable_param()->set_server_notify_client_when_started(true); + request.mutable_param()->set_server_notify_client_when_started(true); request.mutable_param()->set_skip_cancelled_check(true); Status s; std::thread echo_thread([this, &s, &context, &request, &response] { s = stub_->Echo(&context, request, &response); EXPECT_EQ(StatusCode::CANCELLED, s.error_code()); }); - if (!GetParam().callback_server) { - service_.ClientWaitUntilRpcStarted(); - } else { - callback_service_.ClientWaitUntilRpcStarted(); - } - + if (!GetParam().callback_server) { + service_.ClientWaitUntilRpcStarted(); + } else { + callback_service_.ClientWaitUntilRpcStarted(); + } + context.TryCancel(); - - if (!GetParam().callback_server) { - service_.SignalServerToContinue(); - } else { - callback_service_.SignalServerToContinue(); - } - + + if (!GetParam().callback_server) { + 
service_.SignalServerToContinue(); + } else { + callback_service_.SignalServerToContinue(); + } + echo_thread.join(); EXPECT_EQ("", response.message()); EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code()); @@ -1310,7 +1310,7 @@ TEST_P(End2endTest, ClientCancelsBidi) { EchoRequest request; EchoResponse response; ClientContext context; - TString msg("hello"); + TString msg("hello"); auto stream = stub_->BidiStream(&context); @@ -1460,7 +1460,7 @@ TEST_P(End2endTest, BinaryTrailerTest) { info->add_stack_entries("stack_entry_2"); info->add_stack_entries("stack_entry_3"); info->set_detail("detailed debug info"); - TString expected_string = info->SerializeAsString(); + TString expected_string = info->SerializeAsString(); request.set_message("Hello"); Status s = stub_->Echo(&context, request, &response); @@ -1511,12 +1511,12 @@ TEST_P(End2endTest, ExpectErrorTest) { EXPECT_EQ(iter->error_message(), s.error_message()); EXPECT_EQ(iter->binary_error_details(), s.error_details()); EXPECT_TRUE(context.debug_error_string().find("created") != - TString::npos); - EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos); - EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos); + TString::npos); + EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos); + EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos); EXPECT_TRUE(context.debug_error_string().find("status") != - TString::npos); - EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos); + TString::npos); + EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos); } } @@ -1774,7 +1774,7 @@ TEST_P(SecureEnd2endTest, SimpleRpcWithHost) { bool MetadataContains( const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, - const TString& key, const TString& value) { + const TString& key, const TString& value) { int count = 0; for (std::multimap<grpc::string_ref, grpc::string_ref>::const_iterator iter = @@ -1810,7 +1810,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) { // Metadata should have been consumed by the processor. 
EXPECT_FALSE(MetadataContains( context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY, - TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy)); + TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy)); } TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) { @@ -1836,7 +1836,7 @@ TEST_P(SecureEnd2endTest, SetPerCallCredentials) { EchoResponse response; ClientContext context; std::shared_ptr<CallCredentials> creds = - GoogleIAMCredentials(kFakeToken, kFakeSelector); + GoogleIAMCredentials(kFakeToken, kFakeSelector); context.set_credentials(creds); request.set_message("Hello"); request.mutable_param()->set_echo_metadata(true); @@ -1846,12 +1846,12 @@ TEST_P(SecureEnd2endTest, SetPerCallCredentials) { EXPECT_TRUE(s.ok()); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, - kFakeToken)); + kFakeToken)); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, - kFakeSelector)); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedFakeCredsDebugString); + kFakeSelector)); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedFakeCredsDebugString); } class CredentialsInterceptor : public experimental::Interceptor { @@ -1862,7 +1862,7 @@ class CredentialsInterceptor : public experimental::Interceptor { if (methods->QueryInterceptionHookPoint( experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) { std::shared_ptr<CallCredentials> creds = - GoogleIAMCredentials(kFakeToken, kFakeSelector); + GoogleIAMCredentials(kFakeToken, kFakeSelector); info_->client_context()->set_credentials(creds); } methods->Proceed(); @@ -1902,12 +1902,12 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterception) { EXPECT_TRUE(s.ok()); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, - kFakeToken)); + kFakeToken)); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, - kFakeSelector)); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedFakeCredsDebugString); + kFakeSelector)); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedFakeCredsDebugString); } TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) { @@ -1924,11 +1924,11 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) { EchoResponse response; ClientContext context; std::shared_ptr<CallCredentials> creds1 = - GoogleIAMCredentials(kWrongToken, kWrongSelector); + GoogleIAMCredentials(kWrongToken, kWrongSelector); context.set_credentials(creds1); EXPECT_EQ(context.credentials(), creds1); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedWrongCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedWrongCredsDebugString); request.set_message("Hello"); request.mutable_param()->set_echo_metadata(true); @@ -1937,12 +1937,12 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) { EXPECT_TRUE(s.ok()); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, - kFakeToken)); + kFakeToken)); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, - kFakeSelector)); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedFakeCredsDebugString); + kFakeSelector)); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedFakeCredsDebugString); } 
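
For reference, a minimal sketch of the per-call credentials pattern exercised by the SetPerCallCredentials and CallCredentialsInterception tests above; the token and selector literals and the helper name here are placeholders, not the kFake* constants from the tests:

#include <memory>

#include <grpcpp/client_context.h>
#include <grpcpp/security/credentials.h>

// Attach IAM call credentials to a single RPC. The most recently set
// credentials win, which is what OverridePerCallCredentials verifies via the
// trailing metadata echoed back by the test service.
void AttachPerCallIamCreds(grpc::ClientContext* context) {
  std::shared_ptr<grpc::CallCredentials> creds =
      grpc::GoogleIAMCredentials("example_token", "example_selector");
  context->set_credentials(creds);
}
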
TEST_P(SecureEnd2endTest, OverridePerCallCredentials) { @@ -1952,13 +1952,13 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) { EchoResponse response; ClientContext context; std::shared_ptr<CallCredentials> creds1 = - GoogleIAMCredentials(kFakeToken1, kFakeSelector1); + GoogleIAMCredentials(kFakeToken1, kFakeSelector1); context.set_credentials(creds1); EXPECT_EQ(context.credentials(), creds1); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedFakeCreds1DebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedFakeCreds1DebugString); std::shared_ptr<CallCredentials> creds2 = - GoogleIAMCredentials(kFakeToken2, kFakeSelector2); + GoogleIAMCredentials(kFakeToken2, kFakeSelector2); context.set_credentials(creds2); EXPECT_EQ(context.credentials(), creds2); request.set_message("Hello"); @@ -1967,18 +1967,18 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) { Status s = stub_->Echo(&context, request, &response); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, - kFakeToken2)); + kFakeToken2)); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, - kFakeSelector2)); + kFakeSelector2)); EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, - kFakeToken1)); + kFakeToken1)); EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, - kFakeSelector1)); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedFakeCreds2DebugString); + kFakeSelector1)); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedFakeCreds2DebugString); EXPECT_EQ(request.message(), response.message()); EXPECT_TRUE(s.ok()); } @@ -2000,8 +2000,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) { Status s = stub_->Echo(&context, request, &response); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedAuthMetadataPluginKeyFailureCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedAuthMetadataPluginKeyFailureCredsDebugString); } TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) { @@ -2020,8 +2020,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) { Status s = stub_->Echo(&context, request, &response); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedAuthMetadataPluginValueFailureCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedAuthMetadataPluginValueFailureCredsDebugString); } TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) { @@ -2046,8 +2046,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) { EXPECT_TRUE(s.error_code() == StatusCode::DEADLINE_EXCEEDED || s.error_code() == StatusCode::UNAVAILABLE); } - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedAuthMetadataPluginWithDeadlineCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedAuthMetadataPluginWithDeadlineCredsDebugString); } TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) { @@ -2075,8 +2075,8 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) { s.error_code() == StatusCode::UNAVAILABLE); } cancel_thread.join(); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedAuthMetadataPluginWithDeadlineCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + 
kExpectedAuthMetadataPluginWithDeadlineCredsDebugString); } TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) { @@ -2097,10 +2097,10 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) { EXPECT_FALSE(s.ok()); EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE); EXPECT_EQ(s.error_message(), - TString("Getting metadata from plugin failed with error: ") + + TString("Getting metadata from plugin failed with error: ") + kTestCredsPluginErrorMsg); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString); } TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) { @@ -2126,10 +2126,10 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) { // Metadata should have been consumed by the processor. EXPECT_FALSE(MetadataContains( context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY, - TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy)); - EXPECT_EQ( - context.credentials()->DebugString(), - kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString); + TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy)); + EXPECT_EQ( + context.credentials()->DebugString(), + kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString); } TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) { @@ -2146,9 +2146,9 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) { Status s = stub_->Echo(&context, request, &response); EXPECT_FALSE(s.ok()); EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED); - EXPECT_EQ( - context.credentials()->DebugString(), - kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString); + EXPECT_EQ( + context.credentials()->DebugString(), + kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString); } TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) { @@ -2169,10 +2169,10 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) { EXPECT_FALSE(s.ok()); EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE); EXPECT_EQ(s.error_message(), - TString("Getting metadata from plugin failed with error: ") + + TString("Getting metadata from plugin failed with error: ") + kTestCredsPluginErrorMsg); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedBlockingAuthMetadataPluginFailureCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedBlockingAuthMetadataPluginFailureCredsDebugString); } TEST_P(SecureEnd2endTest, CompositeCallCreds) { @@ -2204,8 +2204,8 @@ TEST_P(SecureEnd2endTest, CompositeCallCreds) { kMetadataKey1, kMetadataVal1)); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), kMetadataKey2, kMetadataVal2)); - EXPECT_EQ(context.credentials()->DebugString(), - kExpectedCompositeCallCredsDebugString); + EXPECT_EQ(context.credentials()->DebugString(), + kExpectedCompositeCallCredsDebugString); } TEST_P(SecureEnd2endTest, ClientAuthContext) { @@ -2274,10 +2274,10 @@ std::vector<TestScenario> CreateTestScenarios(bool use_proxy, bool test_inproc, bool test_callback_server) { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types; + std::vector<TString> credentials_types; - GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, - kClientChannelBackupPollIntervalMs); + 
GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, + kClientChannelBackupPollIntervalMs); #if TARGET_OS_IPHONE // Workaround Apple CFStream bug gpr_setenv("grpc_cfstream", "0"); diff --git a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc index 1026b78044..2f26d0716c 100644 --- a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc @@ -18,7 +18,7 @@ #include <memory> #include <mutex> -#include <thread> +#include <thread> #include <grpc/grpc.h> #include <grpc/support/time.h> @@ -171,7 +171,7 @@ class FilterEnd2endTest : public ::testing::Test { void client_fail(int i) { verify_ok(&cli_cq_, i, false); } void SendRpc(int num_rpcs) { - const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); + const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); for (int i = 0; i < num_rpcs; i++) { EchoRequest send_request; EchoRequest recv_request; @@ -185,7 +185,7 @@ class FilterEnd2endTest : public ::testing::Test { // The string needs to be long enough to test heap-based slice. send_request.set_message("Hello world. Hello world. Hello world."); - std::thread request_call([this]() { server_ok(4); }); + std::thread request_call([this]() { server_ok(4); }); std::unique_ptr<GenericClientAsyncReaderWriter> call = generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_); call->StartCall(tag(1)); @@ -202,7 +202,7 @@ class FilterEnd2endTest : public ::testing::Test { generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(), srv_cq_.get(), tag(4)); - request_call.join(); + request_call.join(); EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); EXPECT_EQ(kMethodName, srv_ctx.method()); ByteBuffer recv_buffer; @@ -239,7 +239,7 @@ class FilterEnd2endTest : public ::testing::Test { std::unique_ptr<grpc::GenericStub> generic_stub_; std::unique_ptr<Server> server_; AsyncGenericService generic_service_; - const TString server_host_; + const TString server_host_; std::ostringstream server_address_; }; @@ -267,7 +267,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) { EXPECT_EQ(0, GetConnectionCounterValue()); EXPECT_EQ(0, GetCallCounterValue()); - const TString kMethodName( + const TString kMethodName( "/grpc.cpp.test.util.EchoTestService/BidiStream"); EchoRequest send_request; EchoRequest recv_request; @@ -280,7 +280,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) { cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP); send_request.set_message("Hello"); - std::thread request_call([this]() { server_ok(2); }); + std::thread request_call([this]() { server_ok(2); }); std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream = generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_); cli_stream->StartCall(tag(1)); @@ -289,7 +289,7 @@ TEST_F(FilterEnd2endTest, SimpleBidiStreaming) { generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(), srv_cq_.get(), tag(2)); - request_call.join(); + request_call.join(); EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); EXPECT_EQ(kMethodName, srv_ctx.method()); diff --git a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc index b5fe65bc4b..3ee75952c0 100644 --- a/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/flaky_network_test.cc @@ -56,10 +56,10 @@ namespace testing { namespace { struct TestScenario { - 
TestScenario(const TString& creds_type, const TString& content) + TestScenario(const TString& creds_type, const TString& content) : credentials_type(creds_type), message_content(content) {} - const TString credentials_type; - const TString message_content; + const TString credentials_type; + const TString message_content; }; class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { @@ -191,7 +191,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { } std::shared_ptr<Channel> BuildChannel( - const TString& lb_policy_name, + const TString& lb_policy_name, ChannelArguments args = ChannelArguments()) { if (lb_policy_name.size() > 0) { args.SetLoadBalancingPolicyName(lb_policy_name); @@ -213,9 +213,9 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { ClientContext context; if (timeout_ms > 0) { context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); - // Allow an RPC to be canceled (for deadline exceeded) after it has - // reached the server. - request.mutable_param()->set_skip_cancelled_check(true); + // Allow an RPC to be canceled (for deadline exceeded) after it has + // reached the server. + request.mutable_param()->set_skip_cancelled_check(true); } // See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md for // details of wait-for-ready semantics @@ -243,16 +243,16 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { struct ServerData { int port_; - const TString creds_; + const TString creds_; std::unique_ptr<Server> server_; TestServiceImpl service_; std::unique_ptr<std::thread> thread_; bool server_ready_ = false; - ServerData(int port, const TString& creds) + ServerData(int port, const TString& creds) : port_(port), creds_(creds) {} - void Start(const TString& server_host) { + void Start(const TString& server_host) { gpr_log(GPR_INFO, "starting server on port %d", port_); std::mutex mu; std::unique_lock<std::mutex> lock(mu); @@ -264,7 +264,7 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { gpr_log(GPR_INFO, "server startup complete"); } - void Serve(const TString& server_host, std::mutex* mu, + void Serve(const TString& server_host, std::mutex* mu, std::condition_variable* cond) { std::ostringstream server_address; server_address << server_host << ":" << port_; @@ -308,10 +308,10 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { } private: - const TString server_host_; - const TString interface_; - const TString ipv4_address_; - const TString netmask_; + const TString server_host_; + const TString interface_; + const TString ipv4_address_; + const TString netmask_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; std::unique_ptr<ServerData> server_; const int SERVER_PORT = 32750; @@ -320,8 +320,8 @@ class FlakyNetworkTest : public ::testing::TestWithParam<TestScenario> { std::vector<TestScenario> CreateTestScenarios() { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types; - std::vector<TString> messages; + std::vector<TString> credentials_types; + std::vector<TString> messages; credentials_types.push_back(kInsecureCredentialsType); auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList(); @@ -331,7 +331,7 @@ std::vector<TestScenario> CreateTestScenarios() { messages.push_back("🖖"); for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) { - TString big_msg; + TString big_msg; for (size_t i = 0; i < k * 1024; ++i) { char c = 'a' + (i % 26); 
big_msg += c; @@ -552,7 +552,7 @@ TEST_P(FlakyNetworkTest, ServerRestartKeepaliveDisabled) { int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); auto result = RUN_ALL_TESTS(); return result; } diff --git a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc index 98ed104446..59eec49fb2 100644 --- a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc @@ -111,7 +111,7 @@ class GenericEnd2endTest : public ::testing::Test { } void SendRpc(int num_rpcs, bool check_deadline, gpr_timespec deadline) { - const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); + const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); for (int i = 0; i < num_rpcs; i++) { EchoRequest send_request; EchoRequest recv_request; @@ -133,14 +133,14 @@ class GenericEnd2endTest : public ::testing::Test { // Rather than using the original kMethodName, make a short-lived // copy to also confirm that we don't refer to this object beyond // the initial call preparation - const TString* method_name = new TString(kMethodName); + const TString* method_name = new TString(kMethodName); std::unique_ptr<GenericClientAsyncReaderWriter> call = generic_stub_->PrepareCall(&cli_ctx, *method_name, &cli_cq_); delete method_name; // Make sure that this is not needed after invocation - std::thread request_call([this]() { server_ok(4); }); + std::thread request_call([this]() { server_ok(4); }); call->StartCall(tag(1)); client_ok(1); std::unique_ptr<ByteBuffer> send_buffer = @@ -155,7 +155,7 @@ class GenericEnd2endTest : public ::testing::Test { generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(), srv_cq_.get(), tag(4)); - request_call.join(); + request_call.join(); EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); EXPECT_EQ(kMethodName, srv_ctx.method()); @@ -246,7 +246,7 @@ class GenericEnd2endTest : public ::testing::Test { std::unique_ptr<grpc::GenericStub> generic_stub_; std::unique_ptr<Server> server_; AsyncGenericService generic_service_; - const TString server_host_; + const TString server_host_; std::ostringstream server_address_; bool shutting_down_; bool shut_down_; @@ -266,7 +266,7 @@ TEST_F(GenericEnd2endTest, SequentialRpcs) { TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) { ResetStub(); const int num_rpcs = 10; - const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); + const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); for (int i = 0; i < num_rpcs; i++) { EchoRequest send_request; EchoRequest recv_request; @@ -283,7 +283,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) { std::unique_ptr<ByteBuffer> cli_send_buffer = SerializeToByteBuffer(&send_request); - std::thread request_call([this]() { server_ok(4); }); + std::thread request_call([this]() { server_ok(4); }); std::unique_ptr<GenericClientAsyncResponseReader> call = generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName, *cli_send_buffer.get(), &cli_cq_); @@ -294,7 +294,7 @@ TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) { generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(), srv_cq_.get(), tag(4)); - request_call.join(); + request_call.join(); EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); EXPECT_EQ(kMethodName, srv_ctx.method()); @@ -324,7 +324,7 @@ TEST_F(GenericEnd2endTest, 
SequentialUnaryRpcs) { TEST_F(GenericEnd2endTest, SimpleBidiStreaming) { ResetStub(); - const TString kMethodName( + const TString kMethodName( "/grpc.cpp.test.util.EchoTestService/BidiStream"); EchoRequest send_request; EchoRequest recv_request; @@ -337,7 +337,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) { cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP); send_request.set_message("Hello"); - std::thread request_call([this]() { server_ok(2); }); + std::thread request_call([this]() { server_ok(2); }); std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream = generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_); cli_stream->StartCall(tag(1)); @@ -345,7 +345,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) { generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(), srv_cq_.get(), tag(2)); - request_call.join(); + request_call.join(); EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); EXPECT_EQ(kMethodName, srv_ctx.method()); diff --git a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc index 82d1cdc4c9..6208dc2535 100644 --- a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc @@ -16,17 +16,17 @@ * */ -#include <deque> +#include <deque> #include <memory> #include <mutex> #include <set> #include <sstream> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <thread> -#include "y_absl/strings/str_cat.h" -#include "y_absl/strings/str_format.h" - +#include "y_absl/strings/str_cat.h" +#include "y_absl/strings/str_format.h" + #include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> @@ -39,16 +39,16 @@ #include <grpcpp/server_builder.h> #include "src/core/ext/filters/client_channel/backup_poller.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include "src/core/ext/filters/client_channel/server_address.h" #include "src/core/ext/filters/client_channel/service_config.h" #include "src/core/lib/gpr/env.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" -#include "src/core/lib/iomgr/parse_address.h" +#include "src/core/lib/iomgr/parse_address.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" -#include "src/core/lib/transport/authority_override.h" +#include "src/core/lib/transport/authority_override.h" #include "src/cpp/client/secure_credentials.h" #include "src/cpp/server/secure_server_credentials.h" @@ -81,7 +81,7 @@ using std::chrono::system_clock; -using grpc::lb::v1::LoadBalancer; +using grpc::lb::v1::LoadBalancer; using grpc::lb::v1::LoadBalanceRequest; using grpc::lb::v1::LoadBalanceResponse; @@ -89,13 +89,13 @@ namespace grpc { namespace testing { namespace { -constexpr char kDefaultServiceConfig[] = - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"grpclb\":{} }\n" - " ]\n" - "}"; - +constexpr char kDefaultServiceConfig[] = + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{} }\n" + " ]\n" + "}"; + template <typename ServiceType> class CountedService : public ServiceType { public: @@ -162,26 +162,26 @@ class BackendServiceImpl : public BackendService { void Shutdown() {} - std::set<TString> clients() { + std::set<TString> clients() { grpc::internal::MutexLock 
lock(&clients_mu_); return clients_; } private: - void AddClient(const TString& client) { + void AddClient(const TString& client) { grpc::internal::MutexLock lock(&clients_mu_); clients_.insert(client); } grpc::internal::Mutex mu_; grpc::internal::Mutex clients_mu_; - std::set<TString> clients_; + std::set<TString> clients_; }; -TString Ip4ToPackedString(const char* ip_str) { +TString Ip4ToPackedString(const char* ip_str) { struct in_addr ip4; GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1); - return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4)); + return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4)); } struct ClientStats { @@ -189,7 +189,7 @@ struct ClientStats { size_t num_calls_finished = 0; size_t num_calls_finished_with_client_failed_to_send = 0; size_t num_calls_finished_known_received = 0; - std::map<TString, size_t> drop_token_counts; + std::map<TString, size_t> drop_token_counts; ClientStats& operator+=(const ClientStats& other) { num_calls_started += other.num_calls_started; @@ -237,11 +237,11 @@ class BalancerServiceImpl : public BalancerService { if (!stream->Read(&request)) { goto done; - } else { - if (request.has_initial_request()) { - grpc::internal::MutexLock lock(&mu_); - service_names_.push_back(request.initial_request().name()); - } + } else { + if (request.has_initial_request()) { + grpc::internal::MutexLock lock(&mu_); + service_names_.push_back(request.initial_request().name()); + } } IncreaseRequestCount(); gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this, @@ -271,31 +271,31 @@ class BalancerServiceImpl : public BalancerService { if (client_load_reporting_interval_seconds_ > 0) { request.Clear(); - while (stream->Read(&request)) { + while (stream->Read(&request)) { gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'", this, request.DebugString().c_str()); GPR_ASSERT(request.has_client_stats()); - ClientStats load_report; - load_report.num_calls_started = + ClientStats load_report; + load_report.num_calls_started = request.client_stats().num_calls_started(); - load_report.num_calls_finished = + load_report.num_calls_finished = request.client_stats().num_calls_finished(); - load_report.num_calls_finished_with_client_failed_to_send = + load_report.num_calls_finished_with_client_failed_to_send = request.client_stats() .num_calls_finished_with_client_failed_to_send(); - load_report.num_calls_finished_known_received = + load_report.num_calls_finished_known_received = request.client_stats().num_calls_finished_known_received(); for (const auto& drop_token_count : request.client_stats().calls_finished_with_drop()) { - load_report - .drop_token_counts[drop_token_count.load_balance_token()] = + load_report + .drop_token_counts[drop_token_count.load_balance_token()] = drop_token_count.num_calls(); } - // We need to acquire the lock here in order to prevent the notify_one - // below from firing before its corresponding wait is executed. - grpc::internal::MutexLock lock(&mu_); - load_report_queue_.emplace_back(std::move(load_report)); - if (load_report_cond_ != nullptr) load_report_cond_->Signal(); + // We need to acquire the lock here in order to prevent the notify_one + // below from firing before its corresponding wait is executed. 
+ grpc::internal::MutexLock lock(&mu_); + load_report_queue_.emplace_back(std::move(load_report)); + if (load_report_cond_ != nullptr) load_report_cond_->Signal(); } } } @@ -313,7 +313,7 @@ class BalancerServiceImpl : public BalancerService { grpc::internal::MutexLock lock(&mu_); serverlist_done_ = false; responses_and_delays_.clear(); - load_report_queue_.clear(); + load_report_queue_.clear(); } void Shutdown() { @@ -323,7 +323,7 @@ class BalancerServiceImpl : public BalancerService { static LoadBalanceResponse BuildResponseForBackends( const std::vector<int>& backend_ports, - const std::map<TString, size_t>& drop_token_counts) { + const std::map<TString, size_t>& drop_token_counts) { LoadBalanceResponse response; for (const auto& drop_token_count : drop_token_counts) { for (size_t i = 0; i < drop_token_count.second; ++i) { @@ -337,24 +337,24 @@ class BalancerServiceImpl : public BalancerService { server->set_ip_address(Ip4ToPackedString("127.0.0.1")); server->set_port(backend_port); static int token_count = 0; - server->set_load_balance_token( - y_absl::StrFormat("token%03d", ++token_count)); + server->set_load_balance_token( + y_absl::StrFormat("token%03d", ++token_count)); } return response; } - ClientStats WaitForLoadReport() { + ClientStats WaitForLoadReport() { grpc::internal::MutexLock lock(&mu_); - grpc::internal::CondVar cv; - if (load_report_queue_.empty()) { - load_report_cond_ = &cv; - load_report_cond_->WaitUntil( - &mu_, [this] { return !load_report_queue_.empty(); }); - load_report_cond_ = nullptr; - } - ClientStats load_report = std::move(load_report_queue_.front()); - load_report_queue_.pop_front(); - return load_report; + grpc::internal::CondVar cv; + if (load_report_queue_.empty()) { + load_report_cond_ = &cv; + load_report_cond_->WaitUntil( + &mu_, [this] { return !load_report_queue_.empty(); }); + load_report_cond_ = nullptr; + } + ClientStats load_report = std::move(load_report_queue_.front()); + load_report_queue_.pop_front(); + return load_report; } void NotifyDoneWithServerlists() { @@ -365,11 +365,11 @@ class BalancerServiceImpl : public BalancerService { } } - std::vector<TString> service_names() { - grpc::internal::MutexLock lock(&mu_); - return service_names_; - } - + std::vector<TString> service_names() { + grpc::internal::MutexLock lock(&mu_); + return service_names_; + } + private: void SendResponse(Stream* stream, const LoadBalanceResponse& response, int delay_ms) { @@ -385,13 +385,13 @@ class BalancerServiceImpl : public BalancerService { const int client_load_reporting_interval_seconds_; std::vector<ResponseDelayPair> responses_and_delays_; - std::vector<TString> service_names_; - + std::vector<TString> service_names_; + grpc::internal::Mutex mu_; grpc::internal::CondVar serverlist_cond_; bool serverlist_done_ = false; - grpc::internal::CondVar* load_report_cond_ = nullptr; - std::deque<ClientStats> load_report_queue_; + grpc::internal::CondVar* load_report_cond_ = nullptr; + std::deque<ClientStats> load_report_queue_; }; class GrpclbEnd2endTest : public ::testing::Test { @@ -452,7 +452,7 @@ class GrpclbEnd2endTest : public ::testing::Test { void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); } void ResetStub(int fallback_timeout = 0, - const TString& expected_targets = "") { + const TString& expected_targets = "") { ChannelArguments args; if (fallback_timeout > 0) args.SetGrpclbFallbackTimeout(fallback_timeout); args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, @@ -543,72 +543,72 @@ class GrpclbEnd2endTest : public 
::testing::Test { struct AddressData { int port; - TString balancer_name; + TString balancer_name; }; - static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList( + static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList( const std::vector<AddressData>& address_data) { grpc_core::ServerAddressList addresses; for (const auto& addr : address_data) { - TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port); - grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); + TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port); + grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); - grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg( - addr.balancer_name.c_str()); - grpc_channel_args* args = - grpc_channel_args_copy_and_add(nullptr, &arg, 1); + grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg( + addr.balancer_name.c_str()); + grpc_channel_args* args = + grpc_channel_args_copy_and_add(nullptr, &arg, 1); addresses.emplace_back(address.addr, address.len, args); grpc_uri_destroy(lb_uri); } return addresses; } - static grpc_core::Resolver::Result MakeResolverResult( - const std::vector<AddressData>& balancer_address_data, - const std::vector<AddressData>& backend_address_data = {}, - const char* service_config_json = kDefaultServiceConfig) { - grpc_core::Resolver::Result result; - result.addresses = - CreateLbAddressesFromAddressDataList(backend_address_data); - grpc_error* error = GRPC_ERROR_NONE; - result.service_config = - grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error); - GPR_ASSERT(error == GRPC_ERROR_NONE); - grpc_core::ServerAddressList balancer_addresses = - CreateLbAddressesFromAddressDataList(balancer_address_data); - grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses); - result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1); - return result; - } - + static grpc_core::Resolver::Result MakeResolverResult( + const std::vector<AddressData>& balancer_address_data, + const std::vector<AddressData>& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { + grpc_core::Resolver::Result result; + result.addresses = + CreateLbAddressesFromAddressDataList(backend_address_data); + grpc_error* error = GRPC_ERROR_NONE; + result.service_config = + grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error); + GPR_ASSERT(error == GRPC_ERROR_NONE); + grpc_core::ServerAddressList balancer_addresses = + CreateLbAddressesFromAddressDataList(balancer_address_data); + grpc_arg arg = CreateGrpclbBalancerAddressesArg(&balancer_addresses); + result.args = grpc_channel_args_copy_and_add(nullptr, &arg, 1); + return result; + } + void SetNextResolutionAllBalancers( - const char* service_config_json = kDefaultServiceConfig) { + const char* service_config_json = kDefaultServiceConfig) { std::vector<AddressData> addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(addresses, {}, service_config_json); + SetNextResolution(addresses, {}, service_config_json); } - void SetNextResolution( - const std::vector<AddressData>& balancer_address_data, - const std::vector<AddressData>& backend_address_data = {}, - const char* service_config_json = kDefaultServiceConfig) { + void SetNextResolution( + 
const std::vector<AddressData>& balancer_address_data, + const std::vector<AddressData>& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { grpc_core::ExecCtx exec_ctx; - grpc_core::Resolver::Result result = MakeResolverResult( - balancer_address_data, backend_address_data, service_config_json); + grpc_core::Resolver::Result result = MakeResolverResult( + balancer_address_data, backend_address_data, service_config_json); response_generator_->SetResponse(std::move(result)); } void SetNextReresolutionResponse( - const std::vector<AddressData>& balancer_address_data, - const std::vector<AddressData>& backend_address_data = {}, - const char* service_config_json = kDefaultServiceConfig) { + const std::vector<AddressData>& balancer_address_data, + const std::vector<AddressData>& backend_address_data = {}, + const char* service_config_json = kDefaultServiceConfig) { grpc_core::ExecCtx exec_ctx; - grpc_core::Resolver::Result result = MakeResolverResult( - balancer_address_data, backend_address_data, service_config_json); + grpc_core::Resolver::Result result = MakeResolverResult( + balancer_address_data, backend_address_data, service_config_json); response_generator_->SetReresolutionResponse(std::move(result)); } @@ -629,17 +629,17 @@ class GrpclbEnd2endTest : public ::testing::Test { } Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000, - bool wait_for_ready = false, - const Status& expected_status = Status::OK) { + bool wait_for_ready = false, + const Status& expected_status = Status::OK) { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; request.set_message(kRequestMessage_); - if (!expected_status.ok()) { - auto* error = request.mutable_param()->mutable_expected_error(); - error->set_code(expected_status.error_code()); - error->set_error_message(expected_status.error_message()); - } + if (!expected_status.ok()) { + auto* error = request.mutable_param()->mutable_expected_error(); + error->set_code(expected_status.error_code()); + error->set_error_message(expected_status.error_message()); + } ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); @@ -667,12 +667,12 @@ class GrpclbEnd2endTest : public ::testing::Test { template <typename T> struct ServerThread { template <typename... Args> - explicit ServerThread(const TString& type, Args&&... args) + explicit ServerThread(const TString& type, Args&&... args) : port_(grpc_pick_unused_port_or_die()), type_(type), service_(std::forward<Args>(args)...) {} - void Start(const TString& server_host) { + void Start(const TString& server_host) { gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_); GPR_ASSERT(!running_); running_ = true; @@ -688,7 +688,7 @@ class GrpclbEnd2endTest : public ::testing::Test { gpr_log(GPR_INFO, "%s server startup complete", type_.c_str()); } - void Serve(const TString& server_host, grpc::internal::Mutex* mu, + void Serve(const TString& server_host, grpc::internal::Mutex* mu, grpc::internal::CondVar* cond) { // We need to acquire the lock here in order to prevent the notify_one // below from firing before its corresponding wait is executed. 
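The helpers above are only re-attributed by this commit, not changed: AddressData pairs a port with an optional balancer name, MakeResolverResult() packs balancer and backend addresses plus a service config into a fake grpc_core::Resolver::Result, and SetNextResolution() / SetNextReresolutionResponse() hand that result to the channel through the fake resolver response generator. A minimal sketch of how the tests below typically combine these helpers (the test name is hypothetical; every call reuses fixture members already visible in this diff):

```cpp
// Sketch only: relies on the GrpclbEnd2endTest / SingleBalancerTest fixture
// defined in this file; the test name is illustrative, not part of the diff.
TEST_F(SingleBalancerTest, HypotheticalVanillaFlow) {
  // Point the fake resolver at every balancer, using the default service config.
  SetNextResolutionAllBalancers();
  // Have balancer 0 return a serverlist containing all backends, with no delay.
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  // Wait for all backends to come online, then send one more RPC and check it.
  WaitForAllBackends();
  CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
                 /* wait_for_ready */ false);
}
```

Routing results through the fake resolver response generator keeps these tests hermetic: no real name resolution happens, and each test decides exactly when the channel sees a new set of balancer or backend addresses.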
@@ -715,14 +715,14 @@ class GrpclbEnd2endTest : public ::testing::Test { } const int port_; - TString type_; + TString type_; T service_; std::unique_ptr<Server> server_; std::unique_ptr<std::thread> thread_; bool running_ = false; }; - const TString server_host_; + const TString server_host_; const size_t num_backends_; const size_t num_balancers_; const int client_load_reporting_interval_seconds_; @@ -732,8 +732,8 @@ class GrpclbEnd2endTest : public ::testing::Test { std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> response_generator_; - const TString kRequestMessage_ = "Live long and prosper."; - const TString kApplicationTargetName_ = "application_target_name"; + const TString kRequestMessage_ = "Live long and prosper."; + const TString kApplicationTargetName_ = "application_target_name"; }; class SingleBalancerTest : public GrpclbEnd2endTest { @@ -768,22 +768,22 @@ TEST_F(SingleBalancerTest, Vanilla) { EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName()); } -TEST_F(SingleBalancerTest, ReturnServerStatus) { - SetNextResolutionAllBalancers(); - ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), - 0); - // We need to wait for all backends to come online. - WaitForAllBackends(); - // Send a request that the backend will fail, and make sure we get - // back the right status. - Status expected(StatusCode::INVALID_ARGUMENT, "He's dead, Jim!"); - Status actual = SendRpc(/*response=*/nullptr, /*timeout_ms=*/1000, - /*wait_for_ready=*/false, expected); - EXPECT_EQ(actual.error_code(), expected.error_code()); - EXPECT_EQ(actual.error_message(), expected.error_message()); -} - +TEST_F(SingleBalancerTest, ReturnServerStatus) { + SetNextResolutionAllBalancers(); + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + // We need to wait for all backends to come online. + WaitForAllBackends(); + // Send a request that the backend will fail, and make sure we get + // back the right status. + Status expected(StatusCode::INVALID_ARGUMENT, "He's dead, Jim!"); + Status actual = SendRpc(/*response=*/nullptr, /*timeout_ms=*/1000, + /*wait_for_ready=*/false, expected); + EXPECT_EQ(actual.error_code(), expected.error_code()); + EXPECT_EQ(actual.error_message(), expected.error_message()); +} + TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) { SetNextResolutionAllBalancers( "{\n" @@ -809,7 +809,7 @@ TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfigAndNoAddresses) { const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); - SetNextResolution({}, {}, + SetNextResolution({}, {}, "{\n" " \"loadBalancingConfig\":[\n" " { \"does_not_exist\":{} },\n" @@ -883,7 +883,7 @@ TEST_F(SingleBalancerTest, SwapChildPolicy) { EXPECT_EQ(backends_[i]->service_.request_count(), 0UL); } // Send new resolution that removes child policy from service config. - SetNextResolutionAllBalancers(); + SetNextResolutionAllBalancers(); WaitForAllBackends(); CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */); // Check that every backend saw the same number of requests. 
This verifies @@ -924,7 +924,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) { TEST_F(SingleBalancerTest, SecureNaming) { ResetStub(0, kApplicationTargetName_ + ";lb"); - SetNextResolution({AddressData{balancers_[0]->port_, "lb"}}); + SetNextResolution({AddressData{balancers_[0]->port_, "lb"}}); const size_t kNumRpcsPerAddress = 100; ScheduleResponseForBalancer( 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), @@ -956,7 +956,7 @@ TEST_F(SingleBalancerTest, SecureNamingDeathTest) { ASSERT_DEATH_IF_SUPPORTED( { ResetStub(0, kApplicationTargetName_ + ";lb"); - SetNextResolution({AddressData{balancers_[0]->port_, "woops"}}); + SetNextResolution({AddressData{balancers_[0]->port_, "woops"}}); channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1)); }, ""); @@ -1016,13 +1016,13 @@ TEST_F(SingleBalancerTest, Fallback) { const size_t kNumBackendsInResolution = backends_.size() / 2; ResetStub(kFallbackTimeoutMs); - std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(balancer_addresses, backend_addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( @@ -1085,13 +1085,13 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3; ResetStub(kFallbackTimeoutMs); - std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(balancer_addresses, backend_addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( @@ -1121,14 +1121,14 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { EXPECT_EQ(0U, backends_[i]->service_.request_count()); } - balancer_addresses.clear(); - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - backend_addresses.clear(); + balancer_addresses.clear(); + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + backend_addresses.clear(); for (size_t i = kNumBackendsInResolution; i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) { - backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - SetNextResolution(balancer_addresses, backend_addresses); + SetNextResolution(balancer_addresses, backend_addresses); // Wait until the resolution update has been processed and all the new // fallback backends are reachable. 
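The fallback hunks above and below all share one resolution shape: the fake result carries both balancer addresses and plain backend addresses, so the client can serve RPCs from the fallback backends whenever the balancer has not yet produced a serverlist. A condensed sketch of that setup, assuming the same fixture; the test name and the fallback timeout value are illustrative, while the helper calls are the ones shown in this diff:

```cpp
// Sketch only: fallback-style resolution for the GrpclbEnd2endTest fixture.
TEST_F(SingleBalancerTest, HypotheticalFallbackOnly) {
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  // One balancer address plus one ordinary backend address in the same result.
  std::vector<AddressData> balancer_addresses;
  balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
  std::vector<AddressData> backend_addresses;
  backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
  SetNextResolution(balancer_addresses, backend_addresses);
  // No serverlist is scheduled for the balancer, so after kFallbackTimeoutMs
  // the client falls back to the backend address taken from the resolver.
  CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
                 /* wait_for_ready */ false);
}
```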
@@ -1192,15 +1192,15 @@ TEST_F(SingleBalancerTest, // First two backends are fallback, last two are pointed to by balancer. const size_t kNumFallbackBackends = 2; const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends; - std::vector<AddressData> backend_addresses; + std::vector<AddressData> backend_addresses; for (size_t i = 0; i < kNumFallbackBackends; ++i) { - backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - std::vector<AddressData> balancer_addresses; + std::vector<AddressData> balancer_addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); + balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(balancer_addresses, backend_addresses); + SetNextResolution(balancer_addresses, backend_addresses); ScheduleResponseForBalancer(0, BalancerServiceImpl::BuildResponseForBackends( GetBackendPorts(kNumFallbackBackends), {}), @@ -1247,15 +1247,15 @@ TEST_F(SingleBalancerTest, // First two backends are fallback, last two are pointed to by balancer. const size_t kNumFallbackBackends = 2; const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends; - std::vector<AddressData> backend_addresses; + std::vector<AddressData> backend_addresses; for (size_t i = 0; i < kNumFallbackBackends; ++i) { - backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); + backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""}); } - std::vector<AddressData> balancer_addresses; + std::vector<AddressData> balancer_addresses; for (size_t i = 0; i < balancers_.size(); ++i) { - balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); + balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""}); } - SetNextResolution(balancer_addresses, backend_addresses); + SetNextResolution(balancer_addresses, backend_addresses); ScheduleResponseForBalancer(0, BalancerServiceImpl::BuildResponseForBackends( GetBackendPorts(kNumFallbackBackends), {}), @@ -1299,12 +1299,12 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return an unreachable balancer and one fallback backend. - std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back( - AddressData{grpc_pick_unused_port_or_die(), ""}); - std::vector<AddressData> backend_addresses; - backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); - SetNextResolution(balancer_addresses, backend_addresses); + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back( + AddressData{grpc_pick_unused_port_or_die(), ""}); + std::vector<AddressData> backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, @@ -1314,12 +1314,12 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) { TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); - // Return one balancer and one fallback backend. 
- std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; - backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); - SetNextResolution(balancer_addresses, backend_addresses); + // Return one balancer and one fallback backend. + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Balancer drops call without sending a serverlist. balancers_[0]->service_.NotifyDoneWithServerlists(); // Send RPC with deadline less than the fallback timeout and make sure it @@ -1328,49 +1328,49 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) { /* wait_for_ready */ false); } -TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) { - const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); - ResetStub(kFallbackTimeoutMs); - // Return one balancer and one fallback backend. - std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; - backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); - SetNextResolution(balancer_addresses, backend_addresses); - // Balancer explicitly tells client to fallback. - LoadBalanceResponse resp; - resp.mutable_fallback_response(); - ScheduleResponseForBalancer(0, resp, 0); - // Send RPC with deadline less than the fallback timeout and make sure it - // succeeds. - CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, - /* wait_for_ready */ false); -} - -TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) { - // Return one balancer and one fallback backend (backend 0). - std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; - backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); - SetNextResolution(balancer_addresses, backend_addresses); - // Balancer initially sends serverlist, then tells client to fall back, - // then sends the serverlist again. - // The serverlist points to backend 1. - LoadBalanceResponse serverlist_resp = - BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {}); - LoadBalanceResponse fallback_resp; - fallback_resp.mutable_fallback_response(); - ScheduleResponseForBalancer(0, serverlist_resp, 0); - ScheduleResponseForBalancer(0, fallback_resp, 100); - ScheduleResponseForBalancer(0, serverlist_resp, 100); - // Requests initially go to backend 1, then go to backend 0 in - // fallback mode, then go back to backend 1 when we exit fallback. - WaitForBackend(1); - WaitForBackend(0); - WaitForBackend(1); -} - +TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) { + const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); + ResetStub(kFallbackTimeoutMs); + // Return one balancer and one fallback backend. 
+ std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); + // Balancer explicitly tells client to fallback. + LoadBalanceResponse resp; + resp.mutable_fallback_response(); + ScheduleResponseForBalancer(0, resp, 0); + // Send RPC with deadline less than the fallback timeout and make sure it + // succeeds. + CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, + /* wait_for_ready */ false); +} + +TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) { + // Return one balancer and one fallback backend (backend 0). + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); + // Balancer initially sends serverlist, then tells client to fall back, + // then sends the serverlist again. + // The serverlist points to backend 1. + LoadBalanceResponse serverlist_resp = + BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {}); + LoadBalanceResponse fallback_resp; + fallback_resp.mutable_fallback_response(); + ScheduleResponseForBalancer(0, serverlist_resp, 0); + ScheduleResponseForBalancer(0, fallback_resp, 100); + ScheduleResponseForBalancer(0, serverlist_resp, 100); + // Requests initially go to backend 1, then go to backend 0 in + // fallback mode, then go back to backend 1 when we exit fallback. + WaitForBackend(1); + WaitForBackend(0); + WaitForBackend(1); +} + TEST_F(SingleBalancerTest, BackendsRestart) { SetNextResolutionAllBalancers(); const size_t kNumRpcsPerAddress = 100; @@ -1394,27 +1394,27 @@ TEST_F(SingleBalancerTest, BackendsRestart) { EXPECT_EQ(1U, balancers_[0]->service_.response_count()); } -TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) { - constexpr char kServiceConfigWithTarget[] = - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"grpclb\":{\n" - " \"serviceName\":\"test_service\"\n" - " }}\n" - " ]\n" - "}"; - - SetNextResolutionAllBalancers(kServiceConfigWithTarget); - ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), - 0); - // Make sure that trying to connect works without a call. - channel_->GetState(true /* try_to_connect */); - // We need to wait for all backends to come online. - WaitForAllBackends(); - EXPECT_EQ(balancers_[0]->service_.service_names().back(), "test_service"); -} - +TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) { + constexpr char kServiceConfigWithTarget[] = + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"grpclb\":{\n" + " \"serviceName\":\"test_service\"\n" + " }}\n" + " ]\n" + "}"; + + SetNextResolutionAllBalancers(kServiceConfigWithTarget); + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0); + // Make sure that trying to connect works without a call. + channel_->GetState(true /* try_to_connect */); + // We need to wait for all backends to come online. 
+ WaitForAllBackends(); + EXPECT_EQ(balancers_[0]->service_.service_names().back(), "test_service"); +} + class UpdatesTest : public GrpclbEnd2endTest { public: UpdatesTest() : GrpclbEnd2endTest(4, 3, 0) {} @@ -1450,7 +1450,7 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); std::vector<AddressData> addresses; - addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1509,9 +1509,9 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); std::vector<AddressData> addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); - addresses.emplace_back(AddressData{balancers_[2]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[2]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1529,8 +1529,8 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { balancers_[0]->service_.NotifyDoneWithServerlists(); addresses.clear(); - addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 2 DONE =========="); @@ -1550,7 +1550,7 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { std::vector<AddressData> addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); SetNextResolution(addresses); const std::vector<int> first_backend{GetBackendPorts()[0]}; const std::vector<int> second_backend{GetBackendPorts()[1]}; @@ -1590,7 +1590,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { EXPECT_EQ(0U, balancers_[2]->service_.response_count()); addresses.clear(); - addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolution(addresses); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -1627,20 +1627,20 @@ TEST_F(UpdatesTest, ReresolveDeadBackend) { ResetStub(500); // The first resolution contains the addresses of a balancer that never // responds, and a fallback backend. 
- std::vector<AddressData> balancer_addresses; - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - std::vector<AddressData> backend_addresses; - backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); - SetNextResolution(balancer_addresses, backend_addresses); + std::vector<AddressData> balancer_addresses; + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + std::vector<AddressData> backend_addresses; + backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""}); + SetNextResolution(balancer_addresses, backend_addresses); // Ask channel to connect to trigger resolver creation. channel_->GetState(true); // The re-resolution result will contain the addresses of the same balancer // and a new fallback backend. - balancer_addresses.clear(); - balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); - backend_addresses.clear(); - backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""}); - SetNextReresolutionResponse(balancer_addresses, backend_addresses); + balancer_addresses.clear(); + balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + backend_addresses.clear(); + backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""}); + SetNextReresolutionResponse(balancer_addresses, backend_addresses); // Start servers and send 10 RPCs per server. gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); @@ -1686,20 +1686,20 @@ class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest { }; TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) { - const std::vector<int> first_backend{GetBackendPorts()[0]}; - const std::vector<int> second_backend{GetBackendPorts()[1]}; - ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); - ScheduleResponseForBalancer( - 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); - + const std::vector<int> first_backend{GetBackendPorts()[0]}; + const std::vector<int> second_backend{GetBackendPorts()[1]}; + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + ScheduleResponseForBalancer( + 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + // Ask channel to connect to trigger resolver creation. channel_->GetState(true); std::vector<AddressData> addresses; - addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[0]->port_, ""}); SetNextResolution(addresses); addresses.clear(); - addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); + addresses.emplace_back(AddressData{balancers_[1]->port_, ""}); SetNextReresolutionResponse(addresses); // Start servers and send 10 RPCs per server. @@ -1869,11 +1869,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) { // and sent a single response. 
EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - ClientStats client_stats; - do { - client_stats += WaitForLoadReports(); - } while (client_stats.num_calls_finished != - kNumRpcsPerAddress * num_backends_ + num_ok); + ClientStats client_stats; + do { + client_stats += WaitForLoadReports(); + } while (client_stats.num_calls_finished != + kNumRpcsPerAddress * num_backends_ + num_ok); EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, client_stats.num_calls_started); EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, diff --git a/contrib/libs/grpc/test/cpp/end2end/health/ya.make b/contrib/libs/grpc/test/cpp/end2end/health/ya.make index ce19862115..7330129b73 100644 --- a/contrib/libs/grpc/test/cpp/end2end/health/ya.make +++ b/contrib/libs/grpc/test/cpp/end2end/health/ya.make @@ -6,7 +6,7 @@ OWNER( ) ADDINCL( - ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc + ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc ${ARCADIA_ROOT}/contrib/libs/grpc ) diff --git a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc index d39c79afe7..516b3a4c81 100644 --- a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc @@ -59,7 +59,7 @@ class CustomHealthCheckService : public HealthCheckServiceInterface { : impl_(impl) { impl_->SetStatus("", HealthCheckResponse::SERVING); } - void SetServingStatus(const TString& service_name, + void SetServingStatus(const TString& service_name, bool serving) override { impl_->SetStatus(service_name, serving ? HealthCheckResponse::SERVING : HealthCheckResponse::NOT_SERVING); @@ -130,7 +130,7 @@ class HealthServiceEnd2endTest : public ::testing::Test { } // When the expected_status is NOT OK, we do not care about the response. - void SendHealthCheckRpc(const TString& service_name, + void SendHealthCheckRpc(const TString& service_name, const Status& expected_status) { EXPECT_FALSE(expected_status.ok()); SendHealthCheckRpc(service_name, expected_status, @@ -138,7 +138,7 @@ class HealthServiceEnd2endTest : public ::testing::Test { } void SendHealthCheckRpc( - const TString& service_name, const Status& expected_status, + const TString& service_name, const Status& expected_status, HealthCheckResponse::ServingStatus expected_serving_status) { HealthCheckRequest request; request.set_service(service_name); @@ -154,9 +154,9 @@ class HealthServiceEnd2endTest : public ::testing::Test { void VerifyHealthCheckService() { HealthCheckServiceInterface* service = server_->GetHealthCheckService(); EXPECT_TRUE(service != nullptr); - const TString kHealthyService("healthy_service"); - const TString kUnhealthyService("unhealthy_service"); - const TString kNotRegisteredService("not_registered"); + const TString kHealthyService("healthy_service"); + const TString kUnhealthyService("unhealthy_service"); + const TString kNotRegisteredService("not_registered"); service->SetServingStatus(kHealthyService, true); service->SetServingStatus(kUnhealthyService, false); @@ -181,7 +181,7 @@ class HealthServiceEnd2endTest : public ::testing::Test { } void VerifyHealthCheckServiceStreaming() { - const TString kServiceName("service_name"); + const TString kServiceName("service_name"); HealthCheckServiceInterface* service = server_->GetHealthCheckService(); // Start Watch for service. 
ClientContext context; @@ -217,10 +217,10 @@ class HealthServiceEnd2endTest : public ::testing::Test { void VerifyHealthCheckServiceShutdown() { HealthCheckServiceInterface* service = server_->GetHealthCheckService(); EXPECT_TRUE(service != nullptr); - const TString kHealthyService("healthy_service"); - const TString kUnhealthyService("unhealthy_service"); - const TString kNotRegisteredService("not_registered"); - const TString kNewService("add_after_shutdown"); + const TString kHealthyService("healthy_service"); + const TString kUnhealthyService("unhealthy_service"); + const TString kNotRegisteredService("not_registered"); + const TString kNewService("add_after_shutdown"); service->SetServingStatus(kHealthyService, true); service->SetServingStatus(kUnhealthyService, false); @@ -305,7 +305,7 @@ TEST_F(HealthServiceEnd2endTest, DefaultHealthService) { VerifyHealthCheckServiceStreaming(); // The default service has a size limit of the service name. - const TString kTooLongServiceName(201, 'x'); + const TString kTooLongServiceName(201, 'x'); SendHealthCheckRpc(kTooLongServiceName, Status(StatusCode::INVALID_ARGUMENT, "")); } diff --git a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc index d9fd316d0f..e4ebee8e93 100644 --- a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc @@ -43,12 +43,12 @@ namespace grpc { namespace testing { namespace { -#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL -using ::grpc::experimental::CallbackGenericService; -using ::grpc::experimental::GenericCallbackServerContext; -using ::grpc::experimental::ServerGenericBidiReactor; -#endif - +#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL +using ::grpc::experimental::CallbackGenericService; +using ::grpc::experimental::GenericCallbackServerContext; +using ::grpc::experimental::ServerGenericBidiReactor; +#endif + void* tag(int i) { return (void*)static_cast<intptr_t>(i); } bool VerifyReturnSuccess(CompletionQueue* cq, int i) { @@ -251,10 +251,10 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> { : false; } - bool SetUpServer(::grpc::Service* service1, ::grpc::Service* service2, - AsyncGenericService* generic_service, - CallbackGenericService* callback_generic_service, - int max_message_size = 0) { + bool SetUpServer(::grpc::Service* service1, ::grpc::Service* service2, + AsyncGenericService* generic_service, + CallbackGenericService* callback_generic_service, + int max_message_size = 0) { int port = grpc_pick_unused_port_or_die(); server_address_ << "localhost:" << port; @@ -273,12 +273,12 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> { builder.RegisterAsyncGenericService(generic_service); } if (callback_generic_service) { -#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL - builder.RegisterCallbackGenericService(callback_generic_service); -#else +#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL + builder.RegisterCallbackGenericService(callback_generic_service); +#else builder.experimental().RegisterCallbackGenericService( callback_generic_service); -#endif +#endif } if (max_message_size != 0) { @@ -354,7 +354,7 @@ class HybridEnd2endTest : public ::testing::TestWithParam<bool> { void SendSimpleClientStreaming() { EchoRequest send_request; EchoResponse recv_response; - TString expected_message; + TString expected_message; ClientContext cli_ctx; cli_ctx.set_wait_for_ready(true); send_request.set_message("Hello"); @@ -417,7 +417,7 @@ class HybridEnd2endTest : public 
::testing::TestWithParam<bool> { EchoResponse response; ClientContext context; context.set_wait_for_ready(true); - TString msg("hello"); + TString msg("hello"); auto stream = stub_->BidiStream(&context); @@ -661,7 +661,7 @@ class SplitResponseStreamDupPkg gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz); GPR_ASSERT(stream->Read(&req)); for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - resp.set_message(req.message() + ToString(i) + "_dup"); + resp.set_message(req.message() + ToString(i) + "_dup"); GPR_ASSERT(stream->Write(resp)); } return Status::OK; @@ -701,7 +701,7 @@ class FullySplitStreamedDupPkg gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz); GPR_ASSERT(stream->Read(&req)); for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - resp.set_message(req.message() + ToString(i) + "_dup"); + resp.set_message(req.message() + ToString(i) + "_dup"); GPR_ASSERT(stream->Write(resp)); } return Status::OK; @@ -753,7 +753,7 @@ class FullyStreamedDupPkg : public duplicate::EchoTestService::StreamedService { gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz); GPR_ASSERT(stream->Read(&req)); for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - resp.set_message(req.message() + ToString(i) + "_dup"); + resp.set_message(req.message() + ToString(i) + "_dup"); GPR_ASSERT(stream->Write(resp)); } return Status::OK; @@ -816,15 +816,15 @@ TEST_F(HybridEnd2endTest, GenericEcho) { TEST_P(HybridEnd2endTest, CallbackGenericEcho) { EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service; - class GenericEchoService : public CallbackGenericService { + class GenericEchoService : public CallbackGenericService { private: - ServerGenericBidiReactor* CreateReactor( - GenericCallbackServerContext* context) override { + ServerGenericBidiReactor* CreateReactor( + GenericCallbackServerContext* context) override { EXPECT_EQ(context->method(), "/grpc.testing.EchoTestService/Echo"); - gpr_log(GPR_DEBUG, "Constructor of generic service %d", - static_cast<int>(context->deadline().time_since_epoch().count())); + gpr_log(GPR_DEBUG, "Constructor of generic service %d", + static_cast<int>(context->deadline().time_since_epoch().count())); - class Reactor : public ServerGenericBidiReactor { + class Reactor : public ServerGenericBidiReactor { public: Reactor() { StartRead(&request_); } diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc index 87bfe91f1a..ff88953651 100644 --- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc +++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc @@ -17,7 +17,7 @@ */ #include "test/cpp/end2end/interceptors_util.h" -#include <util/string/cast.h> +#include <util/string/cast.h> namespace grpc { namespace testing { @@ -47,7 +47,7 @@ void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel) { ctx.AddMetadata("testkey", "testvalue"); req.set_message("Hello"); EchoResponse resp; - string expected_resp = ""; + string expected_resp = ""; auto writer = stub->RequestStream(&ctx, &resp); for (int i = 0; i < kNumStreamingMessages; i++) { writer->Write(req); @@ -84,10 +84,10 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) { EchoRequest req; EchoResponse resp; ctx.AddMetadata("testkey", "testvalue"); - req.mutable_param()->set_echo_metadata(true); + req.mutable_param()->set_echo_metadata(true); auto stream = stub->BidiStream(&ctx); for (auto i = 0; i < kNumStreamingMessages; i++) 
{ - req.set_message(TString("Hello") + ::ToString(i)); + req.set_message(TString("Hello") + ::ToString(i)); stream->Write(req); stream->Read(&resp); EXPECT_EQ(req.message(), resp.message()); @@ -97,61 +97,61 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) { EXPECT_EQ(s.ok(), true); } -void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel) { - auto stub = grpc::testing::EchoTestService::NewStub(channel); - CompletionQueue cq; - EchoRequest send_request; - EchoResponse recv_response; - Status recv_status; - ClientContext cli_ctx; - - send_request.set_message("Hello"); - cli_ctx.AddMetadata("testkey", "testvalue"); - std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( - stub->AsyncEcho(&cli_ctx, send_request, &cq)); - response_reader->Finish(&recv_response, &recv_status, tag(1)); - Verifier().Expect(1, true).Verify(&cq); - EXPECT_EQ(send_request.message(), recv_response.message()); - EXPECT_TRUE(recv_status.ok()); -} - -void MakeAsyncCQClientStreamingCall( - const std::shared_ptr<Channel>& /*channel*/) { - // TODO(yashykt) : Fill this out -} - -void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel) { - auto stub = grpc::testing::EchoTestService::NewStub(channel); - CompletionQueue cq; - EchoRequest send_request; - EchoResponse recv_response; - Status recv_status; - ClientContext cli_ctx; - - cli_ctx.AddMetadata("testkey", "testvalue"); - send_request.set_message("Hello"); - std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream( - stub->AsyncResponseStream(&cli_ctx, send_request, &cq, tag(1))); - Verifier().Expect(1, true).Verify(&cq); - // Read the expected number of messages - for (int i = 0; i < kNumStreamingMessages; i++) { - cli_stream->Read(&recv_response, tag(2)); - Verifier().Expect(2, true).Verify(&cq); - ASSERT_EQ(recv_response.message(), send_request.message()); - } - // The next read should fail - cli_stream->Read(&recv_response, tag(3)); - Verifier().Expect(3, false).Verify(&cq); - // Get the status - cli_stream->Finish(&recv_status, tag(4)); - Verifier().Expect(4, true).Verify(&cq); - EXPECT_TRUE(recv_status.ok()); -} - -void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& /*channel*/) { - // TODO(yashykt) : Fill this out -} - +void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel) { + auto stub = grpc::testing::EchoTestService::NewStub(channel); + CompletionQueue cq; + EchoRequest send_request; + EchoResponse recv_response; + Status recv_status; + ClientContext cli_ctx; + + send_request.set_message("Hello"); + cli_ctx.AddMetadata("testkey", "testvalue"); + std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader( + stub->AsyncEcho(&cli_ctx, send_request, &cq)); + response_reader->Finish(&recv_response, &recv_status, tag(1)); + Verifier().Expect(1, true).Verify(&cq); + EXPECT_EQ(send_request.message(), recv_response.message()); + EXPECT_TRUE(recv_status.ok()); +} + +void MakeAsyncCQClientStreamingCall( + const std::shared_ptr<Channel>& /*channel*/) { + // TODO(yashykt) : Fill this out +} + +void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel) { + auto stub = grpc::testing::EchoTestService::NewStub(channel); + CompletionQueue cq; + EchoRequest send_request; + EchoResponse recv_response; + Status recv_status; + ClientContext cli_ctx; + + cli_ctx.AddMetadata("testkey", "testvalue"); + send_request.set_message("Hello"); + std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream( + stub->AsyncResponseStream(&cli_ctx, send_request, &cq, 
tag(1))); + Verifier().Expect(1, true).Verify(&cq); + // Read the expected number of messages + for (int i = 0; i < kNumStreamingMessages; i++) { + cli_stream->Read(&recv_response, tag(2)); + Verifier().Expect(2, true).Verify(&cq); + ASSERT_EQ(recv_response.message(), send_request.message()); + } + // The next read should fail + cli_stream->Read(&recv_response, tag(3)); + Verifier().Expect(3, false).Verify(&cq); + // Get the status + cli_stream->Finish(&recv_status, tag(4)); + Verifier().Expect(4, true).Verify(&cq); + EXPECT_TRUE(recv_status.ok()); +} + +void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& /*channel*/) { + // TODO(yashykt) : Fill this out +} + void MakeCallbackCall(const std::shared_ptr<Channel>& channel) { auto stub = grpc::testing::EchoTestService::NewStub(channel); ClientContext ctx; @@ -187,7 +187,7 @@ bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map, return false; } -bool CheckMetadata(const std::multimap<TString, TString>& map, +bool CheckMetadata(const std::multimap<TString, TString>& map, const string& key, const string& value) { for (const auto& pair : map) { if (pair.first == key.c_str() && pair.second == value.c_str()) { diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h index f332e87762..c95170bbbc 100644 --- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h +++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h @@ -102,16 +102,16 @@ class EchoTestServiceStreamingImpl : public EchoTestService::Service { public: ~EchoTestServiceStreamingImpl() override {} - Status Echo(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override { - auto client_metadata = context->client_metadata(); - for (const auto& pair : client_metadata) { - context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second)); - } - response->set_message(request->message()); - return Status::OK; - } - + Status Echo(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + auto client_metadata = context->client_metadata(); + for (const auto& pair : client_metadata) { + context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second)); + } + response->set_message(request->message()); + return Status::OK; + } + Status BidiStream( ServerContext* context, grpc::ServerReaderWriter<EchoResponse, EchoRequest>* stream) override { @@ -138,7 +138,7 @@ class EchoTestServiceStreamingImpl : public EchoTestService::Service { } EchoRequest req; - string response_str = ""; + string response_str = ""; while (reader->Read(&req)) { response_str += req.message(); } @@ -172,20 +172,20 @@ void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel); void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel); -void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel); - -void MakeAsyncCQClientStreamingCall(const std::shared_ptr<Channel>& channel); - -void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel); - -void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& channel); - +void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel); + +void MakeAsyncCQClientStreamingCall(const std::shared_ptr<Channel>& channel); + +void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel); + +void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& channel); + void MakeCallbackCall(const std::shared_ptr<Channel>& channel); bool CheckMetadata(const 
std::multimap<grpc::string_ref, grpc::string_ref>& map, const string& key, const string& value); -bool CheckMetadata(const std::multimap<TString, TString>& map, +bool CheckMetadata(const std::multimap<TString, TString>& map, const string& key, const string& value); std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> diff --git a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc index a6a42d82fc..4bf755206e 100644 --- a/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/message_allocator_end2end_test.cc @@ -17,7 +17,7 @@ */ #include <algorithm> -#include <atomic> +#include <atomic> #include <condition_variable> #include <functional> #include <memory> @@ -94,11 +94,11 @@ enum class Protocol { INPROC, TCP }; class TestScenario { public: - TestScenario(Protocol protocol, const TString& creds_type) + TestScenario(Protocol protocol, const TString& creds_type) : protocol(protocol), credentials_type(creds_type) {} void Log() const; Protocol protocol; - const TString credentials_type; + const TString credentials_type; }; static std::ostream& operator<<(std::ostream& out, @@ -146,13 +146,13 @@ class MessageAllocatorEnd2endTestBase server_ = builder.BuildAndStart(); } - void DestroyServer() { - if (server_) { - server_->Shutdown(); - server_.reset(); - } - } - + void DestroyServer() { + if (server_) { + server_->Shutdown(); + server_.reset(); + } + } + void ResetStub() { ChannelArguments args; auto channel_creds = GetCredentialsProvider()->GetChannelCredentials( @@ -172,22 +172,22 @@ class MessageAllocatorEnd2endTestBase } void TearDown() override { - DestroyServer(); + DestroyServer(); if (picked_port_ > 0) { grpc_recycle_unused_port(picked_port_); } } void SendRpcs(int num_rpcs) { - TString test_string(""); + TString test_string(""); for (int i = 0; i < num_rpcs; i++) { EchoRequest request; EchoResponse response; ClientContext cli_ctx; - test_string += TString(1024, 'x'); + test_string += TString(1024, 'x'); request.set_message(test_string); - TString val; + TString val; cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP); std::mutex mu; @@ -236,8 +236,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase { class MessageHolderImpl : public experimental::MessageHolder<EchoRequest, EchoResponse> { public: - MessageHolderImpl(std::atomic_int* request_deallocation_count, - std::atomic_int* messages_deallocation_count) + MessageHolderImpl(std::atomic_int* request_deallocation_count, + std::atomic_int* messages_deallocation_count) : request_deallocation_count_(request_deallocation_count), messages_deallocation_count_(messages_deallocation_count) { set_request(new EchoRequest); @@ -262,8 +262,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase { } private: - std::atomic_int* const request_deallocation_count_; - std::atomic_int* const messages_deallocation_count_; + std::atomic_int* const request_deallocation_count_; + std::atomic_int* const messages_deallocation_count_; }; experimental::MessageHolder<EchoRequest, EchoResponse>* AllocateMessages() override { @@ -272,8 +272,8 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase { &messages_deallocation_count); } int allocation_count = 0; - std::atomic_int request_deallocation_count{0}; - std::atomic_int messages_deallocation_count{0}; + std::atomic_int request_deallocation_count{0}; + std::atomic_int 
messages_deallocation_count{0}; }; }; @@ -284,9 +284,9 @@ TEST_P(SimpleAllocatorTest, SimpleRpc) { CreateServer(allocator.get()); ResetStub(); SendRpcs(kRpcCount); - // messages_deallocaton_count is updated in Release after server side OnDone. - // Destroy server to make sure it has been updated. - DestroyServer(); + // messages_deallocaton_count is updated in Release after server side OnDone. + // Destroy server to make sure it has been updated. + DestroyServer(); EXPECT_EQ(kRpcCount, allocator->allocation_count); EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count); EXPECT_EQ(0, allocator->request_deallocation_count); @@ -309,9 +309,9 @@ TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) { CreateServer(allocator.get()); ResetStub(); SendRpcs(kRpcCount); - // messages_deallocaton_count is updated in Release after server side OnDone. - // Destroy server to make sure it has been updated. - DestroyServer(); + // messages_deallocaton_count is updated in Release after server side OnDone. + // Destroy server to make sure it has been updated. + DestroyServer(); EXPECT_EQ(kRpcCount, allocator->allocation_count); EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count); EXPECT_EQ(kRpcCount, allocator->request_deallocation_count); @@ -336,9 +336,9 @@ TEST_P(SimpleAllocatorTest, RpcWithReleaseRequest) { CreateServer(allocator.get()); ResetStub(); SendRpcs(kRpcCount); - // messages_deallocaton_count is updated in Release after server side OnDone. - // Destroy server to make sure it has been updated. - DestroyServer(); + // messages_deallocaton_count is updated in Release after server side OnDone. + // Destroy server to make sure it has been updated. + DestroyServer(); EXPECT_EQ(kRpcCount, allocator->allocation_count); EXPECT_EQ(kRpcCount, allocator->messages_deallocation_count); EXPECT_EQ(0, allocator->request_deallocation_count); @@ -389,7 +389,7 @@ TEST_P(ArenaAllocatorTest, SimpleRpc) { std::vector<TestScenario> CreateTestScenarios(bool test_insecure) { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types{ + std::vector<TString> credentials_types{ GetCredentialsProvider()->GetSecureCredentialsTypeList()}; auto insec_ok = [] { // Only allow insecure credentials type when it is registered with the diff --git a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc index da67357f93..a3d61c4e98 100644 --- a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc @@ -42,14 +42,14 @@ #include <iostream> -using grpc::testing::DefaultReactorTestPeer; -using grpc::testing::EchoRequest; -using grpc::testing::EchoResponse; -using grpc::testing::EchoTestService; -using grpc::testing::MockClientReaderWriter; -using std::vector; -using std::chrono::system_clock; -using ::testing::_; +using grpc::testing::DefaultReactorTestPeer; +using grpc::testing::EchoRequest; +using grpc::testing::EchoResponse; +using grpc::testing::EchoTestService; +using grpc::testing::MockClientReaderWriter; +using std::vector; +using std::chrono::system_clock; +using ::testing::_; using ::testing::AtLeast; using ::testing::DoAll; using ::testing::Invoke; @@ -81,8 +81,8 @@ class FakeClient { EchoResponse response; ClientContext context; - TString msg("hello"); - TString exp(msg); + TString msg("hello"); + TString exp(msg); std::unique_ptr<ClientWriterInterface<EchoRequest>> cstream = stub_->RequestStream(&context, &response); @@ -111,7 +111,7 @@ class FakeClient { 
std::unique_ptr<ClientReaderInterface<EchoResponse>> cstream = stub_->ResponseStream(&context, request); - TString exp = ""; + TString exp = ""; EXPECT_TRUE(cstream->Read(&response)); exp.append(response.message() + " "); @@ -129,7 +129,7 @@ class FakeClient { EchoRequest request; EchoResponse response; ClientContext context; - TString msg("hello"); + TString msg("hello"); std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>> stream = stub_->BidiStream(&context); @@ -256,7 +256,7 @@ class TestServiceImpl : public EchoTestService::Service { ServerReader<EchoRequest>* reader, EchoResponse* response) override { EchoRequest request; - TString resp(""); + TString resp(""); while (reader->Read(&request)) { gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); resp.append(request.message()); @@ -268,8 +268,8 @@ class TestServiceImpl : public EchoTestService::Service { Status ResponseStream(ServerContext* /*context*/, const EchoRequest* request, ServerWriter<EchoResponse>* writer) override { EchoResponse response; - vector<TString> tokens = split(request->message()); - for (const TString& token : tokens) { + vector<TString> tokens = split(request->message()); + for (const TString& token : tokens) { response.set_message(token); writer->Write(response); } @@ -290,9 +290,9 @@ class TestServiceImpl : public EchoTestService::Service { } private: - const vector<TString> split(const TString& input) { - TString buff(""); - vector<TString> result; + const vector<TString> split(const TString& input) { + TString buff(""); + vector<TString> result; for (auto n : input) { if (n != ' ') { diff --git a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc index bb54f17f03..4be070ec71 100644 --- a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc @@ -39,18 +39,18 @@ #ifdef GRPC_POSIX_SOCKET // Thread-local variable to so that only polls from this test assert -// non-blocking (not polls from resolver, timer thread, etc), and only when the -// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for -// picking a port or other reasons). -GPR_TLS_DECL(g_is_nonblocking_poll); +// non-blocking (not polls from resolver, timer thread, etc), and only when the +// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for +// picking a port or other reasons). +GPR_TLS_DECL(g_is_nonblocking_poll); namespace { int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds, int timeout) { - // Only assert that this poll should have zero timeout if we're in the - // middle of a zero-timeout CQ Next. - if (gpr_tls_get(&g_is_nonblocking_poll)) { + // Only assert that this poll should have zero timeout if we're in the + // middle of a zero-timeout CQ Next. + if (gpr_tls_get(&g_is_nonblocking_poll)) { GPR_ASSERT(timeout == 0); } return poll(pfds, nfds, timeout); @@ -78,17 +78,17 @@ class NonblockingTest : public ::testing::Test { } bool LoopForTag(void** tag, bool* ok) { - // Temporarily set the thread-local nonblocking poll flag so that the polls - // caused by this loop are indeed sent by the library with zero timeout. - intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll); - gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(true)); + // Temporarily set the thread-local nonblocking poll flag so that the polls + // caused by this loop are indeed sent by the library with zero timeout. 
+ intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll); + gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(true)); for (;;) { auto r = cq_->AsyncNext(tag, ok, gpr_time_0(GPR_CLOCK_REALTIME)); if (r == CompletionQueue::SHUTDOWN) { - gpr_tls_set(&g_is_nonblocking_poll, orig_val); + gpr_tls_set(&g_is_nonblocking_poll, orig_val); return false; } else if (r == CompletionQueue::GOT_EVENT) { - gpr_tls_set(&g_is_nonblocking_poll, orig_val); + gpr_tls_set(&g_is_nonblocking_poll, orig_val); return true; } } @@ -198,17 +198,17 @@ int main(int argc, char** argv) { grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); - gpr_tls_init(&g_is_nonblocking_poll); - - // Start the nonblocking poll thread-local variable as false because the - // thread that issues RPCs starts by picking a port (which has non-zero - // timeout). - gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(false)); - + gpr_tls_init(&g_is_nonblocking_poll); + + // Start the nonblocking poll thread-local variable as false because the + // thread that issues RPCs starts by picking a port (which has non-zero + // timeout). + gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(false)); + int ret = RUN_ALL_TESTS(); - gpr_tls_destroy(&g_is_nonblocking_poll); + gpr_tls_destroy(&g_is_nonblocking_poll); return ret; -#else // GRPC_POSIX_SOCKET - return 0; -#endif // GRPC_POSIX_SOCKET +#else // GRPC_POSIX_SOCKET + return 0; +#endif // GRPC_POSIX_SOCKET } diff --git a/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc index 4c1c768bea..b69d1dd2be 100644 --- a/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/port_sharing_end2end_test.cc @@ -58,7 +58,7 @@ namespace { class TestScenario { public: TestScenario(bool server_port, bool pending_data, - const TString& creds_type) + const TString& creds_type) : server_has_port(server_port), queue_pending_data(pending_data), credentials_type(creds_type) {} @@ -67,7 +67,7 @@ class TestScenario { bool server_has_port; // whether tcp server should read some data before handoff bool queue_pending_data; - const TString credentials_type; + const TString credentials_type; }; static std::ostream& operator<<(std::ostream& out, @@ -115,7 +115,7 @@ class TestTcpServer { gpr_log(GPR_INFO, "Test TCP server started at %s", address_.c_str()); } - const TString& address() { return address_; } + const TString& address() { return address_; } void SetAcceptor( std::unique_ptr<experimental::ExternalConnectionAcceptor> acceptor) { @@ -156,8 +156,8 @@ class TestTcpServer { private: void OnConnect(grpc_endpoint* tcp, grpc_pollset* /*accepting_pollset*/, grpc_tcp_server_acceptor* acceptor) { - TString peer(grpc_endpoint_get_peer(tcp)); - gpr_log(GPR_INFO, "Got incoming connection! from %s", peer.c_str()); + TString peer(grpc_endpoint_get_peer(tcp)); + gpr_log(GPR_INFO, "Got incoming connection! 
from %s", peer.c_str()); EXPECT_FALSE(acceptor->external_connection); listener_fd_ = grpc_tcp_server_port_fd( acceptor->from_server, acceptor->port_index, acceptor->fd_index); @@ -194,7 +194,7 @@ class TestTcpServer { grpc_closure on_fd_released_; std::thread running_thread_; int port_ = -1; - TString address_; + TString address_; std::unique_ptr<experimental::ExternalConnectionAcceptor> connection_acceptor_; test_tcp_server tcp_server_; @@ -309,7 +309,7 @@ static void SendRpc(EchoTestService::Stub* stub, int num_rpcs) { std::vector<TestScenario> CreateTestScenarios() { std::vector<TestScenario> scenarios; - std::vector<TString> credentials_types; + std::vector<TString> credentials_types; #if TARGET_OS_IPHONE // Workaround Apple CFStream bug diff --git a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc index db14932d0c..d79b33da70 100644 --- a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc @@ -47,7 +47,7 @@ class ProtoServerReflectionTest : public ::testing::Test { ref_desc_pool_ = protobuf::DescriptorPool::generated_pool(); ServerBuilder builder; - TString server_address = "localhost:" + to_string(port_); + TString server_address = "localhost:" + to_string(port_); builder.AddListeningPort(server_address, InsecureServerCredentials()); server_ = builder.BuildAndStart(); } @@ -67,7 +67,7 @@ class ProtoServerReflectionTest : public ::testing::Test { return strs.str(); } - void CompareService(const TString& service) { + void CompareService(const TString& service) { const protobuf::ServiceDescriptor* service_desc = desc_pool_->FindServiceByName(service); const protobuf::ServiceDescriptor* ref_service_desc = @@ -89,7 +89,7 @@ class ProtoServerReflectionTest : public ::testing::Test { } } - void CompareMethod(const TString& method) { + void CompareMethod(const TString& method) { const protobuf::MethodDescriptor* method_desc = desc_pool_->FindMethodByName(method); const protobuf::MethodDescriptor* ref_method_desc = @@ -102,7 +102,7 @@ class ProtoServerReflectionTest : public ::testing::Test { CompareType(method_desc->output_type()->full_name()); } - void CompareType(const TString& type) { + void CompareType(const TString& type) { if (known_types_.find(type) != known_types_.end()) { return; } @@ -130,7 +130,7 @@ class ProtoServerReflectionTest : public ::testing::Test { TEST_F(ProtoServerReflectionTest, CheckResponseWithLocalDescriptorPool) { ResetStub(); - std::vector<TString> services; + std::vector<TString> services; desc_db_->GetServices(&services); // The service list has at least one service (reflection servcie). 
EXPECT_TRUE(services.size() > 0); diff --git a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc index 4adbb0e506..004902cad3 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc @@ -52,7 +52,7 @@ class TestServerBuilderPlugin : public ServerBuilderPlugin { register_service_ = false; } - TString name() override { return PLUGIN_NAME; } + TString name() override { return PLUGIN_NAME; } void InitServer(ServerInitializer* si) override { init_server_is_called_ = true; @@ -63,7 +63,7 @@ class TestServerBuilderPlugin : public ServerBuilderPlugin { void Finish(ServerInitializer* /*si*/) override { finish_is_called_ = true; } - void ChangeArguments(const TString& /*name*/, void* /*value*/) override { + void ChangeArguments(const TString& /*name*/, void* /*value*/) override { change_arguments_is_called_ = true; } @@ -123,10 +123,10 @@ std::unique_ptr<ServerBuilderPlugin> CreateTestServerBuilderPlugin() { // Force AddServerBuilderPlugin() to be called at static initialization time. struct StaticTestPluginInitializer { - StaticTestPluginInitializer() { - ::grpc::ServerBuilder::InternalAddPluginFactory( - &CreateTestServerBuilderPlugin); - } + StaticTestPluginInitializer() { + ::grpc::ServerBuilder::InternalAddPluginFactory( + &CreateTestServerBuilderPlugin); + } } static_plugin_initializer_test_; // When the param boolean is true, the ServerBuilder plugin will be added at the @@ -168,7 +168,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> { } void StartServer() { - TString server_address = "localhost:" + to_string(port_); + TString server_address = "localhost:" + to_string(port_); builder_->AddListeningPort(server_address, InsecureServerCredentials()); // we run some tests without a service, and for those we need to supply a // frequently polled completion queue diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc index 92e37b1d41..3616d680f9 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc @@ -38,7 +38,7 @@ using grpc::testing::EchoRequest; using grpc::testing::EchoResponse; using std::chrono::system_clock; -static TString g_root; +static TString g_root; namespace grpc { namespace testing { @@ -94,7 +94,7 @@ class CrashTest : public ::testing::Test { protected: CrashTest() {} - std::unique_ptr<Server> CreateServerAndClient(const TString& mode) { + std::unique_ptr<Server> CreateServerAndClient(const TString& mode) { auto port = grpc_pick_unused_port_or_die(); std::ostringstream addr_stream; addr_stream << "localhost:" << port; @@ -146,9 +146,9 @@ TEST_F(CrashTest, BidiStream) { } // namespace grpc int main(int argc, char** argv) { - TString me = argv[0]; + TString me = argv[0]; auto lslash = me.rfind('/'); - if (lslash != TString::npos) { + if (lslash != TString::npos) { g_root = me.substr(0, lslash); } else { g_root = "."; diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc index 317283b94b..202fb2836c 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc @@ -20,7 +20,7 @@ #include <iostream> #include <memory> #include <sstream> -#include <util/generic/string.h> 
+#include <util/generic/string.h> #include <grpc/support/log.h> #include <grpcpp/channel.h> diff --git a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc index 9dc8230326..0f340516b0 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc @@ -152,7 +152,7 @@ class ServerEarlyReturnTest : public ::testing::Test { auto stream = stub_->BidiStream(&context); for (int i = 0; i < 20; i++) { - request.set_message(TString("hello") + ToString(i)); + request.set_message(TString("hello") + ToString(i)); bool write_ok = stream->Write(request); bool read_ok = stream->Read(&response); if (i < 10) { @@ -189,7 +189,7 @@ class ServerEarlyReturnTest : public ::testing::Test { auto stream = stub_->RequestStream(&context, &response); for (int i = 0; i < 20; i++) { - request.set_message(TString("hello") + ToString(i)); + request.set_message(TString("hello") + ToString(i)); bool written = stream->Write(request); if (i < 10) { EXPECT_TRUE(written); diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make index ca91767643..161176f141 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make +++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make @@ -6,7 +6,7 @@ OWNER( ) ADDINCL( - ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc + ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc ${ARCADIA_ROOT}/contrib/libs/grpc ) diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc index d48f54e175..6d2dc772ef 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc @@ -155,7 +155,7 @@ class SyncSendMessageTester : public experimental::Interceptor { string old_msg = static_cast<const EchoRequest*>(methods->GetSendMessage())->message(); EXPECT_EQ(old_msg.find("Hello"), 0u); - new_msg_.set_message(TString("World" + old_msg).c_str()); + new_msg_.set_message(TString("World" + old_msg).c_str()); methods->ModifySendMessage(&new_msg_); } methods->Proceed(); @@ -183,7 +183,7 @@ class SyncSendMessageVerifier : public experimental::Interceptor { if (methods->QueryInterceptionHookPoint( experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) { // Make sure that the changes made in SyncSendMessageTester persisted - string old_msg = + string old_msg = static_cast<const EchoRequest*>(methods->GetSendMessage())->message(); EXPECT_EQ(old_msg.find("World"), 0u); @@ -217,7 +217,7 @@ void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) { ctx.AddMetadata("testkey", "testvalue"); auto stream = stub->BidiStream(&ctx); for (auto i = 0; i < 10; i++) { - req.set_message("Hello" + ::ToString(i)); + req.set_message("Hello" + ::ToString(i)); stream->Write(req); stream->Read(&resp); EXPECT_EQ(req.message(), resp.message()); @@ -233,7 +233,7 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test { int port = 5004; // grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ::ToString(port); + server_address_ = "localhost:" + ::ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); @@ -259,7 +259,7 @@ class ServerInterceptorsEnd2endSyncUnaryTest : public 
::testing::Test { builder.experimental().SetInterceptorCreators(std::move(creators)); server_ = builder.BuildAndStart(); } - TString server_address_; + TString server_address_; TestServiceImpl service_; std::unique_ptr<Server> server_; }; @@ -280,7 +280,7 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test { int port = 5005; // grpc_pick_unused_port_or_die(); ServerBuilder builder; - server_address_ = "localhost:" + ::ToString(port); + server_address_ = "localhost:" + ::ToString(port); builder.AddListeningPort(server_address_, InsecureServerCredentials()); builder.RegisterService(&service_); @@ -303,7 +303,7 @@ class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test { builder.experimental().SetInterceptorCreators(std::move(creators)); server_ = builder.BuildAndStart(); } - TString server_address_; + TString server_address_; EchoTestServiceStreamingImpl service_; std::unique_ptr<Server> server_; }; @@ -343,7 +343,7 @@ class ServerInterceptorsAsyncEnd2endTest : public ::testing::Test {}; TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) { DummyInterceptor::Reset(); int port = 5006; // grpc_pick_unused_port_or_die(); - string server_address = "localhost:" + ::ToString(port); + string server_address = "localhost:" + ::ToString(port); ServerBuilder builder; EchoTestService::AsyncService service; builder.AddListeningPort(server_address, InsecureServerCredentials()); @@ -416,7 +416,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) { TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) { DummyInterceptor::Reset(); int port = 5007; // grpc_pick_unused_port_or_die(); - string server_address = "localhost:" + ::ToString(port); + string server_address = "localhost:" + ::ToString(port); ServerBuilder builder; EchoTestService::AsyncService service; builder.AddListeningPort(server_address, InsecureServerCredentials()); @@ -499,7 +499,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) { TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) { DummyInterceptor::Reset(); int port = 5008; // grpc_pick_unused_port_or_die(); - string server_address = "localhost:" + ::ToString(port); + string server_address = "localhost:" + ::ToString(port); ServerBuilder builder; AsyncGenericService service; builder.AddListeningPort(server_address, InsecureServerCredentials()); @@ -521,7 +521,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) { grpc::CreateChannel(server_address, InsecureChannelCredentials()); GenericStub generic_stub(channel); - const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); + const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); EchoRequest send_request; EchoRequest recv_request; EchoResponse send_response; @@ -536,8 +536,8 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) { send_request.set_message("Hello"); cli_ctx.AddMetadata("testkey", "testvalue"); - CompletionQueue* cq = srv_cq.get(); - std::thread request_call([cq]() { Verifier().Expect(4, true).Verify(cq); }); + CompletionQueue* cq = srv_cq.get(); + std::thread request_call([cq]() { Verifier().Expect(4, true).Verify(cq); }); std::unique_ptr<GenericClientAsyncReaderWriter> call = generic_stub.PrepareCall(&cli_ctx, kMethodName, &cli_cq); call->StartCall(tag(1)); @@ -553,7 +553,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) { service.RequestCall(&srv_ctx, &stream, srv_cq.get(), srv_cq.get(), tag(4)); - request_call.join(); + request_call.join(); EXPECT_EQ(kMethodName, srv_ctx.method()); 
EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue")); srv_ctx.AddTrailingMetadata("testkey", "testvalue"); @@ -607,7 +607,7 @@ TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) { TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) { DummyInterceptor::Reset(); int port = 5009; // grpc_pick_unused_port_or_die(); - string server_address = "localhost:" + ::ToString(port); + string server_address = "localhost:" + ::ToString(port); ServerBuilder builder; builder.AddListeningPort(server_address, InsecureServerCredentials()); std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>> @@ -659,7 +659,7 @@ class ServerInterceptorsSyncUnimplementedEnd2endTest : public ::testing::Test { TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) { DummyInterceptor::Reset(); int port = 5010; // grpc_pick_unused_port_or_die(); - string server_address = "localhost:" + ::ToString(port); + string server_address = "localhost:" + ::ToString(port); ServerBuilder builder; TestServiceImpl service; builder.RegisterService(&service); diff --git a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc index d8c1b598e8..13833cf66c 100644 --- a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc @@ -33,7 +33,7 @@ #include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "test/core/util/port.h" -#include "test/core/util/test_config.h" +#include "test/core/util/test_config.h" namespace grpc { namespace testing { @@ -71,7 +71,7 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test { protected: void SetUp() override { server_address_ = - "localhost:" + ToString(grpc_pick_unused_port_or_die()); + "localhost:" + ToString(grpc_pick_unused_port_or_die()); server_ = ServerBuilder() .AddListeningPort(server_address_, InsecureServerCredentials()) @@ -91,11 +91,11 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test { server_thread_.join(); } - void ClientMakeEchoCalls(const TString& lb_id, const TString& lb_tag, - const TString& message, size_t num_requests) { + void ClientMakeEchoCalls(const TString& lb_id, const TString& lb_tag, + const TString& message, size_t num_requests) { auto stub = EchoTestService::NewStub( grpc::CreateChannel(server_address_, InsecureChannelCredentials())); - TString lb_token = lb_id + lb_tag; + TString lb_token = lb_id + lb_tag; for (int i = 0; i < num_requests; ++i) { ClientContext ctx; if (!lb_token.empty()) ctx.AddMetadata(GRPC_LB_TOKEN_MD_KEY, lb_token); @@ -114,7 +114,7 @@ class ServerLoadReportingEnd2endTest : public ::testing::Test { } } - TString server_address_; + TString server_address_; std::unique_ptr<Server> server_; std::thread server_thread_; EchoTestServiceImpl echo_service_; @@ -139,7 +139,7 @@ TEST_F(ServerLoadReportingEnd2endTest, BasicReport) { gpr_log(GPR_INFO, "Initial request sent."); ::grpc::lb::v1::LoadReportResponse response; stream->Read(&response); - const TString& lb_id = response.initial_response().load_balancer_id(); + const TString& lb_id = response.initial_response().load_balancer_id(); gpr_log(GPR_INFO, "Initial response received (lb_id: %s).", lb_id.c_str()); ClientMakeEchoCalls(lb_id, "LB_TAG", kOkMessage, 1); while (true) { @@ -186,7 +186,7 @@ TEST_F(ServerLoadReportingEnd2endTest, BasicReport) { } // namespace grpc 
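// A minimal client-side sketch of the pattern used by ClientMakeEchoCalls in
// the hunk above: each echo RPC carries the load-balancer token as request
// metadata (the test passes GRPC_LB_TOKEN_MD_KEY as the key) so the server-side
// load reporter can attribute the call to an LB stream. This is an illustrative
// sketch rather than an excerpt of the test; the metadata key is taken as a
// parameter here to keep the sketch self-contained.
#include <grpcpp/grpcpp.h>
#include <util/generic/string.h>
#include "src/proto/grpc/testing/echo.grpc.pb.h"

static grpc::Status TaggedEcho(const TString& server_address,
                               const TString& token_key,
                               const TString& lb_token,
                               const TString& message,
                               grpc::testing::EchoResponse* response) {
  auto stub = grpc::testing::EchoTestService::NewStub(
      grpc::CreateChannel(server_address, grpc::InsecureChannelCredentials()));
  grpc::ClientContext ctx;
  // An empty token means the call goes out untagged, mirroring the test helper.
  if (!lb_token.empty()) ctx.AddMetadata(token_key, lb_token);
  grpc::testing::EchoRequest request;
  request.set_message(message);
  return stub->Echo(&ctx, request, response);
}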
int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc index 391c4b735a..cee33343c1 100644 --- a/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/service_config_end2end_test.cc @@ -21,11 +21,11 @@ #include <mutex> #include <random> #include <set> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <thread> -#include "y_absl/strings/str_cat.h" - +#include "y_absl/strings/str_cat.h" + #include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/atm.h> @@ -48,7 +48,7 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" -#include "src/core/lib/iomgr/parse_address.h" +#include "src/core/lib/iomgr/parse_address.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" #include "src/cpp/client/secure_credentials.h" @@ -96,13 +96,13 @@ class MyTestServiceImpl : public TestServiceImpl { request_count_ = 0; } - std::set<TString> clients() { + std::set<TString> clients() { grpc::internal::MutexLock lock(&clients_mu_); return clients_; } private: - void AddClient(const TString& client) { + void AddClient(const TString& client) { grpc::internal::MutexLock lock(&clients_mu_); clients_.insert(client); } @@ -110,7 +110,7 @@ class MyTestServiceImpl : public TestServiceImpl { grpc::internal::Mutex mu_; int request_count_; grpc::internal::Mutex clients_mu_; - std::set<TString> clients_; + std::set<TString> clients_; }; class ServiceConfigEnd2endTest : public ::testing::Test { @@ -169,8 +169,8 @@ class ServiceConfigEnd2endTest : public ::testing::Test { grpc_core::Resolver::Result BuildFakeResults(const std::vector<int>& ports) { grpc_core::Resolver::Result result; for (const int& port : ports) { - TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); - grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); + TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); + grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); @@ -190,16 +190,16 @@ class ServiceConfigEnd2endTest : public ::testing::Test { void SetNextResolutionValidServiceConfig(const std::vector<int>& ports) { grpc_core::ExecCtx exec_ctx; grpc_core::Resolver::Result result = BuildFakeResults(ports); - result.service_config = grpc_core::ServiceConfig::Create( - nullptr, "{}", &result.service_config_error); + result.service_config = grpc_core::ServiceConfig::Create( + nullptr, "{}", &result.service_config_error); response_generator_->SetResponse(result); } void SetNextResolutionInvalidServiceConfig(const std::vector<int>& ports) { grpc_core::ExecCtx exec_ctx; grpc_core::Resolver::Result result = BuildFakeResults(ports); - result.service_config = grpc_core::ServiceConfig::Create( - nullptr, "{", &result.service_config_error); + result.service_config = grpc_core::ServiceConfig::Create( + nullptr, "{", &result.service_config_error); response_generator_->SetResponse(result); } @@ -207,8 +207,8 @@ class ServiceConfigEnd2endTest : public ::testing::Test { const char* svc_cfg) { 
grpc_core::ExecCtx exec_ctx; grpc_core::Resolver::Result result = BuildFakeResults(ports); - result.service_config = grpc_core::ServiceConfig::Create( - nullptr, svc_cfg, &result.service_config_error); + result.service_config = grpc_core::ServiceConfig::Create( + nullptr, svc_cfg, &result.service_config_error); response_generator_->SetResponse(result); } @@ -245,9 +245,9 @@ class ServiceConfigEnd2endTest : public ::testing::Test { std::shared_ptr<Channel> BuildChannelWithInvalidDefaultServiceConfig() { ChannelArguments args; - EXPECT_THAT(grpc::experimental::ValidateServiceConfigJSON( - InvalidDefaultServiceConfig()), - ::testing::HasSubstr("JSON parse error")); + EXPECT_THAT(grpc::experimental::ValidateServiceConfigJSON( + InvalidDefaultServiceConfig()), + ::testing::HasSubstr("JSON parse error")); args.SetServiceConfigJSON(InvalidDefaultServiceConfig()); args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, response_generator_.get()); @@ -305,7 +305,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test { port_ = port > 0 ? port : grpc_pick_unused_port_or_die(); } - void Start(const TString& server_host) { + void Start(const TString& server_host) { gpr_log(GPR_INFO, "starting server on port %d", port_); started_ = true; grpc::internal::Mutex mu; @@ -318,7 +318,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test { gpr_log(GPR_INFO, "server startup complete"); } - void Serve(const TString& server_host, grpc::internal::Mutex* mu, + void Serve(const TString& server_host, grpc::internal::Mutex* mu, grpc::internal::CondVar* cond) { std::ostringstream server_address; server_address << server_host << ":" << port_; @@ -340,7 +340,7 @@ class ServiceConfigEnd2endTest : public ::testing::Test { started_ = false; } - void SetServingStatus(const TString& service, bool serving) { + void SetServingStatus(const TString& service, bool serving) { server_->GetHealthCheckService()->SetServingStatus(service, serving); } }; @@ -422,12 +422,12 @@ class ServiceConfigEnd2endTest : public ::testing::Test { return "{\"version\": \"invalid_default\""; } - const TString server_host_; + const TString server_host_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; std::vector<std::unique_ptr<ServerData>> servers_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> response_generator_; - const TString kRequestMessage_; + const TString kRequestMessage_; std::shared_ptr<ChannelCredentials> creds_; }; @@ -437,7 +437,7 @@ TEST_F(ServiceConfigEnd2endTest, NoServiceConfigTest) { auto stub = BuildStub(channel); SetNextResolutionNoServiceConfig(GetServersPorts()); CheckRpcSendOk(stub, DEBUG_LOCATION); - EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str()); + EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str()); } TEST_F(ServiceConfigEnd2endTest, NoServiceConfigWithDefaultConfigTest) { @@ -480,7 +480,7 @@ TEST_F(ServiceConfigEnd2endTest, EXPECT_STREQ(ValidServiceConfigV1(), channel->GetServiceConfigJSON().c_str()); SetNextResolutionNoServiceConfig(GetServersPorts()); CheckRpcSendOk(stub, DEBUG_LOCATION); - EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str()); + EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str()); } TEST_F(ServiceConfigEnd2endTest, @@ -542,7 +542,7 @@ TEST_F(ServiceConfigEnd2endTest, NoServiceConfigAfterInvalidServiceConfigTest) { CheckRpcSendFailure(stub); SetNextResolutionNoServiceConfig(GetServersPorts()); CheckRpcSendOk(stub, DEBUG_LOCATION); - EXPECT_STREQ("{}", channel->GetServiceConfigJSON().c_str()); + EXPECT_STREQ("{}", 
channel->GetServiceConfigJSON().c_str()); } TEST_F(ServiceConfigEnd2endTest, diff --git a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc index 3b7489da07..3aa7a766c4 100644 --- a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc @@ -68,7 +68,7 @@ class ShutdownTest : public ::testing::TestWithParam<string> { } std::unique_ptr<Server> SetUpServer(const int port) { - TString server_address = "localhost:" + to_string(port); + TString server_address = "localhost:" + to_string(port); ServerBuilder builder; auto server_creds = @@ -117,7 +117,7 @@ class ShutdownTest : public ::testing::TestWithParam<string> { }; std::vector<string> GetAllCredentialsTypeList() { - std::vector<TString> credentials_types; + std::vector<TString> credentials_types; if (GetCredentialsProvider()->GetChannelCredentials(kInsecureCredentialsType, nullptr) != nullptr) { credentials_types.push_back(kInsecureCredentialsType); @@ -128,7 +128,7 @@ std::vector<string> GetAllCredentialsTypeList() { } GPR_ASSERT(!credentials_types.empty()); - TString credentials_type_list("credentials types:"); + TString credentials_type_list("credentials types:"); for (const string& type : credentials_types) { credentials_type_list.append(" " + type); } diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc index 06c6981db7..5b212cba31 100644 --- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc +++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc @@ -64,7 +64,7 @@ Status HealthCheckServiceImpl::Watch( } void HealthCheckServiceImpl::SetStatus( - const TString& service_name, + const TString& service_name, HealthCheckResponse::ServingStatus status) { std::lock_guard<std::mutex> lock(mu_); if (shutdown_) { diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h index 24f475dfde..d370e4693a 100644 --- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h +++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h @@ -39,7 +39,7 @@ class HealthCheckServiceImpl : public health::v1::Health::Service { Status Watch(ServerContext* context, const health::v1::HealthCheckRequest* request, ServerWriter<health::v1::HealthCheckResponse>* writer) override; - void SetStatus(const TString& service_name, + void SetStatus(const TString& service_name, health::v1::HealthCheckResponse::ServingStatus status); void SetAll(health::v1::HealthCheckResponse::ServingStatus status); @@ -48,7 +48,7 @@ class HealthCheckServiceImpl : public health::v1::Health::Service { private: std::mutex mu_; bool shutdown_ = false; - std::map<const TString, health::v1::HealthCheckResponse::ServingStatus> + std::map<const TString, health::v1::HealthCheckResponse::ServingStatus> status_map_; }; diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc index 62b805632c..078977e824 100644 --- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc +++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc @@ -19,12 +19,12 @@ #include "test/cpp/end2end/test_service_impl.h" #include <grpc/support/log.h> -#include <grpcpp/alarm.h> +#include <grpcpp/alarm.h> #include <grpcpp/security/credentials.h> #include <grpcpp/server_context.h> #include 
<gtest/gtest.h> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <thread> #include "src/proto/grpc/testing/echo.grpc.pb.h" @@ -34,7 +34,7 @@ using std::chrono::system_clock; namespace grpc { namespace testing { -namespace internal { +namespace internal { // When echo_deadline is requested, deadline seen in the ServerContext is set in // the response in seconds. @@ -49,9 +49,9 @@ void MaybeEchoDeadline(experimental::ServerContextBase* context, } } -void CheckServerAuthContext(const experimental::ServerContextBase* context, - const TString& expected_transport_security_type, - const TString& expected_client_identity) { +void CheckServerAuthContext(const experimental::ServerContextBase* context, + const TString& expected_transport_security_type, + const TString& expected_client_identity) { std::shared_ptr<const AuthContext> auth_ctx = context->auth_context(); std::vector<grpc::string_ref> tst = auth_ctx->FindPropertyValues("transport_security_type"); @@ -65,7 +65,7 @@ void CheckServerAuthContext(const experimental::ServerContextBase* context, auto identity = auth_ctx->GetPeerIdentity(); EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity.c_str(), ToString(identity[0])); + EXPECT_EQ(expected_client_identity.c_str(), ToString(identity[0])); } } @@ -73,7 +73,7 @@ void CheckServerAuthContext(const experimental::ServerContextBase* context, // key-value pair. Returns -1 if the pair wasn't found. int MetadataMatchCount( const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, - const TString& key, const TString& value) { + const TString& key, const TString& value) { int count = 0; for (const auto& metadatum : metadata) { if (ToString(metadatum.first) == key && @@ -118,11 +118,11 @@ void ServerTryCancel(ServerContext* context) { void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { EXPECT_FALSE(context->IsCancelled()); context->TryCancel(); - gpr_log(GPR_INFO, - "Server called TryCancelNonblocking() to cancel the request"); + gpr_log(GPR_INFO, + "Server called TryCancelNonblocking() to cancel the request"); } -} // namespace internal +} // namespace internal experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( experimental::CallbackServerContext* context, const EchoRequest* request, @@ -135,38 +135,38 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( : service_(service), ctx_(ctx), req_(request), resp_(response) { // It should be safe to call IsCancelled here, even though we don't know // the result. Call it asynchronously to see if we trigger any data races. - // Join it in OnDone (technically that could be blocking but shouldn't be - // for very long). + // Join it in OnDone (technically that could be blocking but shouldn't be + // for very long). async_cancel_check_ = std::thread([this] { (void)ctx_->IsCancelled(); }); - started_ = true; - - if (request->has_param() && - request->param().server_notify_client_when_started()) { - service->signaller_.SignalClientThatRpcStarted(); - // Block on the "wait to continue" decision in a different thread since - // we can't tie up an EM thread with blocking events. We can join it in - // OnDone since it would definitely be done by then. 
- rpc_wait_thread_ = std::thread([this] { - service_->signaller_.ServerWaitToContinue(); - StartRpc(); - }); - } else { - StartRpc(); - } - } - - void StartRpc() { - if (req_->has_param() && req_->param().server_sleep_us() > 0) { + started_ = true; + + if (request->has_param() && + request->param().server_notify_client_when_started()) { + service->signaller_.SignalClientThatRpcStarted(); + // Block on the "wait to continue" decision in a different thread since + // we can't tie up an EM thread with blocking events. We can join it in + // OnDone since it would definitely be done by then. + rpc_wait_thread_ = std::thread([this] { + service_->signaller_.ServerWaitToContinue(); + StartRpc(); + }); + } else { + StartRpc(); + } + } + + void StartRpc() { + if (req_->has_param() && req_->param().server_sleep_us() > 0) { // Set an alarm for that much time alarm_.experimental().Set( gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_micros(req_->param().server_sleep_us(), - GPR_TIMESPAN)), + gpr_time_from_micros(req_->param().server_sleep_us(), + GPR_TIMESPAN)), [this](bool ok) { NonDelayed(ok); }); - return; + return; } - NonDelayed(true); + NonDelayed(true); } void OnSendInitialMetadataDone(bool ok) override { EXPECT_TRUE(ok); @@ -176,25 +176,25 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( EXPECT_TRUE(started_); EXPECT_TRUE(ctx_->IsCancelled()); on_cancel_invoked_ = true; - std::lock_guard<std::mutex> l(cancel_mu_); - cancel_cv_.notify_one(); + std::lock_guard<std::mutex> l(cancel_mu_); + cancel_cv_.notify_one(); } void OnDone() override { if (req_->has_param() && req_->param().echo_metadata_initially()) { EXPECT_TRUE(initial_metadata_sent_); } EXPECT_EQ(ctx_->IsCancelled(), on_cancel_invoked_); - // Validate that finishing with a non-OK status doesn't cause cancellation - if (req_->has_param() && req_->param().has_expected_error()) { - EXPECT_FALSE(on_cancel_invoked_); - } + // Validate that finishing with a non-OK status doesn't cause cancellation + if (req_->has_param() && req_->param().has_expected_error()) { + EXPECT_FALSE(on_cancel_invoked_); + } async_cancel_check_.join(); - if (rpc_wait_thread_.joinable()) { - rpc_wait_thread_.join(); - } - if (finish_when_cancelled_.joinable()) { - finish_when_cancelled_.join(); - } + if (rpc_wait_thread_.joinable()) { + rpc_wait_thread_.join(); + } + if (finish_when_cancelled_.joinable()) { + finish_when_cancelled_.join(); + } delete this; } @@ -215,7 +215,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( error.error_message(), error.binary_error_details())); return; } - int server_try_cancel = internal::GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, ctx_->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel != DO_NOT_CANCEL) { // Since this is a unary RPC, by the time this server handler is called, @@ -225,11 +225,11 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( EXPECT_FALSE(ctx_->IsCancelled()); ctx_->TryCancel(); gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - FinishWhenCancelledAsync(); + FinishWhenCancelledAsync(); return; } resp_->set_message(req_->message()); - internal::MaybeEchoDeadline(ctx_, req_, resp_); + internal::MaybeEchoDeadline(ctx_, req_, resp_); if (service_->host_) { resp_->mutable_param()->set_host(*service_->host_); } @@ -238,7 +238,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( std::unique_lock<std::mutex> lock(service_->mu_); 
service_->signal_client_ = true; } - FinishWhenCancelledAsync(); + FinishWhenCancelledAsync(); return; } else if (req_->has_param() && req_->param().server_cancel_after_us()) { alarm_.experimental().Set( @@ -272,7 +272,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( // Terminate rpc with error and debug info in trailer. if (req_->param().debug_info().stack_entries_size() || !req_->param().debug_info().detail().empty()) { - TString serialized_debug_info = + TString serialized_debug_info = req_->param().debug_info().SerializeAsString(); ctx_->AddTrailingMetadata(kDebugInfoTrailerKey, serialized_debug_info); @@ -283,25 +283,25 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( if (req_->has_param() && (req_->param().expected_client_identity().length() > 0 || req_->param().check_auth_context())) { - internal::CheckServerAuthContext( - ctx_, req_->param().expected_transport_security_type(), - req_->param().expected_client_identity()); + internal::CheckServerAuthContext( + ctx_, req_->param().expected_transport_security_type(), + req_->param().expected_client_identity()); } if (req_->has_param() && req_->param().response_message_length() > 0) { resp_->set_message( - TString(req_->param().response_message_length(), '\0')); + TString(req_->param().response_message_length(), '\0')); } if (req_->has_param() && req_->param().echo_peer()) { resp_->mutable_param()->set_peer(ctx_->peer().c_str()); } Finish(Status::OK); } - void FinishWhenCancelledAsync() { - finish_when_cancelled_ = std::thread([this] { - std::unique_lock<std::mutex> l(cancel_mu_); - cancel_cv_.wait(l, [this] { return ctx_->IsCancelled(); }); + void FinishWhenCancelledAsync() { + finish_when_cancelled_ = std::thread([this] { + std::unique_lock<std::mutex> l(cancel_mu_); + cancel_cv_.wait(l, [this] { return ctx_->IsCancelled(); }); Finish(Status::CANCELLED); - }); + }); } CallbackTestServiceImpl* const service_; @@ -309,14 +309,14 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( const EchoRequest* const req_; EchoResponse* const resp_; Alarm alarm_; - std::mutex cancel_mu_; - std::condition_variable cancel_cv_; - bool initial_metadata_sent_ = false; - bool started_ = false; - bool on_cancel_invoked_ = false; + std::mutex cancel_mu_; + std::condition_variable cancel_cv_; + bool initial_metadata_sent_ = false; + bool started_ = false; + bool on_cancel_invoked_ = false; std::thread async_cancel_check_; - std::thread rpc_wait_thread_; - std::thread finish_when_cancelled_; + std::thread rpc_wait_thread_; + std::thread finish_when_cancelled_; }; return new Reactor(this, context, request, response); @@ -324,14 +324,14 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( experimental::ServerUnaryReactor* CallbackTestServiceImpl::CheckClientInitialMetadata( - experimental::CallbackServerContext* context, const SimpleRequest42*, - SimpleResponse42*) { + experimental::CallbackServerContext* context, const SimpleRequest42*, + SimpleResponse42*) { class Reactor : public ::grpc::experimental::ServerUnaryReactor { public: explicit Reactor(experimental::CallbackServerContext* ctx) { - EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), + EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), 1); EXPECT_EQ(ctx->client_metadata().count(kCheckClientInitialMetadataKey), 1u); @@ -354,10 +354,10 @@ 
CallbackTestServiceImpl::RequestStream( // is cancelled while the server is reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - int server_try_cancel = internal::GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancelNonblocking(context); + internal::ServerTryCancelNonblocking(context); // Don't need to provide a reactor since the RPC is canceled return nullptr; } @@ -398,7 +398,7 @@ CallbackTestServiceImpl::RequestStream( return; } if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); return; } FinishOnce(Status::OK); @@ -440,10 +440,10 @@ CallbackTestServiceImpl::ResponseStream( // is cancelled while the server is reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - int server_try_cancel = internal::GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancelNonblocking(context); + internal::ServerTryCancelNonblocking(context); } class Reactor @@ -452,9 +452,9 @@ CallbackTestServiceImpl::ResponseStream( Reactor(experimental::CallbackServerContext* ctx, const EchoRequest* request, int server_try_cancel) : ctx_(ctx), request_(request), server_try_cancel_(server_try_cancel) { - server_coalescing_api_ = internal::GetIntValueFromMetadata( + server_coalescing_api_ = internal::GetIntValueFromMetadata( kServerUseCoalescingApi, ctx->client_metadata(), 0); - server_responses_to_send_ = internal::GetIntValueFromMetadata( + server_responses_to_send_ = internal::GetIntValueFromMetadata( kServerResponseStreamsToSend, ctx->client_metadata(), kServerDefaultResponseStreamsToSend); if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { @@ -481,7 +481,7 @@ CallbackTestServiceImpl::ResponseStream( } else if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { // Let OnCancel recover this } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); } else { FinishOnce(Status::OK); } @@ -498,24 +498,24 @@ CallbackTestServiceImpl::ResponseStream( void NextWrite() { response_.set_message(request_->message() + - ::ToString(num_msgs_sent_)); + ::ToString(num_msgs_sent_)); if (num_msgs_sent_ == server_responses_to_send_ - 1 && server_coalescing_api_ != 0) { - { - std::lock_guard<std::mutex> l(finish_mu_); - if (!finished_) { - num_msgs_sent_++; - StartWriteLast(&response_, WriteOptions()); - } - } + { + std::lock_guard<std::mutex> l(finish_mu_); + if (!finished_) { + num_msgs_sent_++; + StartWriteLast(&response_, WriteOptions()); + } + } // If we use WriteLast, we shouldn't wait before attempting Finish FinishOnce(Status::OK); } else { - std::lock_guard<std::mutex> l(finish_mu_); - if (!finished_) { - num_msgs_sent_++; - StartWrite(&response_); - } + std::lock_guard<std::mutex> l(finish_mu_); + if (!finished_) { + num_msgs_sent_++; + StartWrite(&response_); + } } } experimental::CallbackServerContext* const ctx_; @@ -547,12 +547,12 @@ CallbackTestServiceImpl::BidiStream( // is cancelled while the server is 
reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - server_try_cancel_ = internal::GetIntValueFromMetadata( + server_try_cancel_ = internal::GetIntValueFromMetadata( kServerTryCancelRequest, ctx->client_metadata(), DO_NOT_CANCEL); - server_write_last_ = internal::GetIntValueFromMetadata( - kServerFinishAfterNReads, ctx->client_metadata(), 0); + server_write_last_ = internal::GetIntValueFromMetadata( + kServerFinishAfterNReads, ctx->client_metadata(), 0); if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancelNonblocking(ctx); + internal::ServerTryCancelNonblocking(ctx); } else { if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { ctx->TryCancel(); @@ -561,15 +561,15 @@ CallbackTestServiceImpl::BidiStream( } setup_done_ = true; } - void OnDone() override { - { - // Use the same lock as finish to make sure that OnDone isn't inlined. - std::lock_guard<std::mutex> l(finish_mu_); - EXPECT_TRUE(finished_); - finish_thread_.join(); - } - delete this; - } + void OnDone() override { + { + // Use the same lock as finish to make sure that OnDone isn't inlined. + std::lock_guard<std::mutex> l(finish_mu_); + EXPECT_TRUE(finished_); + finish_thread_.join(); + } + delete this; + } void OnCancel() override { EXPECT_TRUE(setup_done_); EXPECT_TRUE(ctx_->IsCancelled()); @@ -579,22 +579,22 @@ CallbackTestServiceImpl::BidiStream( if (ok) { num_msgs_read_++; response_.set_message(request_.message()); - std::lock_guard<std::mutex> l(finish_mu_); - if (!finished_) { - if (num_msgs_read_ == server_write_last_) { - StartWriteLast(&response_, WriteOptions()); - // If we use WriteLast, we shouldn't wait before attempting Finish - } else { - StartWrite(&response_); - return; - } + std::lock_guard<std::mutex> l(finish_mu_); + if (!finished_) { + if (num_msgs_read_ == server_write_last_) { + StartWriteLast(&response_, WriteOptions()); + // If we use WriteLast, we shouldn't wait before attempting Finish + } else { + StartWrite(&response_); + return; + } } } if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { // Let OnCancel handle this } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); } else { FinishOnce(Status::OK); } @@ -611,11 +611,11 @@ CallbackTestServiceImpl::BidiStream( std::lock_guard<std::mutex> l(finish_mu_); if (!finished_) { finished_ = true; - // Finish asynchronously to make sure that there are no deadlocks. - finish_thread_ = std::thread([this, s] { - std::lock_guard<std::mutex> l(finish_mu_); - Finish(s); - }); + // Finish asynchronously to make sure that there are no deadlocks. + finish_thread_ = std::thread([this, s] { + std::lock_guard<std::mutex> l(finish_mu_); + Finish(s); + }); } } @@ -628,7 +628,7 @@ CallbackTestServiceImpl::BidiStream( std::mutex finish_mu_; bool finished_{false}; bool setup_done_{false}; - std::thread finish_thread_; + std::thread finish_thread_; }; return new Reactor(context); diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h index 83ae90fe22..5f207f1979 100644 --- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h +++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h @@ -15,31 +15,31 @@ * limitations under the License. 
* */ - + #ifndef GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H #define GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H -#include <condition_variable> +#include <condition_variable> #include <memory> #include <mutex> #include <grpc/grpc.h> -#include <grpc/support/log.h> -#include <grpcpp/alarm.h> -#include <grpcpp/security/credentials.h> +#include <grpc/support/log.h> +#include <grpcpp/alarm.h> +#include <grpcpp/security/credentials.h> #include <grpcpp/server_context.h> -#include <gtest/gtest.h> +#include <gtest/gtest.h> + +#include <util/generic/string.h> +#include <thread> -#include <util/generic/string.h> -#include <thread> - #include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "test/cpp/util/string_ref_helper.h" +#include "test/cpp/util/string_ref_helper.h" + +#include <util/string/cast.h> + +using std::chrono::system_clock; -#include <util/string/cast.h> - -using std::chrono::system_clock; - namespace grpc { namespace testing { @@ -59,406 +59,406 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; -namespace internal { -// When echo_deadline is requested, deadline seen in the ServerContext is set in -// the response in seconds. -void MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, EchoResponse* response); - -void CheckServerAuthContext(const experimental::ServerContextBase* context, - const TString& expected_transport_security_type, - const TString& expected_client_identity); - -// Returns the number of pairs in metadata that exactly match the given -// key-value pair. Returns -1 if the pair wasn't found. -int MetadataMatchCount( - const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, - const TString& key, const TString& value); - -int GetIntValueFromMetadataHelper( - const char* key, - const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, - int default_value); - -int GetIntValueFromMetadata( - const char* key, - const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, - int default_value); - -void ServerTryCancel(ServerContext* context); -} // namespace internal - -class TestServiceSignaller { - public: - void ClientWaitUntilRpcStarted() { - std::unique_lock<std::mutex> lock(mu_); - cv_rpc_started_.wait(lock, [this] { return rpc_started_; }); - } - void ServerWaitToContinue() { - std::unique_lock<std::mutex> lock(mu_); - cv_server_continue_.wait(lock, [this] { return server_should_continue_; }); - } - void SignalClientThatRpcStarted() { - std::unique_lock<std::mutex> lock(mu_); - rpc_started_ = true; - cv_rpc_started_.notify_one(); - } - void SignalServerToContinue() { - std::unique_lock<std::mutex> lock(mu_); - server_should_continue_ = true; - cv_server_continue_.notify_one(); - } - - private: - std::mutex mu_; - std::condition_variable cv_rpc_started_; - bool rpc_started_ /* GUARDED_BY(mu_) */ = false; - std::condition_variable cv_server_continue_; - bool server_should_continue_ /* GUARDED_BY(mu_) */ = false; -}; - -template <typename RpcService> -class TestMultipleServiceImpl : public RpcService { +namespace internal { +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds. 
+void MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, EchoResponse* response); + +void CheckServerAuthContext(const experimental::ServerContextBase* context, + const TString& expected_transport_security_type, + const TString& expected_client_identity); + +// Returns the number of pairs in metadata that exactly match the given +// key-value pair. Returns -1 if the pair wasn't found. +int MetadataMatchCount( + const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, + const TString& key, const TString& value); + +int GetIntValueFromMetadataHelper( + const char* key, + const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, + int default_value); + +int GetIntValueFromMetadata( + const char* key, + const std::multimap<grpc::string_ref, grpc::string_ref>& metadata, + int default_value); + +void ServerTryCancel(ServerContext* context); +} // namespace internal + +class TestServiceSignaller { public: - TestMultipleServiceImpl() : signal_client_(false), host_() {} - explicit TestMultipleServiceImpl(const TString& host) - : signal_client_(false), host_(new TString(host)) {} + void ClientWaitUntilRpcStarted() { + std::unique_lock<std::mutex> lock(mu_); + cv_rpc_started_.wait(lock, [this] { return rpc_started_; }); + } + void ServerWaitToContinue() { + std::unique_lock<std::mutex> lock(mu_); + cv_server_continue_.wait(lock, [this] { return server_should_continue_; }); + } + void SignalClientThatRpcStarted() { + std::unique_lock<std::mutex> lock(mu_); + rpc_started_ = true; + cv_rpc_started_.notify_one(); + } + void SignalServerToContinue() { + std::unique_lock<std::mutex> lock(mu_); + server_should_continue_ = true; + cv_server_continue_.notify_one(); + } + + private: + std::mutex mu_; + std::condition_variable cv_rpc_started_; + bool rpc_started_ /* GUARDED_BY(mu_) */ = false; + std::condition_variable cv_server_continue_; + bool server_should_continue_ /* GUARDED_BY(mu_) */ = false; +}; + +template <typename RpcService> +class TestMultipleServiceImpl : public RpcService { + public: + TestMultipleServiceImpl() : signal_client_(false), host_() {} + explicit TestMultipleServiceImpl(const TString& host) + : signal_client_(false), host_(new TString(host)) {} Status Echo(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - if (request->has_param() && - request->param().server_notify_client_when_started()) { - signaller_.SignalClientThatRpcStarted(); - signaller_.ServerWaitToContinue(); - } - - // A bit of sleep to make sure that short deadline tests fail - if (request->has_param() && request->param().server_sleep_us() > 0) { - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_micros(request->param().server_sleep_us(), - GPR_TIMESPAN))); - } - - if (request->has_param() && request->param().server_die()) { - gpr_log(GPR_ERROR, "The request should not reach application handler."); - GPR_ASSERT(0); - } - if (request->has_param() && request->param().has_expected_error()) { - const auto& error = request->param().expected_error(); - return Status(static_cast<StatusCode>(error.code()), - error.error_message(), error.binary_error_details()); - } - int server_try_cancel = internal::GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - if (server_try_cancel > DO_NOT_CANCEL) { - // Since this is a unary RPC, by the time this server handler is called, - // the 'request' message is already read from the client. 
So the scenarios - // in server_try_cancel don't make much sense. Just cancel the RPC as long - // as server_try_cancel is not DO_NOT_CANCEL - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - response->set_message(request->message()); - internal::MaybeEchoDeadline(context, request, response); - if (host_) { - response->mutable_param()->set_host(*host_); - } - if (request->has_param() && request->param().client_cancel_after_us()) { - { - std::unique_lock<std::mutex> lock(mu_); - signal_client_ = true; - ++rpcs_waiting_for_client_cancel_; - } - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().client_cancel_after_us(), - GPR_TIMESPAN))); - } - { - std::unique_lock<std::mutex> lock(mu_); - --rpcs_waiting_for_client_cancel_; - } - return Status::CANCELLED; - } else if (request->has_param() && - request->param().server_cancel_after_us()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().server_cancel_after_us(), - GPR_TIMESPAN))); - return Status::CANCELLED; - } else if (!request->has_param() || - !request->param().skip_cancelled_check()) { - EXPECT_FALSE(context->IsCancelled()); - } - - if (request->has_param() && request->param().echo_metadata_initially()) { - const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddInitialMetadata(::ToString(metadatum.first), - ::ToString(metadatum.second)); - } - } - - if (request->has_param() && request->param().echo_metadata()) { - const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddTrailingMetadata(::ToString(metadatum.first), - ::ToString(metadatum.second)); - } - // Terminate rpc with error and debug info in trailer. 
- if (request->param().debug_info().stack_entries_size() || - !request->param().debug_info().detail().empty()) { - TString serialized_debug_info = - request->param().debug_info().SerializeAsString(); - context->AddTrailingMetadata(kDebugInfoTrailerKey, - serialized_debug_info); - return Status::CANCELLED; - } - } - if (request->has_param() && - (request->param().expected_client_identity().length() > 0 || - request->param().check_auth_context())) { - internal::CheckServerAuthContext( - context, request->param().expected_transport_security_type(), - request->param().expected_client_identity()); - } - if (request->has_param() && - request->param().response_message_length() > 0) { - response->set_message( - TString(request->param().response_message_length(), '\0')); - } - if (request->has_param() && request->param().echo_peer()) { - response->mutable_param()->set_peer(context->peer()); - } - return Status::OK; - } - - Status Echo1(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); - } - - Status Echo2(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); - } - + EchoResponse* response) { + if (request->has_param() && + request->param().server_notify_client_when_started()) { + signaller_.SignalClientThatRpcStarted(); + signaller_.ServerWaitToContinue(); + } + + // A bit of sleep to make sure that short deadline tests fail + if (request->has_param() && request->param().server_sleep_us() > 0) { + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_micros(request->param().server_sleep_us(), + GPR_TIMESPAN))); + } + + if (request->has_param() && request->param().server_die()) { + gpr_log(GPR_ERROR, "The request should not reach application handler."); + GPR_ASSERT(0); + } + if (request->has_param() && request->param().has_expected_error()) { + const auto& error = request->param().expected_error(); + return Status(static_cast<StatusCode>(error.code()), + error.error_message(), error.binary_error_details()); + } + int server_try_cancel = internal::GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + if (server_try_cancel > DO_NOT_CANCEL) { + // Since this is a unary RPC, by the time this server handler is called, + // the 'request' message is already read from the client. So the scenarios + // in server_try_cancel don't make much sense. 
Just cancel the RPC as long + // as server_try_cancel is not DO_NOT_CANCEL + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + response->set_message(request->message()); + internal::MaybeEchoDeadline(context, request, response); + if (host_) { + response->mutable_param()->set_host(*host_); + } + if (request->has_param() && request->param().client_cancel_after_us()) { + { + std::unique_lock<std::mutex> lock(mu_); + signal_client_ = true; + ++rpcs_waiting_for_client_cancel_; + } + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().client_cancel_after_us(), + GPR_TIMESPAN))); + } + { + std::unique_lock<std::mutex> lock(mu_); + --rpcs_waiting_for_client_cancel_; + } + return Status::CANCELLED; + } else if (request->has_param() && + request->param().server_cancel_after_us()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().server_cancel_after_us(), + GPR_TIMESPAN))); + return Status::CANCELLED; + } else if (!request->has_param() || + !request->param().skip_cancelled_check()) { + EXPECT_FALSE(context->IsCancelled()); + } + + if (request->has_param() && request->param().echo_metadata_initially()) { + const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddInitialMetadata(::ToString(metadatum.first), + ::ToString(metadatum.second)); + } + } + + if (request->has_param() && request->param().echo_metadata()) { + const std::multimap<grpc::string_ref, grpc::string_ref>& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddTrailingMetadata(::ToString(metadatum.first), + ::ToString(metadatum.second)); + } + // Terminate rpc with error and debug info in trailer. 
+ if (request->param().debug_info().stack_entries_size() || + !request->param().debug_info().detail().empty()) { + TString serialized_debug_info = + request->param().debug_info().SerializeAsString(); + context->AddTrailingMetadata(kDebugInfoTrailerKey, + serialized_debug_info); + return Status::CANCELLED; + } + } + if (request->has_param() && + (request->param().expected_client_identity().length() > 0 || + request->param().check_auth_context())) { + internal::CheckServerAuthContext( + context, request->param().expected_transport_security_type(), + request->param().expected_client_identity()); + } + if (request->has_param() && + request->param().response_message_length() > 0) { + response->set_message( + TString(request->param().response_message_length(), '\0')); + } + if (request->has_param() && request->param().echo_peer()) { + response->mutable_param()->set_peer(context->peer()); + } + return Status::OK; + } + + Status Echo1(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); + } + + Status Echo2(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); + } + Status CheckClientInitialMetadata(ServerContext* context, - const SimpleRequest42* /*request*/, - SimpleResponse42* /*response*/) { - EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), - 1); - EXPECT_EQ(1u, - context->client_metadata().count(kCheckClientInitialMetadataKey)); - return Status::OK; - } + const SimpleRequest42* /*request*/, + SimpleResponse42* /*response*/) { + EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), + 1); + EXPECT_EQ(1u, + context->client_metadata().count(kCheckClientInitialMetadataKey)); + return Status::OK; + } // Unimplemented is left unimplemented to test the returned error. 
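The Echo() handler above cancels the RPC and returns the request's DebugInfo to the caller as serialized trailing metadata under kDebugInfoTrailerKey. As an illustrative sketch only (not part of this commit), a client-side test could exercise that path roughly as follows, assuming the usual grpc::testing::EchoTestService stub named stub and the constants declared in this header:

ClientContext ctx;
EchoRequest request;
EchoResponse response;
request.set_message("hello");
// The debug-info branch in the handler above is nested under echo_metadata.
request.mutable_param()->set_echo_metadata(true);
auto* debug_info = request.mutable_param()->mutable_debug_info();
debug_info->add_stack_entries("stack_entry_0");
debug_info->set_detail("canned failure detail");
Status status = stub->Echo(&ctx, request, &response);
// The handler terminates the RPC with CANCELLED and ships the serialized
// DebugInfo back as a trailer.
EXPECT_EQ(StatusCode::CANCELLED, status.error_code());
const auto& trailers = ctx.GetServerTrailingMetadata();
auto it = trailers.find(kDebugInfoTrailerKey);
if (it != trailers.end()) {
  DebugInfo returned_debug_info;
  returned_debug_info.ParseFromString(
      TString(it->second.data(), it->second.size()));
  EXPECT_EQ("canned failure detail", returned_debug_info.detail());
}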
Status RequestStream(ServerContext* context, ServerReader<EchoRequest>* reader, - EchoResponse* response) { - // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by - // the server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads - // any message from the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading messages from the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads - // all the messages from the client - int server_try_cancel = internal::GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - response->set_message(""); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { internal::ServerTryCancel(context); }); - } - - int num_msgs_read = 0; - while (reader->Read(&request)) { - response->mutable_message()->append(request.message()); - } - gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - // Return 'kNumResponseStreamMsgs' messages. - // TODO(yangg) make it generic by adding a parameter into EchoRequest + EchoResponse* response) { + // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by + // the server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads + // any message from the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading messages from the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads + // all the messages from the client + int server_try_cancel = internal::GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + response->set_message(""); + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { internal::ServerTryCancel(context); }); + } + + int num_msgs_read = 0; + while (reader->Read(&request)) { + response->mutable_message()->append(request.message()); + } + gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Return 'kNumResponseStreamMsgs' messages. 
+ // TODO(yangg) make it generic by adding a parameter into EchoRequest Status ResponseStream(ServerContext* context, const EchoRequest* request, - ServerWriter<EchoResponse>* writer) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes - // any messages to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // writing messages to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes - // all the messages to the client - int server_try_cancel = internal::GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - int server_coalescing_api = internal::GetIntValueFromMetadata( - kServerUseCoalescingApi, context->client_metadata(), 0); - - int server_responses_to_send = internal::GetIntValueFromMetadata( - kServerResponseStreamsToSend, context->client_metadata(), - kServerDefaultResponseStreamsToSend); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - EchoResponse response; - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { internal::ServerTryCancel(context); }); - } - - for (int i = 0; i < server_responses_to_send; i++) { - response.set_message(request->message() + ::ToString(i)); - if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { - writer->WriteLast(response, WriteOptions()); - } else { - writer->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - Status BidiStream(ServerContext* context, - ServerReaderWriter<EchoResponse, EchoRequest>* stream) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ - // writes any messages from/to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading/writing messages from/to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server - // reads/writes all messages from/to the client - int server_try_cancel = internal::GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - EchoResponse response; - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { internal::ServerTryCancel(context); }); - } - - // kServerFinishAfterNReads suggests after how many reads, the server should - // write the last message and send status (coalesced using WriteLast) - int server_write_last = internal::GetIntValueFromMetadata( - kServerFinishAfterNReads, context->client_metadata(), 0); - - int read_counts = 0; - while (stream->Read(&request)) { - read_counts++; - gpr_log(GPR_INFO, "recv msg %s", 
request.message().c_str()); - response.set_message(request.message()); - if (read_counts == server_write_last) { - stream->WriteLast(response, WriteOptions()); - } else { - stream->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - internal::ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - // Unimplemented is left unimplemented to test the returned error. + ServerWriter<EchoResponse>* writer) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes + // any messages to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // writing messages to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes + // all the messages to the client + int server_try_cancel = internal::GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + int server_coalescing_api = internal::GetIntValueFromMetadata( + kServerUseCoalescingApi, context->client_metadata(), 0); + + int server_responses_to_send = internal::GetIntValueFromMetadata( + kServerResponseStreamsToSend, context->client_metadata(), + kServerDefaultResponseStreamsToSend); + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + EchoResponse response; + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { internal::ServerTryCancel(context); }); + } + + for (int i = 0; i < server_responses_to_send; i++) { + response.set_message(request->message() + ::ToString(i)); + if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { + writer->WriteLast(response, WriteOptions()); + } else { + writer->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + Status BidiStream(ServerContext* context, + ServerReaderWriter<EchoResponse, EchoRequest>* stream) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ + // writes any messages from/to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading/writing messages from/to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server + // reads/writes all messages from/to the client + int server_try_cancel = internal::GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + EchoResponse response; + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { 
internal::ServerTryCancel(context); }); + } + + // kServerFinishAfterNReads suggests after how many reads, the server should + // write the last message and send status (coalesced using WriteLast) + int server_write_last = internal::GetIntValueFromMetadata( + kServerFinishAfterNReads, context->client_metadata(), 0); + + int read_counts = 0; + while (stream->Read(&request)) { + read_counts++; + gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); + response.set_message(request.message()); + if (read_counts == server_write_last) { + stream->WriteLast(response, WriteOptions()); + } else { + stream->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + internal::ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Unimplemented is left unimplemented to test the returned error. bool signal_client() { std::unique_lock<std::mutex> lock(mu_); return signal_client_; } - void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } - void SignalServerToContinue() { signaller_.SignalServerToContinue(); } - uint64_t RpcsWaitingForClientCancel() { - std::unique_lock<std::mutex> lock(mu_); - return rpcs_waiting_for_client_cancel_; - } + void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } + void SignalServerToContinue() { signaller_.SignalServerToContinue(); } + uint64_t RpcsWaitingForClientCancel() { + std::unique_lock<std::mutex> lock(mu_); + return rpcs_waiting_for_client_cancel_; + } private: bool signal_client_; std::mutex mu_; - TestServiceSignaller signaller_; - std::unique_ptr<TString> host_; - uint64_t rpcs_waiting_for_client_cancel_ = 0; + TestServiceSignaller signaller_; + std::unique_ptr<TString> host_; + uint64_t rpcs_waiting_for_client_cancel_ = 0; }; class CallbackTestServiceImpl : public ::grpc::testing::EchoTestService::ExperimentalCallbackService { public: CallbackTestServiceImpl() : signal_client_(false), host_() {} - explicit CallbackTestServiceImpl(const TString& host) - : signal_client_(false), host_(new TString(host)) {} + explicit CallbackTestServiceImpl(const TString& host) + : signal_client_(false), host_(new TString(host)) {} experimental::ServerUnaryReactor* Echo( experimental::CallbackServerContext* context, const EchoRequest* request, EchoResponse* response) override; experimental::ServerUnaryReactor* CheckClientInitialMetadata( - experimental::CallbackServerContext* context, const SimpleRequest42*, - SimpleResponse42*) override; + experimental::CallbackServerContext* context, const SimpleRequest42*, + SimpleResponse42*) override; experimental::ServerReadReactor<EchoRequest>* RequestStream( experimental::CallbackServerContext* context, @@ -476,19 +476,19 @@ class CallbackTestServiceImpl std::unique_lock<std::mutex> lock(mu_); return signal_client_; } - void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } - void SignalServerToContinue() { signaller_.SignalServerToContinue(); } + void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } + void SignalServerToContinue() { signaller_.SignalServerToContinue(); } private: bool signal_client_; std::mutex mu_; - TestServiceSignaller signaller_; - std::unique_ptr<TString> host_; + TestServiceSignaller signaller_; + std::unique_ptr<TString> host_; }; -using TestServiceImpl = - 
TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>; - +using TestServiceImpl = + TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>; + } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc index d738569b3e..8acb953729 100644 --- a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc @@ -367,7 +367,7 @@ class AsyncClientEnd2endTest : public ::testing::Test { for (int i = 0; i < num_rpcs; ++i) { AsyncClientCall* call = new AsyncClientCall; EchoRequest request; - request.set_message(TString("Hello: " + grpc::to_string(i)).c_str()); + request.set_message(TString("Hello: " + grpc::to_string(i)).c_str()); call->response_reader = common_.GetStub()->AsyncEcho(&call->context, request, &cq_); call->response_reader->Finish(&call->response, &call->status, diff --git a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc index 7ab30f80e2..48b9eace12 100644 --- a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc @@ -40,7 +40,7 @@ using grpc::testing::EchoRequest; using grpc::testing::EchoResponse; -static TString g_root; +static TString g_root; static gpr_mu g_mu; extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); @@ -167,12 +167,12 @@ class TimeChangeTest : public ::testing::Test { const int TIME_OFFSET2 = 5678; private: - static TString server_address_; + static TString server_address_; static std::unique_ptr<SubProcess> server_; std::shared_ptr<Channel> channel_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; }; -TString TimeChangeTest::server_address_; +TString TimeChangeTest::server_address_; std::unique_ptr<SubProcess> TimeChangeTest::server_; // Wall-clock time jumps forward on client before bidi stream is created @@ -347,11 +347,11 @@ TEST_F(TimeChangeTest, TimeJumpForwardAndBackDuringCall) { } // namespace grpc int main(int argc, char** argv) { - TString me = argv[0]; + TString me = argv[0]; // get index of last slash in path to test binary auto lslash = me.rfind('/'); // set g_root = path to directory containing test binary - if (lslash != TString::npos) { + if (lslash != TString::npos) { g_root = me.substr(0, lslash); } else { g_root = "."; diff --git a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc index db5631b845..603e6186bf 100644 --- a/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc +++ b/contrib/libs/grpc/test/cpp/end2end/xds_end2end_test.cc @@ -16,22 +16,22 @@ * */ -#include <deque> +#include <deque> #include <memory> #include <mutex> #include <numeric> #include <set> #include <sstream> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <thread> -#include <vector> - -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include "y_absl/strings/str_cat.h" -#include "y_absl/types/optional.h" - +#include <vector> + +#include <gmock/gmock.h> +#include <gtest/gtest.h> + +#include "y_absl/strings/str_cat.h" +#include "y_absl/types/optional.h" + #include <grpc/grpc.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> @@ -45,16 +45,16 @@ #include "src/core/ext/filters/client_channel/backup_poller.h" #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" #include 
"src/core/ext/filters/client_channel/server_address.h" -#include "src/core/ext/xds/xds_api.h" -#include "src/core/ext/xds/xds_channel_args.h" -#include "src/core/ext/xds/xds_client.h" -#include "src/core/lib/channel/channel_args.h" +#include "src/core/ext/xds/xds_api.h" +#include "src/core/ext/xds/xds_channel_args.h" +#include "src/core/ext/xds/xds_client.h" +#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/env.h" #include "src/core/lib/gpr/tmpfile.h" #include "src/core/lib/gprpp/map.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/sync.h" -#include "src/core/lib/iomgr/parse_address.h" +#include "src/core/lib/iomgr/parse_address.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/security/credentials/fake/fake_credentials.h" #include "src/cpp/client/secure_credentials.h" @@ -66,19 +66,19 @@ #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h" -#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h" +#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h" #include "src/proto/grpc/testing/xds/eds_for_test.grpc.pb.h" -#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h" +#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h" #include "src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h" -#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h" +#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h" namespace grpc { namespace testing { @@ -86,78 +86,78 @@ namespace { using std::chrono::system_clock; -using ::envoy::config::cluster::v3::CircuitBreakers; -using ::envoy::config::cluster::v3::Cluster; -using ::envoy::config::cluster::v3::RoutingPriority; -using ::envoy::config::endpoint::v3::ClusterLoadAssignment; -using ::envoy::config::endpoint::v3::HealthStatus; -using ::envoy::config::listener::v3::Listener; -using ::envoy::config::route::v3::RouteConfiguration; -using ::envoy::extensions::filters::network::http_connection_manager::v3:: - HttpConnectionManager; -using ::envoy::type::v3::FractionalPercent; - -constexpr char kLdsTypeUrl[] = - "type.googleapis.com/envoy.config.listener.v3.Listener"; -constexpr char kRdsTypeUrl[] = - "type.googleapis.com/envoy.config.route.v3.RouteConfiguration"; -constexpr char kCdsTypeUrl[] = - "type.googleapis.com/envoy.config.cluster.v3.Cluster"; -constexpr char kEdsTypeUrl[] = - "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"; - -constexpr char kLdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Listener"; -constexpr char kRdsV2TypeUrl[] = - "type.googleapis.com/envoy.api.v2.RouteConfiguration"; -constexpr char kCdsV2TypeUrl[] = 
"type.googleapis.com/envoy.api.v2.Cluster"; -constexpr char kEdsV2TypeUrl[] = +using ::envoy::config::cluster::v3::CircuitBreakers; +using ::envoy::config::cluster::v3::Cluster; +using ::envoy::config::cluster::v3::RoutingPriority; +using ::envoy::config::endpoint::v3::ClusterLoadAssignment; +using ::envoy::config::endpoint::v3::HealthStatus; +using ::envoy::config::listener::v3::Listener; +using ::envoy::config::route::v3::RouteConfiguration; +using ::envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager; +using ::envoy::type::v3::FractionalPercent; + +constexpr char kLdsTypeUrl[] = + "type.googleapis.com/envoy.config.listener.v3.Listener"; +constexpr char kRdsTypeUrl[] = + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration"; +constexpr char kCdsTypeUrl[] = + "type.googleapis.com/envoy.config.cluster.v3.Cluster"; +constexpr char kEdsTypeUrl[] = + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"; + +constexpr char kLdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Listener"; +constexpr char kRdsV2TypeUrl[] = + "type.googleapis.com/envoy.api.v2.RouteConfiguration"; +constexpr char kCdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster"; +constexpr char kEdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"; - + constexpr char kDefaultLocalityRegion[] = "xds_default_locality_region"; constexpr char kDefaultLocalityZone[] = "xds_default_locality_zone"; constexpr char kLbDropType[] = "lb"; constexpr char kThrottleDropType[] = "throttle"; -constexpr char kServerName[] = "server.example.com"; -constexpr char kDefaultRouteConfigurationName[] = "route_config_name"; -constexpr char kDefaultClusterName[] = "cluster_name"; -constexpr char kDefaultEdsServiceName[] = "eds_service_name"; +constexpr char kServerName[] = "server.example.com"; +constexpr char kDefaultRouteConfigurationName[] = "route_config_name"; +constexpr char kDefaultClusterName[] = "cluster_name"; +constexpr char kDefaultEdsServiceName[] = "eds_service_name"; constexpr int kDefaultLocalityWeight = 3; constexpr int kDefaultLocalityPriority = 0; -constexpr char kRequestMessage[] = "Live long and prosper."; -constexpr char kDefaultServiceConfig[] = +constexpr char kRequestMessage[] = "Live long and prosper."; +constexpr char kDefaultServiceConfig[] = "{\n" - " \"loadBalancingConfig\":[\n" - " { \"does_not_exist\":{} },\n" - " { \"eds_experimental\":{\n" - " \"clusterName\": \"server.example.com\",\n" - " \"lrsLoadReportingServerName\": \"\"\n" - " } }\n" - " ]\n" - "}"; -constexpr char kDefaultServiceConfigWithoutLoadReporting[] = - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"does_not_exist\":{} },\n" - " { \"eds_experimental\":{\n" - " \"clusterName\": \"server.example.com\"\n" - " } }\n" - " ]\n" - "}"; - -constexpr char kBootstrapFileV3[] = - "{\n" - " \"xds_servers\": [\n" - " {\n" - " \"server_uri\": \"fake:///xds_server\",\n" - " \"channel_creds\": [\n" - " {\n" - " \"type\": \"fake\"\n" - " }\n" - " ],\n" - " \"server_features\": [\"xds_v3\"]\n" - " }\n" - " ],\n" + " \"loadBalancingConfig\":[\n" + " { \"does_not_exist\":{} },\n" + " { \"eds_experimental\":{\n" + " \"clusterName\": \"server.example.com\",\n" + " \"lrsLoadReportingServerName\": \"\"\n" + " } }\n" + " ]\n" + "}"; +constexpr char kDefaultServiceConfigWithoutLoadReporting[] = + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"does_not_exist\":{} },\n" + " { \"eds_experimental\":{\n" + " \"clusterName\": \"server.example.com\"\n" + " } }\n" + " ]\n" + "}"; + +constexpr char 
kBootstrapFileV3[] = + "{\n" + " \"xds_servers\": [\n" + " {\n" + " \"server_uri\": \"fake:///xds_server\",\n" + " \"channel_creds\": [\n" + " {\n" + " \"type\": \"fake\"\n" + " }\n" + " ],\n" + " \"server_features\": [\"xds_v3\"]\n" + " }\n" + " ],\n" " \"node\": {\n" " \"id\": \"xds_end2end_test\",\n" " \"cluster\": \"test\",\n" @@ -172,45 +172,45 @@ constexpr char kBootstrapFileV3[] = " }\n" "}\n"; -constexpr char kBootstrapFileV2[] = +constexpr char kBootstrapFileV2[] = "{\n" - " \"xds_servers\": [\n" - " {\n" - " \"server_uri\": \"fake:///xds_server\",\n" - " \"channel_creds\": [\n" - " {\n" - " \"type\": \"fake\"\n" - " }\n" - " ]\n" - " }\n" - " ],\n" + " \"xds_servers\": [\n" + " {\n" + " \"server_uri\": \"fake:///xds_server\",\n" + " \"channel_creds\": [\n" + " {\n" + " \"type\": \"fake\"\n" + " }\n" + " ]\n" + " }\n" + " ],\n" " \"node\": {\n" - " \"id\": \"xds_end2end_test\",\n" - " \"cluster\": \"test\",\n" - " \"metadata\": {\n" - " \"foo\": \"bar\"\n" - " },\n" - " \"locality\": {\n" - " \"region\": \"corp\",\n" - " \"zone\": \"svl\",\n" - " \"subzone\": \"mp3\"\n" - " }\n" + " \"id\": \"xds_end2end_test\",\n" + " \"cluster\": \"test\",\n" + " \"metadata\": {\n" + " \"foo\": \"bar\"\n" + " },\n" + " \"locality\": {\n" + " \"region\": \"corp\",\n" + " \"zone\": \"svl\",\n" + " \"subzone\": \"mp3\"\n" + " }\n" " }\n" "}\n"; -char* g_bootstrap_file_v3; -char* g_bootstrap_file_v2; +char* g_bootstrap_file_v3; +char* g_bootstrap_file_v2; void WriteBootstrapFiles() { char* bootstrap_file; - FILE* out = gpr_tmpfile("xds_bootstrap_v3", &bootstrap_file); - fputs(kBootstrapFileV3, out); + FILE* out = gpr_tmpfile("xds_bootstrap_v3", &bootstrap_file); + fputs(kBootstrapFileV3, out); fclose(out); - g_bootstrap_file_v3 = bootstrap_file; - out = gpr_tmpfile("xds_bootstrap_v2", &bootstrap_file); - fputs(kBootstrapFileV2, out); + g_bootstrap_file_v3 = bootstrap_file; + out = gpr_tmpfile("xds_bootstrap_v2", &bootstrap_file); + fputs(kBootstrapFileV2, out); fclose(out); - g_bootstrap_file_v2 = bootstrap_file; + g_bootstrap_file_v2 = bootstrap_file; } // Helper class to minimize the number of unique ports we use for this test. @@ -260,7 +260,7 @@ class CountedService : public ServiceType { response_count_ = 0; } - private: + private: grpc_core::Mutex mu_; size_t request_count_ = 0; size_t response_count_ = 0; @@ -269,9 +269,9 @@ class CountedService : public ServiceType { const char g_kCallCredsMdKey[] = "Balancer should not ..."; const char g_kCallCredsMdValue[] = "... 
receive me"; -template <typename RpcService> -class BackendServiceImpl - : public CountedService<TestMultipleServiceImpl<RpcService>> { +template <typename RpcService> +class BackendServiceImpl + : public CountedService<TestMultipleServiceImpl<RpcService>> { public: BackendServiceImpl() {} @@ -284,50 +284,50 @@ class BackendServiceImpl if (call_credentials_entry != context->client_metadata().end()) { EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); } - CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount(); - const auto status = - TestMultipleServiceImpl<RpcService>::Echo(context, request, response); - CountedService< - TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount(); + CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount(); + const auto status = + TestMultipleServiceImpl<RpcService>::Echo(context, request, response); + CountedService< + TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount(); AddClient(context->peer()); return status; } - Status Echo1(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override { - return Echo(context, request, response); - } - - Status Echo2(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override { - return Echo(context, request, response); - } - + Status Echo1(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + return Echo(context, request, response); + } + + Status Echo2(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + return Echo(context, request, response); + } + void Start() {} void Shutdown() {} - std::set<TString> clients() { + std::set<TString> clients() { grpc_core::MutexLock lock(&clients_mu_); return clients_; } private: - void AddClient(const TString& client) { + void AddClient(const TString& client) { grpc_core::MutexLock lock(&clients_mu_); clients_.insert(client); } grpc_core::Mutex clients_mu_; - std::set<TString> clients_; + std::set<TString> clients_; }; class ClientStats { public: struct LocalityStats { - LocalityStats() {} - + LocalityStats() {} + // Converts from proto message class. 
- template <class UpstreamLocalityStats> + template <class UpstreamLocalityStats> LocalityStats(const UpstreamLocalityStats& upstream_locality_stats) : total_successful_requests( upstream_locality_stats.total_successful_requests()), @@ -337,27 +337,27 @@ class ClientStats { total_issued_requests( upstream_locality_stats.total_issued_requests()) {} - LocalityStats& operator+=(const LocalityStats& other) { - total_successful_requests += other.total_successful_requests; - total_requests_in_progress += other.total_requests_in_progress; - total_error_requests += other.total_error_requests; - total_issued_requests += other.total_issued_requests; - return *this; - } - - uint64_t total_successful_requests = 0; - uint64_t total_requests_in_progress = 0; - uint64_t total_error_requests = 0; - uint64_t total_issued_requests = 0; + LocalityStats& operator+=(const LocalityStats& other) { + total_successful_requests += other.total_successful_requests; + total_requests_in_progress += other.total_requests_in_progress; + total_error_requests += other.total_error_requests; + total_issued_requests += other.total_issued_requests; + return *this; + } + + uint64_t total_successful_requests = 0; + uint64_t total_requests_in_progress = 0; + uint64_t total_error_requests = 0; + uint64_t total_issued_requests = 0; }; - ClientStats() {} - + ClientStats() {} + // Converts from proto message class. - template <class ClusterStats> - explicit ClientStats(const ClusterStats& cluster_stats) - : cluster_name_(cluster_stats.cluster_name()), - total_dropped_requests_(cluster_stats.total_dropped_requests()) { + template <class ClusterStats> + explicit ClientStats(const ClusterStats& cluster_stats) + : cluster_name_(cluster_stats.cluster_name()), + total_dropped_requests_(cluster_stats.total_dropped_requests()) { for (const auto& input_locality_stats : cluster_stats.upstream_locality_stats()) { locality_stats_.emplace(input_locality_stats.locality().sub_zone(), @@ -370,11 +370,11 @@ class ClientStats { } } - const TString& cluster_name() const { return cluster_name_; } - - const std::map<TString, LocalityStats>& locality_stats() const { - return locality_stats_; - } + const TString& cluster_name() const { return cluster_name_; } + + const std::map<TString, LocalityStats>& locality_stats() const { + return locality_stats_; + } uint64_t total_successful_requests() const { uint64_t sum = 0; for (auto& p : locality_stats_) { @@ -403,216 +403,216 @@ class ClientStats { } return sum; } - + uint64_t total_dropped_requests() const { return total_dropped_requests_; } - - uint64_t dropped_requests(const TString& category) const { + + uint64_t dropped_requests(const TString& category) const { auto iter = dropped_requests_.find(category); GPR_ASSERT(iter != dropped_requests_.end()); return iter->second; } - ClientStats& operator+=(const ClientStats& other) { - for (const auto& p : other.locality_stats_) { - locality_stats_[p.first] += p.second; - } - total_dropped_requests_ += other.total_dropped_requests_; - for (const auto& p : other.dropped_requests_) { - dropped_requests_[p.first] += p.second; - } - return *this; - } - + ClientStats& operator+=(const ClientStats& other) { + for (const auto& p : other.locality_stats_) { + locality_stats_[p.first] += p.second; + } + total_dropped_requests_ += other.total_dropped_requests_; + for (const auto& p : other.dropped_requests_) { + dropped_requests_[p.first] += p.second; + } + return *this; + } + private: - TString cluster_name_; - std::map<TString, LocalityStats> locality_stats_; - uint64_t 
total_dropped_requests_ = 0; - std::map<TString, uint64_t> dropped_requests_; + TString cluster_name_; + std::map<TString, LocalityStats> locality_stats_; + uint64_t total_dropped_requests_ = 0; + std::map<TString, uint64_t> dropped_requests_; }; -class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { +class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { public: - struct ResponseState { - enum State { NOT_SENT, SENT, ACKED, NACKED }; - State state = NOT_SENT; - TString error_message; - }; - - struct EdsResourceArgs { + struct ResponseState { + enum State { NOT_SENT, SENT, ACKED, NACKED }; + State state = NOT_SENT; + TString error_message; + }; + + struct EdsResourceArgs { struct Locality { - Locality(const TString& sub_zone, std::vector<int> ports, + Locality(const TString& sub_zone, std::vector<int> ports, int lb_weight = kDefaultLocalityWeight, int priority = kDefaultLocalityPriority, - std::vector<HealthStatus> health_statuses = {}) + std::vector<HealthStatus> health_statuses = {}) : sub_zone(std::move(sub_zone)), ports(std::move(ports)), lb_weight(lb_weight), priority(priority), health_statuses(std::move(health_statuses)) {} - const TString sub_zone; + const TString sub_zone; std::vector<int> ports; int lb_weight; int priority; - std::vector<HealthStatus> health_statuses; + std::vector<HealthStatus> health_statuses; }; - EdsResourceArgs() = default; - explicit EdsResourceArgs(std::vector<Locality> locality_list) + EdsResourceArgs() = default; + explicit EdsResourceArgs(std::vector<Locality> locality_list) : locality_list(std::move(locality_list)) {} std::vector<Locality> locality_list; - std::map<TString, uint32_t> drop_categories; + std::map<TString, uint32_t> drop_categories; FractionalPercent::DenominatorType drop_denominator = FractionalPercent::MILLION; }; - explicit AdsServiceImpl(bool enable_load_reporting) - : v2_rpc_service_(this, /*is_v2=*/true), - v3_rpc_service_(this, /*is_v2=*/false) { - // Construct RDS response data. - default_route_config_.set_name(kDefaultRouteConfigurationName); - auto* virtual_host = default_route_config_.add_virtual_hosts(); - virtual_host->add_domains("*"); - auto* route = virtual_host->add_routes(); - route->mutable_match()->set_prefix(""); - route->mutable_route()->set_cluster(kDefaultClusterName); - SetRdsResource(default_route_config_); - // Construct LDS response data (with inlined RDS result). - default_listener_ = BuildListener(default_route_config_); - SetLdsResource(default_listener_); - // Construct CDS response data. 
- default_cluster_.set_name(kDefaultClusterName); - default_cluster_.set_type(Cluster::EDS); - auto* eds_config = default_cluster_.mutable_eds_cluster_config(); - eds_config->mutable_eds_config()->mutable_ads(); - eds_config->set_service_name(kDefaultEdsServiceName); - default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN); - if (enable_load_reporting) { - default_cluster_.mutable_lrs_server()->mutable_self(); - } - SetCdsResource(default_cluster_); - } - - bool seen_v2_client() const { return seen_v2_client_; } - bool seen_v3_client() const { return seen_v3_client_; } - - ::envoy::service::discovery::v2::AggregatedDiscoveryService::Service* - v2_rpc_service() { - return &v2_rpc_service_; - } - - ::envoy::service::discovery::v3::AggregatedDiscoveryService::Service* - v3_rpc_service() { - return &v3_rpc_service_; - } - - Listener default_listener() const { return default_listener_; } - RouteConfiguration default_route_config() const { - return default_route_config_; - } - Cluster default_cluster() const { return default_cluster_; } - - ResponseState lds_response_state() { - grpc_core::MutexLock lock(&ads_mu_); - return resource_type_response_state_[kLdsTypeUrl]; - } - - ResponseState rds_response_state() { - grpc_core::MutexLock lock(&ads_mu_); - return resource_type_response_state_[kRdsTypeUrl]; - } - - ResponseState cds_response_state() { + explicit AdsServiceImpl(bool enable_load_reporting) + : v2_rpc_service_(this, /*is_v2=*/true), + v3_rpc_service_(this, /*is_v2=*/false) { + // Construct RDS response data. + default_route_config_.set_name(kDefaultRouteConfigurationName); + auto* virtual_host = default_route_config_.add_virtual_hosts(); + virtual_host->add_domains("*"); + auto* route = virtual_host->add_routes(); + route->mutable_match()->set_prefix(""); + route->mutable_route()->set_cluster(kDefaultClusterName); + SetRdsResource(default_route_config_); + // Construct LDS response data (with inlined RDS result). + default_listener_ = BuildListener(default_route_config_); + SetLdsResource(default_listener_); + // Construct CDS response data. 
+ default_cluster_.set_name(kDefaultClusterName); + default_cluster_.set_type(Cluster::EDS); + auto* eds_config = default_cluster_.mutable_eds_cluster_config(); + eds_config->mutable_eds_config()->mutable_ads(); + eds_config->set_service_name(kDefaultEdsServiceName); + default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN); + if (enable_load_reporting) { + default_cluster_.mutable_lrs_server()->mutable_self(); + } + SetCdsResource(default_cluster_); + } + + bool seen_v2_client() const { return seen_v2_client_; } + bool seen_v3_client() const { return seen_v3_client_; } + + ::envoy::service::discovery::v2::AggregatedDiscoveryService::Service* + v2_rpc_service() { + return &v2_rpc_service_; + } + + ::envoy::service::discovery::v3::AggregatedDiscoveryService::Service* + v3_rpc_service() { + return &v3_rpc_service_; + } + + Listener default_listener() const { return default_listener_; } + RouteConfiguration default_route_config() const { + return default_route_config_; + } + Cluster default_cluster() const { return default_cluster_; } + + ResponseState lds_response_state() { + grpc_core::MutexLock lock(&ads_mu_); + return resource_type_response_state_[kLdsTypeUrl]; + } + + ResponseState rds_response_state() { grpc_core::MutexLock lock(&ads_mu_); - return resource_type_response_state_[kCdsTypeUrl]; - } - - ResponseState eds_response_state() { - grpc_core::MutexLock lock(&ads_mu_); - return resource_type_response_state_[kEdsTypeUrl]; - } - - void SetResourceIgnore(const TString& type_url) { - grpc_core::MutexLock lock(&ads_mu_); - resource_types_to_ignore_.emplace(type_url); - } - - void UnsetResource(const TString& type_url, const TString& name) { - grpc_core::MutexLock lock(&ads_mu_); - ResourceState& state = resource_map_[type_url][name]; - ++state.version; - state.resource.reset(); - gpr_log(GPR_INFO, "ADS[%p]: Unsetting %s resource %s to version %u", this, - type_url.c_str(), name.c_str(), state.version); - for (SubscriptionState* subscription : state.subscriptions) { - subscription->update_queue->emplace_back(type_url, name); - } - } - - void SetResource(google::protobuf::Any resource, const TString& type_url, - const TString& name) { - grpc_core::MutexLock lock(&ads_mu_); - ResourceState& state = resource_map_[type_url][name]; - ++state.version; - state.resource = std::move(resource); - gpr_log(GPR_INFO, "ADS[%p]: Updating %s resource %s to version %u", this, - type_url.c_str(), name.c_str(), state.version); - for (SubscriptionState* subscription : state.subscriptions) { - subscription->update_queue->emplace_back(type_url, name); - } - } - - void SetLdsResource(const Listener& listener) { - google::protobuf::Any resource; - resource.PackFrom(listener); - SetResource(std::move(resource), kLdsTypeUrl, listener.name()); - } - - void SetRdsResource(const RouteConfiguration& route) { - google::protobuf::Any resource; - resource.PackFrom(route); - SetResource(std::move(resource), kRdsTypeUrl, route.name()); - } - - void SetCdsResource(const Cluster& cluster) { - google::protobuf::Any resource; - resource.PackFrom(cluster); - SetResource(std::move(resource), kCdsTypeUrl, cluster.name()); - } - - void SetEdsResource(const ClusterLoadAssignment& assignment) { - google::protobuf::Any resource; - resource.PackFrom(assignment); - SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name()); - } - - void SetLdsToUseDynamicRds() { - auto listener = default_listener_; - HttpConnectionManager http_connection_manager; - auto* rds = http_connection_manager.mutable_rds(); - 
rds->set_route_config_name(kDefaultRouteConfigurationName); - rds->mutable_config_source()->mutable_ads(); - listener.mutable_api_listener()->mutable_api_listener()->PackFrom( - http_connection_manager); - SetLdsResource(listener); - } - - static Listener BuildListener(const RouteConfiguration& route_config) { - HttpConnectionManager http_connection_manager; - *(http_connection_manager.mutable_route_config()) = route_config; - Listener listener; - listener.set_name(kServerName); - listener.mutable_api_listener()->mutable_api_listener()->PackFrom( - http_connection_manager); - return listener; - } - - static ClusterLoadAssignment BuildEdsResource( - const EdsResourceArgs& args, - const char* eds_service_name = kDefaultEdsServiceName) { + return resource_type_response_state_[kRdsTypeUrl]; + } + + ResponseState cds_response_state() { + grpc_core::MutexLock lock(&ads_mu_); + return resource_type_response_state_[kCdsTypeUrl]; + } + + ResponseState eds_response_state() { + grpc_core::MutexLock lock(&ads_mu_); + return resource_type_response_state_[kEdsTypeUrl]; + } + + void SetResourceIgnore(const TString& type_url) { + grpc_core::MutexLock lock(&ads_mu_); + resource_types_to_ignore_.emplace(type_url); + } + + void UnsetResource(const TString& type_url, const TString& name) { + grpc_core::MutexLock lock(&ads_mu_); + ResourceState& state = resource_map_[type_url][name]; + ++state.version; + state.resource.reset(); + gpr_log(GPR_INFO, "ADS[%p]: Unsetting %s resource %s to version %u", this, + type_url.c_str(), name.c_str(), state.version); + for (SubscriptionState* subscription : state.subscriptions) { + subscription->update_queue->emplace_back(type_url, name); + } + } + + void SetResource(google::protobuf::Any resource, const TString& type_url, + const TString& name) { + grpc_core::MutexLock lock(&ads_mu_); + ResourceState& state = resource_map_[type_url][name]; + ++state.version; + state.resource = std::move(resource); + gpr_log(GPR_INFO, "ADS[%p]: Updating %s resource %s to version %u", this, + type_url.c_str(), name.c_str(), state.version); + for (SubscriptionState* subscription : state.subscriptions) { + subscription->update_queue->emplace_back(type_url, name); + } + } + + void SetLdsResource(const Listener& listener) { + google::protobuf::Any resource; + resource.PackFrom(listener); + SetResource(std::move(resource), kLdsTypeUrl, listener.name()); + } + + void SetRdsResource(const RouteConfiguration& route) { + google::protobuf::Any resource; + resource.PackFrom(route); + SetResource(std::move(resource), kRdsTypeUrl, route.name()); + } + + void SetCdsResource(const Cluster& cluster) { + google::protobuf::Any resource; + resource.PackFrom(cluster); + SetResource(std::move(resource), kCdsTypeUrl, cluster.name()); + } + + void SetEdsResource(const ClusterLoadAssignment& assignment) { + google::protobuf::Any resource; + resource.PackFrom(assignment); + SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name()); + } + + void SetLdsToUseDynamicRds() { + auto listener = default_listener_; + HttpConnectionManager http_connection_manager; + auto* rds = http_connection_manager.mutable_rds(); + rds->set_route_config_name(kDefaultRouteConfigurationName); + rds->mutable_config_source()->mutable_ads(); + listener.mutable_api_listener()->mutable_api_listener()->PackFrom( + http_connection_manager); + SetLdsResource(listener); + } + + static Listener BuildListener(const RouteConfiguration& route_config) { + HttpConnectionManager http_connection_manager; + 
*(http_connection_manager.mutable_route_config()) = route_config; + Listener listener; + listener.set_name(kServerName); + listener.mutable_api_listener()->mutable_api_listener()->PackFrom( + http_connection_manager); + return listener; + } + + static ClusterLoadAssignment BuildEdsResource( + const EdsResourceArgs& args, + const char* eds_service_name = kDefaultEdsServiceName) { ClusterLoadAssignment assignment; - assignment.set_cluster_name(eds_service_name); + assignment.set_cluster_name(eds_service_name); for (const auto& locality : args.locality_list) { auto* endpoints = assignment.add_endpoints(); endpoints->mutable_load_balancing_weight()->set_value(locality.lb_weight); @@ -624,7 +624,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { const int& port = locality.ports[i]; auto* lb_endpoints = endpoints->add_lb_endpoints(); if (locality.health_statuses.size() > i && - locality.health_statuses[i] != HealthStatus::UNKNOWN) { + locality.health_statuses[i] != HealthStatus::UNKNOWN) { lb_endpoints->set_health_status(locality.health_statuses[i]); } auto* endpoint = lb_endpoints->mutable_endpoint(); @@ -637,7 +637,7 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { if (!args.drop_categories.empty()) { auto* policy = assignment.mutable_policy(); for (const auto& p : args.drop_categories) { - const TString& name = p.first; + const TString& name = p.first; const uint32_t parts_per_million = p.second; auto* drop_overload = policy->add_drop_overloads(); drop_overload->set_category(name); @@ -646,23 +646,23 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { drop_percentage->set_denominator(args.drop_denominator); } } - return assignment; - } - - void Start() { - grpc_core::MutexLock lock(&ads_mu_); - ads_done_ = false; - } - - void Shutdown() { - { - grpc_core::MutexLock lock(&ads_mu_); - NotifyDoneWithAdsCallLocked(); - resource_type_response_state_.clear(); - } - gpr_log(GPR_INFO, "ADS[%p]: shut down", this); - } - + return assignment; + } + + void Start() { + grpc_core::MutexLock lock(&ads_mu_); + ads_done_ = false; + } + + void Shutdown() { + { + grpc_core::MutexLock lock(&ads_mu_); + NotifyDoneWithAdsCallLocked(); + resource_type_response_state_.clear(); + } + gpr_log(GPR_INFO, "ADS[%p]: shut down", this); + } + void NotifyDoneWithAdsCall() { grpc_core::MutexLock lock(&ads_mu_); NotifyDoneWithAdsCallLocked(); @@ -675,509 +675,509 @@ class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> { } } - std::set<TString> clients() { - grpc_core::MutexLock lock(&clients_mu_); - return clients_; - } - + std::set<TString> clients() { + grpc_core::MutexLock lock(&clients_mu_); + return clients_; + } + private: - // A queue of resource type/name pairs that have changed since the client - // subscribed to them. - using UpdateQueue = std::deque< - std::pair<TString /* type url */, TString /* resource name */>>; - - // A struct representing a client's subscription to a particular resource. - struct SubscriptionState { - // Version that the client currently knows about. - int current_version = 0; - // The queue upon which to place updates when the resource is updated. - UpdateQueue* update_queue; - }; - - // A struct representing the a client's subscription to all the resources. 
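  // Rough shape of the bookkeeping that follows: SubscriptionMap is kept per
  // client stream and nests type_url -> resource_name -> SubscriptionState,
  // while resource_map_ (a ResourceMap, shared by every stream) keeps, per
  // resource, the latest version plus the set of SubscriptionStates to notify.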
- using SubscriptionNameMap = - std::map<TString /* resource_name */, SubscriptionState>; - using SubscriptionMap = - std::map<TString /* type_url */, SubscriptionNameMap>; - - // A struct representing the current state for a resource: - // - the version of the resource that is set by the SetResource() methods. - // - a list of subscriptions interested in this resource. - struct ResourceState { - int version = 0; - y_absl::optional<google::protobuf::Any> resource; - std::set<SubscriptionState*> subscriptions; - }; - - // A struct representing the current state for all resources: - // LDS, CDS, EDS, and RDS for the class as a whole. - using ResourceNameMap = - std::map<TString /* resource_name */, ResourceState>; - using ResourceMap = std::map<TString /* type_url */, ResourceNameMap>; - - template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse> - class RpcService : public RpcApi::Service { - public: - using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>; - - RpcService(AdsServiceImpl* parent, bool is_v2) - : parent_(parent), is_v2_(is_v2) {} - - Status StreamAggregatedResources(ServerContext* context, - Stream* stream) override { - gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this); - parent_->AddClient(context->peer()); - if (is_v2_) { - parent_->seen_v2_client_ = true; - } else { - parent_->seen_v3_client_ = true; - } - // Resources (type/name pairs) that have changed since the client - // subscribed to them. - UpdateQueue update_queue; - // Resources that the client will be subscribed to keyed by resource type - // url. - SubscriptionMap subscription_map; - [&]() { - { - grpc_core::MutexLock lock(&parent_->ads_mu_); - if (parent_->ads_done_) return; - } - // Balancer shouldn't receive the call credentials metadata. - EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey), - context->client_metadata().end()); - // Current Version map keyed by resource type url. - std::map<TString, int> resource_type_version; - // Creating blocking thread to read from stream. - std::deque<DiscoveryRequest> requests; - bool stream_closed = false; - // Take a reference of the AdsServiceImpl object, reference will go - // out of scope after the reader thread is joined. - std::shared_ptr<AdsServiceImpl> ads_service_impl = - parent_->shared_from_this(); - std::thread reader(std::bind(&RpcService::BlockingRead, this, stream, - &requests, &stream_closed)); - // Main loop to look for requests and updates. - while (true) { - // Look for new requests and and decide what to handle. - y_absl::optional<DiscoveryResponse> response; - // Boolean to keep track if the loop received any work to do: a - // request or an update; regardless whether a response was actually - // sent out. - bool did_work = false; - { - grpc_core::MutexLock lock(&parent_->ads_mu_); - if (stream_closed) break; - if (!requests.empty()) { - DiscoveryRequest request = std::move(requests.front()); - requests.pop_front(); - did_work = true; - gpr_log(GPR_INFO, - "ADS[%p]: Received request for type %s with content %s", - this, request.type_url().c_str(), - request.DebugString().c_str()); - const TString v3_resource_type = - TypeUrlToV3(request.type_url()); - // As long as we are not in shutdown, identify ACK and NACK by - // looking for version information and comparing it to nonce (this - // server ensures they are always set to the same in a response.) 
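          // Concretely: CompleteBuildingDiscoveryResponse() below always sets
          // version_info and nonce to the same integer, so once a request
          // echoes a non-empty response_nonce, version_info == response_nonce
          // marks that version as ACKED and anything else as NACKED, with
          // error_detail carrying the client's rejection message.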
- auto it = - parent_->resource_type_response_state_.find(v3_resource_type); - if (it != parent_->resource_type_response_state_.end()) { - if (!request.response_nonce().empty()) { - it->second.state = - (!request.version_info().empty() && - request.version_info() == request.response_nonce()) - ? ResponseState::ACKED - : ResponseState::NACKED; - } - if (request.has_error_detail()) { - it->second.error_message = request.error_detail().message(); - } - } - // As long as the test did not tell us to ignore this type of - // request, look at all the resource names. - if (parent_->resource_types_to_ignore_.find(v3_resource_type) == - parent_->resource_types_to_ignore_.end()) { - auto& subscription_name_map = - subscription_map[v3_resource_type]; - auto& resource_name_map = - parent_->resource_map_[v3_resource_type]; - std::set<TString> resources_in_current_request; - std::set<TString> resources_added_to_response; - for (const TString& resource_name : - request.resource_names()) { - resources_in_current_request.emplace(resource_name); - auto& subscription_state = - subscription_name_map[resource_name]; - auto& resource_state = resource_name_map[resource_name]; - // Subscribe if needed. - parent_->MaybeSubscribe(v3_resource_type, resource_name, - &subscription_state, &resource_state, - &update_queue); - // Send update if needed. - if (ClientNeedsResourceUpdate(resource_state, - &subscription_state)) { - gpr_log(GPR_INFO, - "ADS[%p]: Sending update for type=%s name=%s " - "version=%d", - this, request.type_url().c_str(), - resource_name.c_str(), resource_state.version); - resources_added_to_response.emplace(resource_name); - if (!response.has_value()) response.emplace(); - if (resource_state.resource.has_value()) { - auto* resource = response->add_resources(); - resource->CopyFrom(resource_state.resource.value()); - if (is_v2_) { - resource->set_type_url(request.type_url()); - } - } - } else { - gpr_log(GPR_INFO, - "ADS[%p]: client does not need update for " - "type=%s name=%s version=%d", - this, request.type_url().c_str(), - resource_name.c_str(), resource_state.version); - } - } - // Process unsubscriptions for any resource no longer - // present in the request's resource list. - parent_->ProcessUnsubscriptions( - v3_resource_type, resources_in_current_request, - &subscription_name_map, &resource_name_map); - // Send response if needed. - if (!resources_added_to_response.empty()) { - CompleteBuildingDiscoveryResponse( - v3_resource_type, request.type_url(), - ++resource_type_version[v3_resource_type], - subscription_name_map, resources_added_to_response, - &response.value()); - } - } - } - } - if (response.has_value()) { - gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this, - response->DebugString().c_str()); - stream->Write(response.value()); - } - response.reset(); - // Look for updates and decide what to handle. 
- { - grpc_core::MutexLock lock(&parent_->ads_mu_); - if (!update_queue.empty()) { - const TString resource_type = - std::move(update_queue.front().first); - const TString resource_name = - std::move(update_queue.front().second); - update_queue.pop_front(); - const TString v2_resource_type = TypeUrlToV2(resource_type); - did_work = true; - gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s", - this, resource_type.c_str(), resource_name.c_str()); - auto& subscription_name_map = subscription_map[resource_type]; - auto& resource_name_map = parent_->resource_map_[resource_type]; - auto it = subscription_name_map.find(resource_name); - if (it != subscription_name_map.end()) { - SubscriptionState& subscription_state = it->second; - ResourceState& resource_state = - resource_name_map[resource_name]; - if (ClientNeedsResourceUpdate(resource_state, - &subscription_state)) { - gpr_log( - GPR_INFO, - "ADS[%p]: Sending update for type=%s name=%s version=%d", - this, resource_type.c_str(), resource_name.c_str(), - resource_state.version); - response.emplace(); - if (resource_state.resource.has_value()) { - auto* resource = response->add_resources(); - resource->CopyFrom(resource_state.resource.value()); - if (is_v2_) { - resource->set_type_url(v2_resource_type); - } - } - CompleteBuildingDiscoveryResponse( - resource_type, v2_resource_type, - ++resource_type_version[resource_type], - subscription_name_map, {resource_name}, - &response.value()); - } - } - } - } - if (response.has_value()) { - gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this, - response->DebugString().c_str()); - stream->Write(response.value()); - } - // If we didn't find anything to do, delay before the next loop - // iteration; otherwise, check whether we should exit and then - // immediately continue. - gpr_timespec deadline = - grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10); - { - grpc_core::MutexLock lock(&parent_->ads_mu_); - if (!parent_->ads_cond_.WaitUntil( - &parent_->ads_mu_, [this] { return parent_->ads_done_; }, - deadline)) { - break; - } - } - } - reader.join(); - }(); - // Clean up any subscriptions that were still active when the call - // finished. - { - grpc_core::MutexLock lock(&parent_->ads_mu_); - for (auto& p : subscription_map) { - const TString& type_url = p.first; - SubscriptionNameMap& subscription_name_map = p.second; - for (auto& q : subscription_name_map) { - const TString& resource_name = q.first; - SubscriptionState& subscription_state = q.second; - ResourceState& resource_state = - parent_->resource_map_[type_url][resource_name]; - resource_state.subscriptions.erase(&subscription_state); - } - } - } - gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this); - parent_->RemoveClient(context->peer()); - return Status::OK; - } - - private: - static TString TypeUrlToV2(const TString& resource_type) { - if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl; - if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl; - if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl; - if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl; - return resource_type; - } - - static TString TypeUrlToV3(const TString& resource_type) { - if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl; - if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl; - if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl; - if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl; - return resource_type; - } - - // Starting a thread to do blocking read on the stream until cancel. 
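  // The read side runs on its own thread because the loop above must keep
  // draining update_queue and watching ads_done_ even when no request is in
  // flight; a blocking Read() inline would stall resource pushes. The reader
  // only appends to the shared `requests` deque under ads_mu_ and raises
  // stream_closed once the client goes away.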
- void BlockingRead(Stream* stream, std::deque<DiscoveryRequest>* requests, - bool* stream_closed) { - DiscoveryRequest request; - bool seen_first_request = false; - while (stream->Read(&request)) { - if (!seen_first_request) { - EXPECT_TRUE(request.has_node()); - ASSERT_FALSE(request.node().client_features().empty()); - EXPECT_EQ(request.node().client_features(0), - "envoy.lb.does_not_support_overprovisioning"); - CheckBuildVersion(request); - seen_first_request = true; - } - { - grpc_core::MutexLock lock(&parent_->ads_mu_); - requests->emplace_back(std::move(request)); - } - } - gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this); - grpc_core::MutexLock lock(&parent_->ads_mu_); - *stream_closed = true; - } - - static void CheckBuildVersion( - const ::envoy::api::v2::DiscoveryRequest& request) { - EXPECT_FALSE(request.node().build_version().empty()); - } - - static void CheckBuildVersion( - const ::envoy::service::discovery::v3::DiscoveryRequest& request) {} - - // Completing the building a DiscoveryResponse by adding common information - // for all resources and by adding all subscribed resources for LDS and CDS. - void CompleteBuildingDiscoveryResponse( - const TString& resource_type, const TString& v2_resource_type, - const int version, const SubscriptionNameMap& subscription_name_map, - const std::set<TString>& resources_added_to_response, - DiscoveryResponse* response) { - auto& response_state = - parent_->resource_type_response_state_[resource_type]; - if (response_state.state == ResponseState::NOT_SENT) { - response_state.state = ResponseState::SENT; - } - response->set_type_url(is_v2_ ? v2_resource_type : resource_type); - response->set_version_info(y_absl::StrCat(version)); - response->set_nonce(y_absl::StrCat(version)); - if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) { - // For LDS and CDS we must send back all subscribed resources - // (even the unchanged ones) - for (const auto& p : subscription_name_map) { - const TString& resource_name = p.first; - if (resources_added_to_response.find(resource_name) == - resources_added_to_response.end()) { - const ResourceState& resource_state = - parent_->resource_map_[resource_type][resource_name]; - if (resource_state.resource.has_value()) { - auto* resource = response->add_resources(); - resource->CopyFrom(resource_state.resource.value()); - if (is_v2_) { - resource->set_type_url(v2_resource_type); - } - } - } - } - } - } - - AdsServiceImpl* parent_; - const bool is_v2_; - }; - - // Checks whether the client needs to receive a newer version of - // the resource. If so, updates subscription_state->current_version and - // returns true. - static bool ClientNeedsResourceUpdate(const ResourceState& resource_state, - SubscriptionState* subscription_state) { - if (subscription_state->current_version < resource_state.version) { - subscription_state->current_version = resource_state.version; - return true; - } - return false; - } - - // Subscribes to a resource if not already subscribed: - // 1. Sets the update_queue field in subscription_state. - // 2. Adds subscription_state to resource_state->subscriptions. - void MaybeSubscribe(const TString& resource_type, - const TString& resource_name, - SubscriptionState* subscription_state, - ResourceState* resource_state, - UpdateQueue* update_queue) { - // The update_queue will be null if we were not previously subscribed. 
- if (subscription_state->update_queue != nullptr) return; - subscription_state->update_queue = update_queue; - resource_state->subscriptions.emplace(subscription_state); - gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p", - this, resource_type.c_str(), resource_name.c_str(), - &subscription_state); - } - - // Removes subscriptions for resources no longer present in the - // current request. - void ProcessUnsubscriptions( - const TString& resource_type, - const std::set<TString>& resources_in_current_request, - SubscriptionNameMap* subscription_name_map, - ResourceNameMap* resource_name_map) { - for (auto it = subscription_name_map->begin(); - it != subscription_name_map->end();) { - const TString& resource_name = it->first; - SubscriptionState& subscription_state = it->second; - if (resources_in_current_request.find(resource_name) != - resources_in_current_request.end()) { - ++it; - continue; - } - gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p", - this, resource_type.c_str(), resource_name.c_str(), - &subscription_state); - auto resource_it = resource_name_map->find(resource_name); - GPR_ASSERT(resource_it != resource_name_map->end()); - auto& resource_state = resource_it->second; - resource_state.subscriptions.erase(&subscription_state); - if (resource_state.subscriptions.empty() && - !resource_state.resource.has_value()) { - resource_name_map->erase(resource_it); - } - it = subscription_name_map->erase(it); - } - } - - void AddClient(const TString& client) { - grpc_core::MutexLock lock(&clients_mu_); - clients_.insert(client); - } - - void RemoveClient(const TString& client) { - grpc_core::MutexLock lock(&clients_mu_); - clients_.erase(client); - } - - RpcService<::envoy::service::discovery::v2::AggregatedDiscoveryService, - ::envoy::api::v2::DiscoveryRequest, - ::envoy::api::v2::DiscoveryResponse> - v2_rpc_service_; - RpcService<::envoy::service::discovery::v3::AggregatedDiscoveryService, - ::envoy::service::discovery::v3::DiscoveryRequest, - ::envoy::service::discovery::v3::DiscoveryResponse> - v3_rpc_service_; - - std::atomic_bool seen_v2_client_{false}; - std::atomic_bool seen_v3_client_{false}; - + // A queue of resource type/name pairs that have changed since the client + // subscribed to them. + using UpdateQueue = std::deque< + std::pair<TString /* type url */, TString /* resource name */>>; + + // A struct representing a client's subscription to a particular resource. + struct SubscriptionState { + // Version that the client currently knows about. + int current_version = 0; + // The queue upon which to place updates when the resource is updated. + UpdateQueue* update_queue; + }; + + // A struct representing the a client's subscription to all the resources. + using SubscriptionNameMap = + std::map<TString /* resource_name */, SubscriptionState>; + using SubscriptionMap = + std::map<TString /* type_url */, SubscriptionNameMap>; + + // A struct representing the current state for a resource: + // - the version of the resource that is set by the SetResource() methods. + // - a list of subscriptions interested in this resource. + struct ResourceState { + int version = 0; + y_absl::optional<google::protobuf::Any> resource; + std::set<SubscriptionState*> subscriptions; + }; + + // A struct representing the current state for all resources: + // LDS, CDS, EDS, and RDS for the class as a whole. 
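// A minimal, std-only sketch of the bookkeeping declared above, assuming
// hypothetical names (Subscriber, FakeResource, SetResourceSketch): a resource
// carries a version, an optional payload, and its subscribers; bumping the
// version pushes a (type url, resource name) pair onto each subscriber's
// update queue. The real code uses TString, y_absl::optional and
// google::protobuf::Any instead of the std types used here.
#include <deque>
#include <optional>
#include <set>
#include <string>
#include <utility>

using SketchUpdateQueue = std::deque<std::pair<std::string, std::string>>;

struct Subscriber {
  int current_version = 0;                    // last version seen by the client
  SketchUpdateQueue* update_queue = nullptr;  // where changes are announced
};

struct FakeResource {
  int version = 0;
  std::optional<std::string> payload;  // stands in for google::protobuf::Any
  std::set<Subscriber*> subscriptions;
};

// Roughly what a SetResource()-style mutator does: bump the version and tell
// every subscriber that (type_url, name) changed.
void SetResourceSketch(const std::string& type_url, const std::string& name,
                       std::string payload, FakeResource* resource) {
  ++resource->version;
  resource->payload = std::move(payload);
  for (Subscriber* sub : resource->subscriptions) {
    if (sub->update_queue != nullptr) {
      sub->update_queue->emplace_back(type_url, name);
    }
  }
}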
+ using ResourceNameMap = + std::map<TString /* resource_name */, ResourceState>; + using ResourceMap = std::map<TString /* type_url */, ResourceNameMap>; + + template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse> + class RpcService : public RpcApi::Service { + public: + using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>; + + RpcService(AdsServiceImpl* parent, bool is_v2) + : parent_(parent), is_v2_(is_v2) {} + + Status StreamAggregatedResources(ServerContext* context, + Stream* stream) override { + gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this); + parent_->AddClient(context->peer()); + if (is_v2_) { + parent_->seen_v2_client_ = true; + } else { + parent_->seen_v3_client_ = true; + } + // Resources (type/name pairs) that have changed since the client + // subscribed to them. + UpdateQueue update_queue; + // Resources that the client will be subscribed to keyed by resource type + // url. + SubscriptionMap subscription_map; + [&]() { + { + grpc_core::MutexLock lock(&parent_->ads_mu_); + if (parent_->ads_done_) return; + } + // Balancer shouldn't receive the call credentials metadata. + EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey), + context->client_metadata().end()); + // Current Version map keyed by resource type url. + std::map<TString, int> resource_type_version; + // Creating blocking thread to read from stream. + std::deque<DiscoveryRequest> requests; + bool stream_closed = false; + // Take a reference of the AdsServiceImpl object, reference will go + // out of scope after the reader thread is joined. + std::shared_ptr<AdsServiceImpl> ads_service_impl = + parent_->shared_from_this(); + std::thread reader(std::bind(&RpcService::BlockingRead, this, stream, + &requests, &stream_closed)); + // Main loop to look for requests and updates. + while (true) { + // Look for new requests and and decide what to handle. + y_absl::optional<DiscoveryResponse> response; + // Boolean to keep track if the loop received any work to do: a + // request or an update; regardless whether a response was actually + // sent out. + bool did_work = false; + { + grpc_core::MutexLock lock(&parent_->ads_mu_); + if (stream_closed) break; + if (!requests.empty()) { + DiscoveryRequest request = std::move(requests.front()); + requests.pop_front(); + did_work = true; + gpr_log(GPR_INFO, + "ADS[%p]: Received request for type %s with content %s", + this, request.type_url().c_str(), + request.DebugString().c_str()); + const TString v3_resource_type = + TypeUrlToV3(request.type_url()); + // As long as we are not in shutdown, identify ACK and NACK by + // looking for version information and comparing it to nonce (this + // server ensures they are always set to the same in a response.) + auto it = + parent_->resource_type_response_state_.find(v3_resource_type); + if (it != parent_->resource_type_response_state_.end()) { + if (!request.response_nonce().empty()) { + it->second.state = + (!request.version_info().empty() && + request.version_info() == request.response_nonce()) + ? ResponseState::ACKED + : ResponseState::NACKED; + } + if (request.has_error_detail()) { + it->second.error_message = request.error_detail().message(); + } + } + // As long as the test did not tell us to ignore this type of + // request, look at all the resource names. 
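// The ACK/NACK rule used in the request handling above, isolated as a small
// std-only sketch with hypothetical names (AckState, ClassifyRequest): this
// fake server stamps every response with version == nonce, so a request that
// echoes a nonce is an ACK only when its version_info matches that nonce and
// a NACK otherwise; a request without a nonce carries no verdict.
#include <string>

enum class AckState { kNotSent, kSent, kAcked, kNacked };

AckState ClassifyRequest(AckState previous, const std::string& version_info,
                         const std::string& response_nonce) {
  if (response_nonce.empty()) return previous;  // initial request: no verdict
  return (!version_info.empty() && version_info == response_nonce)
             ? AckState::kAcked
             : AckState::kNacked;
}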
+ if (parent_->resource_types_to_ignore_.find(v3_resource_type) == + parent_->resource_types_to_ignore_.end()) { + auto& subscription_name_map = + subscription_map[v3_resource_type]; + auto& resource_name_map = + parent_->resource_map_[v3_resource_type]; + std::set<TString> resources_in_current_request; + std::set<TString> resources_added_to_response; + for (const TString& resource_name : + request.resource_names()) { + resources_in_current_request.emplace(resource_name); + auto& subscription_state = + subscription_name_map[resource_name]; + auto& resource_state = resource_name_map[resource_name]; + // Subscribe if needed. + parent_->MaybeSubscribe(v3_resource_type, resource_name, + &subscription_state, &resource_state, + &update_queue); + // Send update if needed. + if (ClientNeedsResourceUpdate(resource_state, + &subscription_state)) { + gpr_log(GPR_INFO, + "ADS[%p]: Sending update for type=%s name=%s " + "version=%d", + this, request.type_url().c_str(), + resource_name.c_str(), resource_state.version); + resources_added_to_response.emplace(resource_name); + if (!response.has_value()) response.emplace(); + if (resource_state.resource.has_value()) { + auto* resource = response->add_resources(); + resource->CopyFrom(resource_state.resource.value()); + if (is_v2_) { + resource->set_type_url(request.type_url()); + } + } + } else { + gpr_log(GPR_INFO, + "ADS[%p]: client does not need update for " + "type=%s name=%s version=%d", + this, request.type_url().c_str(), + resource_name.c_str(), resource_state.version); + } + } + // Process unsubscriptions for any resource no longer + // present in the request's resource list. + parent_->ProcessUnsubscriptions( + v3_resource_type, resources_in_current_request, + &subscription_name_map, &resource_name_map); + // Send response if needed. + if (!resources_added_to_response.empty()) { + CompleteBuildingDiscoveryResponse( + v3_resource_type, request.type_url(), + ++resource_type_version[v3_resource_type], + subscription_name_map, resources_added_to_response, + &response.value()); + } + } + } + } + if (response.has_value()) { + gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this, + response->DebugString().c_str()); + stream->Write(response.value()); + } + response.reset(); + // Look for updates and decide what to handle. 
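// The unsubscription step in isolation, as a std-only sketch with hypothetical
// names (DropStaleSubscriptions, an int standing in for SubscriptionState):
// any resource we were tracking that is no longer named in the current request
// gets dropped, which is the same set difference ProcessUnsubscriptions()
// computes further below.
#include <map>
#include <set>
#include <string>

void DropStaleSubscriptions(const std::set<std::string>& names_in_request,
                            std::map<std::string, int>* subscriptions) {
  for (auto it = subscriptions->begin(); it != subscriptions->end();) {
    if (names_in_request.count(it->first) == 0) {
      it = subscriptions->erase(it);  // no longer requested: unsubscribe
    } else {
      ++it;
    }
  }
}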
+ { + grpc_core::MutexLock lock(&parent_->ads_mu_); + if (!update_queue.empty()) { + const TString resource_type = + std::move(update_queue.front().first); + const TString resource_name = + std::move(update_queue.front().second); + update_queue.pop_front(); + const TString v2_resource_type = TypeUrlToV2(resource_type); + did_work = true; + gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s", + this, resource_type.c_str(), resource_name.c_str()); + auto& subscription_name_map = subscription_map[resource_type]; + auto& resource_name_map = parent_->resource_map_[resource_type]; + auto it = subscription_name_map.find(resource_name); + if (it != subscription_name_map.end()) { + SubscriptionState& subscription_state = it->second; + ResourceState& resource_state = + resource_name_map[resource_name]; + if (ClientNeedsResourceUpdate(resource_state, + &subscription_state)) { + gpr_log( + GPR_INFO, + "ADS[%p]: Sending update for type=%s name=%s version=%d", + this, resource_type.c_str(), resource_name.c_str(), + resource_state.version); + response.emplace(); + if (resource_state.resource.has_value()) { + auto* resource = response->add_resources(); + resource->CopyFrom(resource_state.resource.value()); + if (is_v2_) { + resource->set_type_url(v2_resource_type); + } + } + CompleteBuildingDiscoveryResponse( + resource_type, v2_resource_type, + ++resource_type_version[resource_type], + subscription_name_map, {resource_name}, + &response.value()); + } + } + } + } + if (response.has_value()) { + gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this, + response->DebugString().c_str()); + stream->Write(response.value()); + } + // If we didn't find anything to do, delay before the next loop + // iteration; otherwise, check whether we should exit and then + // immediately continue. + gpr_timespec deadline = + grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10); + { + grpc_core::MutexLock lock(&parent_->ads_mu_); + if (!parent_->ads_cond_.WaitUntil( + &parent_->ads_mu_, [this] { return parent_->ads_done_; }, + deadline)) { + break; + } + } + } + reader.join(); + }(); + // Clean up any subscriptions that were still active when the call + // finished. + { + grpc_core::MutexLock lock(&parent_->ads_mu_); + for (auto& p : subscription_map) { + const TString& type_url = p.first; + SubscriptionNameMap& subscription_name_map = p.second; + for (auto& q : subscription_name_map) { + const TString& resource_name = q.first; + SubscriptionState& subscription_state = q.second; + ResourceState& resource_state = + parent_->resource_map_[type_url][resource_name]; + resource_state.subscriptions.erase(&subscription_state); + } + } + } + gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this); + parent_->RemoveClient(context->peer()); + return Status::OK; + } + + private: + static TString TypeUrlToV2(const TString& resource_type) { + if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl; + if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl; + if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl; + if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl; + return resource_type; + } + + static TString TypeUrlToV3(const TString& resource_type) { + if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl; + if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl; + if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl; + if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl; + return resource_type; + } + + // Starting a thread to do blocking read on the stream until cancel. 
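// The overall shape of the main loop above, reduced to std primitives with
// hypothetical names (LoopState, RunLoop): handle at most one queued item per
// pass, remember whether any work was done, then wait on a "done" flag with a
// deadline of 0 ms when busy or 10 ms when idle. wait_for returning true means
// the done predicate fired and the loop exits; the real code uses
// grpc_core::Mutex/CondVar and grpc deadlines instead.
#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

struct LoopState {
  std::mutex mu;
  std::condition_variable cv;
  std::deque<int> work;  // stands in for the request/update queues
  bool done = false;
};

void RunLoop(LoopState* s) {
  while (true) {
    bool did_work = false;
    {
      std::lock_guard<std::mutex> lock(s->mu);
      if (!s->work.empty()) {
        s->work.pop_front();  // handle one queued item per pass
        did_work = true;
      }
    }
    std::unique_lock<std::mutex> lock(s->mu);
    if (s->cv.wait_for(lock, std::chrono::milliseconds(did_work ? 0 : 10),
                       [s] { return s->done; })) {
      break;  // told to shut down
    }
  }
}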
+ void BlockingRead(Stream* stream, std::deque<DiscoveryRequest>* requests, + bool* stream_closed) { + DiscoveryRequest request; + bool seen_first_request = false; + while (stream->Read(&request)) { + if (!seen_first_request) { + EXPECT_TRUE(request.has_node()); + ASSERT_FALSE(request.node().client_features().empty()); + EXPECT_EQ(request.node().client_features(0), + "envoy.lb.does_not_support_overprovisioning"); + CheckBuildVersion(request); + seen_first_request = true; + } + { + grpc_core::MutexLock lock(&parent_->ads_mu_); + requests->emplace_back(std::move(request)); + } + } + gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this); + grpc_core::MutexLock lock(&parent_->ads_mu_); + *stream_closed = true; + } + + static void CheckBuildVersion( + const ::envoy::api::v2::DiscoveryRequest& request) { + EXPECT_FALSE(request.node().build_version().empty()); + } + + static void CheckBuildVersion( + const ::envoy::service::discovery::v3::DiscoveryRequest& request) {} + + // Completing the building a DiscoveryResponse by adding common information + // for all resources and by adding all subscribed resources for LDS and CDS. + void CompleteBuildingDiscoveryResponse( + const TString& resource_type, const TString& v2_resource_type, + const int version, const SubscriptionNameMap& subscription_name_map, + const std::set<TString>& resources_added_to_response, + DiscoveryResponse* response) { + auto& response_state = + parent_->resource_type_response_state_[resource_type]; + if (response_state.state == ResponseState::NOT_SENT) { + response_state.state = ResponseState::SENT; + } + response->set_type_url(is_v2_ ? v2_resource_type : resource_type); + response->set_version_info(y_absl::StrCat(version)); + response->set_nonce(y_absl::StrCat(version)); + if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) { + // For LDS and CDS we must send back all subscribed resources + // (even the unchanged ones) + for (const auto& p : subscription_name_map) { + const TString& resource_name = p.first; + if (resources_added_to_response.find(resource_name) == + resources_added_to_response.end()) { + const ResourceState& resource_state = + parent_->resource_map_[resource_type][resource_name]; + if (resource_state.resource.has_value()) { + auto* resource = response->add_resources(); + resource->CopyFrom(resource_state.resource.value()); + if (is_v2_) { + resource->set_type_url(v2_resource_type); + } + } + } + } + } + } + + AdsServiceImpl* parent_; + const bool is_v2_; + }; + + // Checks whether the client needs to receive a newer version of + // the resource. If so, updates subscription_state->current_version and + // returns true. + static bool ClientNeedsResourceUpdate(const ResourceState& resource_state, + SubscriptionState* subscription_state) { + if (subscription_state->current_version < resource_state.version) { + subscription_state->current_version = resource_state.version; + return true; + } + return false; + } + + // Subscribes to a resource if not already subscribed: + // 1. Sets the update_queue field in subscription_state. + // 2. Adds subscription_state to resource_state->subscriptions. + void MaybeSubscribe(const TString& resource_type, + const TString& resource_name, + SubscriptionState* subscription_state, + ResourceState* resource_state, + UpdateQueue* update_queue) { + // The update_queue will be null if we were not previously subscribed. 
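// What CompleteBuildingDiscoveryResponse() above encodes, reduced to a toy
// std-only sketch with hypothetical types (ToyResponse, BuildToyResponse):
// version and nonce are stamped with the same counter, which is exactly what
// makes the "version_info == response_nonce" ACK check work, and for
// state-of-the-world types such as LDS and CDS every subscribed resource is
// re-sent even if unchanged.
#include <map>
#include <set>
#include <string>
#include <vector>

struct ToyResponse {
  std::string type_url;
  std::string version_info;
  std::string nonce;
  std::vector<std::string> resources;  // stands in for repeated Any
};

ToyResponse BuildToyResponse(
    const std::string& type_url, int version, bool is_state_of_the_world,
    const std::map<std::string, std::string>& subscribed_resources,
    const std::set<std::string>& changed_names) {
  ToyResponse response;
  response.type_url = type_url;
  response.version_info = std::to_string(version);
  response.nonce = std::to_string(version);  // same value on purpose
  for (const auto& entry : subscribed_resources) {
    if (is_state_of_the_world || changed_names.count(entry.first) > 0) {
      response.resources.push_back(entry.second);
    }
  }
  return response;
}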
+ if (subscription_state->update_queue != nullptr) return; + subscription_state->update_queue = update_queue; + resource_state->subscriptions.emplace(subscription_state); + gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p", + this, resource_type.c_str(), resource_name.c_str(), + &subscription_state); + } + + // Removes subscriptions for resources no longer present in the + // current request. + void ProcessUnsubscriptions( + const TString& resource_type, + const std::set<TString>& resources_in_current_request, + SubscriptionNameMap* subscription_name_map, + ResourceNameMap* resource_name_map) { + for (auto it = subscription_name_map->begin(); + it != subscription_name_map->end();) { + const TString& resource_name = it->first; + SubscriptionState& subscription_state = it->second; + if (resources_in_current_request.find(resource_name) != + resources_in_current_request.end()) { + ++it; + continue; + } + gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p", + this, resource_type.c_str(), resource_name.c_str(), + &subscription_state); + auto resource_it = resource_name_map->find(resource_name); + GPR_ASSERT(resource_it != resource_name_map->end()); + auto& resource_state = resource_it->second; + resource_state.subscriptions.erase(&subscription_state); + if (resource_state.subscriptions.empty() && + !resource_state.resource.has_value()) { + resource_name_map->erase(resource_it); + } + it = subscription_name_map->erase(it); + } + } + + void AddClient(const TString& client) { + grpc_core::MutexLock lock(&clients_mu_); + clients_.insert(client); + } + + void RemoveClient(const TString& client) { + grpc_core::MutexLock lock(&clients_mu_); + clients_.erase(client); + } + + RpcService<::envoy::service::discovery::v2::AggregatedDiscoveryService, + ::envoy::api::v2::DiscoveryRequest, + ::envoy::api::v2::DiscoveryResponse> + v2_rpc_service_; + RpcService<::envoy::service::discovery::v3::AggregatedDiscoveryService, + ::envoy::service::discovery::v3::DiscoveryRequest, + ::envoy::service::discovery::v3::DiscoveryResponse> + v3_rpc_service_; + + std::atomic_bool seen_v2_client_{false}; + std::atomic_bool seen_v3_client_{false}; + grpc_core::CondVar ads_cond_; // Protect the members below. grpc_core::Mutex ads_mu_; bool ads_done_ = false; - Listener default_listener_; - RouteConfiguration default_route_config_; - Cluster default_cluster_; - std::map<TString /* type_url */, ResponseState> - resource_type_response_state_; - std::set<TString /*resource_type*/> resource_types_to_ignore_; - // An instance data member containing the current state of all resources. - // Note that an entry will exist whenever either of the following is true: - // - The resource exists (i.e., has been created by SetResource() and has not - // yet been destroyed by UnsetResource()). - // - There is at least one subscription for the resource. - ResourceMap resource_map_; - - grpc_core::Mutex clients_mu_; - std::set<TString> clients_; + Listener default_listener_; + RouteConfiguration default_route_config_; + Cluster default_cluster_; + std::map<TString /* type_url */, ResponseState> + resource_type_response_state_; + std::set<TString /*resource_type*/> resource_types_to_ignore_; + // An instance data member containing the current state of all resources. + // Note that an entry will exist whenever either of the following is true: + // - The resource exists (i.e., has been created by SetResource() and has not + // yet been destroyed by UnsetResource()). 
+ // - There is at least one subscription for the resource. + ResourceMap resource_map_; + + grpc_core::Mutex clients_mu_; + std::set<TString> clients_; }; -class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> { +class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> { public: explicit LrsServiceImpl(int client_load_reporting_interval_seconds) - : v2_rpc_service_(this), - v3_rpc_service_(this), - client_load_reporting_interval_seconds_( - client_load_reporting_interval_seconds), - cluster_names_({kDefaultClusterName}) {} - - ::envoy::service::load_stats::v2::LoadReportingService::Service* - v2_rpc_service() { - return &v2_rpc_service_; - } - - ::envoy::service::load_stats::v3::LoadReportingService::Service* - v3_rpc_service() { - return &v3_rpc_service_; - } - - size_t request_count() { - return v2_rpc_service_.request_count() + v3_rpc_service_.request_count(); - } - - size_t response_count() { - return v2_rpc_service_.response_count() + v3_rpc_service_.response_count(); - } - - // Must be called before the LRS call is started. - void set_send_all_clusters(bool send_all_clusters) { - send_all_clusters_ = send_all_clusters; - } - void set_cluster_names(const std::set<TString>& cluster_names) { - cluster_names_ = cluster_names; - } - + : v2_rpc_service_(this), + v3_rpc_service_(this), + client_load_reporting_interval_seconds_( + client_load_reporting_interval_seconds), + cluster_names_({kDefaultClusterName}) {} + + ::envoy::service::load_stats::v2::LoadReportingService::Service* + v2_rpc_service() { + return &v2_rpc_service_; + } + + ::envoy::service::load_stats::v3::LoadReportingService::Service* + v3_rpc_service() { + return &v3_rpc_service_; + } + + size_t request_count() { + return v2_rpc_service_.request_count() + v3_rpc_service_.request_count(); + } + + size_t response_count() { + return v2_rpc_service_.response_count() + v3_rpc_service_.response_count(); + } + + // Must be called before the LRS call is started. 
+ void set_send_all_clusters(bool send_all_clusters) { + send_all_clusters_ = send_all_clusters; + } + void set_cluster_names(const std::set<TString>& cluster_names) { + cluster_names_ = cluster_names; + } + void Start() { - lrs_done_ = false; - result_queue_.clear(); + lrs_done_ = false; + result_queue_.clear(); } void Shutdown() { @@ -1188,18 +1188,18 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> { gpr_log(GPR_INFO, "LRS[%p]: shut down", this); } - std::vector<ClientStats> WaitForLoadReport() { + std::vector<ClientStats> WaitForLoadReport() { grpc_core::MutexLock lock(&load_report_mu_); - grpc_core::CondVar cv; - if (result_queue_.empty()) { - load_report_cond_ = &cv; - load_report_cond_->WaitUntil(&load_report_mu_, - [this] { return !result_queue_.empty(); }); - load_report_cond_ = nullptr; - } - std::vector<ClientStats> result = std::move(result_queue_.front()); - result_queue_.pop_front(); - return result; + grpc_core::CondVar cv; + if (result_queue_.empty()) { + load_report_cond_ = &cv; + load_report_cond_->WaitUntil(&load_report_mu_, + [this] { return !result_queue_.empty(); }); + load_report_cond_ = nullptr; + } + std::vector<ClientStats> result = std::move(result_queue_.front()); + result_queue_.pop_front(); + return result; } void NotifyDoneWithLrsCall() { @@ -1207,134 +1207,134 @@ class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> { NotifyDoneWithLrsCallLocked(); } - private: - template <class RpcApi, class LoadStatsRequest, class LoadStatsResponse> - class RpcService : public CountedService<typename RpcApi::Service> { - public: - using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>; - - explicit RpcService(LrsServiceImpl* parent) : parent_(parent) {} - - Status StreamLoadStats(ServerContext* /*context*/, - Stream* stream) override { - gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this); - EXPECT_GT(parent_->client_load_reporting_interval_seconds_, 0); - // Take a reference of the LrsServiceImpl object, reference will go - // out of scope after this method exits. - std::shared_ptr<LrsServiceImpl> lrs_service_impl = - parent_->shared_from_this(); - // Read initial request. - LoadStatsRequest request; - if (stream->Read(&request)) { - CountedService<typename RpcApi::Service>::IncreaseRequestCount(); - // Verify client features. - EXPECT_THAT( - request.node().client_features(), - ::testing::Contains("envoy.lrs.supports_send_all_clusters")); - // Send initial response. - LoadStatsResponse response; - if (parent_->send_all_clusters_) { - response.set_send_all_clusters(true); - } else { - for (const TString& cluster_name : parent_->cluster_names_) { - response.add_clusters(cluster_name); - } - } - response.mutable_load_reporting_interval()->set_seconds( - parent_->client_load_reporting_interval_seconds_); - stream->Write(response); - CountedService<typename RpcApi::Service>::IncreaseResponseCount(); - // Wait for report. - request.Clear(); - while (stream->Read(&request)) { - gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s", - this, request.DebugString().c_str()); - std::vector<ClientStats> stats; - for (const auto& cluster_stats : request.cluster_stats()) { - stats.emplace_back(cluster_stats); - } - grpc_core::MutexLock lock(&parent_->load_report_mu_); - parent_->result_queue_.emplace_back(std::move(stats)); - if (parent_->load_report_cond_ != nullptr) { - parent_->load_report_cond_->Signal(); - } - } - // Wait until notified done. 
- grpc_core::MutexLock lock(&parent_->lrs_mu_); - parent_->lrs_cv_.WaitUntil(&parent_->lrs_mu_, - [this] { return parent_->lrs_done_; }); - } - gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this); - return Status::OK; - } - - private: - LrsServiceImpl* parent_; - }; - + private: + template <class RpcApi, class LoadStatsRequest, class LoadStatsResponse> + class RpcService : public CountedService<typename RpcApi::Service> { + public: + using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>; + + explicit RpcService(LrsServiceImpl* parent) : parent_(parent) {} + + Status StreamLoadStats(ServerContext* /*context*/, + Stream* stream) override { + gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this); + EXPECT_GT(parent_->client_load_reporting_interval_seconds_, 0); + // Take a reference of the LrsServiceImpl object, reference will go + // out of scope after this method exits. + std::shared_ptr<LrsServiceImpl> lrs_service_impl = + parent_->shared_from_this(); + // Read initial request. + LoadStatsRequest request; + if (stream->Read(&request)) { + CountedService<typename RpcApi::Service>::IncreaseRequestCount(); + // Verify client features. + EXPECT_THAT( + request.node().client_features(), + ::testing::Contains("envoy.lrs.supports_send_all_clusters")); + // Send initial response. + LoadStatsResponse response; + if (parent_->send_all_clusters_) { + response.set_send_all_clusters(true); + } else { + for (const TString& cluster_name : parent_->cluster_names_) { + response.add_clusters(cluster_name); + } + } + response.mutable_load_reporting_interval()->set_seconds( + parent_->client_load_reporting_interval_seconds_); + stream->Write(response); + CountedService<typename RpcApi::Service>::IncreaseResponseCount(); + // Wait for report. + request.Clear(); + while (stream->Read(&request)) { + gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s", + this, request.DebugString().c_str()); + std::vector<ClientStats> stats; + for (const auto& cluster_stats : request.cluster_stats()) { + stats.emplace_back(cluster_stats); + } + grpc_core::MutexLock lock(&parent_->load_report_mu_); + parent_->result_queue_.emplace_back(std::move(stats)); + if (parent_->load_report_cond_ != nullptr) { + parent_->load_report_cond_->Signal(); + } + } + // Wait until notified done. 
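// The handoff between the LRS stream thread and the test thread, reduced to a
// std-only sketch with hypothetical names (ReportQueue, an int payload): the
// stream thread pushes a batch of stats and signals, while a
// WaitForLoadReport()-style consumer blocks until a batch is available and
// pops it. The real code uses grpc_core::Mutex/CondVar and
// std::vector<ClientStats>.
#include <condition_variable>
#include <deque>
#include <mutex>
#include <utility>
#include <vector>

struct ReportQueue {
  std::mutex mu;
  std::condition_variable cv;
  std::deque<std::vector<int>> batches;  // stands in for ClientStats batches

  void Push(std::vector<int> batch) {
    std::lock_guard<std::mutex> lock(mu);
    batches.push_back(std::move(batch));
    cv.notify_one();
  }

  std::vector<int> WaitAndPop() {
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [this] { return !batches.empty(); });
    std::vector<int> result = std::move(batches.front());
    batches.pop_front();
    return result;
  }
};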
+ grpc_core::MutexLock lock(&parent_->lrs_mu_); + parent_->lrs_cv_.WaitUntil(&parent_->lrs_mu_, + [this] { return parent_->lrs_done_; }); + } + gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this); + return Status::OK; + } + + private: + LrsServiceImpl* parent_; + }; + void NotifyDoneWithLrsCallLocked() { - if (!lrs_done_) { - lrs_done_ = true; + if (!lrs_done_) { + lrs_done_ = true; lrs_cv_.Broadcast(); } } - RpcService<::envoy::service::load_stats::v2::LoadReportingService, - ::envoy::service::load_stats::v2::LoadStatsRequest, - ::envoy::service::load_stats::v2::LoadStatsResponse> - v2_rpc_service_; - RpcService<::envoy::service::load_stats::v3::LoadReportingService, - ::envoy::service::load_stats::v3::LoadStatsRequest, - ::envoy::service::load_stats::v3::LoadStatsResponse> - v3_rpc_service_; - + RpcService<::envoy::service::load_stats::v2::LoadReportingService, + ::envoy::service::load_stats::v2::LoadStatsRequest, + ::envoy::service::load_stats::v2::LoadStatsResponse> + v2_rpc_service_; + RpcService<::envoy::service::load_stats::v3::LoadReportingService, + ::envoy::service::load_stats::v3::LoadStatsRequest, + ::envoy::service::load_stats::v3::LoadStatsResponse> + v3_rpc_service_; + const int client_load_reporting_interval_seconds_; - bool send_all_clusters_ = false; - std::set<TString> cluster_names_; + bool send_all_clusters_ = false; + std::set<TString> cluster_names_; grpc_core::CondVar lrs_cv_; - grpc_core::Mutex lrs_mu_; // Protects lrs_done_. - bool lrs_done_ = false; + grpc_core::Mutex lrs_mu_; // Protects lrs_done_. + bool lrs_done_ = false; - grpc_core::Mutex load_report_mu_; // Protects the members below. - grpc_core::CondVar* load_report_cond_ = nullptr; - std::deque<std::vector<ClientStats>> result_queue_; + grpc_core::Mutex load_report_mu_; // Protects the members below. + grpc_core::CondVar* load_report_cond_ = nullptr; + std::deque<std::vector<ClientStats>> result_queue_; }; class TestType { public: - TestType(bool use_xds_resolver, bool enable_load_reporting, - bool enable_rds_testing = false, bool use_v2 = false) + TestType(bool use_xds_resolver, bool enable_load_reporting, + bool enable_rds_testing = false, bool use_v2 = false) : use_xds_resolver_(use_xds_resolver), - enable_load_reporting_(enable_load_reporting), - enable_rds_testing_(enable_rds_testing), - use_v2_(use_v2) {} + enable_load_reporting_(enable_load_reporting), + enable_rds_testing_(enable_rds_testing), + use_v2_(use_v2) {} bool use_xds_resolver() const { return use_xds_resolver_; } bool enable_load_reporting() const { return enable_load_reporting_; } - bool enable_rds_testing() const { return enable_rds_testing_; } - bool use_v2() const { return use_v2_; } + bool enable_rds_testing() const { return enable_rds_testing_; } + bool use_v2() const { return use_v2_; } - TString AsString() const { - TString retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver"); - retval += (use_v2_ ? "V2" : "V3"); + TString AsString() const { + TString retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver"); + retval += (use_v2_ ? 
"V2" : "V3"); if (enable_load_reporting_) retval += "WithLoadReporting"; - if (enable_rds_testing_) retval += "Rds"; + if (enable_rds_testing_) retval += "Rds"; return retval; } private: const bool use_xds_resolver_; const bool enable_load_reporting_; - const bool enable_rds_testing_; - const bool use_v2_; + const bool enable_rds_testing_; + const bool use_v2_; }; class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { protected: XdsEnd2endTest(size_t num_backends, size_t num_balancers, - int client_load_reporting_interval_seconds = 100) - : num_backends_(num_backends), + int client_load_reporting_interval_seconds = 100) + : num_backends_(num_backends), num_balancers_(num_balancers), client_load_reporting_interval_seconds_( client_load_reporting_interval_seconds) {} @@ -1353,70 +1353,70 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { static void TearDownTestCase() { grpc_shutdown(); } void SetUp() override { - gpr_setenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT", "true"); - gpr_setenv("GRPC_XDS_BOOTSTRAP", - GetParam().use_v2() ? g_bootstrap_file_v2 : g_bootstrap_file_v3); + gpr_setenv("GRPC_XDS_EXPERIMENTAL_V3_SUPPORT", "true"); + gpr_setenv("GRPC_XDS_BOOTSTRAP", + GetParam().use_v2() ? g_bootstrap_file_v2 : g_bootstrap_file_v3); g_port_saver->Reset(); response_generator_ = grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>(); - // Inject xDS channel response generator. + // Inject xDS channel response generator. lb_channel_response_generator_ = grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>(); - xds_channel_args_to_add_.emplace_back( - grpc_core::FakeResolverResponseGenerator::MakeChannelArg( - lb_channel_response_generator_.get())); - if (xds_resource_does_not_exist_timeout_ms_ > 0) { - xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create( - const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS), - xds_resource_does_not_exist_timeout_ms_)); - } - xds_channel_args_.num_args = xds_channel_args_to_add_.size(); - xds_channel_args_.args = xds_channel_args_to_add_.data(); - grpc_core::internal::SetXdsChannelArgsForTest(&xds_channel_args_); - // Make sure each test creates a new XdsClient instance rather than - // reusing the one from the previous test. This avoids spurious failures - // caused when a load reporting test runs after a non-load reporting test - // and the XdsClient is still talking to the old LRS server, which fails - // because it's not expecting the client to connect. It also - // ensures that each test can independently set the global channel - // args for the xDS channel. - grpc_core::internal::UnsetGlobalXdsClientForTest(); + xds_channel_args_to_add_.emplace_back( + grpc_core::FakeResolverResponseGenerator::MakeChannelArg( + lb_channel_response_generator_.get())); + if (xds_resource_does_not_exist_timeout_ms_ > 0) { + xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create( + const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS), + xds_resource_does_not_exist_timeout_ms_)); + } + xds_channel_args_.num_args = xds_channel_args_to_add_.size(); + xds_channel_args_.args = xds_channel_args_to_add_.data(); + grpc_core::internal::SetXdsChannelArgsForTest(&xds_channel_args_); + // Make sure each test creates a new XdsClient instance rather than + // reusing the one from the previous test. 
This avoids spurious failures + // caused when a load reporting test runs after a non-load reporting test + // and the XdsClient is still talking to the old LRS server, which fails + // because it's not expecting the client to connect. It also + // ensures that each test can independently set the global channel + // args for the xDS channel. + grpc_core::internal::UnsetGlobalXdsClientForTest(); // Start the backends. for (size_t i = 0; i < num_backends_; ++i) { backends_.emplace_back(new BackendServerThread); - backends_.back()->Start(); + backends_.back()->Start(); } // Start the load balancers. for (size_t i = 0; i < num_balancers_; ++i) { balancers_.emplace_back( - new BalancerServerThread(GetParam().enable_load_reporting() - ? client_load_reporting_interval_seconds_ - : 0)); - balancers_.back()->Start(); - if (GetParam().enable_rds_testing()) { - balancers_[i]->ads_service()->SetLdsToUseDynamicRds(); - } + new BalancerServerThread(GetParam().enable_load_reporting() + ? client_load_reporting_interval_seconds_ + : 0)); + balancers_.back()->Start(); + if (GetParam().enable_rds_testing()) { + balancers_[i]->ads_service()->SetLdsToUseDynamicRds(); + } } ResetStub(); } - const char* DefaultEdsServiceName() const { - return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName; - } - + const char* DefaultEdsServiceName() const { + return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName; + } + void TearDown() override { ShutdownAllBackends(); for (auto& balancer : balancers_) balancer->Shutdown(); - // Clear global xDS channel args, since they will go out of scope - // when this test object is destroyed. - grpc_core::internal::SetXdsChannelArgsForTest(nullptr); + // Clear global xDS channel args, since they will go out of scope + // when this test object is destroyed. + grpc_core::internal::SetXdsChannelArgsForTest(nullptr); } void StartAllBackends() { - for (auto& backend : backends_) backend->Start(); + for (auto& backend : backends_) backend->Start(); } - void StartBackend(size_t index) { backends_[index]->Start(); } + void StartBackend(size_t index) { backends_[index]->Start(); } void ShutdownAllBackends() { for (auto& backend : backends_) backend->Shutdown(); @@ -1424,27 +1424,27 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); } - void ResetStub(int failover_timeout = 0) { - channel_ = CreateChannel(failover_timeout); - stub_ = grpc::testing::EchoTestService::NewStub(channel_); - stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_); - stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_); - } - - std::shared_ptr<Channel> CreateChannel( - int failover_timeout = 0, const char* server_name = kServerName) { + void ResetStub(int failover_timeout = 0) { + channel_ = CreateChannel(failover_timeout); + stub_ = grpc::testing::EchoTestService::NewStub(channel_); + stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_); + stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_); + } + + std::shared_ptr<Channel> CreateChannel( + int failover_timeout = 0, const char* server_name = kServerName) { ChannelArguments args; if (failover_timeout > 0) { - args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout); + args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout); } // If the parent channel is using the fake resolver, we inject the - // response generator here. 
- if (!GetParam().use_xds_resolver()) { - args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, - response_generator_.get()); + // response generator here. + if (!GetParam().use_xds_resolver()) { + args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, + response_generator_.get()); } - TString uri = y_absl::StrCat( - GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name); + TString uri = y_absl::StrCat( + GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name); // TODO(dgq): templatize tests to run everything using both secure and // insecure channel credentials. grpc_channel_credentials* channel_creds = @@ -1456,112 +1456,112 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { channel_creds, call_creds, nullptr))); call_creds->Unref(); channel_creds->Unref(); - return ::grpc::CreateCustomChannel(uri, creds, args); - } - - enum RpcService { - SERVICE_ECHO, - SERVICE_ECHO1, - SERVICE_ECHO2, - }; - - enum RpcMethod { - METHOD_ECHO, - METHOD_ECHO1, - METHOD_ECHO2, - }; - - struct RpcOptions { - RpcService service = SERVICE_ECHO; - RpcMethod method = METHOD_ECHO; - int timeout_ms = 1000; - bool wait_for_ready = false; - bool server_fail = false; - std::vector<std::pair<TString, TString>> metadata; - - RpcOptions() {} - - RpcOptions& set_rpc_service(RpcService rpc_service) { - service = rpc_service; - return *this; - } - - RpcOptions& set_rpc_method(RpcMethod rpc_method) { - method = rpc_method; - return *this; - } - - RpcOptions& set_timeout_ms(int rpc_timeout_ms) { - timeout_ms = rpc_timeout_ms; - return *this; - } - - RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) { - wait_for_ready = rpc_wait_for_ready; - return *this; - } - - RpcOptions& set_server_fail(bool rpc_server_fail) { - server_fail = rpc_server_fail; - return *this; - } - - RpcOptions& set_metadata( - std::vector<std::pair<TString, TString>> rpc_metadata) { - metadata = rpc_metadata; - return *this; - } - }; - - template <typename Stub> - Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options, - ClientContext* context, EchoRequest& request, - EchoResponse* response) { - switch (rpc_options.method) { - case METHOD_ECHO: - return (*stub)->Echo(context, request, response); - case METHOD_ECHO1: - return (*stub)->Echo1(context, request, response); - case METHOD_ECHO2: - return (*stub)->Echo2(context, request, response); - } - } - - void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) { + return ::grpc::CreateCustomChannel(uri, creds, args); + } + + enum RpcService { + SERVICE_ECHO, + SERVICE_ECHO1, + SERVICE_ECHO2, + }; + + enum RpcMethod { + METHOD_ECHO, + METHOD_ECHO1, + METHOD_ECHO2, + }; + + struct RpcOptions { + RpcService service = SERVICE_ECHO; + RpcMethod method = METHOD_ECHO; + int timeout_ms = 1000; + bool wait_for_ready = false; + bool server_fail = false; + std::vector<std::pair<TString, TString>> metadata; + + RpcOptions() {} + + RpcOptions& set_rpc_service(RpcService rpc_service) { + service = rpc_service; + return *this; + } + + RpcOptions& set_rpc_method(RpcMethod rpc_method) { + method = rpc_method; + return *this; + } + + RpcOptions& set_timeout_ms(int rpc_timeout_ms) { + timeout_ms = rpc_timeout_ms; + return *this; + } + + RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) { + wait_for_ready = rpc_wait_for_ready; + return *this; + } + + RpcOptions& set_server_fail(bool rpc_server_fail) { + server_fail = rpc_server_fail; + return *this; + } + + RpcOptions& set_metadata( + std::vector<std::pair<TString, TString>> 
rpc_metadata) { + metadata = rpc_metadata; + return *this; + } + }; + + template <typename Stub> + Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options, + ClientContext* context, EchoRequest& request, + EchoResponse* response) { + switch (rpc_options.method) { + case METHOD_ECHO: + return (*stub)->Echo(context, request, response); + case METHOD_ECHO1: + return (*stub)->Echo1(context, request, response); + case METHOD_ECHO2: + return (*stub)->Echo2(context, request, response); + } + } + + void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) { if (stop_index == 0) stop_index = backends_.size(); for (size_t i = start_index; i < stop_index; ++i) { - backends_[i]->backend_service()->ResetCounters(); - backends_[i]->backend_service1()->ResetCounters(); - backends_[i]->backend_service2()->ResetCounters(); + backends_[i]->backend_service()->ResetCounters(); + backends_[i]->backend_service1()->ResetCounters(); + backends_[i]->backend_service2()->ResetCounters(); + } + } + + bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0, + const RpcOptions& rpc_options = RpcOptions()) { + if (stop_index == 0) stop_index = backends_.size(); + for (size_t i = start_index; i < stop_index; ++i) { + switch (rpc_options.service) { + case SERVICE_ECHO: + if (backends_[i]->backend_service()->request_count() == 0) + return false; + break; + case SERVICE_ECHO1: + if (backends_[i]->backend_service1()->request_count() == 0) + return false; + break; + case SERVICE_ECHO2: + if (backends_[i]->backend_service2()->request_count() == 0) + return false; + break; + } } - } - - bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0, - const RpcOptions& rpc_options = RpcOptions()) { - if (stop_index == 0) stop_index = backends_.size(); - for (size_t i = start_index; i < stop_index; ++i) { - switch (rpc_options.service) { - case SERVICE_ECHO: - if (backends_[i]->backend_service()->request_count() == 0) - return false; - break; - case SERVICE_ECHO1: - if (backends_[i]->backend_service1()->request_count() == 0) - return false; - break; - case SERVICE_ECHO2: - if (backends_[i]->backend_service2()->request_count() == 0) - return false; - break; - } - } return true; } void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure, - int* num_drops, - const RpcOptions& rpc_options = RpcOptions()) { - const Status status = SendRpc(rpc_options); + int* num_drops, + const RpcOptions& rpc_options = RpcOptions()) { + const Status status = SendRpc(rpc_options); if (status.ok()) { ++*num_ok; } else { @@ -1574,37 +1574,37 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { ++*num_total; } - std::tuple<int, int, int> WaitForAllBackends( - size_t start_index = 0, size_t stop_index = 0, bool reset_counters = true, - const RpcOptions& rpc_options = RpcOptions(), - bool allow_failures = false) { + std::tuple<int, int, int> WaitForAllBackends( + size_t start_index = 0, size_t stop_index = 0, bool reset_counters = true, + const RpcOptions& rpc_options = RpcOptions(), + bool allow_failures = false) { int num_ok = 0; int num_failure = 0; int num_drops = 0; int num_total = 0; - while (!SeenAllBackends(start_index, stop_index, rpc_options)) { - SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops, - rpc_options); + while (!SeenAllBackends(start_index, stop_index, rpc_options)) { + SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops, + rpc_options); } - if (reset_counters) ResetBackendCounters(); + if (reset_counters) ResetBackendCounters(); gpr_log(GPR_INFO, 
"Performed %d warm up requests against the backends. " "%d succeeded, %d failed, %d dropped.", num_total, num_ok, num_failure, num_drops); - if (!allow_failures) EXPECT_EQ(num_failure, 0); + if (!allow_failures) EXPECT_EQ(num_failure, 0); return std::make_tuple(num_ok, num_failure, num_drops); } - void WaitForBackend(size_t backend_idx, bool reset_counters = true, - bool require_success = false) { + void WaitForBackend(size_t backend_idx, bool reset_counters = true, + bool require_success = false) { gpr_log(GPR_INFO, "========= WAITING FOR BACKEND %lu ==========", static_cast<unsigned long>(backend_idx)); do { - Status status = SendRpc(); - if (require_success) { - EXPECT_TRUE(status.ok()) << "code=" << status.error_code() - << " message=" << status.error_message(); - } + Status status = SendRpc(); + if (require_success) { + EXPECT_TRUE(status.ok()) << "code=" << status.error_code() + << " message=" << status.error_message(); + } } while (backends_[backend_idx]->backend_service()->request_count() == 0); if (reset_counters) ResetBackendCounters(); gpr_log(GPR_INFO, "========= BACKEND %lu READY ==========", @@ -1615,8 +1615,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { const std::vector<int>& ports) { grpc_core::ServerAddressList addresses; for (int port : ports) { - TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); - grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); + TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port); + grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true); GPR_ASSERT(lb_uri != nullptr); grpc_resolved_address address; GPR_ASSERT(grpc_parse_uri(lb_uri, &address)); @@ -1626,7 +1626,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { return addresses; } - void SetNextResolution(const std::vector<int>& ports) { + void SetNextResolution(const std::vector<int>& ports) { if (GetParam().use_xds_resolver()) return; // Not used with xds resolver. grpc_core::ExecCtx exec_ctx; grpc_core::Resolver::Result result; @@ -1634,46 +1634,46 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { grpc_error* error = GRPC_ERROR_NONE; const char* service_config_json = GetParam().enable_load_reporting() - ? kDefaultServiceConfig - : kDefaultServiceConfigWithoutLoadReporting; + ? 
kDefaultServiceConfig + : kDefaultServiceConfigWithoutLoadReporting; result.service_config = - grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error); - ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); - ASSERT_NE(result.service_config.get(), nullptr); + grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); + ASSERT_NE(result.service_config.get(), nullptr); response_generator_->SetResponse(std::move(result)); } void SetNextResolutionForLbChannelAllBalancers( const char* service_config_json = nullptr, - const char* expected_targets = nullptr) { + const char* expected_targets = nullptr) { std::vector<int> ports; for (size_t i = 0; i < balancers_.size(); ++i) { ports.emplace_back(balancers_[i]->port()); } - SetNextResolutionForLbChannel(ports, service_config_json, expected_targets); + SetNextResolutionForLbChannel(ports, service_config_json, expected_targets); } - void SetNextResolutionForLbChannel(const std::vector<int>& ports, - const char* service_config_json = nullptr, - const char* expected_targets = nullptr) { + void SetNextResolutionForLbChannel(const std::vector<int>& ports, + const char* service_config_json = nullptr, + const char* expected_targets = nullptr) { grpc_core::ExecCtx exec_ctx; grpc_core::Resolver::Result result; result.addresses = CreateAddressListFromPortList(ports); if (service_config_json != nullptr) { grpc_error* error = GRPC_ERROR_NONE; - result.service_config = grpc_core::ServiceConfig::Create( - nullptr, service_config_json, &error); - ASSERT_NE(result.service_config.get(), nullptr); - ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); + result.service_config = grpc_core::ServiceConfig::Create( + nullptr, service_config_json, &error); + ASSERT_NE(result.service_config.get(), nullptr); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); } - if (expected_targets != nullptr) { - grpc_arg expected_targets_arg = grpc_channel_arg_string_create( - const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS), - const_cast<char*>(expected_targets)); - result.args = - grpc_channel_args_copy_and_add(nullptr, &expected_targets_arg, 1); + if (expected_targets != nullptr) { + grpc_arg expected_targets_arg = grpc_channel_arg_string_create( + const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS), + const_cast<char*>(expected_targets)); + result.args = + grpc_channel_args_copy_and_add(nullptr, &expected_targets_arg, 1); } - lb_channel_response_generator_->SetResponse(std::move(result)); + lb_channel_response_generator_->SetResponse(std::move(result)); } void SetNextReresolutionResponse(const std::vector<int>& ports) { @@ -1693,98 +1693,98 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { return backend_ports; } - Status SendRpc(const RpcOptions& rpc_options = RpcOptions(), - EchoResponse* response = nullptr) { + Status SendRpc(const RpcOptions& rpc_options = RpcOptions(), + EchoResponse* response = nullptr) { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; - ClientContext context; - for (const auto& metadata : rpc_options.metadata) { - context.AddMetadata(metadata.first, metadata.second); - } - if (rpc_options.timeout_ms != 0) { - context.set_deadline( - grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); - } - if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); - request.set_message(kRequestMessage); - if 
(rpc_options.server_fail) { - request.mutable_param()->mutable_expected_error()->set_code( - GRPC_STATUS_FAILED_PRECONDITION); - } - Status status; - switch (rpc_options.service) { - case SERVICE_ECHO: - status = - SendRpcMethod(&stub_, rpc_options, &context, request, response); - break; - case SERVICE_ECHO1: - status = - SendRpcMethod(&stub1_, rpc_options, &context, request, response); - break; - case SERVICE_ECHO2: - status = - SendRpcMethod(&stub2_, rpc_options, &context, request, response); - break; - } + ClientContext context; + for (const auto& metadata : rpc_options.metadata) { + context.AddMetadata(metadata.first, metadata.second); + } + if (rpc_options.timeout_ms != 0) { + context.set_deadline( + grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); + } + if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); + request.set_message(kRequestMessage); + if (rpc_options.server_fail) { + request.mutable_param()->mutable_expected_error()->set_code( + GRPC_STATUS_FAILED_PRECONDITION); + } + Status status; + switch (rpc_options.service) { + case SERVICE_ECHO: + status = + SendRpcMethod(&stub_, rpc_options, &context, request, response); + break; + case SERVICE_ECHO1: + status = + SendRpcMethod(&stub1_, rpc_options, &context, request, response); + break; + case SERVICE_ECHO2: + status = + SendRpcMethod(&stub2_, rpc_options, &context, request, response); + break; + } if (local_response) delete response; return status; } - void CheckRpcSendOk(const size_t times = 1, - const RpcOptions& rpc_options = RpcOptions()) { + void CheckRpcSendOk(const size_t times = 1, + const RpcOptions& rpc_options = RpcOptions()) { for (size_t i = 0; i < times; ++i) { EchoResponse response; - const Status status = SendRpc(rpc_options, &response); + const Status status = SendRpc(rpc_options, &response); EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); + } + } + + void CheckRpcSendFailure(const size_t times = 1, + const RpcOptions& rpc_options = RpcOptions()) { + for (size_t i = 0; i < times; ++i) { + const Status status = SendRpc(rpc_options); + EXPECT_FALSE(status.ok()); } } - void CheckRpcSendFailure(const size_t times = 1, - const RpcOptions& rpc_options = RpcOptions()) { - for (size_t i = 0; i < times; ++i) { - const Status status = SendRpc(rpc_options); - EXPECT_FALSE(status.ok()); - } - } - - void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) { - if (GetParam().enable_rds_testing()) { - balancers_[idx]->ads_service()->SetRdsResource(route_config); - } else { - balancers_[idx]->ads_service()->SetLdsResource( - AdsServiceImpl::BuildListener(route_config)); - } - } - - AdsServiceImpl::ResponseState RouteConfigurationResponseState(int idx) const { - AdsServiceImpl* ads_service = balancers_[idx]->ads_service(); - if (GetParam().enable_rds_testing()) { - return ads_service->rds_response_state(); - } - return ads_service->lds_response_state(); - } - - public: - // This method could benefit test subclasses; to make it accessible - // via bind with a qualified name, it needs to be public. 
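// A usage-oriented sketch of the chained setters that RpcOptions (defined
// earlier in this class) and SendRpc() rely on, using a hypothetical
// ToyOptions type: each set_* returns *this, so a test can compose options
// inline at the call site instead of mutating fields one statement at a time.
#include <string>
#include <utility>
#include <vector>

struct ToyOptions {
  int timeout_ms = 1000;
  bool wait_for_ready = false;
  std::vector<std::pair<std::string, std::string>> metadata;

  ToyOptions& set_timeout_ms(int ms) {
    timeout_ms = ms;
    return *this;
  }
  ToyOptions& set_wait_for_ready(bool value) {
    wait_for_ready = value;
    return *this;
  }
  ToyOptions& set_metadata(
      std::vector<std::pair<std::string, std::string>> md) {
    metadata = std::move(md);
    return *this;
  }
};

// Mirrors how tests build options in one expression, e.g. the equivalent of
// RpcOptions().set_timeout_ms(5000).set_wait_for_ready(true).
ToyOptions MakeExampleOptions() {
  return ToyOptions().set_timeout_ms(5000).set_wait_for_ready(true);
}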
- void SetEdsResourceWithDelay(size_t i, - const ClusterLoadAssignment& assignment, - int delay_ms) { - GPR_ASSERT(delay_ms > 0); - gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms)); - balancers_[i]->ads_service()->SetEdsResource(assignment); - } - - protected: + void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) { + if (GetParam().enable_rds_testing()) { + balancers_[idx]->ads_service()->SetRdsResource(route_config); + } else { + balancers_[idx]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config)); + } + } + + AdsServiceImpl::ResponseState RouteConfigurationResponseState(int idx) const { + AdsServiceImpl* ads_service = balancers_[idx]->ads_service(); + if (GetParam().enable_rds_testing()) { + return ads_service->rds_response_state(); + } + return ads_service->lds_response_state(); + } + + public: + // This method could benefit test subclasses; to make it accessible + // via bind with a qualified name, it needs to be public. + void SetEdsResourceWithDelay(size_t i, + const ClusterLoadAssignment& assignment, + int delay_ms) { + GPR_ASSERT(delay_ms > 0); + gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms)); + balancers_[i]->ads_service()->SetEdsResource(assignment); + } + + protected: class ServerThread { public: ServerThread() : port_(g_port_saver->GetPort()) {} virtual ~ServerThread(){}; - void Start() { + void Start() { gpr_log(GPR_INFO, "starting %s server on port %d", Type(), port_); GPR_ASSERT(!running_); running_ = true; @@ -1794,18 +1794,18 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { // by ServerThread::Serve from firing before the wait below is hit. grpc_core::MutexLock lock(&mu); grpc_core::CondVar cond; - thread_.reset( - new std::thread(std::bind(&ServerThread::Serve, this, &mu, &cond))); + thread_.reset( + new std::thread(std::bind(&ServerThread::Serve, this, &mu, &cond))); cond.Wait(&mu); gpr_log(GPR_INFO, "%s server startup complete", Type()); } - void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) { + void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) { // We need to acquire the lock here in order to prevent the notify_one // below from firing before its corresponding wait is executed. 
grpc_core::MutexLock lock(mu); std::ostringstream server_address; - server_address << "localhost:" << port_; + server_address << "localhost:" << port_; ServerBuilder builder; std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials( grpc_fake_transport_security_server_credentials_create())); @@ -1842,79 +1842,79 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { class BackendServerThread : public ServerThread { public: - BackendServiceImpl<::grpc::testing::EchoTestService::Service>* - backend_service() { - return &backend_service_; - } - BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>* - backend_service1() { - return &backend_service1_; - } - BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>* - backend_service2() { - return &backend_service2_; - } + BackendServiceImpl<::grpc::testing::EchoTestService::Service>* + backend_service() { + return &backend_service_; + } + BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>* + backend_service1() { + return &backend_service1_; + } + BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>* + backend_service2() { + return &backend_service2_; + } private: void RegisterAllServices(ServerBuilder* builder) override { builder->RegisterService(&backend_service_); - builder->RegisterService(&backend_service1_); - builder->RegisterService(&backend_service2_); + builder->RegisterService(&backend_service1_); + builder->RegisterService(&backend_service2_); } - void StartAllServices() override { - backend_service_.Start(); - backend_service1_.Start(); - backend_service2_.Start(); - } + void StartAllServices() override { + backend_service_.Start(); + backend_service1_.Start(); + backend_service2_.Start(); + } - void ShutdownAllServices() override { - backend_service_.Shutdown(); - backend_service1_.Shutdown(); - backend_service2_.Shutdown(); - } + void ShutdownAllServices() override { + backend_service_.Shutdown(); + backend_service1_.Shutdown(); + backend_service2_.Shutdown(); + } const char* Type() override { return "Backend"; } - BackendServiceImpl<::grpc::testing::EchoTestService::Service> - backend_service_; - BackendServiceImpl<::grpc::testing::EchoTest1Service::Service> - backend_service1_; - BackendServiceImpl<::grpc::testing::EchoTest2Service::Service> - backend_service2_; + BackendServiceImpl<::grpc::testing::EchoTestService::Service> + backend_service_; + BackendServiceImpl<::grpc::testing::EchoTest1Service::Service> + backend_service1_; + BackendServiceImpl<::grpc::testing::EchoTest2Service::Service> + backend_service2_; }; class BalancerServerThread : public ServerThread { public: explicit BalancerServerThread(int client_load_reporting_interval = 0) - : ads_service_(new AdsServiceImpl(client_load_reporting_interval > 0)), - lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {} + : ads_service_(new AdsServiceImpl(client_load_reporting_interval > 0)), + lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {} - AdsServiceImpl* ads_service() { return ads_service_.get(); } - LrsServiceImpl* lrs_service() { return lrs_service_.get(); } + AdsServiceImpl* ads_service() { return ads_service_.get(); } + LrsServiceImpl* lrs_service() { return lrs_service_.get(); } private: void RegisterAllServices(ServerBuilder* builder) override { - builder->RegisterService(ads_service_->v2_rpc_service()); - builder->RegisterService(ads_service_->v3_rpc_service()); - builder->RegisterService(lrs_service_->v2_rpc_service()); - 
builder->RegisterService(lrs_service_->v3_rpc_service()); + builder->RegisterService(ads_service_->v2_rpc_service()); + builder->RegisterService(ads_service_->v3_rpc_service()); + builder->RegisterService(lrs_service_->v2_rpc_service()); + builder->RegisterService(lrs_service_->v3_rpc_service()); } void StartAllServices() override { - ads_service_->Start(); - lrs_service_->Start(); + ads_service_->Start(); + lrs_service_->Start(); } void ShutdownAllServices() override { - ads_service_->Shutdown(); - lrs_service_->Shutdown(); + ads_service_->Shutdown(); + lrs_service_->Shutdown(); } const char* Type() override { return "Balancer"; } - std::shared_ptr<AdsServiceImpl> ads_service_; - std::shared_ptr<LrsServiceImpl> lrs_service_; + std::shared_ptr<AdsServiceImpl> ads_service_; + std::shared_ptr<LrsServiceImpl> lrs_service_; }; const size_t num_backends_; @@ -1922,22 +1922,22 @@ class XdsEnd2endTest : public ::testing::TestWithParam<TestType> { const int client_load_reporting_interval_seconds_; std::shared_ptr<Channel> channel_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; - std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_; - std::unique_ptr<grpc::testing::EchoTest2Service::Stub> stub2_; + std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_; + std::unique_ptr<grpc::testing::EchoTest2Service::Stub> stub2_; std::vector<std::unique_ptr<BackendServerThread>> backends_; std::vector<std::unique_ptr<BalancerServerThread>> balancers_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> response_generator_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> lb_channel_response_generator_; - int xds_resource_does_not_exist_timeout_ms_ = 0; - y_absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_; - grpc_channel_args xds_channel_args_; + int xds_resource_does_not_exist_timeout_ms_ = 0; + y_absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_; + grpc_channel_args xds_channel_args_; }; class BasicTest : public XdsEnd2endTest { public: - BasicTest() : XdsEnd2endTest(4, 1) {} + BasicTest() : XdsEnd2endTest(4, 1) {} }; // Tests that the balancer sends the correct response to the client, and the @@ -1946,11 +1946,11 @@ TEST_P(BasicTest, Vanilla) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcsPerAddress = 100; - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Make sure that trying to connect works without a call. channel_->GetState(true /* try_to_connect */); // We need to wait for all backends to come online. @@ -1963,24 +1963,24 @@ TEST_P(BasicTest, Vanilla) { backends_[i]->backend_service()->request_count()); } // Check LB policy name for the channel. - EXPECT_EQ((GetParam().use_xds_resolver() ? "xds_cluster_manager_experimental" - : "eds_experimental"), - channel_->GetLoadBalancingPolicyName()); + EXPECT_EQ((GetParam().use_xds_resolver() ? 
"xds_cluster_manager_experimental" + : "eds_experimental"), + channel_->GetLoadBalancingPolicyName()); } TEST_P(BasicTest, IgnoresUnhealthyEndpoints) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcsPerAddress = 100; - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(), kDefaultLocalityWeight, kDefaultLocalityPriority, - {HealthStatus::DRAINING}}, + {HealthStatus::DRAINING}}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Make sure that trying to connect works without a call. channel_->GetState(true /* try_to_connect */); // We need to wait for all backends to come online. @@ -2001,12 +2001,12 @@ TEST_P(BasicTest, SameBackendListedMultipleTimes) { SetNextResolutionForLbChannelAllBalancers(); // Same backend listed twice. std::vector<int> ports(2, backends_[0]->port()); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", ports}, }); const size_t kNumRpcsPerAddress = 10; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // We need to wait for the backend to come online. WaitForBackend(0); // Send kNumRpcsPerAddress RPCs per server. @@ -2026,24 +2026,24 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) { const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor(); const int kCallDeadlineMs = kServerlistDelayMs * 2; // First response is an empty serverlist, sent right away. - AdsServiceImpl::EdsResourceArgs::Locality empty_locality("locality0", {}); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs::Locality empty_locality("locality0", {}); + AdsServiceImpl::EdsResourceArgs args({ empty_locality, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Send non-empty serverlist only after kServerlistDelayMs. - args = AdsServiceImpl::EdsResourceArgs({ + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", GetBackendPorts()}, }); - std::thread delayed_resource_setter( - std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), - kServerlistDelayMs)); + std::thread delayed_resource_setter( + std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), + kServerlistDelayMs)); const auto t0 = system_clock::now(); // Client will block: LB will initially send empty serverlist. - CheckRpcSendOk( - 1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true)); + CheckRpcSendOk( + 1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true)); const auto ellapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>( system_clock::now() - t0); @@ -2052,7 +2052,7 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) { // populated serverlist but under the call's deadline (which is enforced by // the call's deadline). 
EXPECT_GT(ellapsed_ms.count(), kServerlistDelayMs); - delayed_resource_setter.join(); + delayed_resource_setter.join(); } // Tests that RPCs will fail with UNAVAILABLE instead of DEADLINE_EXCEEDED if @@ -2065,11 +2065,11 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) { for (size_t i = 0; i < kNumUnreachableServers; ++i) { ports.push_back(g_port_saver->GetPort()); } - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", ports}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); const Status status = SendRpc(); // The error shouldn't be DEADLINE_EXCEEDED. EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code()); @@ -2080,2426 +2080,2426 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) { TEST_P(BasicTest, BackendsRestart) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForAllBackends(); // Stop backends. RPCs should fail. ShutdownAllBackends(); - // Sending multiple failed requests instead of just one to ensure that the - // client notices that all backends are down before we restart them. If we - // didn't do this, then a single RPC could fail here due to the race condition - // between the LB pick and the GOAWAY from the chosen backend being shut down, - // which would not actually prove that the client noticed that all of the - // backends are down. Then, when we send another request below (which we - // expect to succeed), if the callbacks happen in the wrong order, the same - // race condition could happen again due to the client not yet having noticed - // that the backends were all down. - CheckRpcSendFailure(num_backends_); + // Sending multiple failed requests instead of just one to ensure that the + // client notices that all backends are down before we restart them. If we + // didn't do this, then a single RPC could fail here due to the race condition + // between the LB pick and the GOAWAY from the chosen backend being shut down, + // which would not actually prove that the client noticed that all of the + // backends are down. Then, when we send another request below (which we + // expect to succeed), if the callbacks happen in the wrong order, the same + // race condition could happen again due to the client not yet having noticed + // that the backends were all down. + CheckRpcSendFailure(num_backends_); // Restart all backends. RPCs should start succeeding again. StartAllBackends(); - CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true)); + CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true)); +} + +TEST_P(BasicTest, IgnoresDuplicateUpdates) { + const size_t kNumRpcsPerAddress = 100; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Wait for all backends to come online. 
+ WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server, but send an EDS update in + // between. If the update is not ignored, this will cause the + // round_robin policy to see an update, which will randomly reset its + // position in the address list. + for (size_t i = 0; i < kNumRpcsPerAddress; ++i) { + CheckRpcSendOk(2); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + CheckRpcSendOk(2); + } + // Each backend should have gotten the right number of requests. + for (size_t i = 1; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress, + backends_[i]->backend_service()->request_count()); + } +} + +using XdsResolverOnlyTest = BasicTest; + +// Tests switching over from one cluster to another. +TEST_P(XdsResolverOnlyTest, ChangeClusters) { + const char* kNewClusterName = "new_cluster_name"; + const char* kNewEdsServiceName = "new_eds_service_name"; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // We need to wait for all backends to come online. + WaitForAllBackends(0, 2); + // Populate new EDS resource. + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); + // Populate new CDS resource. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Change RDS resource to point to new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster(kNewClusterName); + Listener listener = + balancers_[0]->ads_service()->BuildListener(new_route_config); + balancers_[0]->ads_service()->SetLdsResource(listener); + // Wait for all new backends to be used. + std::tuple<int, int, int> counts = WaitForAllBackends(2, 4); + // Make sure no RPCs failed in the transition. + EXPECT_EQ(0, std::get<1>(counts)); +} + +// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears. +TEST_P(XdsResolverOnlyTest, ClusterRemoved) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // We need to wait for all backends to come online. + WaitForAllBackends(); + // Unset CDS resource. + balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName); + // Wait for RPCs to start failing. + do { + } while (SendRpc(RpcOptions(), nullptr).ok()); + // Make sure RPCs are still failing. + CheckRpcSendFailure(1000); + // Make sure we ACK'ed the update. + EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state, + AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that we restart all xDS requests when we reestablish the ADS call. 
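// (In the test below, the client is expected to re-send its LDS, RDS, CDS and
// EDS requests on the new ADS stream once the balancer comes back; this is
// verified by pushing a new cluster via RDS only after the restart and
// checking that traffic moves to the new backends without any failed RPCs.)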
+TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) { + balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); + const char* kNewClusterName = "new_cluster_name"; + const char* kNewEdsServiceName = "new_eds_service_name"; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // We need to wait for all backends to come online. + WaitForAllBackends(0, 2); + // Now shut down and restart the balancer. When the client + // reconnects, it should automatically restart the requests for all + // resource types. + balancers_[0]->Shutdown(); + balancers_[0]->Start(); + // Make sure things are still working. + CheckRpcSendOk(100); + // Populate new EDS resource. + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); + // Populate new CDS resource. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Change RDS resource to point to new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster(kNewClusterName); + balancers_[0]->ads_service()->SetRdsResource(new_route_config); + // Wait for all new backends to be used. + std::tuple<int, int, int> counts = WaitForAllBackends(2, 4); + // Make sure no RPCs failed in the transition. + EXPECT_EQ(0, std::get<1>(counts)); +} + +TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_match() + ->set_prefix("/"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config)); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // We need to wait for all backends to come online. + WaitForAllBackends(); +} + +TEST_P(XdsResolverOnlyTest, CircuitBreaking) { + class TestRpc { + public: + TestRpc() {} + + void StartRpc(grpc::testing::EchoTestService::Stub* stub) { + sender_thread_ = std::thread([this, stub]() { + EchoResponse response; + EchoRequest request; + request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000); + request.set_message(kRequestMessage); + status_ = stub->Echo(&context_, request, &response); + }); + } + + void CancelRpc() { + context_.TryCancel(); + sender_thread_.join(); + } + + private: + std::thread sender_thread_; + ClientContext context_; + Status status_; + }; + + gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true"); + constexpr size_t kMaxConcurrentRequests = 10; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. 
+ AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // Update CDS resource to set max concurrent request. + CircuitBreakers circuit_breaks; + Cluster cluster = balancers_[0]->ads_service()->default_cluster(); + auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds(); + threshold->set_priority(RoutingPriority::DEFAULT); + threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests); + balancers_[0]->ads_service()->SetCdsResource(cluster); + // Send exactly max_concurrent_requests long RPCs. + TestRpc rpcs[kMaxConcurrentRequests]; + for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { + rpcs[i].StartRpc(stub_.get()); + } + // Wait for all RPCs to be in flight. + while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() < + kMaxConcurrentRequests) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1 * 1000, GPR_TIMESPAN))); + } + // Sending a RPC now should fail, the error message should tell us + // we hit the max concurrent requests limit and got dropped. + Status status = SendRpc(); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); + // Cancel one RPC to allow another one through + rpcs[0].CancelRpc(); + status = SendRpc(); + EXPECT_TRUE(status.ok()); + for (size_t i = 1; i < kMaxConcurrentRequests; ++i) { + rpcs[i].CancelRpc(); + } + // Make sure RPCs go to the correct backend: + EXPECT_EQ(kMaxConcurrentRequests + 1, + backends_[0]->backend_service()->request_count()); + gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"); +} + +TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) { + class TestRpc { + public: + TestRpc() {} + + void StartRpc(grpc::testing::EchoTestService::Stub* stub) { + sender_thread_ = std::thread([this, stub]() { + EchoResponse response; + EchoRequest request; + request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000); + request.set_message(kRequestMessage); + status_ = stub->Echo(&context_, request, &response); + }); + } + + void CancelRpc() { + context_.TryCancel(); + sender_thread_.join(); + } + + private: + std::thread sender_thread_; + ClientContext context_; + Status status_; + }; + + constexpr size_t kMaxConcurrentRequests = 10; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // Update CDS resource to set max concurrent request. + CircuitBreakers circuit_breaks; + Cluster cluster = balancers_[0]->ads_service()->default_cluster(); + auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds(); + threshold->set_priority(RoutingPriority::DEFAULT); + threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests); + balancers_[0]->ads_service()->SetCdsResource(cluster); + // Send exactly max_concurrent_requests long RPCs. + TestRpc rpcs[kMaxConcurrentRequests]; + for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { + rpcs[i].StartRpc(stub_.get()); + } + // Wait for all RPCs to be in flight. 
+ while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() < + kMaxConcurrentRequests) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1 * 1000, GPR_TIMESPAN))); + } + // Sending a RPC now should not fail as circuit breaking is disabled. + Status status = SendRpc(); + EXPECT_TRUE(status.ok()); + for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { + rpcs[i].CancelRpc(); + } + // Make sure RPCs go to the correct backend: + EXPECT_EQ(kMaxConcurrentRequests + 1, + backends_[0]->backend_service()->request_count()); +} + +TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) { + const char* kNewServerName = "new-server.example.com"; + Listener listener = balancers_[0]->ads_service()->default_listener(); + listener.set_name(kNewServerName); + balancers_[0]->ads_service()->SetLdsResource(listener); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + WaitForAllBackends(); + // Create second channel and tell it to connect to kNewServerName. + auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName); + channel2->GetState(/*try_to_connect=*/true); + ASSERT_TRUE( + channel2->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100))); + // Make sure there's only one client connected. + EXPECT_EQ(1UL, balancers_[0]->ads_service()->clients().size()); +} + +class XdsResolverLoadReportingOnlyTest : public XdsEnd2endTest { + public: + XdsResolverLoadReportingOnlyTest() : XdsEnd2endTest(4, 1, 3) {} +}; + +// Tests load reporting when switching over from one cluster to another. +TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) { + const char* kNewClusterName = "new_cluster_name"; + const char* kNewEdsServiceName = "new_eds_service_name"; + balancers_[0]->lrs_service()->set_cluster_names( + {kDefaultClusterName, kNewClusterName}); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // cluster kDefaultClusterName -> locality0 -> backends 0 and 1 + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // cluster kNewClusterName -> locality1 -> backends 2 and 3 + AdsServiceImpl::EdsResourceArgs args2({ + {"locality1", GetBackendPorts(2, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); + // CDS resource for kNewClusterName. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Wait for all backends to come online. + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(0, 2); + // The load report received at the balancer should be correct. 
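  // (The matcher below pins down the expected shape: a single ClientStats
  // entry for kDefaultClusterName whose locality0 stats show num_ok
  // successes, zero requests in progress, num_failure errors and
  // num_ok + num_failure issued requests, plus num_drops dropped requests
  // overall.)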
+ std::vector<ClientStats> load_report = + balancers_[0]->lrs_service()->WaitForLoadReport(); + EXPECT_THAT( + load_report, + ::testing::ElementsAre(::testing::AllOf( + ::testing::Property(&ClientStats::cluster_name, kDefaultClusterName), + ::testing::Property( + &ClientStats::locality_stats, + ::testing::ElementsAre(::testing::Pair( + "locality0", + ::testing::AllOf( + ::testing::Field(&ClientStats::LocalityStats:: + total_successful_requests, + num_ok), + ::testing::Field(&ClientStats::LocalityStats:: + total_requests_in_progress, + 0UL), + ::testing::Field( + &ClientStats::LocalityStats::total_error_requests, + num_failure), + ::testing::Field( + &ClientStats::LocalityStats::total_issued_requests, + num_failure + num_ok))))), + ::testing::Property(&ClientStats::total_dropped_requests, + num_drops)))); + // Change RDS resource to point to new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster(kNewClusterName); + Listener listener = + balancers_[0]->ads_service()->BuildListener(new_route_config); + balancers_[0]->ads_service()->SetLdsResource(listener); + // Wait for all new backends to be used. + std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4); + // The load report received at the balancer should be correct. + load_report = balancers_[0]->lrs_service()->WaitForLoadReport(); + EXPECT_THAT( + load_report, + ::testing::ElementsAre( + ::testing::AllOf( + ::testing::Property(&ClientStats::cluster_name, + kDefaultClusterName), + ::testing::Property( + &ClientStats::locality_stats, + ::testing::ElementsAre(::testing::Pair( + "locality0", + ::testing::AllOf( + ::testing::Field(&ClientStats::LocalityStats:: + total_successful_requests, + ::testing::Lt(num_ok)), + ::testing::Field(&ClientStats::LocalityStats:: + total_requests_in_progress, + 0UL), + ::testing::Field( + &ClientStats::LocalityStats::total_error_requests, + ::testing::Le(num_failure)), + ::testing::Field( + &ClientStats::LocalityStats:: + total_issued_requests, + ::testing::Le(num_failure + num_ok)))))), + ::testing::Property(&ClientStats::total_dropped_requests, + num_drops)), + ::testing::AllOf( + ::testing::Property(&ClientStats::cluster_name, kNewClusterName), + ::testing::Property( + &ClientStats::locality_stats, + ::testing::ElementsAre(::testing::Pair( + "locality1", + ::testing::AllOf( + ::testing::Field(&ClientStats::LocalityStats:: + total_successful_requests, + ::testing::Le(num_ok)), + ::testing::Field(&ClientStats::LocalityStats:: + total_requests_in_progress, + 0UL), + ::testing::Field( + &ClientStats::LocalityStats::total_error_requests, + ::testing::Le(num_failure)), + ::testing::Field( + &ClientStats::LocalityStats:: + total_issued_requests, + ::testing::Le(num_failure + num_ok)))))), + ::testing::Property(&ClientStats::total_dropped_requests, + num_drops)))); + int total_ok = 0; + int total_failure = 0; + for (const ClientStats& client_stats : load_report) { + total_ok += client_stats.total_successful_requests(); + total_failure += client_stats.total_error_requests(); + } + EXPECT_EQ(total_ok, num_ok); + EXPECT_EQ(total_failure, num_failure); + // The LRS service got a single request, and sent a single response. 
+ EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); } -TEST_P(BasicTest, IgnoresDuplicateUpdates) { - const size_t kNumRpcsPerAddress = 100; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Wait for all backends to come online. - WaitForAllBackends(); - // Send kNumRpcsPerAddress RPCs per server, but send an EDS update in - // between. If the update is not ignored, this will cause the - // round_robin policy to see an update, which will randomly reset its - // position in the address list. - for (size_t i = 0; i < kNumRpcsPerAddress; ++i) { - CheckRpcSendOk(2); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - CheckRpcSendOk(2); - } - // Each backend should have gotten the right number of requests. - for (size_t i = 1; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress, - backends_[i]->backend_service()->request_count()); - } -} - -using XdsResolverOnlyTest = BasicTest; - -// Tests switching over from one cluster to another. -TEST_P(XdsResolverOnlyTest, ChangeClusters) { - const char* kNewClusterName = "new_cluster_name"; - const char* kNewEdsServiceName = "new_eds_service_name"; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // We need to wait for all backends to come online. - WaitForAllBackends(0, 2); - // Populate new EDS resource. - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); - // Populate new CDS resource. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Change RDS resource to point to new cluster. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_route() - ->set_cluster(kNewClusterName); - Listener listener = - balancers_[0]->ads_service()->BuildListener(new_route_config); - balancers_[0]->ads_service()->SetLdsResource(listener); - // Wait for all new backends to be used. - std::tuple<int, int, int> counts = WaitForAllBackends(2, 4); - // Make sure no RPCs failed in the transition. - EXPECT_EQ(0, std::get<1>(counts)); -} - -// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears. -TEST_P(XdsResolverOnlyTest, ClusterRemoved) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // We need to wait for all backends to come online. - WaitForAllBackends(); - // Unset CDS resource. - balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName); - // Wait for RPCs to start failing. 
- do { - } while (SendRpc(RpcOptions(), nullptr).ok()); - // Make sure RPCs are still failing. - CheckRpcSendFailure(1000); - // Make sure we ACK'ed the update. - EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state, - AdsServiceImpl::ResponseState::ACKED); -} - -// Tests that we restart all xDS requests when we reestablish the ADS call. -TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) { - balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); - const char* kNewClusterName = "new_cluster_name"; - const char* kNewEdsServiceName = "new_eds_service_name"; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // We need to wait for all backends to come online. - WaitForAllBackends(0, 2); - // Now shut down and restart the balancer. When the client - // reconnects, it should automatically restart the requests for all - // resource types. - balancers_[0]->Shutdown(); - balancers_[0]->Start(); - // Make sure things are still working. - CheckRpcSendOk(100); - // Populate new EDS resource. - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); - // Populate new CDS resource. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Change RDS resource to point to new cluster. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_route() - ->set_cluster(kNewClusterName); - balancers_[0]->ads_service()->SetRdsResource(new_route_config); - // Wait for all new backends to be used. - std::tuple<int, int, int> counts = WaitForAllBackends(2, 4); - // Make sure no RPCs failed in the transition. - EXPECT_EQ(0, std::get<1>(counts)); -} - -TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_match() - ->set_prefix("/"); - balancers_[0]->ads_service()->SetLdsResource( - AdsServiceImpl::BuildListener(route_config)); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // We need to wait for all backends to come online. 
- WaitForAllBackends(); -} - -TEST_P(XdsResolverOnlyTest, CircuitBreaking) { - class TestRpc { - public: - TestRpc() {} - - void StartRpc(grpc::testing::EchoTestService::Stub* stub) { - sender_thread_ = std::thread([this, stub]() { - EchoResponse response; - EchoRequest request; - request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000); - request.set_message(kRequestMessage); - status_ = stub->Echo(&context_, request, &response); - }); - } - - void CancelRpc() { - context_.TryCancel(); - sender_thread_.join(); - } - - private: - std::thread sender_thread_; - ClientContext context_; - Status status_; - }; - - gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true"); - constexpr size_t kMaxConcurrentRequests = 10; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // Update CDS resource to set max concurrent request. - CircuitBreakers circuit_breaks; - Cluster cluster = balancers_[0]->ads_service()->default_cluster(); - auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds(); - threshold->set_priority(RoutingPriority::DEFAULT); - threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests); - balancers_[0]->ads_service()->SetCdsResource(cluster); - // Send exactly max_concurrent_requests long RPCs. - TestRpc rpcs[kMaxConcurrentRequests]; - for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { - rpcs[i].StartRpc(stub_.get()); - } - // Wait for all RPCs to be in flight. - while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() < - kMaxConcurrentRequests) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1 * 1000, GPR_TIMESPAN))); - } - // Sending a RPC now should fail, the error message should tell us - // we hit the max concurrent requests limit and got dropped. - Status status = SendRpc(); - EXPECT_FALSE(status.ok()); - EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); - // Cancel one RPC to allow another one through - rpcs[0].CancelRpc(); - status = SendRpc(); - EXPECT_TRUE(status.ok()); - for (size_t i = 1; i < kMaxConcurrentRequests; ++i) { - rpcs[i].CancelRpc(); - } - // Make sure RPCs go to the correct backend: - EXPECT_EQ(kMaxConcurrentRequests + 1, - backends_[0]->backend_service()->request_count()); - gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"); -} - -TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) { - class TestRpc { - public: - TestRpc() {} - - void StartRpc(grpc::testing::EchoTestService::Stub* stub) { - sender_thread_ = std::thread([this, stub]() { - EchoResponse response; - EchoRequest request; - request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000); - request.set_message(kRequestMessage); - status_ = stub->Echo(&context_, request, &response); - }); - } - - void CancelRpc() { - context_.TryCancel(); - sender_thread_.join(); - } - - private: - std::thread sender_thread_; - ClientContext context_; - Status status_; - }; - - constexpr size_t kMaxConcurrentRequests = 10; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // Update CDS resource to set max concurrent request. 
- CircuitBreakers circuit_breaks; - Cluster cluster = balancers_[0]->ads_service()->default_cluster(); - auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds(); - threshold->set_priority(RoutingPriority::DEFAULT); - threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests); - balancers_[0]->ads_service()->SetCdsResource(cluster); - // Send exactly max_concurrent_requests long RPCs. - TestRpc rpcs[kMaxConcurrentRequests]; - for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { - rpcs[i].StartRpc(stub_.get()); - } - // Wait for all RPCs to be in flight. - while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() < - kMaxConcurrentRequests) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1 * 1000, GPR_TIMESPAN))); - } - // Sending a RPC now should not fail as circuit breaking is disabled. - Status status = SendRpc(); - EXPECT_TRUE(status.ok()); - for (size_t i = 0; i < kMaxConcurrentRequests; ++i) { - rpcs[i].CancelRpc(); - } - // Make sure RPCs go to the correct backend: - EXPECT_EQ(kMaxConcurrentRequests + 1, - backends_[0]->backend_service()->request_count()); -} - -TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) { - const char* kNewServerName = "new-server.example.com"; - Listener listener = balancers_[0]->ads_service()->default_listener(); - listener.set_name(kNewServerName); - balancers_[0]->ads_service()->SetLdsResource(listener); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - WaitForAllBackends(); - // Create second channel and tell it to connect to kNewServerName. - auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName); - channel2->GetState(/*try_to_connect=*/true); - ASSERT_TRUE( - channel2->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100))); - // Make sure there's only one client connected. - EXPECT_EQ(1UL, balancers_[0]->ads_service()->clients().size()); -} - -class XdsResolverLoadReportingOnlyTest : public XdsEnd2endTest { - public: - XdsResolverLoadReportingOnlyTest() : XdsEnd2endTest(4, 1, 3) {} -}; - -// Tests load reporting when switching over from one cluster to another. -TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) { - const char* kNewClusterName = "new_cluster_name"; - const char* kNewEdsServiceName = "new_eds_service_name"; - balancers_[0]->lrs_service()->set_cluster_names( - {kDefaultClusterName, kNewClusterName}); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // cluster kDefaultClusterName -> locality0 -> backends 0 and 1 - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // cluster kNewClusterName -> locality1 -> backends 2 and 3 - AdsServiceImpl::EdsResourceArgs args2({ - {"locality1", GetBackendPorts(2, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsServiceName)); - // CDS resource for kNewClusterName. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Wait for all backends to come online. 
- int num_ok = 0; - int num_failure = 0; - int num_drops = 0; - std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(0, 2); - // The load report received at the balancer should be correct. - std::vector<ClientStats> load_report = - balancers_[0]->lrs_service()->WaitForLoadReport(); - EXPECT_THAT( - load_report, - ::testing::ElementsAre(::testing::AllOf( - ::testing::Property(&ClientStats::cluster_name, kDefaultClusterName), - ::testing::Property( - &ClientStats::locality_stats, - ::testing::ElementsAre(::testing::Pair( - "locality0", - ::testing::AllOf( - ::testing::Field(&ClientStats::LocalityStats:: - total_successful_requests, - num_ok), - ::testing::Field(&ClientStats::LocalityStats:: - total_requests_in_progress, - 0UL), - ::testing::Field( - &ClientStats::LocalityStats::total_error_requests, - num_failure), - ::testing::Field( - &ClientStats::LocalityStats::total_issued_requests, - num_failure + num_ok))))), - ::testing::Property(&ClientStats::total_dropped_requests, - num_drops)))); - // Change RDS resource to point to new cluster. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_route() - ->set_cluster(kNewClusterName); - Listener listener = - balancers_[0]->ads_service()->BuildListener(new_route_config); - balancers_[0]->ads_service()->SetLdsResource(listener); - // Wait for all new backends to be used. - std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4); - // The load report received at the balancer should be correct. - load_report = balancers_[0]->lrs_service()->WaitForLoadReport(); - EXPECT_THAT( - load_report, - ::testing::ElementsAre( - ::testing::AllOf( - ::testing::Property(&ClientStats::cluster_name, - kDefaultClusterName), - ::testing::Property( - &ClientStats::locality_stats, - ::testing::ElementsAre(::testing::Pair( - "locality0", - ::testing::AllOf( - ::testing::Field(&ClientStats::LocalityStats:: - total_successful_requests, - ::testing::Lt(num_ok)), - ::testing::Field(&ClientStats::LocalityStats:: - total_requests_in_progress, - 0UL), - ::testing::Field( - &ClientStats::LocalityStats::total_error_requests, - ::testing::Le(num_failure)), - ::testing::Field( - &ClientStats::LocalityStats:: - total_issued_requests, - ::testing::Le(num_failure + num_ok)))))), - ::testing::Property(&ClientStats::total_dropped_requests, - num_drops)), - ::testing::AllOf( - ::testing::Property(&ClientStats::cluster_name, kNewClusterName), - ::testing::Property( - &ClientStats::locality_stats, - ::testing::ElementsAre(::testing::Pair( - "locality1", - ::testing::AllOf( - ::testing::Field(&ClientStats::LocalityStats:: - total_successful_requests, - ::testing::Le(num_ok)), - ::testing::Field(&ClientStats::LocalityStats:: - total_requests_in_progress, - 0UL), - ::testing::Field( - &ClientStats::LocalityStats::total_error_requests, - ::testing::Le(num_failure)), - ::testing::Field( - &ClientStats::LocalityStats:: - total_issued_requests, - ::testing::Le(num_failure + num_ok)))))), - ::testing::Property(&ClientStats::total_dropped_requests, - num_drops)))); - int total_ok = 0; - int total_failure = 0; - for (const ClientStats& client_stats : load_report) { - total_ok += client_stats.total_successful_requests(); - total_failure += client_stats.total_error_requests(); - } - EXPECT_EQ(total_ok, num_ok); - EXPECT_EQ(total_failure, num_failure); - // The LRS service got a single request, and sent a single response. 
- EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); - EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); -} - using SecureNamingTest = BasicTest; // Tests that secure naming check passes if target name is expected. TEST_P(SecureNamingTest, TargetNameIsExpected) { SetNextResolution({}); - SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, "xds_server"); - AdsServiceImpl::EdsResourceArgs args({ + SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, "xds_server"); + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - CheckRpcSendOk(); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + CheckRpcSendOk(); } // Tests that secure naming check fails if target name is unexpected. TEST_P(SecureNamingTest, TargetNameIsUnexpected) { ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - SetNextResolution({}); - SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, - "incorrect_server_name"); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + SetNextResolution({}); + SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, + "incorrect_server_name"); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Make sure that we blow up (via abort() from the security connector) when // the name from the balancer doesn't match expectations. - ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, ""); + ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, ""); +} + +using LdsTest = BasicTest; + +// Tests that LDS client should send a NACK if there is no API listener in the +// Listener in the LDS response. +TEST_P(LdsTest, NoApiListener) { + auto listener = balancers_[0]->ads_service()->default_listener(); + listener.clear_api_listener(); + balancers_[0]->ads_service()->SetLdsResource(listener); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->lds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "Listener has no ApiListener."); +} + +// Tests that LDS client should send a NACK if the route_specifier in the +// http_connection_manager is neither inlined route_config nor RDS. 
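// (The test below provokes this by selecting the scoped_routes branch of the
// HttpConnectionManager route_specifier oneof, which the client rejects with
// the NACK message asserted at the end of the test.)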
+TEST_P(LdsTest, WrongRouteSpecifier) { + auto listener = balancers_[0]->ads_service()->default_listener(); + HttpConnectionManager http_connection_manager; + http_connection_manager.mutable_scoped_routes(); + listener.mutable_api_listener()->mutable_api_listener()->PackFrom( + http_connection_manager); + balancers_[0]->ads_service()->SetLdsResource(listener); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->lds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "HttpConnectionManager neither has inlined route_config nor RDS."); +} + +// Tests that LDS client should send a NACK if the rds message in the +// http_connection_manager is missing the config_source field. +TEST_P(LdsTest, RdsMissingConfigSource) { + auto listener = balancers_[0]->ads_service()->default_listener(); + HttpConnectionManager http_connection_manager; + http_connection_manager.mutable_rds()->set_route_config_name( + kDefaultRouteConfigurationName); + listener.mutable_api_listener()->mutable_api_listener()->PackFrom( + http_connection_manager); + balancers_[0]->ads_service()->SetLdsResource(listener); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->lds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "HttpConnectionManager missing config_source for RDS."); +} + +// Tests that LDS client should send a NACK if the rds message in the +// http_connection_manager has a config_source field that does not specify ADS. +TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) { + auto listener = balancers_[0]->ads_service()->default_listener(); + HttpConnectionManager http_connection_manager; + auto* rds = http_connection_manager.mutable_rds(); + rds->set_route_config_name(kDefaultRouteConfigurationName); + rds->mutable_config_source()->mutable_self(); + listener.mutable_api_listener()->mutable_api_listener()->PackFrom( + http_connection_manager); + balancers_[0]->ads_service()->SetLdsResource(listener); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->lds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "HttpConnectionManager ConfigSource for RDS does not specify ADS."); +} + +using LdsRdsTest = BasicTest; + +// Tests that LDS client should send an ACK upon correct LDS response (with +// inlined RDS result). +TEST_P(LdsRdsTest, Vanilla) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + (void)SendRpc(); + EXPECT_EQ(RouteConfigurationResponseState(0).state, + AdsServiceImpl::ResponseState::ACKED); + // Make sure we actually used the RPC service for the right version of xDS. + EXPECT_EQ(balancers_[0]->ads_service()->seen_v2_client(), + GetParam().use_v2()); + EXPECT_NE(balancers_[0]->ads_service()->seen_v3_client(), + GetParam().use_v2()); +} + +// Tests that we go into TRANSIENT_FAILURE if the Listener is removed. 
+TEST_P(LdsRdsTest, ListenerRemoved) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + // We need to wait for all backends to come online. + WaitForAllBackends(); + // Unset LDS resource. + balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName); + // Wait for RPCs to start failing. + do { + } while (SendRpc(RpcOptions(), nullptr).ok()); + // Make sure RPCs are still failing. + CheckRpcSendFailure(1000); + // Make sure we ACK'ed the update. + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state, + AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that LDS client ACKs but fails if matching domain can't be found in +// the LDS response. +TEST_P(LdsRdsTest, NoMatchedDomain) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + route_config.mutable_virtual_hosts(0)->clear_domains(); + route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain"); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + // Do a bit of polling, to allow the ACK to get to the ADS server. + channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100)); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that LDS client should choose the virtual host with matching domain if +// multiple virtual hosts exist in the LDS response. +TEST_P(LdsRdsTest, ChooseMatchedDomain) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + *(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0); + route_config.mutable_virtual_hosts(0)->clear_domains(); + route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain"); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + (void)SendRpc(); + EXPECT_EQ(RouteConfigurationResponseState(0).state, + AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that LDS client should choose the last route in the virtual host if +// multiple routes exist in the LDS response. +TEST_P(LdsRdsTest, ChooseLastRoute) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + *(route_config.mutable_virtual_hosts(0)->add_routes()) = + route_config.virtual_hosts(0).routes(0); + route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->mutable_cluster_header(); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + (void)SendRpc(); + EXPECT_EQ(RouteConfigurationResponseState(0).state, + AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that LDS client should send a NACK if route match has a case_sensitive +// set to false. 
+TEST_P(LdsRdsTest, RouteMatchHasCaseSensitiveFalse) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->mutable_case_sensitive()->set_value(false);
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message,
+            "case_sensitive if set must be set to true.");
+}
+
+// Tests that LDS client should ignore a route which has query_parameters.
+TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+  route1->mutable_match()->add_query_parameters();
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should send an ACK if the route match has a prefix
+// that is either empty or a single slash.
+TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("");
+  auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+  default_route->mutable_match()->set_prefix("/");
+  default_route->mutable_route()->set_cluster(kDefaultClusterName);
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  (void)SendRpc();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+// Tests that LDS client should ignore a route whose prefix string does not
+// start with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose prefix string has more
+// than 2 slashes.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose prefix string is "//".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("//");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string is empty.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string does not
+// start with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string has too many
+// slashes; for example, one that ends with "/".
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string has only one
+// slash, i.e., is missing the "/" between service and method.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string is missing
+// the service name.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("//Echo1");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should ignore a route whose path string is missing
+// the method name.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) {
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/");
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
+}
+
+// Tests that LDS client should reject a route which has an invalid path regex.
+TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) { + const char* kNewCluster1Name = "new_cluster_1"; + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "Invalid regex string specified in path matcher."); +} + +// Tests that LDS client should send a NACK if route has an action other than +// RouteAction in the LDS response. +TEST_P(LdsRdsTest, RouteHasNoRouteAction) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect(); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "No RouteAction found in route."); +} + +TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + route1->mutable_route()->set_cluster(""); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "RouteAction cluster contains empty cluster name."); +} + +TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) { + const size_t kWeight75 = 75; + const char* kNewCluster1Name = "new_cluster_1"; + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75 + 1); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, 
AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "RouteAction weighted_cluster has incorrect total weight"); +} + +TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) { + const size_t kWeight75 = 75; + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(""); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ( + response_state.error_message, + "RouteAction weighted_cluster cluster contains empty cluster name."); +} + +TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) { + const size_t kWeight75 = 75; + const char* kNewCluster1Name = "new_cluster_1"; + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "RouteAction weighted_cluster cluster missing weight"); +} + +TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) { + const char* kNewCluster1Name = "new_cluster_1"; + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* header_matcher1 = route1->mutable_match()->add_headers(); + header_matcher1->set_name("header1"); + header_matcher1->mutable_safe_regex_match()->set_regex("a[z-a]"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + SetRouteConfiguration(0, route_config); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "Invalid regex string 
specified in header matcher.");
+}
+
+TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
+  const char* kNewCluster1Name = "new_cluster_1";
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
+  auto* header_matcher1 = route1->mutable_match()->add_headers();
+  header_matcher1->set_name("header1");
+  header_matcher1->mutable_range_match()->set_start(1001);
+  header_matcher1->mutable_range_match()->set_end(1000);
+  route1->mutable_route()->set_cluster(kNewCluster1Name);
+  SetRouteConfiguration(0, route_config);
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  CheckRpcSendFailure();
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
+  EXPECT_EQ(response_state.error_message,
+            "Invalid range header matcher specifier specified: end "
+            "cannot be smaller than start.");
+}
+
+// Tests that LDS client should choose the default route (with no matching
+// specified) when it is unable to find a match with the previous routes.
+TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
+  const char* kNewCluster1Name = "new_cluster_1";
+  const char* kNewEdsService1Name = "new_eds_service_name_1";
+  const char* kNewCluster2Name = "new_cluster_2";
+  const char* kNewEdsService2Name = "new_eds_service_name_2";
+  const size_t kNumEcho1Rpcs = 10;
+  const size_t kNumEcho2Rpcs = 20;
+  const size_t kNumEchoRpcs = 30;
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  // Populate new EDS resources.
+  AdsServiceImpl::EdsResourceArgs args({
+      {"locality0", GetBackendPorts(0, 2)},
+  });
+  AdsServiceImpl::EdsResourceArgs args1({
+      {"locality0", GetBackendPorts(2, 3)},
+  });
+  AdsServiceImpl::EdsResourceArgs args2({
+      {"locality0", GetBackendPorts(3, 4)},
+  });
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args));
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+  // Populate new CDS resources.
+  Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+  new_cluster1.set_name(kNewCluster1Name);
+  new_cluster1.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsService1Name);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+  Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+  new_cluster2.set_name(kNewCluster2Name);
+  new_cluster2.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsService2Name);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+  // Populating Route Configurations for LDS.
+ RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2"); + route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + route3->mutable_match()->set_path("/grpc.testing.EchoTest3Service/Echo3"); + route3->mutable_route()->set_cluster(kDefaultClusterName); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + .set_rpc_method(METHOD_ECHO1) + .set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO2) + .set_rpc_method(METHOD_ECHO2) + .set_wait_for_ready(true)); + // Make sure RPCs all go to the correct backend. + for (size_t i = 0; i < 2; ++i) { + EXPECT_EQ(kNumEchoRpcs / 2, + backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); + } + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); +} + +TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const size_t kNumEcho1Rpcs = 10; + const size_t kNumEcho2Rpcs = 20; + const size_t kNumEchoRpcs = 30; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + // Populate new CDS resources. 
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + // Populating Route Configurations for LDS. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/"); + route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho1Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho2Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true)); + // Make sure RPCs all go to the correct backend. + for (size_t i = 0; i < 2; ++i) { + EXPECT_EQ(kNumEchoRpcs / 2, + backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); + } + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); +} + +TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const size_t kNumEcho1Rpcs = 10; + const size_t kNumEcho2Rpcs = 20; + const size_t kNumEchoRpcs = 30; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + // Populate new CDS resources. 
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + // Populating Route Configurations for LDS. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + // Will match "/grpc.testing.EchoTest1Service/" + route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + // Will match "/grpc.testing.EchoTest2Service/" + route2->mutable_match()->mutable_safe_regex()->set_regex(".*2.*"); + route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho1Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho2Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true)); + // Make sure RPCs all go to the correct backend. + for (size_t i = 0; i < 2; ++i) { + EXPECT_EQ(kNumEchoRpcs / 2, + backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); + } + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); +} + +TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const size_t kNumEcho1Rpcs = 1000; + const size_t kNumEchoRpcs = 10; + const size_t kWeight75 = 75; + const size_t kWeight25 = 25; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. 
+ AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 3)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + // Populating Route Configurations for LDS. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + auto* weighted_cluster2 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster2->set_name(kNewCluster2Name); + weighted_cluster2->mutable_weight()->set_value(kWeight25); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75 + kWeight25); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 1); + WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + const int weight_75_request_count = + backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + const int weight_25_request_count = + backends_[2]->backend_service1()->request_count(); + const double kErrorTolerance = 0.2; + EXPECT_THAT(weight_75_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * + (1 + kErrorTolerance)))); + // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the + // test from flaking while debugging potential root cause. 
+ const double kErrorToleranceSmallLoad = 0.3; + gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", + weight_75_request_count, weight_25_request_count); + EXPECT_THAT(weight_25_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * + (1 - kErrorToleranceSmallLoad)), + ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * + (1 + kErrorToleranceSmallLoad)))); +} + +TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const size_t kNumEchoRpcs = 1000; + const size_t kWeight75 = 75; + const size_t kWeight25 = 25; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 3)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + // Populating Route Configurations for LDS. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix(""); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + auto* weighted_cluster2 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster2->set_name(kNewCluster2Name); + weighted_cluster2->mutable_weight()->set_value(kWeight25); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75 + kWeight25); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(1, 3); + CheckRpcSendOk(kNumEchoRpcs); + // Make sure RPCs all go to the correct backend. 
+ EXPECT_EQ(0, backends_[0]->backend_service()->request_count()); + const int weight_75_request_count = + backends_[1]->backend_service()->request_count(); + const int weight_25_request_count = + backends_[2]->backend_service()->request_count(); + const double kErrorTolerance = 0.2; + EXPECT_THAT(weight_75_request_count, + ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight75 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEchoRpcs * kWeight75 / 100 * + (1 + kErrorTolerance)))); + // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the + // test from flaking while debugging potential root cause. + const double kErrorToleranceSmallLoad = 0.3; + gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", + weight_75_request_count, weight_25_request_count); + EXPECT_THAT(weight_25_request_count, + ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight25 / 100 * + (1 - kErrorToleranceSmallLoad)), + ::testing::Le(kNumEchoRpcs * kWeight25 / 100 * + (1 + kErrorToleranceSmallLoad)))); +} + +TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const char* kNewCluster3Name = "new_cluster_3"; + const char* kNewEdsService3Name = "new_eds_service_name_3"; + const size_t kNumEcho1Rpcs = 1000; + const size_t kNumEchoRpcs = 10; + const size_t kWeight75 = 75; + const size_t kWeight25 = 25; + const size_t kWeight50 = 50; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args3({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); + new_cluster3.set_name(kNewCluster3Name); + new_cluster3.mutable_eds_cluster_config()->set_service_name( + kNewEdsService3Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster3); + // Populating Route Configurations. 
+ RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + auto* weighted_cluster2 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster2->set_name(kNewCluster2Name); + weighted_cluster2->mutable_weight()->set_value(kWeight25); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75 + kWeight25); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 1); + WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + const int weight_75_request_count = + backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + const int weight_25_request_count = + backends_[2]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + const double kErrorTolerance = 0.2; + EXPECT_THAT(weight_75_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * + (1 + kErrorTolerance)))); + // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the + // test from flaking while debugging potential root cause. + const double kErrorToleranceSmallLoad = 0.3; + gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", + weight_75_request_count, weight_25_request_count); + EXPECT_THAT(weight_25_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * + (1 - kErrorToleranceSmallLoad)), + ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * + (1 + kErrorToleranceSmallLoad)))); + // Change Route Configurations: same clusters different weights. + weighted_cluster1->mutable_weight()->set_value(kWeight50); + weighted_cluster2->mutable_weight()->set_value(kWeight50); + // Change default route to a new cluster to help to identify when new polices + // are seen by the client. + default_route->mutable_route()->set_cluster(kNewCluster3Name); + SetRouteConfiguration(0, new_route_config); + ResetBackendCounters(); + WaitForAllBackends(3, 4); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. 
+ EXPECT_EQ(0, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + const int weight_50_request_count_1 = + backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + const int weight_50_request_count_2 = + backends_[2]->backend_service1()->request_count(); + EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_THAT(weight_50_request_count_1, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * + (1 + kErrorTolerance)))); + EXPECT_THAT(weight_50_request_count_2, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * + (1 + kErrorTolerance)))); +} + +TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const char* kNewCluster3Name = "new_cluster_3"; + const char* kNewEdsService3Name = "new_eds_service_name_3"; + const size_t kNumEcho1Rpcs = 1000; + const size_t kNumEchoRpcs = 10; + const size_t kWeight75 = 75; + const size_t kWeight25 = 25; + const size_t kWeight50 = 50; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args3({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); + new_cluster3.set_name(kNewCluster3Name); + new_cluster3.mutable_eds_cluster_config()->set_service_name( + kNewEdsService3Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster3); + // Populating Route Configurations. 
+ RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* weighted_cluster1 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster1->set_name(kNewCluster1Name); + weighted_cluster1->mutable_weight()->set_value(kWeight75); + auto* weighted_cluster2 = + route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); + weighted_cluster2->set_name(kDefaultClusterName); + weighted_cluster2->mutable_weight()->set_value(kWeight25); + route1->mutable_route() + ->mutable_weighted_clusters() + ->mutable_total_weight() + ->set_value(kWeight75 + kWeight25); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, new_route_config); + WaitForAllBackends(0, 1); + WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + int weight_25_request_count = + backends_[0]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + int weight_75_request_count = + backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + const double kErrorTolerance = 0.2; + EXPECT_THAT(weight_75_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * + (1 + kErrorTolerance)))); + // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the + // test from flaking while debugging potential root cause. + const double kErrorToleranceSmallLoad = 0.3; + gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", + weight_75_request_count, weight_25_request_count); + EXPECT_THAT(weight_25_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * + (1 - kErrorToleranceSmallLoad)), + ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * + (1 + kErrorToleranceSmallLoad)))); + // Change Route Configurations: new set of clusters with different weights. + weighted_cluster1->mutable_weight()->set_value(kWeight50); + weighted_cluster2->set_name(kNewCluster2Name); + weighted_cluster2->mutable_weight()->set_value(kWeight50); + SetRouteConfiguration(0, new_route_config); + ResetBackendCounters(); + WaitForAllBackends(2, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. 
+ EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + const int weight_50_request_count_1 = + backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + const int weight_50_request_count_2 = + backends_[2]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_THAT(weight_50_request_count_1, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * + (1 + kErrorTolerance)))); + EXPECT_THAT(weight_50_request_count_2, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * + (1 + kErrorTolerance)))); + // Change Route Configurations. + weighted_cluster1->mutable_weight()->set_value(kWeight75); + weighted_cluster2->set_name(kNewCluster3Name); + weighted_cluster2->mutable_weight()->set_value(kWeight25); + SetRouteConfiguration(0, new_route_config); + ResetBackendCounters(); + WaitForAllBackends(3, 4, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + // Make sure RPCs all go to the correct backend. + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + weight_75_request_count = backends_[1]->backend_service1()->request_count(); + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + weight_25_request_count = backends_[3]->backend_service1()->request_count(); + EXPECT_THAT(weight_75_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * + (1 - kErrorTolerance)), + ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * + (1 + kErrorTolerance)))); + // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the + // test from flaking while debugging potential root cause. + gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", + weight_75_request_count, weight_25_request_count); + EXPECT_THAT(weight_25_request_count, + ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * + (1 - kErrorToleranceSmallLoad)), + ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * + (1 + kErrorToleranceSmallLoad)))); +} + +TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) { + const char* kNewClusterName = "new_cluster"; + const char* kNewEdsServiceName = "new_eds_service_name"; + const size_t kNumEchoRpcs = 5; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); + // Populate new CDS resources. 
+  Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+  new_cluster.set_name(kNewClusterName);
+  new_cluster.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsServiceName);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+  // Send Route Configuration.
+  RouteConfiguration new_route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  SetRouteConfiguration(0, new_route_config);
+  WaitForAllBackends(0, 1);
+  CheckRpcSendOk(kNumEchoRpcs);
+  // Make sure RPCs all go to the correct backend.
+  EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+  // Change Route Configurations: new default cluster.
+  auto* default_route =
+      new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  default_route->mutable_route()->set_cluster(kNewClusterName);
+  SetRouteConfiguration(0, new_route_config);
+  WaitForAllBackends(1, 2);
+  CheckRpcSendOk(kNumEchoRpcs);
+  // Make sure RPCs all go to the correct backend.
+  EXPECT_EQ(kNumEchoRpcs, backends_[1]->backend_service()->request_count());
+}
+
+TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
+  const char* kNewClusterName = "new_cluster";
+  const char* kNewEdsServiceName = "new_eds_service_name";
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  // Populate new EDS resources.
+  AdsServiceImpl::EdsResourceArgs args({
+      {"locality0", GetBackendPorts(0, 1)},
+  });
+  AdsServiceImpl::EdsResourceArgs args1({
+      {"locality0", GetBackendPorts(1, 2)},
+  });
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args));
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName));
+  // Populate new CDS resources.
+  Cluster new_cluster = balancers_[0]->ads_service()->default_cluster();
+  new_cluster.set_name(kNewClusterName);
+  new_cluster.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsServiceName);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster);
+  // Bring down the current backend (backend 0); this will delay route picking,
+  // resulting in uncommitted RPCs.
+  ShutdownBackend(0);
+  // Send a RouteConfiguration with a default route that points to
+  // backend 0.
+  RouteConfiguration new_route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  SetRouteConfiguration(0, new_route_config);
+  // Send exactly one RPC with no deadline and with wait_for_ready=true.
+  // This RPC will not complete until after backend 0 is started.
+  std::thread sending_rpc([this]() {
+    CheckRpcSendOk(1, RpcOptions().set_wait_for_ready(true).set_timeout_ms(0));
+  });
+  // Send a non-wait_for_ready RPC, which should fail; this tells us that the
+  // client has received the update and attempted to connect.
+  const Status status = SendRpc(RpcOptions().set_timeout_ms(0));
+  EXPECT_FALSE(status.ok());
+  // Send an updated RouteConfiguration to use backend 1.
+  auto* default_route =
+      new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  default_route->mutable_route()->set_cluster(kNewClusterName);
+  SetRouteConfiguration(0, new_route_config);
+  // Wait for RPCs to go to the new backend (backend 1); this ensures that the
+  // client has processed the update.
+  WaitForAllBackends(1, 2, false, RpcOptions(), true);
+  // Bring up the previous backend (backend 0); this allows the delayed RPC to
+  // finally call on_call_committed upon completion.
+ StartBackend(0); + sending_rpc.join(); + // Make sure RPCs go to the correct backend: + EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(1, backends_[1]->backend_service()->request_count()); +} + +TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) { + const char* kNewClusterName = "new_cluster"; + const char* kNewEdsServiceName = "new_eds_service_name"; + const size_t kNumEcho1Rpcs = 100; + const size_t kNumEchoRpcs = 5; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); + // Populate new CDS resources. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Populating Route Configurations for LDS. + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* header_matcher1 = route1->mutable_match()->add_headers(); + header_matcher1->set_name("header1"); + header_matcher1->set_exact_match("POST,PUT,GET"); + auto* header_matcher2 = route1->mutable_match()->add_headers(); + header_matcher2->set_name("header2"); + header_matcher2->mutable_safe_regex_match()->set_regex("[a-z]*"); + auto* header_matcher3 = route1->mutable_match()->add_headers(); + header_matcher3->set_name("header3"); + header_matcher3->mutable_range_match()->set_start(1); + header_matcher3->mutable_range_match()->set_end(1000); + auto* header_matcher4 = route1->mutable_match()->add_headers(); + header_matcher4->set_name("header4"); + header_matcher4->set_present_match(false); + auto* header_matcher5 = route1->mutable_match()->add_headers(); + header_matcher5->set_name("header5"); + header_matcher5->set_prefix_match("/grpc"); + auto* header_matcher6 = route1->mutable_match()->add_headers(); + header_matcher6->set_name("header6"); + header_matcher6->set_suffix_match(".cc"); + header_matcher6->set_invert_match(true); + route1->mutable_route()->set_cluster(kNewClusterName); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + std::vector<std::pair<TString, TString>> metadata = { + {"header1", "POST"}, {"header2", "blah"}, + {"header3", "1"}, {"header5", "/grpc.testing.EchoTest1Service/"}, + {"header1", "PUT"}, {"header6", "grpc.java"}, + {"header1", "GET"}, + }; + const auto header_match_rpc_options = RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + .set_rpc_method(METHOD_ECHO1) + .set_metadata(std::move(metadata)); + // Make sure all backends are up. + WaitForAllBackends(0, 1); + WaitForAllBackends(1, 2, true, header_match_rpc_options); + // Send RPCs. 
+ CheckRpcSendOk(kNumEchoRpcs); + CheckRpcSendOk(kNumEcho1Rpcs, header_match_rpc_options); + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); +} + +TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) { + const char* kNewClusterName = "new_cluster"; + const char* kNewEdsServiceName = "new_eds_service_name"; + const size_t kNumEchoRpcs = 100; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); + // Populate new CDS resources. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Populating Route Configurations for LDS. + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix(""); + auto* header_matcher1 = route1->mutable_match()->add_headers(); + header_matcher1->set_name("content-type"); + header_matcher1->set_exact_match("notapplication/grpc"); + route1->mutable_route()->set_cluster(kNewClusterName); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + auto* header_matcher2 = default_route->mutable_match()->add_headers(); + header_matcher2->set_name("content-type"); + header_matcher2->set_exact_match("application/grpc"); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + // Make sure the backend is up. + WaitForAllBackends(0, 1); + // Send RPCs. + CheckRpcSendOk(kNumEchoRpcs); + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); +} + +TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const size_t kNumEchoRpcs = 100; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. 
+  AdsServiceImpl::EdsResourceArgs args({
+      {"locality0", GetBackendPorts(0, 1)},
+  });
+  AdsServiceImpl::EdsResourceArgs args1({
+      {"locality0", GetBackendPorts(1, 2)},
+  });
+  AdsServiceImpl::EdsResourceArgs args2({
+      {"locality0", GetBackendPorts(2, 3)},
+  });
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args));
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name));
+  balancers_[0]->ads_service()->SetEdsResource(
+      AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name));
+  // Populate new CDS resources.
+  Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster();
+  new_cluster1.set_name(kNewCluster1Name);
+  new_cluster1.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsService1Name);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
+  Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster();
+  new_cluster2.set_name(kNewCluster2Name);
+  new_cluster2.mutable_eds_cluster_config()->set_service_name(
+      kNewEdsService2Name);
+  balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
+  // Populating Route Configurations for LDS.
+  RouteConfiguration route_config =
+      balancers_[0]->ads_service()->default_route_config();
+  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
+  route1->mutable_match()->set_prefix("");
+  auto* header_matcher1 = route1->mutable_match()->add_headers();
+  header_matcher1->set_name("grpc-foo-bin");
+  header_matcher1->set_present_match(true);
+  route1->mutable_route()->set_cluster(kNewCluster1Name);
+  auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
+  route2->mutable_match()->set_prefix("");
+  auto* header_matcher2 = route2->mutable_match()->add_headers();
+  header_matcher2->set_name("grpc-previous-rpc-attempts");
+  header_matcher2->set_present_match(true);
+  route2->mutable_route()->set_cluster(kNewCluster2Name);
+  auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
+  default_route->mutable_match()->set_prefix("");
+  default_route->mutable_route()->set_cluster(kDefaultClusterName);
+  SetRouteConfiguration(0, route_config);
+  // Send headers that are set on the RPCs but that the client should ignore
+  // when matching routes, so neither of the routes above matches.
+  std::vector<std::pair<TString, TString>> metadata = {
+      {"grpc-foo-bin", "grpc-foo-bin"},
+      {"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"},
+  };
+  WaitForAllBackends(0, 1);
+  CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
+  // Verify that only the default backend got RPCs since all previous routes
+  // were mismatched.
+  EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
+  EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
+  EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
+  const auto& response_state = RouteConfigurationResponseState(0);
+  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
+}
+
+TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
+  const char* kNewClusterName = "new_cluster";
+  const char* kNewEdsServiceName = "new_eds_service_name";
+  const size_t kNumRpcs = 1000;
+  SetNextResolution({});
+  SetNextResolutionForLbChannelAllBalancers();
+  // Populate new EDS resources.
+ AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); + // Populate new CDS resources. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Populating Route Configurations for LDS. + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match() + ->mutable_runtime_fraction() + ->mutable_default_value() + ->set_numerator(25); + route1->mutable_route()->set_cluster(kNewClusterName); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumRpcs); + const int default_backend_count = + backends_[0]->backend_service()->request_count(); + const int matched_backend_count = + backends_[1]->backend_service()->request_count(); + const double kErrorTolerance = 0.2; + EXPECT_THAT(default_backend_count, + ::testing::AllOf( + ::testing::Ge(kNumRpcs * 75 / 100 * (1 - kErrorTolerance)), + ::testing::Le(kNumRpcs * 75 / 100 * (1 + kErrorTolerance)))); + EXPECT_THAT(matched_backend_count, + ::testing::AllOf( + ::testing::Ge(kNumRpcs * 25 / 100 * (1 - kErrorTolerance)), + ::testing::Le(kNumRpcs * 25 / 100 * (1 + kErrorTolerance)))); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); +} + +TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewEdsService1Name = "new_eds_service_name_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const char* kNewEdsService2Name = "new_eds_service_name_2"; + const char* kNewCluster3Name = "new_cluster_3"; + const char* kNewEdsService3Name = "new_eds_service_name_3"; + const size_t kNumEcho1Rpcs = 100; + const size_t kNumEchoRpcs = 5; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args3({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); + // Populate new CDS resources. 
+ Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + new_cluster1.mutable_eds_cluster_config()->set_service_name( + kNewEdsService1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + new_cluster2.mutable_eds_cluster_config()->set_service_name( + kNewEdsService2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2); + Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); + new_cluster3.set_name(kNewCluster3Name); + new_cluster3.mutable_eds_cluster_config()->set_service_name( + kNewEdsService3Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster3); + // Populating Route Configurations for LDS. + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* header_matcher1 = route1->mutable_match()->add_headers(); + header_matcher1->set_name("header1"); + header_matcher1->set_exact_match("POST"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto route2 = route_config.mutable_virtual_hosts(0)->add_routes(); + route2->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* header_matcher2 = route2->mutable_match()->add_headers(); + header_matcher2->set_name("header2"); + header_matcher2->mutable_range_match()->set_start(1); + header_matcher2->mutable_range_match()->set_end(1000); + route2->mutable_route()->set_cluster(kNewCluster2Name); + auto route3 = route_config.mutable_virtual_hosts(0)->add_routes(); + route3->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + auto* header_matcher3 = route3->mutable_match()->add_headers(); + header_matcher3->set_name("header3"); + header_matcher3->mutable_safe_regex_match()->set_regex("[a-z]*"); + route3->mutable_route()->set_cluster(kNewCluster3Name); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + // Send headers which will mismatch each route + std::vector<std::pair<TString, TString>> metadata = { + {"header1", "POST"}, + {"header2", "1000"}, + {"header3", "123"}, + {"header1", "GET"}, + }; + WaitForAllBackends(0, 1); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata)); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + .set_rpc_method(METHOD_ECHO1) + .set_metadata(metadata)); + // Verify that only the default backend got RPCs since all previous routes + // were mismatched. 
+ for (size_t i = 1; i < 4; ++i) { + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); + } + EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); + const auto& response_state = RouteConfigurationResponseState(0); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); +} + +TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) { + const char* kNewClusterName = "new_cluster"; + const char* kNewEdsServiceName = "new_eds_service_name"; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(1, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); + // Populate new CDS resources. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + new_cluster.mutable_eds_cluster_config()->set_service_name( + kNewEdsServiceName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster); + // Populating Route Configurations for LDS. + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); + route1->mutable_route()->set_cluster(kNewClusterName); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultClusterName); + SetRouteConfiguration(0, route_config); + // Make sure all backends are up and that requests for each RPC + // service go to the right backends. + WaitForAllBackends(0, 1, false); + WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO2)); + // Requests for services Echo and Echo2 should have gone to backend 0. + EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(1, backends_[0]->backend_service2()->request_count()); + // Requests for service Echo1 should have gone to backend 1. + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + EXPECT_EQ(1, backends_[1]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); + // Now send an update that changes the first route to match a + // different RPC service, and wait for the client to make the change. + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/"); + SetRouteConfiguration(0, route_config); + WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO2)); + // Now repeat the earlier test, making sure all traffic goes to the + // right place. 
+ WaitForAllBackends(0, 1, false); + WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO1)); + WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO2)); + // Requests for services Echo and Echo1 should have gone to backend 0. + EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(1, backends_[0]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); + // Requests for service Echo2 should have gone to backend 1. + EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[1]->backend_service1()->request_count()); + EXPECT_EQ(1, backends_[1]->backend_service2()->request_count()); +} + +using CdsTest = BasicTest; + +// Tests that CDS client should send an ACK upon correct CDS response. +TEST_P(CdsTest, Vanilla) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + (void)SendRpc(); + EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state, + AdsServiceImpl::ResponseState::ACKED); +} + +// Tests that CDS client should send a NACK if the cluster type in CDS response +// is other than EDS. +TEST_P(CdsTest, WrongClusterType) { + auto cluster = balancers_[0]->ads_service()->default_cluster(); + cluster.set_type(Cluster::STATIC); + balancers_[0]->ads_service()->SetCdsResource(cluster); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->cds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "DiscoveryType is not EDS."); +} + +// Tests that CDS client should send a NACK if the eds_config in CDS response is +// other than ADS. +TEST_P(CdsTest, WrongEdsConfig) { + auto cluster = balancers_[0]->ads_service()->default_cluster(); + cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self(); + balancers_[0]->ads_service()->SetCdsResource(cluster); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->cds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "EDS ConfigSource is not ADS."); +} + +// Tests that CDS client should send a NACK if the lb_policy in CDS response is +// other than ROUND_ROBIN. +TEST_P(CdsTest, WrongLbPolicy) { + auto cluster = balancers_[0]->ads_service()->default_cluster(); + cluster.set_lb_policy(Cluster::LEAST_REQUEST); + balancers_[0]->ads_service()->SetCdsResource(cluster); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->cds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "LB policy is not ROUND_ROBIN."); +} + +// Tests that CDS client should send a NACK if the lrs_server in CDS response is +// other than SELF. 
+TEST_P(CdsTest, WrongLrsServer) { + auto cluster = balancers_[0]->ads_service()->default_cluster(); + cluster.mutable_lrs_server()->mutable_ads(); + balancers_[0]->ads_service()->SetCdsResource(cluster); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->cds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, "LRS ConfigSource is not self."); +} + +using EdsTest = BasicTest; + +// Tests that EDS client should send a NACK if the EDS update contains +// sparse priorities. +TEST_P(EdsTest, NacksSparsePriorityList) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args)); + CheckRpcSendFailure(); + const auto& response_state = + balancers_[0]->ads_service()->eds_response_state(); + EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); + EXPECT_EQ(response_state.error_message, + "EDS update includes sparse priority list"); +} + +// In most of our tests, we use different names for different resource +// types, to make sure that there are no cut-and-paste errors in the code +// that cause us to look at data for the wrong resource type. So we add +// this test to make sure that the EDS resource name defaults to the +// cluster name if not specified in the CDS resource. +TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) { + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, kDefaultClusterName)); + Cluster cluster = balancers_[0]->ads_service()->default_cluster(); + cluster.mutable_eds_cluster_config()->clear_service_name(); + balancers_[0]->ads_service()->SetCdsResource(cluster); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendOk(); +} + +class TimeoutTest : public BasicTest { + protected: + void SetUp() override { + xds_resource_does_not_exist_timeout_ms_ = 500; + BasicTest::SetUp(); + } +}; + +// Tests that LDS client times out when no response received. +TEST_P(TimeoutTest, Lds) { + balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); +} + +TEST_P(TimeoutTest, Rds) { + balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); +} + +// Tests that CDS client times out when no response received. +TEST_P(TimeoutTest, Cds) { + balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); +} + +TEST_P(TimeoutTest, Eds) { + balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); } -using LdsTest = BasicTest; - -// Tests that LDS client should send a NACK if there is no API listener in the -// Listener in the LDS response. 
-TEST_P(LdsTest, NoApiListener) { - auto listener = balancers_[0]->ads_service()->default_listener(); - listener.clear_api_listener(); - balancers_[0]->ads_service()->SetLdsResource(listener); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->lds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "Listener has no ApiListener."); -} - -// Tests that LDS client should send a NACK if the route_specifier in the -// http_connection_manager is neither inlined route_config nor RDS. -TEST_P(LdsTest, WrongRouteSpecifier) { - auto listener = balancers_[0]->ads_service()->default_listener(); - HttpConnectionManager http_connection_manager; - http_connection_manager.mutable_scoped_routes(); - listener.mutable_api_listener()->mutable_api_listener()->PackFrom( - http_connection_manager); - balancers_[0]->ads_service()->SetLdsResource(listener); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->lds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "HttpConnectionManager neither has inlined route_config nor RDS."); -} - -// Tests that LDS client should send a NACK if the rds message in the -// http_connection_manager is missing the config_source field. -TEST_P(LdsTest, RdsMissingConfigSource) { - auto listener = balancers_[0]->ads_service()->default_listener(); - HttpConnectionManager http_connection_manager; - http_connection_manager.mutable_rds()->set_route_config_name( - kDefaultRouteConfigurationName); - listener.mutable_api_listener()->mutable_api_listener()->PackFrom( - http_connection_manager); - balancers_[0]->ads_service()->SetLdsResource(listener); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->lds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "HttpConnectionManager missing config_source for RDS."); -} - -// Tests that LDS client should send a NACK if the rds message in the -// http_connection_manager has a config_source field that does not specify ADS. -TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) { - auto listener = balancers_[0]->ads_service()->default_listener(); - HttpConnectionManager http_connection_manager; - auto* rds = http_connection_manager.mutable_rds(); - rds->set_route_config_name(kDefaultRouteConfigurationName); - rds->mutable_config_source()->mutable_self(); - listener.mutable_api_listener()->mutable_api_listener()->PackFrom( - http_connection_manager); - balancers_[0]->ads_service()->SetLdsResource(listener); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->lds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "HttpConnectionManager ConfigSource for RDS does not specify ADS."); -} - -using LdsRdsTest = BasicTest; - -// Tests that LDS client should send an ACK upon correct LDS response (with -// inlined RDS result). 
-TEST_P(LdsRdsTest, Vanilla) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - (void)SendRpc(); - EXPECT_EQ(RouteConfigurationResponseState(0).state, - AdsServiceImpl::ResponseState::ACKED); - // Make sure we actually used the RPC service for the right version of xDS. - EXPECT_EQ(balancers_[0]->ads_service()->seen_v2_client(), - GetParam().use_v2()); - EXPECT_NE(balancers_[0]->ads_service()->seen_v3_client(), - GetParam().use_v2()); -} - -// Tests that we go into TRANSIENT_FAILURE if the Listener is removed. -TEST_P(LdsRdsTest, ListenerRemoved) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - // We need to wait for all backends to come online. - WaitForAllBackends(); - // Unset LDS resource. - balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName); - // Wait for RPCs to start failing. - do { - } while (SendRpc(RpcOptions(), nullptr).ok()); - // Make sure RPCs are still failing. - CheckRpcSendFailure(1000); - // Make sure we ACK'ed the update. - EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state, - AdsServiceImpl::ResponseState::ACKED); -} - -// Tests that LDS client ACKs but fails if matching domain can't be found in -// the LDS response. -TEST_P(LdsRdsTest, NoMatchedDomain) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - route_config.mutable_virtual_hosts(0)->clear_domains(); - route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - // Do a bit of polling, to allow the ACK to get to the ADS server. - channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100)); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -// Tests that LDS client should choose the virtual host with matching domain if -// multiple virtual hosts exist in the LDS response. -TEST_P(LdsRdsTest, ChooseMatchedDomain) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - *(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0); - route_config.mutable_virtual_hosts(0)->clear_domains(); - route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - (void)SendRpc(); - EXPECT_EQ(RouteConfigurationResponseState(0).state, - AdsServiceImpl::ResponseState::ACKED); -} - -// Tests that LDS client should choose the last route in the virtual host if -// multiple routes exist in the LDS response. 
-TEST_P(LdsRdsTest, ChooseLastRoute) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  *(route_config.mutable_virtual_hosts(0)->add_routes()) =
-      route_config.virtual_hosts(0).routes(0);
-  route_config.mutable_virtual_hosts(0)
-      ->mutable_routes(0)
-      ->mutable_route()
-      ->mutable_cluster_header();
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  (void)SendRpc();
-  EXPECT_EQ(RouteConfigurationResponseState(0).state,
-            AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should send a NACK if route match has a case_sensitive
-// set to false.
-TEST_P(LdsRdsTest, RouteMatchHasCaseSensitiveFalse) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->mutable_case_sensitive()->set_value(false);
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message,
-            "case_sensitive if set must be set to true.");
-}
-
-// Tests that LDS client should ignore a route which has query_parameters.
-TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
-  route1->mutable_match()->add_query_parameters();
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should send an ACK if route match has a prefix
-// that is either empty or a single slash.
-TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_prefix("");
-  auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
-  default_route->mutable_match()->set_prefix("/");
-  default_route->mutable_route()->set_cluster(kDefaultClusterName);
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  (void)SendRpc();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
-}
-
-// Tests that LDS client should ignore a route whose path prefix string
-// does not start with "/".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore a route whose prefix string has
-// more than 2 slashes.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore a route whose prefix string is
-// "//".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_prefix("//");
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore a route whose path is set
-// but empty.
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
-  RouteConfiguration route_config =
-      balancers_[0]->ads_service()->default_route_config();
-  auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
-  route1->mutable_match()->set_path("");
-  SetRouteConfiguration(0, route_config);
-  SetNextResolution({});
-  SetNextResolutionForLbChannelAllBalancers();
-  CheckRpcSendFailure();
-  const auto& response_state = RouteConfigurationResponseState(0);
-  EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
-  EXPECT_EQ(response_state.error_message, "No valid routes specified.");
-}
-
-// Tests that LDS client should ignore a route whose path string
-// does not start with "/".
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No valid routes specified."); -} - -// Tests that LDS client should ignore route which has path -// string that has too many slashes; for example, ends with "/". -TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No valid routes specified."); -} - -// Tests that LDS client should ignore route which has path -// string that has only 1 slash: missing "/" between service and method. -TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No valid routes specified."); -} - -// Tests that LDS client should ignore route which has path -// string that is missing service. -TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("//Echo1"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No valid routes specified."); -} - -// Tests that LDS client should ignore route which has path -// string that is missing method. 
-TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/"); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No valid routes specified."); -} - -// Test that LDS client should reject route which has invalid path regex. -TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) { - const char* kNewCluster1Name = "new_cluster_1"; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "Invalid regex string specified in path matcher."); -} - -// Tests that LDS client should send a NACK if route has an action other than -// RouteAction in the LDS response. -TEST_P(LdsRdsTest, RouteHasNoRouteAction) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect(); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "No RouteAction found in route."); -} - -TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) { - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - route1->mutable_route()->set_cluster(""); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "RouteAction cluster contains empty cluster name."); -} - -TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) { - const size_t kWeight75 = 75; - const char* kNewCluster1Name = "new_cluster_1"; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = 
- route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75 + 1); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "RouteAction weighted_cluster has incorrect total weight"); -} - -TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) { - const size_t kWeight75 = 75; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(""); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ( - response_state.error_message, - "RouteAction weighted_cluster cluster contains empty cluster name."); -} - -TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) { - const size_t kWeight75 = 75; - const char* kNewCluster1Name = "new_cluster_1"; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "RouteAction weighted_cluster cluster missing weight"); -} - -TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) { - const char* kNewCluster1Name = "new_cluster_1"; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); 
- auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("header1"); - header_matcher1->mutable_safe_regex_match()->set_regex("a[z-a]"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "Invalid regex string specified in header matcher."); -} - -TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) { - const char* kNewCluster1Name = "new_cluster_1"; - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("header1"); - header_matcher1->mutable_range_match()->set_start(1001); - header_matcher1->mutable_range_match()->set_end(1000); - route1->mutable_route()->set_cluster(kNewCluster1Name); - SetRouteConfiguration(0, route_config); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "Invalid range header matcher specifier specified: end " - "cannot be smaller than start."); -} - -// Tests that LDS client should choose the default route (with no matching -// specified) after unable to find a match with previous routes. -TEST_P(LdsRdsTest, XdsRoutingPathMatching) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEcho1Rpcs = 10; - const size_t kNumEcho2Rpcs = 20; - const size_t kNumEchoRpcs = 30; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. 
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); - route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2"); - route2->mutable_route()->set_cluster(kNewCluster2Name); - auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes(); - route3->mutable_match()->set_path("/grpc.testing.EchoTest3Service/Echo3"); - route3->mutable_route()->set_cluster(kDefaultClusterName); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() - .set_rpc_service(SERVICE_ECHO1) - .set_rpc_method(METHOD_ECHO1) - .set_wait_for_ready(true)); - CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions() - .set_rpc_service(SERVICE_ECHO2) - .set_rpc_method(METHOD_ECHO2) - .set_wait_for_ready(true)); - // Make sure RPCs all go to the correct backend. - for (size_t i = 0; i < 2; ++i) { - EXPECT_EQ(kNumEchoRpcs / 2, - backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); - } - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); -} - -TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEcho1Rpcs = 10; - const size_t kNumEcho2Rpcs = 20; - const size_t kNumEchoRpcs = 30; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. 
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); - route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/"); - route2->mutable_route()->set_cluster(kNewCluster2Name); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); - CheckRpcSendOk( - kNumEcho1Rpcs, - RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true)); - CheckRpcSendOk( - kNumEcho2Rpcs, - RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true)); - // Make sure RPCs all go to the correct backend. - for (size_t i = 0; i < 2; ++i) { - EXPECT_EQ(kNumEchoRpcs / 2, - backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); - } - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); -} - -TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEcho1Rpcs = 10; - const size_t kNumEcho2Rpcs = 20; - const size_t kNumEchoRpcs = 30; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. 
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - // Will match "/grpc.testing.EchoTest1Service/" - route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); - // Will match "/grpc.testing.EchoTest2Service/" - route2->mutable_match()->mutable_safe_regex()->set_regex(".*2.*"); - route2->mutable_route()->set_cluster(kNewCluster2Name); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); - CheckRpcSendOk( - kNumEcho1Rpcs, - RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true)); - CheckRpcSendOk( - kNumEcho2Rpcs, - RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true)); - // Make sure RPCs all go to the correct backend. 
- for (size_t i = 0; i < 2; ++i) { - EXPECT_EQ(kNumEchoRpcs / 2, - backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); - } - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); -} - -TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEcho1Rpcs = 1000; - const size_t kNumEchoRpcs = 10; - const size_t kWeight75 = 75; - const size_t kWeight25 = 25; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. 
- RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - auto* weighted_cluster2 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster2->set_name(kNewCluster2Name); - weighted_cluster2->mutable_weight()->set_value(kWeight25); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75 + kWeight25); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 1); - WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - const int weight_75_request_count = - backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - const int weight_25_request_count = - backends_[2]->backend_service1()->request_count(); - const double kErrorTolerance = 0.2; - EXPECT_THAT(weight_75_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * - (1 + kErrorTolerance)))); - // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the - // test from flaking while debugging potential root cause. - const double kErrorToleranceSmallLoad = 0.3; - gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", - weight_75_request_count, weight_25_request_count); - EXPECT_THAT(weight_25_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * - (1 - kErrorToleranceSmallLoad)), - ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * - (1 + kErrorToleranceSmallLoad)))); -} - -TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEchoRpcs = 1000; - const size_t kWeight75 = 75; - const size_t kWeight25 = 25; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. 
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix(""); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - auto* weighted_cluster2 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster2->set_name(kNewCluster2Name); - weighted_cluster2->mutable_weight()->set_value(kWeight25); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75 + kWeight25); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(1, 3); - CheckRpcSendOk(kNumEchoRpcs); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(0, backends_[0]->backend_service()->request_count()); - const int weight_75_request_count = - backends_[1]->backend_service()->request_count(); - const int weight_25_request_count = - backends_[2]->backend_service()->request_count(); - const double kErrorTolerance = 0.2; - EXPECT_THAT(weight_75_request_count, - ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight75 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEchoRpcs * kWeight75 / 100 * - (1 + kErrorTolerance)))); - // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the - // test from flaking while debugging potential root cause. 
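The 25-weight target receives only about a quarter of the 1000 Echo1 RPCs, so its absolute count is small and comparatively noisy; that is why the tolerance is widened to 30% for it just below, while the 75-weight target keeps the 20% tolerance. The resulting bounds follow directly from the constants above (illustrative locals, not part of the test):

// 75% target: 1000 * 75 / 100 = 750 expected, +/-20% -> [600, 900]
// 25% target: 1000 * 25 / 100 = 250 expected, +/-30% -> [175, 325]
const double lo75 = kNumEcho1Rpcs * kWeight75 / 100 * (1 - 0.2);  // 600
const double hi75 = kNumEcho1Rpcs * kWeight75 / 100 * (1 + 0.2);  // 900
const double lo25 = kNumEcho1Rpcs * kWeight25 / 100 * (1 - 0.3);  // 175
const double hi25 = kNumEcho1Rpcs * kWeight25 / 100 * (1 + 0.3);  // 325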
- const double kErrorToleranceSmallLoad = 0.3; - gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", - weight_75_request_count, weight_25_request_count); - EXPECT_THAT(weight_25_request_count, - ::testing::AllOf(::testing::Ge(kNumEchoRpcs * kWeight25 / 100 * - (1 - kErrorToleranceSmallLoad)), - ::testing::Le(kNumEchoRpcs * kWeight25 / 100 * - (1 + kErrorToleranceSmallLoad)))); -} - -TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const char* kNewCluster3Name = "new_cluster_3"; - const char* kNewEdsService3Name = "new_eds_service_name_3"; - const size_t kNumEcho1Rpcs = 1000; - const size_t kNumEchoRpcs = 10; - const size_t kWeight75 = 75; - const size_t kWeight25 = 25; - const size_t kWeight50 = 50; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args3({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); - new_cluster3.set_name(kNewCluster3Name); - new_cluster3.mutable_eds_cluster_config()->set_service_name( - kNewEdsService3Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster3); - // Populating Route Configurations. 
- RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - auto* weighted_cluster2 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster2->set_name(kNewCluster2Name); - weighted_cluster2->mutable_weight()->set_value(kWeight25); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75 + kWeight25); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 1); - WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - const int weight_75_request_count = - backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - const int weight_25_request_count = - backends_[2]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - const double kErrorTolerance = 0.2; - EXPECT_THAT(weight_75_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * - (1 + kErrorTolerance)))); - // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the - // test from flaking while debugging potential root cause. - const double kErrorToleranceSmallLoad = 0.3; - gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", - weight_75_request_count, weight_25_request_count); - EXPECT_THAT(weight_25_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * - (1 - kErrorToleranceSmallLoad)), - ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * - (1 + kErrorToleranceSmallLoad)))); - // Change Route Configurations: same clusters different weights. - weighted_cluster1->mutable_weight()->set_value(kWeight50); - weighted_cluster2->mutable_weight()->set_value(kWeight50); - // Change default route to a new cluster to help to identify when new polices - // are seen by the client. - default_route->mutable_route()->set_cluster(kNewCluster3Name); - SetRouteConfiguration(0, new_route_config); - ResetBackendCounters(); - WaitForAllBackends(3, 4); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. 
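After this update, plain Echo traffic should land entirely on backend 3, since the default route now points at the new third cluster, while the 1000 Echo1 RPCs should split roughly evenly between backends 1 and 2. With the same 20% tolerance, each 50-weight target is expected to land in [400, 600], which is what the assertions below check (illustrative arithmetic only):

// 50% target: 1000 * 50 / 100 = 500 expected, +/-20% -> [400, 600]
const double lo50 = kNumEcho1Rpcs * kWeight50 / 100 * (1 - kErrorTolerance);  // 400
const double hi50 = kNumEcho1Rpcs * kWeight50 / 100 * (1 + kErrorTolerance);  // 600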
- EXPECT_EQ(0, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - const int weight_50_request_count_1 = - backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - const int weight_50_request_count_2 = - backends_[2]->backend_service1()->request_count(); - EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - EXPECT_THAT(weight_50_request_count_1, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * - (1 + kErrorTolerance)))); - EXPECT_THAT(weight_50_request_count_2, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * - (1 + kErrorTolerance)))); -} - -TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const char* kNewCluster3Name = "new_cluster_3"; - const char* kNewEdsService3Name = "new_eds_service_name_3"; - const size_t kNumEcho1Rpcs = 1000; - const size_t kNumEchoRpcs = 10; - const size_t kWeight75 = 75; - const size_t kWeight25 = 25; - const size_t kWeight50 = 50; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args3({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); - new_cluster3.set_name(kNewCluster3Name); - new_cluster3.mutable_eds_cluster_config()->set_service_name( - kNewEdsService3Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster3); - // Populating Route Configurations. 
- RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* weighted_cluster1 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster1->set_name(kNewCluster1Name); - weighted_cluster1->mutable_weight()->set_value(kWeight75); - auto* weighted_cluster2 = - route1->mutable_route()->mutable_weighted_clusters()->add_clusters(); - weighted_cluster2->set_name(kDefaultClusterName); - weighted_cluster2->mutable_weight()->set_value(kWeight25); - route1->mutable_route() - ->mutable_weighted_clusters() - ->mutable_total_weight() - ->set_value(kWeight75 + kWeight25); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 1); - WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - int weight_25_request_count = - backends_[0]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - int weight_75_request_count = - backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - const double kErrorTolerance = 0.2; - EXPECT_THAT(weight_75_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * - (1 + kErrorTolerance)))); - // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the - // test from flaking while debugging potential root cause. - const double kErrorToleranceSmallLoad = 0.3; - gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", - weight_75_request_count, weight_25_request_count); - EXPECT_THAT(weight_25_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * - (1 - kErrorToleranceSmallLoad)), - ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * - (1 + kErrorToleranceSmallLoad)))); - // Change Route Configurations: new set of clusters with different weights. - weighted_cluster1->mutable_weight()->set_value(kWeight50); - weighted_cluster2->set_name(kNewCluster2Name); - weighted_cluster2->mutable_weight()->set_value(kWeight50); - SetRouteConfiguration(0, new_route_config); - ResetBackendCounters(); - WaitForAllBackends(2, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. 
- EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - const int weight_50_request_count_1 = - backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - const int weight_50_request_count_2 = - backends_[2]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); - EXPECT_THAT(weight_50_request_count_1, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * - (1 + kErrorTolerance)))); - EXPECT_THAT(weight_50_request_count_2, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight50 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight50 / 100 * - (1 + kErrorTolerance)))); - // Change Route Configurations. - weighted_cluster1->mutable_weight()->set_value(kWeight75); - weighted_cluster2->set_name(kNewCluster3Name); - weighted_cluster2->mutable_weight()->set_value(kWeight25); - SetRouteConfiguration(0, new_route_config); - ResetBackendCounters(); - WaitForAllBackends(3, 4, true, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - weight_75_request_count = backends_[1]->backend_service1()->request_count(); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); - weight_25_request_count = backends_[3]->backend_service1()->request_count(); - EXPECT_THAT(weight_75_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight75 / 100 * - (1 - kErrorTolerance)), - ::testing::Le(kNumEcho1Rpcs * kWeight75 / 100 * - (1 + kErrorTolerance)))); - // TODO: (@donnadionne) Reduce tolerance: increased the tolerance to keep the - // test from flaking while debugging potential root cause. - gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs", - weight_75_request_count, weight_25_request_count); - EXPECT_THAT(weight_25_request_count, - ::testing::AllOf(::testing::Ge(kNumEcho1Rpcs * kWeight25 / 100 * - (1 - kErrorToleranceSmallLoad)), - ::testing::Le(kNumEcho1Rpcs * kWeight25 / 100 * - (1 + kErrorToleranceSmallLoad)))); -} - -TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - const size_t kNumEchoRpcs = 5; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. 
- Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Send Route Configuration. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(0, 1); - CheckRpcSendOk(kNumEchoRpcs); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - // Change Route Configurations: new default cluster. - auto* default_route = - new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - default_route->mutable_route()->set_cluster(kNewClusterName); - SetRouteConfiguration(0, new_route_config); - WaitForAllBackends(1, 2); - CheckRpcSendOk(kNumEchoRpcs); - // Make sure RPCs all go to the correct backend. - EXPECT_EQ(kNumEchoRpcs, backends_[1]->backend_service()->request_count()); -} - -TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Bring down the current backend: 0, this will delay route picking time, - // resulting in un-committed RPCs. - ShutdownBackend(0); - // Send a RouteConfiguration with a default route that points to - // backend 0. - RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - SetRouteConfiguration(0, new_route_config); - // Send exactly one RPC with no deadline and with wait_for_ready=true. - // This RPC will not complete until after backend 0 is started. - std::thread sending_rpc([this]() { - CheckRpcSendOk(1, RpcOptions().set_wait_for_ready(true).set_timeout_ms(0)); - }); - // Send a non-wait_for_ready RPC which should fail, this will tell us - // that the client has received the update and attempted to connect. - const Status status = SendRpc(RpcOptions().set_timeout_ms(0)); - EXPECT_FALSE(status.ok()); - // Send a update RouteConfiguration to use backend 1. - auto* default_route = - new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - default_route->mutable_route()->set_cluster(kNewClusterName); - SetRouteConfiguration(0, new_route_config); - // Wait for RPCs to go to the new backend: 1, this ensures that the client has - // processed the update. - WaitForAllBackends(1, 2, false, RpcOptions(), true); - // Bring up the previous backend: 0, this will allow the delayed RPC to - // finally call on_call_committed upon completion. 
- StartBackend(0); - sending_rpc.join(); - // Make sure RPCs go to the correct backend: - EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(1, backends_[1]->backend_service()->request_count()); -} - -TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - const size_t kNumEcho1Rpcs = 100; - const size_t kNumEchoRpcs = 5; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("header1"); - header_matcher1->set_exact_match("POST,PUT,GET"); - auto* header_matcher2 = route1->mutable_match()->add_headers(); - header_matcher2->set_name("header2"); - header_matcher2->mutable_safe_regex_match()->set_regex("[a-z]*"); - auto* header_matcher3 = route1->mutable_match()->add_headers(); - header_matcher3->set_name("header3"); - header_matcher3->mutable_range_match()->set_start(1); - header_matcher3->mutable_range_match()->set_end(1000); - auto* header_matcher4 = route1->mutable_match()->add_headers(); - header_matcher4->set_name("header4"); - header_matcher4->set_present_match(false); - auto* header_matcher5 = route1->mutable_match()->add_headers(); - header_matcher5->set_name("header5"); - header_matcher5->set_prefix_match("/grpc"); - auto* header_matcher6 = route1->mutable_match()->add_headers(); - header_matcher6->set_name("header6"); - header_matcher6->set_suffix_match(".cc"); - header_matcher6->set_invert_match(true); - route1->mutable_route()->set_cluster(kNewClusterName); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - std::vector<std::pair<TString, TString>> metadata = { - {"header1", "POST"}, {"header2", "blah"}, - {"header3", "1"}, {"header5", "/grpc.testing.EchoTest1Service/"}, - {"header1", "PUT"}, {"header6", "grpc.java"}, - {"header1", "GET"}, - }; - const auto header_match_rpc_options = RpcOptions() - .set_rpc_service(SERVICE_ECHO1) - .set_rpc_method(METHOD_ECHO1) - .set_metadata(std::move(metadata)); - // Make sure all backends are up. - WaitForAllBackends(0, 1); - WaitForAllBackends(1, 2, true, header_match_rpc_options); - // Send RPCs. 
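The metadata vector above is chosen so that every matcher on route1 is satisfied: header2 "blah" fits the regex, header3 1 lies inside the range, header4 is never sent (present_match(false)), header5 carries the "/grpc" prefix, and header6 "grpc.java" lacks the ".cc" suffix, which invert_match turns into a match. For header1, the working assumption is that repeated values of a custom metadata key are concatenated with commas before matching, which is how the three header1 entries line up with the exact_match of "POST,PUT,GET". A minimal sketch of that joining (illustrative only; the real concatenation happens inside the client):

std::vector<TString> header1_values = {"POST", "PUT", "GET"};
TString joined;
for (const auto& value : header1_values) {
  if (!joined.empty()) joined += ",";
  joined += value;
}
EXPECT_EQ("POST,PUT,GET", joined);  // equals header_matcher1's exact_match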
- CheckRpcSendOk(kNumEchoRpcs); - CheckRpcSendOk(kNumEcho1Rpcs, header_match_rpc_options); - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - const size_t kNumEchoRpcs = 100; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix(""); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("content-type"); - header_matcher1->set_exact_match("notapplication/grpc"); - route1->mutable_route()->set_cluster(kNewClusterName); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - auto* header_matcher2 = default_route->mutable_match()->add_headers(); - header_matcher2->set_name("content-type"); - header_matcher2->set_exact_match("application/grpc"); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - // Make sure the backend is up. - WaitForAllBackends(0, 1); - // Send RPCs. - CheckRpcSendOk(kNumEchoRpcs); - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const size_t kNumEchoRpcs = 100; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. 
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix(""); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("grpc-foo-bin"); - header_matcher1->set_present_match(true); - route1->mutable_route()->set_cluster(kNewCluster1Name); - auto route2 = route_config.mutable_virtual_hosts(0)->add_routes(); - route2->mutable_match()->set_prefix(""); - auto* header_matcher2 = route2->mutable_match()->add_headers(); - header_matcher2->set_name("grpc-previous-rpc-attempts"); - header_matcher2->set_present_match(true); - route2->mutable_route()->set_cluster(kNewCluster2Name); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - // Send headers which will mismatch each route - std::vector<std::pair<TString, TString>> metadata = { - {"grpc-foo-bin", "grpc-foo-bin"}, - {"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"}, - }; - WaitForAllBackends(0, 1); - CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata)); - // Verify that only the default backend got RPCs since all previous routes - // were mismatched. - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - const size_t kNumRpcs = 1000; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. 
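This test routes on runtime_fraction rather than on path or headers: default_value.numerator is set to 25 and, assuming the FractionalPercent denominator defaults to HUNDRED when left unset, roughly a quarter of the kNumRpcs RPCs should take the new-cluster route while the rest fall through to the default route. Illustrative bounds with the 20% tolerance used below (sketch locals, not part of the test):

// matched (25%): 1000 * 25 / 100 = 250, +/-20% -> [200, 300]
// default (75%): 1000 * 75 / 100 = 750, +/-20% -> [600, 900]
const double matched_lo = kNumRpcs * 25 / 100 * (1 - 0.2);  // 200
const double matched_hi = kNumRpcs * 25 / 100 * (1 + 0.2);  // 300
const double default_lo = kNumRpcs * 75 / 100 * (1 - 0.2);  // 600
const double default_hi = kNumRpcs * 75 / 100 * (1 + 0.2);  // 900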
- AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match() - ->mutable_runtime_fraction() - ->mutable_default_value() - ->set_numerator(25); - route1->mutable_route()->set_cluster(kNewClusterName); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumRpcs); - const int default_backend_count = - backends_[0]->backend_service()->request_count(); - const int matched_backend_count = - backends_[1]->backend_service()->request_count(); - const double kErrorTolerance = 0.2; - EXPECT_THAT(default_backend_count, - ::testing::AllOf( - ::testing::Ge(kNumRpcs * 75 / 100 * (1 - kErrorTolerance)), - ::testing::Le(kNumRpcs * 75 / 100 * (1 + kErrorTolerance)))); - EXPECT_THAT(matched_backend_count, - ::testing::AllOf( - ::testing::Ge(kNumRpcs * 25 / 100 * (1 - kErrorTolerance)), - ::testing::Le(kNumRpcs * 25 / 100 * (1 + kErrorTolerance)))); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) { - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewEdsService1Name = "new_eds_service_name_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const char* kNewEdsService2Name = "new_eds_service_name_2"; - const char* kNewCluster3Name = "new_cluster_3"; - const char* kNewEdsService3Name = "new_eds_service_name_3"; - const size_t kNumEcho1Rpcs = 100; - const size_t kNumEchoRpcs = 5; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args3({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsService1Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewEdsService2Name)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args3, kNewEdsService3Name)); - // Populate new CDS resources. 
- Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - new_cluster1.mutable_eds_cluster_config()->set_service_name( - kNewEdsService1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - new_cluster2.mutable_eds_cluster_config()->set_service_name( - kNewEdsService2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2); - Cluster new_cluster3 = balancers_[0]->ads_service()->default_cluster(); - new_cluster3.set_name(kNewCluster3Name); - new_cluster3.mutable_eds_cluster_config()->set_service_name( - kNewEdsService3Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster3); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher1 = route1->mutable_match()->add_headers(); - header_matcher1->set_name("header1"); - header_matcher1->set_exact_match("POST"); - route1->mutable_route()->set_cluster(kNewCluster1Name); - auto route2 = route_config.mutable_virtual_hosts(0)->add_routes(); - route2->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher2 = route2->mutable_match()->add_headers(); - header_matcher2->set_name("header2"); - header_matcher2->mutable_range_match()->set_start(1); - header_matcher2->mutable_range_match()->set_end(1000); - route2->mutable_route()->set_cluster(kNewCluster2Name); - auto route3 = route_config.mutable_virtual_hosts(0)->add_routes(); - route3->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - auto* header_matcher3 = route3->mutable_match()->add_headers(); - header_matcher3->set_name("header3"); - header_matcher3->mutable_safe_regex_match()->set_regex("[a-z]*"); - route3->mutable_route()->set_cluster(kNewCluster3Name); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - // Send headers which will mismatch each route - std::vector<std::pair<TString, TString>> metadata = { - {"header1", "POST"}, - {"header2", "1000"}, - {"header3", "123"}, - {"header1", "GET"}, - }; - WaitForAllBackends(0, 1); - CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata)); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() - .set_rpc_service(SERVICE_ECHO1) - .set_rpc_method(METHOD_ECHO1) - .set_metadata(metadata)); - // Verify that only the default backend got RPCs since all previous routes - // were mismatched. 
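Why each candidate route is skipped, under the same hedged matching assumptions as before (repeated custom metadata values joined with commas, half-open ranges, full-string regex matching): route1 sees header1 as "POST,GET" rather than the exact value "POST"; route2's range [1, 1000) excludes the value 1000; and route3's regex "[a-z]*" does not fully match "123". The half-open range case in one line (illustrative):

const int kRangeStart = 1, kRangeEnd = 1000, kHeader2Value = 1000;
EXPECT_FALSE(kHeader2Value >= kRangeStart && kHeader2Value < kRangeEnd);  // 1000 is outside [1, 1000)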
- for (size_t i = 1; i < 4; ++i) { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); - } - EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); - const auto& response_state = RouteConfigurationResponseState(0); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED); -} - -TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) { - const char* kNewClusterName = "new_cluster"; - const char* kNewEdsServiceName = "new_eds_service_name"; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(1, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewEdsServiceName)); - // Populate new CDS resources. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - new_cluster.mutable_eds_cluster_config()->set_service_name( - kNewEdsServiceName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster); - // Populating Route Configurations for LDS. - RouteConfiguration route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); - route1->mutable_route()->set_cluster(kNewClusterName); - auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_route()->set_cluster(kDefaultClusterName); - SetRouteConfiguration(0, route_config); - // Make sure all backends are up and that requests for each RPC - // service go to the right backends. - WaitForAllBackends(0, 1, false); - WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO2)); - // Requests for services Echo and Echo2 should have gone to backend 0. - EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(1, backends_[0]->backend_service2()->request_count()); - // Requests for service Echo1 should have gone to backend 1. - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - EXPECT_EQ(1, backends_[1]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service2()->request_count()); - // Now send an update that changes the first route to match a - // different RPC service, and wait for the client to make the change. - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/"); - SetRouteConfiguration(0, route_config); - WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO2)); - // Now repeat the earlier test, making sure all traffic goes to the - // right place. 
- WaitForAllBackends(0, 1, false); - WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO1)); - WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO2)); - // Requests for services Echo and Echo1 should have gone to backend 0. - EXPECT_EQ(1, backends_[0]->backend_service()->request_count()); - EXPECT_EQ(1, backends_[0]->backend_service1()->request_count()); - EXPECT_EQ(0, backends_[0]->backend_service2()->request_count()); - // Requests for service Echo2 should have gone to backend 1. - EXPECT_EQ(0, backends_[1]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[1]->backend_service1()->request_count()); - EXPECT_EQ(1, backends_[1]->backend_service2()->request_count()); -} - -using CdsTest = BasicTest; - -// Tests that CDS client should send an ACK upon correct CDS response. -TEST_P(CdsTest, Vanilla) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - (void)SendRpc(); - EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state, - AdsServiceImpl::ResponseState::ACKED); -} - -// Tests that CDS client should send a NACK if the cluster type in CDS response -// is other than EDS. -TEST_P(CdsTest, WrongClusterType) { - auto cluster = balancers_[0]->ads_service()->default_cluster(); - cluster.set_type(Cluster::STATIC); - balancers_[0]->ads_service()->SetCdsResource(cluster); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->cds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "DiscoveryType is not EDS."); -} - -// Tests that CDS client should send a NACK if the eds_config in CDS response is -// other than ADS. -TEST_P(CdsTest, WrongEdsConfig) { - auto cluster = balancers_[0]->ads_service()->default_cluster(); - cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self(); - balancers_[0]->ads_service()->SetCdsResource(cluster); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->cds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "EDS ConfigSource is not ADS."); -} - -// Tests that CDS client should send a NACK if the lb_policy in CDS response is -// other than ROUND_ROBIN. -TEST_P(CdsTest, WrongLbPolicy) { - auto cluster = balancers_[0]->ads_service()->default_cluster(); - cluster.set_lb_policy(Cluster::LEAST_REQUEST); - balancers_[0]->ads_service()->SetCdsResource(cluster); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->cds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "LB policy is not ROUND_ROBIN."); -} - -// Tests that CDS client should send a NACK if the lrs_server in CDS response is -// other than SELF. 
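Taken together, the CdsTest cases above and the one that follows pin down what this client accepts in a Cluster resource: the discovery type must be EDS, eds_config must use ADS, lb_policy must be ROUND_ROBIN, and lrs_server, when present, must be self. A sketch of a cluster that passes all of these checks, built with the same mutators the tests use and starting from default_cluster(), which these tests treat as already valid:

Cluster valid_cluster = balancers_[0]->ads_service()->default_cluster();
valid_cluster.set_type(Cluster::EDS);                      // not STATIC
valid_cluster.mutable_eds_cluster_config()
    ->mutable_eds_config()
    ->mutable_ads();                                       // ADS, not self
valid_cluster.set_lb_policy(Cluster::ROUND_ROBIN);         // not LEAST_REQUEST
valid_cluster.mutable_lrs_server()->mutable_self();        // self, not ads
balancers_[0]->ads_service()->SetCdsResource(valid_cluster);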
-TEST_P(CdsTest, WrongLrsServer) { - auto cluster = balancers_[0]->ads_service()->default_cluster(); - cluster.mutable_lrs_server()->mutable_ads(); - balancers_[0]->ads_service()->SetCdsResource(cluster); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->cds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, "LRS ConfigSource is not self."); -} - -using EdsTest = BasicTest; - -// Tests that EDS client should send a NACK if the EDS update contains -// sparse priorities. -TEST_P(EdsTest, NacksSparsePriorityList) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args)); - CheckRpcSendFailure(); - const auto& response_state = - balancers_[0]->ads_service()->eds_response_state(); - EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED); - EXPECT_EQ(response_state.error_message, - "EDS update includes sparse priority list"); -} - -// In most of our tests, we use different names for different resource -// types, to make sure that there are no cut-and-paste errors in the code -// that cause us to look at data for the wrong resource type. So we add -// this test to make sure that the EDS resource name defaults to the -// cluster name if not specified in the CDS resource. -TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) { - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, kDefaultClusterName)); - Cluster cluster = balancers_[0]->ads_service()->default_cluster(); - cluster.mutable_eds_cluster_config()->clear_service_name(); - balancers_[0]->ads_service()->SetCdsResource(cluster); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendOk(); -} - -class TimeoutTest : public BasicTest { - protected: - void SetUp() override { - xds_resource_does_not_exist_timeout_ms_ = 500; - BasicTest::SetUp(); - } -}; - -// Tests that LDS client times out when no response received. -TEST_P(TimeoutTest, Lds) { - balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); -} - -TEST_P(TimeoutTest, Rds) { - balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); -} - -// Tests that CDS client times out when no response received. 
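All of the TimeoutTest cases share one shape: the fake ADS server is told to ignore a single resource type, the client therefore never receives that resource, and the 500ms does-not-exist timeout configured in SetUp() is what ultimately fails the RPC. A hypothetical fixture helper, not part of this file and shown only to summarize the shared pattern, could look like:

// Hypothetical TimeoutTest member summarizing the Lds/Rds/Cds/Eds cases.
void RunTimeoutCase(const char* type_url) {
  balancers_[0]->ads_service()->SetResourceIgnore(type_url);
  SetNextResolution({});
  SetNextResolutionForLbChannelAllBalancers();
  CheckRpcSendFailure();  // fails once the 500ms resource timeout fires
}
// Usage sketch: RunTimeoutCase(kCdsTypeUrl);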
-TEST_P(TimeoutTest, Cds) { - balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); -} - -TEST_P(TimeoutTest, Eds) { - balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl); - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - CheckRpcSendFailure(); -} - using LocalityMapTest = BasicTest; // Tests that the localities in a locality map are picked according to their @@ -4516,12 +4516,12 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) { const double kLocalityWeightRate1 = static_cast<double>(kLocalityWeight1) / kTotalLocalityWeight; // ADS response contains 2 localities, each of which contains 1 backend. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kLocalityWeight0}, {"locality1", GetBackendPorts(1, 2), kLocalityWeight1}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait for both backends to be ready. WaitForAllBackends(0, 2); // Send kNumRpcs RPCs. @@ -4544,44 +4544,44 @@ TEST_P(LocalityMapTest, WeightedRoundRobin) { ::testing::Le(kLocalityWeightRate1 * (1 + kErrorTolerance)))); } -// Tests that we correctly handle a locality containing no endpoints. -TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - const size_t kNumRpcs = 5000; - // EDS response contains 2 localities, one with no endpoints. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - {"locality1", {}}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Wait for both backends to be ready. - WaitForAllBackends(); - // Send kNumRpcs RPCs. - CheckRpcSendOk(kNumRpcs); - // All traffic should go to the reachable locality. - EXPECT_EQ(backends_[0]->backend_service()->request_count(), - kNumRpcs / backends_.size()); - EXPECT_EQ(backends_[1]->backend_service()->request_count(), - kNumRpcs / backends_.size()); - EXPECT_EQ(backends_[2]->backend_service()->request_count(), - kNumRpcs / backends_.size()); - EXPECT_EQ(backends_[3]->backend_service()->request_count(), - kNumRpcs / backends_.size()); -} - -// EDS update with no localities. -TEST_P(LocalityMapTest, NoLocalities) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource({}, DefaultEdsServiceName())); - Status status = SendRpc(); - EXPECT_FALSE(status.ok()); - EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); -} - +// Tests that we correctly handle a locality containing no endpoints. +TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + const size_t kNumRpcs = 5000; + // EDS response contains 2 localities, one with no endpoints. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + {"locality1", {}}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Wait for both backends to be ready. + WaitForAllBackends(); + // Send kNumRpcs RPCs. + CheckRpcSendOk(kNumRpcs); + // All traffic should go to the reachable locality. 
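Because locality1 is empty, all four backends used by these tests belong to the single populated locality, and plain round robin should hand each of them an identical share; that is why the assertions below use strict equality rather than a tolerance: 5000 RPCs over 4 backends is exactly 1250 each (sketch, assuming the usual 4-backend setup of this fixture):

constexpr size_t kNumRpcsSketch = 5000;
constexpr size_t kBackendCountSketch = 4;
static_assert(kNumRpcsSketch % kBackendCountSketch == 0, "divides evenly");
static_assert(kNumRpcsSketch / kBackendCountSketch == 1250, "exact share per backend");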
+ EXPECT_EQ(backends_[0]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[1]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[2]->backend_service()->request_count(), + kNumRpcs / backends_.size()); + EXPECT_EQ(backends_[3]->backend_service()->request_count(), + kNumRpcs / backends_.size()); +} + +// EDS update with no localities. +TEST_P(LocalityMapTest, NoLocalities) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource({}, DefaultEdsServiceName())); + Status status = SendRpc(); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); +} + // Tests that the locality map can work properly even when it contains a large // number of localities. TEST_P(LocalityMapTest, StressTest) { @@ -4590,23 +4590,23 @@ TEST_P(LocalityMapTest, StressTest) { const size_t kNumLocalities = 100; // The first ADS response contains kNumLocalities localities, each of which // contains backend 0. - AdsServiceImpl::EdsResourceArgs args; + AdsServiceImpl::EdsResourceArgs args; for (size_t i = 0; i < kNumLocalities; ++i) { - TString name = y_absl::StrCat("locality", i); - AdsServiceImpl::EdsResourceArgs::Locality locality(name, - {backends_[0]->port()}); + TString name = y_absl::StrCat("locality", i); + AdsServiceImpl::EdsResourceArgs::Locality locality(name, + {backends_[0]->port()}); args.locality_list.emplace_back(std::move(locality)); } - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // The second ADS response contains 1 locality, which contains backend 1. - args = AdsServiceImpl::EdsResourceArgs({ + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", GetBackendPorts(1, 2)}, }); - std::thread delayed_resource_setter( - std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), - 60 * 1000)); + std::thread delayed_resource_setter( + std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), + 60 * 1000)); // Wait until backend 0 is ready, before which kNumLocalities localities are // received and handled by the xds policy. WaitForBackend(0, /*reset_counters=*/false); @@ -4614,7 +4614,7 @@ TEST_P(LocalityMapTest, StressTest) { // Wait until backend 1 is ready, before which kNumLocalities localities are // removed by the xds policy. WaitForBackend(1); - delayed_resource_setter.join(); + delayed_resource_setter.join(); } // Tests that the localities in a locality map are picked correctly after update @@ -4622,7 +4622,7 @@ TEST_P(LocalityMapTest, StressTest) { TEST_P(LocalityMapTest, UpdateMap) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - const size_t kNumRpcs = 3000; + const size_t kNumRpcs = 3000; // The locality weight for the first 3 localities. 
const std::vector<int> kLocalityWeights0 = {2, 3, 4}; const double kTotalLocalityWeight0 = @@ -4641,13 +4641,13 @@ TEST_P(LocalityMapTest, UpdateMap) { for (int weight : kLocalityWeights1) { locality_weight_rate_1.push_back(weight / kTotalLocalityWeight1); } - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), 2}, {"locality1", GetBackendPorts(1, 2), 3}, {"locality2", GetBackendPorts(2, 3), 4}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait for the first 3 backends to be ready. WaitForAllBackends(0, 3); gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); @@ -4664,26 +4664,26 @@ TEST_P(LocalityMapTest, UpdateMap) { } const double kErrorTolerance = 0.2; for (size_t i = 0; i < 3; ++i) { - gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i, - locality_picked_rates[i]); + gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i, + locality_picked_rates[i]); EXPECT_THAT( locality_picked_rates[i], ::testing::AllOf( ::testing::Ge(locality_weight_rate_0[i] * (1 - kErrorTolerance)), ::testing::Le(locality_weight_rate_0[i] * (1 + kErrorTolerance)))); } - args = AdsServiceImpl::EdsResourceArgs({ - {"locality1", GetBackendPorts(1, 2), 3}, - {"locality2", GetBackendPorts(2, 3), 2}, - {"locality3", GetBackendPorts(3, 4), 6}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ + {"locality1", GetBackendPorts(1, 2), 3}, + {"locality2", GetBackendPorts(2, 3), 2}, + {"locality3", GetBackendPorts(3, 4), 6}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Backend 3 hasn't received any request. EXPECT_EQ(0U, backends_[3]->backend_service()->request_count()); // Wait until the locality update has been processed, as signaled by backend 3 // receiving a request. - WaitForAllBackends(3, 4); + WaitForAllBackends(3, 4); gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); // Send kNumRpcs RPCs. CheckRpcSendOk(kNumRpcs); @@ -4699,8 +4699,8 @@ TEST_P(LocalityMapTest, UpdateMap) { kNumRpcs); } for (size_t i = 1; i < 4; ++i) { - gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i, - locality_picked_rates[i]); + gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i, + locality_picked_rates[i]); EXPECT_THAT( locality_picked_rates[i], ::testing::AllOf( @@ -4709,97 +4709,97 @@ TEST_P(LocalityMapTest, UpdateMap) { } } -// Tests that we don't fail RPCs when replacing all of the localities in -// a given priority. -TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ - {"locality1", GetBackendPorts(1, 2)}, - }); - std::thread delayed_resource_setter(std::bind( - &BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 5000)); - // Wait for the first backend to be ready. 
- WaitForBackend(0); - // Keep sending RPCs until we switch over to backend 1, which tells us - // that we received the update. No RPCs should fail during this - // transition. - WaitForBackend(1, /*reset_counters=*/true, /*require_success=*/true); - delayed_resource_setter.join(); -} - +// Tests that we don't fail RPCs when replacing all of the localities in +// a given priority. +TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ + {"locality1", GetBackendPorts(1, 2)}, + }); + std::thread delayed_resource_setter(std::bind( + &BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 5000)); + // Wait for the first backend to be ready. + WaitForBackend(0); + // Keep sending RPCs until we switch over to backend 1, which tells us + // that we received the update. No RPCs should fail during this + // transition. + WaitForBackend(1, /*reset_counters=*/true, /*require_success=*/true); + delayed_resource_setter.join(); +} + class FailoverTest : public BasicTest { public: - void SetUp() override { - BasicTest::SetUp(); - ResetStub(500); - } + void SetUp() override { + BasicTest::SetUp(); + ResetStub(500); + } }; // Localities with the highest priority are used when multiple priority exist. TEST_P(FailoverTest, ChooseHighestPriority) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, {"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForBackend(3, false); for (size_t i = 0; i < 3; ++i) { - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + } +} + +// Does not choose priority with no endpoints. +TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, + {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, + {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, + {"locality3", {}, kDefaultLocalityWeight, 0}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + WaitForBackend(0, false); + for (size_t i = 1; i < 3; ++i) { + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } } -// Does not choose priority with no endpoints. 
-TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, - {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, - {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, - {"locality3", {}, kDefaultLocalityWeight, 0}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - WaitForBackend(0, false); - for (size_t i = 1; i < 3; ++i) { - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); - } -} - -// Does not choose locality with no endpoints. -TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", {}, kDefaultLocalityWeight, 0}, - {"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Wait for all backends to be used. - std::tuple<int, int, int> counts = WaitForAllBackends(); - // Make sure no RPCs failed in the transition. - EXPECT_EQ(0, std::get<1>(counts)); -} - +// Does not choose locality with no endpoints. +TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", {}, kDefaultLocalityWeight, 0}, + {"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Wait for all backends to be used. + std::tuple<int, int, int> counts = WaitForAllBackends(); + // Make sure no RPCs failed in the transition. + EXPECT_EQ(0, std::get<1>(counts)); +} + // If the higher priority localities are not reachable, failover to the highest // priority among the rest. 
TEST_P(FailoverTest, Failover) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, @@ -4807,12 +4807,12 @@ TEST_P(FailoverTest, Failover) { }); ShutdownBackend(3); ShutdownBackend(0); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForBackend(1, false); for (size_t i = 0; i < 4; ++i) { if (i == 1) continue; - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } } @@ -4822,7 +4822,7 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcs = 100; - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, @@ -4830,12 +4830,12 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) { }); ShutdownBackend(3); ShutdownBackend(0); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForBackend(1, false); for (size_t i = 0; i < 4; ++i) { if (i == 1) continue; - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } StartBackend(0); WaitForBackend(0); @@ -4848,13 +4848,13 @@ TEST_P(FailoverTest, SwitchBackToHigherPriority) { TEST_P(FailoverTest, UpdateInitialUnavailable) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 2}, @@ -4862,9 +4862,9 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) { }); ShutdownBackend(0); ShutdownBackend(1); - std::thread delayed_resource_setter(std::bind( - &BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); + std::thread delayed_resource_setter(std::bind( + &BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(500, GPR_TIMESPAN)); // Send 0.5 second worth of RPCs. 
@@ -4874,9 +4874,9 @@ TEST_P(FailoverTest, UpdateInitialUnavailable) { WaitForBackend(2, false); for (size_t i = 0; i < 4; ++i) { if (i == 2) continue; - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } - delayed_resource_setter.join(); + delayed_resource_setter.join(); } // Tests that after the localities' priorities are updated, we still choose the @@ -4885,72 +4885,72 @@ TEST_P(FailoverTest, UpdatePriority) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcs = 100; - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3}, {"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 2}, {"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 0}, {"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 1}, {"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 3}, }); - std::thread delayed_resource_setter(std::bind( - &BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); + std::thread delayed_resource_setter(std::bind( + &BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); WaitForBackend(3, false); for (size_t i = 0; i < 3; ++i) { - EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } WaitForBackend(1); CheckRpcSendOk(kNumRpcs); EXPECT_EQ(kNumRpcs, backends_[1]->backend_service()->request_count()); - delayed_resource_setter.join(); + delayed_resource_setter.join(); +} + +// Moves all localities in the current priority to a higher priority. +TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) { + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + // First update: + // - Priority 0 is locality 0, containing backend 0, which is down. + // - Priority 1 is locality 1, containing backends 1 and 2, which are up. + ShutdownBackend(0); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, + {"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Second update: + // - Priority 0 contains both localities 0 and 1. + // - Priority 1 is not present. + // - We add backend 3 to locality 1, just so we have a way to know + // when the update has been seen by the client. 
+ args = AdsServiceImpl::EdsResourceArgs({ + {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, + {"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0}, + }); + std::thread delayed_resource_setter(std::bind( + &BasicTest::SetEdsResourceWithDelay, this, 0, + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); + // When we get the first update, all backends in priority 0 are down, + // so we will create priority 1. Backends 1 and 2 should have traffic, + // but backend 3 should not. + WaitForAllBackends(1, 3, false); + EXPECT_EQ(0UL, backends_[3]->backend_service()->request_count()); + // When backend 3 gets traffic, we know the second update has been seen. + WaitForBackend(3); + // The ADS service of balancer 0 got at least 1 response. + EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + delayed_resource_setter.join(); } -// Moves all localities in the current priority to a higher priority. -TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) { - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // First update: - // - Priority 0 is locality 0, containing backend 0, which is down. - // - Priority 1 is locality 1, containing backends 1 and 2, which are up. - ShutdownBackend(0); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, - {"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Second update: - // - Priority 0 contains both localities 0 and 1. - // - Priority 1 is not present. - // - We add backend 3 to locality 1, just so we have a way to know - // when the update has been seen by the client. - args = AdsServiceImpl::EdsResourceArgs({ - {"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0}, - {"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0}, - }); - std::thread delayed_resource_setter(std::bind( - &BasicTest::SetEdsResourceWithDelay, this, 0, - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName()), 1000)); - // When we get the first update, all backends in priority 0 are down, - // so we will create priority 1. Backends 1 and 2 should have traffic, - // but backend 3 should not. - WaitForAllBackends(1, 3, false); - EXPECT_EQ(0UL, backends_[3]->backend_service()->request_count()); - // When backend 3 gets traffic, we know the second update has been seen. - WaitForBackend(3); - // The ADS service of balancer 0 got at least 1 response. - EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - delayed_resource_setter.join(); -} - using DropTest = BasicTest; // Tests that RPCs are dropped according to the drop config. @@ -4965,26 +4965,26 @@ TEST_P(DropTest, Vanilla) { const double KDropRateForLbAndThrottle = kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle; // The ADS response contains two drop categories. 
- AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, {kThrottleDropType, kDropPerMillionForThrottle}}; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForAllBackends(); // Send kNumRpcs RPCs and count the drops. size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } // The drop rate should be roughly equal to the expectation. @@ -5005,26 +5005,26 @@ TEST_P(DropTest, DropPerHundred) { const uint32_t kDropPerHundredForLb = 10; const double kDropRateForLb = kDropPerHundredForLb / 100.0; // The ADS response contains one drop category. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); args.drop_categories = {{kLbDropType, kDropPerHundredForLb}}; args.drop_denominator = FractionalPercent::HUNDRED; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForAllBackends(); // Send kNumRpcs RPCs and count the drops. size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } // The drop rate should be roughly equal to the expectation. @@ -5044,26 +5044,26 @@ TEST_P(DropTest, DropPerTenThousand) { const uint32_t kDropPerTenThousandForLb = 1000; const double kDropRateForLb = kDropPerTenThousandForLb / 10000.0; // The ADS response contains one drop category. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); args.drop_categories = {{kLbDropType, kDropPerTenThousandForLb}}; args.drop_denominator = FractionalPercent::TEN_THOUSAND; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForAllBackends(); // Send kNumRpcs RPCs and count the drops. 
size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } // The drop rate should be roughly equal to the expectation. @@ -5079,7 +5079,7 @@ TEST_P(DropTest, DropPerTenThousand) { TEST_P(DropTest, Update) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - const size_t kNumRpcs = 3000; + const size_t kNumRpcs = 3000; const uint32_t kDropPerMillionForLb = 100000; const uint32_t kDropPerMillionForThrottle = 200000; const double kDropRateForLb = kDropPerMillionForLb / 1000000.0; @@ -5087,43 +5087,43 @@ TEST_P(DropTest, Update) { const double KDropRateForLbAndThrottle = kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle; // The first ADS response contains one drop category. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); args.drop_categories = {{kLbDropType, kDropPerMillionForLb}}; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); WaitForAllBackends(); // Send kNumRpcs RPCs and count the drops. size_t num_drops = 0; gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // The drop rate should be roughly equal to the expectation. double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs; - gpr_log(GPR_INFO, "First batch drop rate %f", seen_drop_rate); + gpr_log(GPR_INFO, "First batch drop rate %f", seen_drop_rate); const double kErrorTolerance = 0.3; EXPECT_THAT( seen_drop_rate, ::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)), ::testing::Le(kDropRateForLb * (1 + kErrorTolerance)))); - // The second ADS response contains two drop categories, send an update EDS - // response. - args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, - {kThrottleDropType, kDropPerMillionForThrottle}}; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // The second ADS response contains two drop categories, send an update EDS + // response. + args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, + {kThrottleDropType, kDropPerMillionForThrottle}}; + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait until the drop rate increases to the middle of the two configs, which // implies that the update has been in effect. 
const double kDropRateThreshold = @@ -5131,7 +5131,7 @@ TEST_P(DropTest, Update) { size_t num_rpcs = kNumRpcs; while (seen_drop_rate < kDropRateThreshold) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); ++num_rpcs; if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { @@ -5139,7 +5139,7 @@ TEST_P(DropTest, Update) { } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } seen_drop_rate = static_cast<double>(num_drops) / num_rpcs; } @@ -5148,20 +5148,20 @@ TEST_P(DropTest, Update) { gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); // The new drop rate should be roughly equal to the expectation. seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs; - gpr_log(GPR_INFO, "Second batch drop rate %f", seen_drop_rate); + gpr_log(GPR_INFO, "Second batch drop rate %f", seen_drop_rate); EXPECT_THAT( seen_drop_rate, ::testing::AllOf( @@ -5177,23 +5177,23 @@ TEST_P(DropTest, DropAll) { const uint32_t kDropPerMillionForLb = 100000; const uint32_t kDropPerMillionForThrottle = 1000000; // The ADS response contains two drop categories. - AdsServiceImpl::EdsResourceArgs args; + AdsServiceImpl::EdsResourceArgs args; args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, {kThrottleDropType, kDropPerMillionForThrottle}}; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Send kNumRpcs RPCs and all of them are dropped. 
for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); - EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); - EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); + const Status status = SendRpc(RpcOptions(), &response); + EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); + EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); } } class BalancerUpdateTest : public XdsEnd2endTest { public: - BalancerUpdateTest() : XdsEnd2endTest(4, 3) {} + BalancerUpdateTest() : XdsEnd2endTest(4, 3) {} }; // Tests that the old LB call is still used after the balancer address update as @@ -5201,16 +5201,16 @@ class BalancerUpdateTest : public XdsEnd2endTest { TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", {backends_[0]->port()}}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", {backends_[1]->port()}}, }); - balancers_[1]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[1]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait until the first backend is ready. WaitForBackend(0); // Send 10 requests. @@ -5219,17 +5219,17 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) { gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // The ADS service of balancer 0 sent at least 1 response. - EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[1]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of balancer 0 sent at least 1 response. 
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[1]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolutionForLbChannel({balancers_[1]->port()}); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -5243,17 +5243,17 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) { // The current LB call is still working, so xds continued using it to the // first balancer, which doesn't assign the second backend. EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); - // The ADS service of balancer 0 sent at least 1 response. - EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[1]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of balancer 0 sent at least 1 response. + EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[1]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; } // Tests that the old LB call is still used after multiple balancer address @@ -5264,16 +5264,16 @@ TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) { TEST_P(BalancerUpdateTest, Repeated) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", {backends_[0]->port()}}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", {backends_[1]->port()}}, }); - balancers_[1]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[1]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait until the first backend is ready. WaitForBackend(0); // Send 10 requests. @@ -5282,17 +5282,17 @@ TEST_P(BalancerUpdateTest, Repeated) { gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. 
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // The ADS service of balancer 0 sent at least 1 response. - EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[1]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of balancer 0 sent at least 1 response. + EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[1]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; std::vector<int> ports; ports.emplace_back(balancers_[0]->port()); ports.emplace_back(balancers_[1]->port()); @@ -5334,33 +5334,33 @@ TEST_P(BalancerUpdateTest, Repeated) { TEST_P(BalancerUpdateTest, DeadUpdate) { SetNextResolution({}); SetNextResolutionForLbChannel({balancers_[0]->port()}); - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", {backends_[0]->port()}}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", {backends_[1]->port()}}, }); - balancers_[1]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[1]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Start servers and send 10 RPCs per server. gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); CheckRpcSendOk(10); gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // The ADS service of balancer 0 sent at least 1 response. - EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[1]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of balancer 0 sent at least 1 response. 
+ EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[1]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; // Kill balancer 0 gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************"); balancers_[0]->Shutdown(); @@ -5372,19 +5372,19 @@ TEST_P(BalancerUpdateTest, DeadUpdate) { // All 10 requests should again have gone to the first backend. EXPECT_EQ(20U, backends_[0]->backend_service()->request_count()); EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); - // The ADS service of no balancers sent anything - EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[0]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[1]->ads_service()->eds_response_state().error_message; - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of no balancers sent anything + EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[0]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[1]->ads_service()->eds_response_state().error_message; + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolutionForLbChannel({balancers_[1]->port()}); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); @@ -5400,17 +5400,17 @@ TEST_P(BalancerUpdateTest, DeadUpdate) { gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH =========="); // All 10 requests should have gone to the second backend. EXPECT_EQ(10U, backends_[1]->backend_service()->request_count()); - // The ADS service of balancer 1 sent at least 1 response. - EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[0]->ads_service()->eds_response_state().error_message; - EXPECT_GT(balancers_[1]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT); - EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, - AdsServiceImpl::ResponseState::NOT_SENT) - << "Error Message:" - << balancers_[2]->ads_service()->eds_response_state().error_message; + // The ADS service of balancer 1 sent at least 1 response. 
+ EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[0]->ads_service()->eds_response_state().error_message; + EXPECT_GT(balancers_[1]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT); + EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state, + AdsServiceImpl::ResponseState::NOT_SENT) + << "Error Message:" + << balancers_[2]->ads_service()->eds_response_state().error_message; } // The re-resolution tests are deferred because they rely on the fallback mode, @@ -5428,20 +5428,112 @@ class ClientLoadReportingTest : public XdsEnd2endTest { // Tests that the load report received at the balancer is correct. TEST_P(ClientLoadReportingTest, Vanilla) { - if (!GetParam().use_xds_resolver()) { - balancers_[0]->lrs_service()->set_cluster_names({kServerName}); - } + if (!GetParam().use_xds_resolver()) { + balancers_[0]->lrs_service()->set_cluster_names({kServerName}); + } SetNextResolution({}); SetNextResolutionForLbChannel({balancers_[0]->port()}); - const size_t kNumRpcsPerAddress = 10; - const size_t kNumFailuresPerAddress = 3; + const size_t kNumRpcsPerAddress = 10; + const size_t kNumFailuresPerAddress = 3; // TODO(juanlishen): Partition the backends after multiple localities is // tested. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Wait until all backends are ready. + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_, + RpcOptions().set_server_fail(true)); + // Check that each backend got the right number of requests. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress, + backends_[i]->backend_service()->request_count()); + } + // The load report received at the balancer should be correct. + std::vector<ClientStats> load_report = + balancers_[0]->lrs_service()->WaitForLoadReport(); + ASSERT_EQ(load_report.size(), 1UL); + ClientStats& client_stats = load_report.front(); + EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, + client_stats.total_successful_requests()); + EXPECT_EQ(0U, client_stats.total_requests_in_progress()); + EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ + + num_ok + num_failure, + client_stats.total_issued_requests()); + EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure, + client_stats.total_error_requests()); + EXPECT_EQ(0U, client_stats.total_dropped_requests()); + // The LRS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); +} + +// Tests send_all_clusters. 
+TEST_P(ClientLoadReportingTest, SendAllClusters) { + balancers_[0]->lrs_service()->set_send_all_clusters(true); + SetNextResolution({}); + SetNextResolutionForLbChannel({balancers_[0]->port()}); + const size_t kNumRpcsPerAddress = 10; + const size_t kNumFailuresPerAddress = 3; + // TODO(juanlishen): Partition the backends after multiple localities is + // tested. + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + // Wait until all backends are ready. + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_, + RpcOptions().set_server_fail(true)); + // Check that each backend got the right number of requests. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress, + backends_[i]->backend_service()->request_count()); + } + // The load report received at the balancer should be correct. + std::vector<ClientStats> load_report = + balancers_[0]->lrs_service()->WaitForLoadReport(); + ASSERT_EQ(load_report.size(), 1UL); + ClientStats& client_stats = load_report.front(); + EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, + client_stats.total_successful_requests()); + EXPECT_EQ(0U, client_stats.total_requests_in_progress()); + EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ + + num_ok + num_failure, + client_stats.total_issued_requests()); + EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure, + client_stats.total_error_requests()); + EXPECT_EQ(0U, client_stats.total_dropped_requests()); + // The LRS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); +} + +// Tests that we don't include stats for clusters that are not requested +// by the LRS server. +TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) { + balancers_[0]->lrs_service()->set_cluster_names({"bogus"}); + SetNextResolution({}); + SetNextResolutionForLbChannel({balancers_[0]->port()}); + const size_t kNumRpcsPerAddress = 100; + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts()}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait until all backends are ready. int num_ok = 0; int num_failure = 0; @@ -5449,98 +5541,6 @@ TEST_P(ClientLoadReportingTest, Vanilla) { std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); // Send kNumRpcsPerAddress RPCs per server. CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); - CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_, - RpcOptions().set_server_fail(true)); - // Check that each backend got the right number of requests. - for (size_t i = 0; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress, - backends_[i]->backend_service()->request_count()); - } - // The load report received at the balancer should be correct. 
- std::vector<ClientStats> load_report = - balancers_[0]->lrs_service()->WaitForLoadReport(); - ASSERT_EQ(load_report.size(), 1UL); - ClientStats& client_stats = load_report.front(); - EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, - client_stats.total_successful_requests()); - EXPECT_EQ(0U, client_stats.total_requests_in_progress()); - EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ + - num_ok + num_failure, - client_stats.total_issued_requests()); - EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure, - client_stats.total_error_requests()); - EXPECT_EQ(0U, client_stats.total_dropped_requests()); - // The LRS service got a single request, and sent a single response. - EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); - EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); -} - -// Tests send_all_clusters. -TEST_P(ClientLoadReportingTest, SendAllClusters) { - balancers_[0]->lrs_service()->set_send_all_clusters(true); - SetNextResolution({}); - SetNextResolutionForLbChannel({balancers_[0]->port()}); - const size_t kNumRpcsPerAddress = 10; - const size_t kNumFailuresPerAddress = 3; - // TODO(juanlishen): Partition the backends after multiple localities is - // tested. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Wait until all backends are ready. - int num_ok = 0; - int num_failure = 0; - int num_drops = 0; - std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); - // Send kNumRpcsPerAddress RPCs per server. - CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); - CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_, - RpcOptions().set_server_fail(true)); - // Check that each backend got the right number of requests. - for (size_t i = 0; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress, - backends_[i]->backend_service()->request_count()); - } - // The load report received at the balancer should be correct. - std::vector<ClientStats> load_report = - balancers_[0]->lrs_service()->WaitForLoadReport(); - ASSERT_EQ(load_report.size(), 1UL); - ClientStats& client_stats = load_report.front(); - EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, - client_stats.total_successful_requests()); - EXPECT_EQ(0U, client_stats.total_requests_in_progress()); - EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ + - num_ok + num_failure, - client_stats.total_issued_requests()); - EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure, - client_stats.total_error_requests()); - EXPECT_EQ(0U, client_stats.total_dropped_requests()); - // The LRS service got a single request, and sent a single response. - EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); - EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); -} - -// Tests that we don't include stats for clusters that are not requested -// by the LRS server. 
-TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) { - balancers_[0]->lrs_service()->set_cluster_names({"bogus"}); - SetNextResolution({}); - SetNextResolutionForLbChannel({balancers_[0]->port()}); - const size_t kNumRpcsPerAddress = 100; - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts()}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); - // Wait until all backends are ready. - int num_ok = 0; - int num_failure = 0; - int num_drops = 0; - std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); - // Send kNumRpcsPerAddress RPCs per server. - CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); // Each backend should have gotten 100 requests. for (size_t i = 0; i < backends_.size(); ++i) { EXPECT_EQ(kNumRpcsPerAddress, @@ -5550,27 +5550,27 @@ TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) { EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); // The load report received at the balancer should be correct. - std::vector<ClientStats> load_report = - balancers_[0]->lrs_service()->WaitForLoadReport(); - ASSERT_EQ(load_report.size(), 0UL); + std::vector<ClientStats> load_report = + balancers_[0]->lrs_service()->WaitForLoadReport(); + ASSERT_EQ(load_report.size(), 0UL); } // Tests that if the balancer restarts, the client load report contains the // stats before and after the restart correctly. TEST_P(ClientLoadReportingTest, BalancerRestart) { - if (!GetParam().use_xds_resolver()) { - balancers_[0]->lrs_service()->set_cluster_names({kServerName}); - } + if (!GetParam().use_xds_resolver()) { + balancers_[0]->lrs_service()->set_cluster_names({kServerName}); + } SetNextResolution({}); SetNextResolutionForLbChannel({balancers_[0]->port()}); const size_t kNumBackendsFirstPass = backends_.size() / 2; const size_t kNumBackendsSecondPass = backends_.size() - kNumBackendsFirstPass; - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, kNumBackendsFirstPass)}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait until all backends returned by the balancer are ready. 
int num_ok = 0; int num_failure = 0; @@ -5578,15 +5578,15 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) { std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(/* start_index */ 0, /* stop_index */ kNumBackendsFirstPass); - std::vector<ClientStats> load_report = - balancers_[0]->lrs_service()->WaitForLoadReport(); - ASSERT_EQ(load_report.size(), 1UL); - ClientStats client_stats = std::move(load_report.front()); + std::vector<ClientStats> load_report = + balancers_[0]->lrs_service()->WaitForLoadReport(); + ASSERT_EQ(load_report.size(), 1UL); + ClientStats client_stats = std::move(load_report.front()); EXPECT_EQ(static_cast<size_t>(num_ok), - client_stats.total_successful_requests()); - EXPECT_EQ(0U, client_stats.total_requests_in_progress()); - EXPECT_EQ(0U, client_stats.total_error_requests()); - EXPECT_EQ(0U, client_stats.total_dropped_requests()); + client_stats.total_successful_requests()); + EXPECT_EQ(0U, client_stats.total_requests_in_progress()); + EXPECT_EQ(0U, client_stats.total_error_requests()); + EXPECT_EQ(0U, client_stats.total_dropped_requests()); // Shut down the balancer. balancers_[0]->Shutdown(); // We should continue using the last EDS response we received from the @@ -5603,12 +5603,12 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) { int num_started = std::get<0>(WaitForAllBackends( /* start_index */ 0, /* stop_index */ kNumBackendsFirstPass)); // Now restart the balancer, this time pointing to the new backends. - balancers_[0]->Start(); - args = AdsServiceImpl::EdsResourceArgs({ + balancers_[0]->Start(); + args = AdsServiceImpl::EdsResourceArgs({ {"locality0", GetBackendPorts(kNumBackendsFirstPass)}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); // Wait for queries to start going to one of the new backends. // This tells us that we're now using the new serverlist. std::tie(num_ok, num_failure, num_drops) = @@ -5618,13 +5618,13 @@ TEST_P(ClientLoadReportingTest, BalancerRestart) { CheckRpcSendOk(kNumBackendsSecondPass); num_started += kNumBackendsSecondPass; // Check client stats. - load_report = balancers_[0]->lrs_service()->WaitForLoadReport(); - ASSERT_EQ(load_report.size(), 1UL); - client_stats = std::move(load_report.front()); - EXPECT_EQ(num_started, client_stats.total_successful_requests()); - EXPECT_EQ(0U, client_stats.total_requests_in_progress()); - EXPECT_EQ(0U, client_stats.total_error_requests()); - EXPECT_EQ(0U, client_stats.total_dropped_requests()); + load_report = balancers_[0]->lrs_service()->WaitForLoadReport(); + ASSERT_EQ(load_report.size(), 1UL); + client_stats = std::move(load_report.front()); + EXPECT_EQ(num_started, client_stats.total_successful_requests()); + EXPECT_EQ(0U, client_stats.total_requests_in_progress()); + EXPECT_EQ(0U, client_stats.total_error_requests()); + EXPECT_EQ(0U, client_stats.total_dropped_requests()); } class ClientLoadReportingWithDropTest : public XdsEnd2endTest { @@ -5634,9 +5634,9 @@ class ClientLoadReportingWithDropTest : public XdsEnd2endTest { // Tests that the drop stats are correctly reported by client load reporting. 
TEST_P(ClientLoadReportingWithDropTest, Vanilla) { - if (!GetParam().use_xds_resolver()) { - balancers_[0]->lrs_service()->set_cluster_names({kServerName}); - } + if (!GetParam().use_xds_resolver()) { + balancers_[0]->lrs_service()->set_cluster_names({kServerName}); + } SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcs = 3000; @@ -5647,13 +5647,13 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { const double KDropRateForLbAndThrottle = kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle; // The ADS response contains two drop categories. - AdsServiceImpl::EdsResourceArgs args({ + AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts()}, }); args.drop_categories = {{kLbDropType, kDropPerMillionForLb}, {kThrottleDropType, kDropPerMillionForThrottle}}; - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args, DefaultEdsServiceName())); int num_ok = 0; int num_failure = 0; int num_drops = 0; @@ -5662,14 +5662,14 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { // Send kNumRpcs RPCs and count the drops. for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(RpcOptions(), &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; } else { EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage); + EXPECT_EQ(response.message(), kRequestMessage); } } // The drop rate should be roughly equal to the expectation. @@ -5681,24 +5681,24 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { ::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)), ::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance)))); // Check client stats. 
- const size_t total_rpc = num_warmup + kNumRpcs; - ClientStats client_stats; - do { - std::vector<ClientStats> load_reports = - balancers_[0]->lrs_service()->WaitForLoadReport(); - for (const auto& load_report : load_reports) { - client_stats += load_report; - } - } while (client_stats.total_issued_requests() + - client_stats.total_dropped_requests() < - total_rpc); - EXPECT_EQ(num_drops, client_stats.total_dropped_requests()); + const size_t total_rpc = num_warmup + kNumRpcs; + ClientStats client_stats; + do { + std::vector<ClientStats> load_reports = + balancers_[0]->lrs_service()->WaitForLoadReport(); + for (const auto& load_report : load_reports) { + client_stats += load_report; + } + } while (client_stats.total_issued_requests() + + client_stats.total_dropped_requests() < + total_rpc); + EXPECT_EQ(num_drops, client_stats.total_dropped_requests()); EXPECT_THAT( - client_stats.dropped_requests(kLbDropType), + client_stats.dropped_requests(kLbDropType), ::testing::AllOf( ::testing::Ge(total_rpc * kDropRateForLb * (1 - kErrorTolerance)), ::testing::Le(total_rpc * kDropRateForLb * (1 + kErrorTolerance)))); - EXPECT_THAT(client_stats.dropped_requests(kThrottleDropType), + EXPECT_THAT(client_stats.dropped_requests(kThrottleDropType), ::testing::AllOf( ::testing::Ge(total_rpc * (1 - kDropRateForLb) * kDropRateForThrottle * (1 - kErrorTolerance)), @@ -5706,97 +5706,97 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { kDropRateForThrottle * (1 + kErrorTolerance)))); } -TString TestTypeName(const ::testing::TestParamInfo<TestType>& info) { +TString TestTypeName(const ::testing::TestParamInfo<TestType>& info) { return info.param.AsString(); } -// TestType params: -// - use_xds_resolver -// - enable_load_reporting -// - enable_rds_testing = false -// - use_v2 = false - +// TestType params: +// - use_xds_resolver +// - enable_load_reporting +// - enable_rds_testing = false +// - use_v2 = false + INSTANTIATE_TEST_SUITE_P(XdsTest, BasicTest, ::testing::Values(TestType(false, true), TestType(false, false), - TestType(true, false), + TestType(true, false), TestType(true, true)), &TestTypeName); -// Run with both fake resolver and xds resolver. -// Don't run with load reporting or v2 or RDS, since they are irrelevant to -// the tests. +// Run with both fake resolver and xds resolver. +// Don't run with load reporting or v2 or RDS, since they are irrelevant to +// the tests. INSTANTIATE_TEST_SUITE_P(XdsTest, SecureNamingTest, - ::testing::Values(TestType(false, false), - TestType(true, false)), + ::testing::Values(TestType(false, false), + TestType(true, false)), + &TestTypeName); + +// LDS depends on XdsResolver. +INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest, + ::testing::Values(TestType(true, false), + TestType(true, true)), + &TestTypeName); + +// LDS/RDS commmon tests depend on XdsResolver. +INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest, + ::testing::Values(TestType(true, false), + TestType(true, true), + TestType(true, false, true), + TestType(true, true, true), + // Also test with xDS v2. + TestType(true, true, true, true)), + &TestTypeName); + +// CDS depends on XdsResolver. +INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest, + ::testing::Values(TestType(true, false), + TestType(true, true)), + &TestTypeName); + +// EDS could be tested with or without XdsResolver, but the tests would +// be the same either way, so we test it only with XdsResolver. 
+INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest, + ::testing::Values(TestType(true, false), + TestType(true, true)), + &TestTypeName); + +// Test initial resource timeouts for each resource type. +// Do this only for XdsResolver with RDS enabled, so that we can test +// all resource types. +// Run with V3 only, since the functionality is no different in V2. +INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest, + ::testing::Values(TestType(true, false, true)), + &TestTypeName); + +// XdsResolverOnlyTest depends on XdsResolver. +INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest, + ::testing::Values(TestType(true, false), + TestType(true, true)), + &TestTypeName); + +// XdsResolverLoadReprtingOnlyTest depends on XdsResolver and load reporting. +INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest, + ::testing::Values(TestType(true, true)), &TestTypeName); -// LDS depends on XdsResolver. -INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest, - ::testing::Values(TestType(true, false), - TestType(true, true)), - &TestTypeName); - -// LDS/RDS commmon tests depend on XdsResolver. -INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest, - ::testing::Values(TestType(true, false), - TestType(true, true), - TestType(true, false, true), - TestType(true, true, true), - // Also test with xDS v2. - TestType(true, true, true, true)), - &TestTypeName); - -// CDS depends on XdsResolver. -INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest, - ::testing::Values(TestType(true, false), - TestType(true, true)), - &TestTypeName); - -// EDS could be tested with or without XdsResolver, but the tests would -// be the same either way, so we test it only with XdsResolver. -INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest, - ::testing::Values(TestType(true, false), - TestType(true, true)), - &TestTypeName); - -// Test initial resource timeouts for each resource type. -// Do this only for XdsResolver with RDS enabled, so that we can test -// all resource types. -// Run with V3 only, since the functionality is no different in V2. -INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest, - ::testing::Values(TestType(true, false, true)), - &TestTypeName); - -// XdsResolverOnlyTest depends on XdsResolver. -INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest, - ::testing::Values(TestType(true, false), - TestType(true, true)), - &TestTypeName); - -// XdsResolverLoadReprtingOnlyTest depends on XdsResolver and load reporting. 
-INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest, - ::testing::Values(TestType(true, true)), - &TestTypeName); - INSTANTIATE_TEST_SUITE_P(XdsTest, LocalityMapTest, ::testing::Values(TestType(false, true), TestType(false, false), - TestType(true, false), + TestType(true, false), TestType(true, true)), &TestTypeName); INSTANTIATE_TEST_SUITE_P(XdsTest, FailoverTest, ::testing::Values(TestType(false, true), TestType(false, false), - TestType(true, false), + TestType(true, false), TestType(true, true)), &TestTypeName); INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest, ::testing::Values(TestType(false, true), TestType(false, false), - TestType(true, false), + TestType(true, false), TestType(true, true)), &TestTypeName); diff --git a/contrib/libs/grpc/test/cpp/end2end/ya.make b/contrib/libs/grpc/test/cpp/end2end/ya.make index d297bbbb27..b9c1dc7fe0 100644 --- a/contrib/libs/grpc/test/cpp/end2end/ya.make +++ b/contrib/libs/grpc/test/cpp/end2end/ya.make @@ -2,10 +2,10 @@ LIBRARY() LICENSE(Apache-2.0) -LICENSE_TEXTS(.yandex_meta/licenses.list.txt) +LICENSE_TEXTS(.yandex_meta/licenses.list.txt) + +OWNER(dvshkurko) -OWNER(dvshkurko) - PEERDIR( contrib/libs/grpc/src/proto/grpc/health/v1 contrib/libs/grpc/src/proto/grpc/testing @@ -16,10 +16,10 @@ PEERDIR( contrib/restricted/googletest/googletest ) -ADDINCL( - ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc - contrib/libs/grpc -) +ADDINCL( + ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc + contrib/libs/grpc +) NO_COMPILER_WARNINGS() diff --git a/contrib/libs/grpc/test/cpp/util/.yandex_meta/licenses.list.txt b/contrib/libs/grpc/test/cpp/util/.yandex_meta/licenses.list.txt index d41622f4e5..d2dadabed9 100644 --- a/contrib/libs/grpc/test/cpp/util/.yandex_meta/licenses.list.txt +++ b/contrib/libs/grpc/test/cpp/util/.yandex_meta/licenses.list.txt @@ -1,32 +1,32 @@ -====================Apache-2.0==================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - - -====================COPYRIGHT==================== - * Copyright 2015 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2015-2016 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2016 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2017 gRPC authors. - - -====================COPYRIGHT==================== - * Copyright 2018 gRPC authors. +====================Apache-2.0==================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ + +====================COPYRIGHT==================== + * Copyright 2015 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2015-2016 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2016 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2017 gRPC authors. + + +====================COPYRIGHT==================== + * Copyright 2018 gRPC authors. diff --git a/contrib/libs/grpc/test/cpp/util/byte_buffer_proto_helper.cc b/contrib/libs/grpc/test/cpp/util/byte_buffer_proto_helper.cc index cad822c1f9..5971b53075 100644 --- a/contrib/libs/grpc/test/cpp/util/byte_buffer_proto_helper.cc +++ b/contrib/libs/grpc/test/cpp/util/byte_buffer_proto_helper.cc @@ -24,7 +24,7 @@ namespace testing { bool ParseFromByteBuffer(ByteBuffer* buffer, grpc::protobuf::Message* message) { std::vector<Slice> slices; (void)buffer->Dump(&slices); - TString buf; + TString buf; buf.reserve(buffer->Length()); for (auto s = slices.begin(); s != slices.end(); s++) { buf.append(reinterpret_cast<const char*>(s->begin()), s->size()); @@ -34,7 +34,7 @@ bool ParseFromByteBuffer(ByteBuffer* buffer, grpc::protobuf::Message* message) { std::unique_ptr<ByteBuffer> SerializeToByteBuffer( grpc::protobuf::Message* message) { - TString buf; + TString buf; message->SerializeToString(&buf); Slice slice(buf); return std::unique_ptr<ByteBuffer>(new ByteBuffer(&slice, 1)); @@ -42,7 +42,7 @@ std::unique_ptr<ByteBuffer> SerializeToByteBuffer( bool SerializeToByteBufferInPlace(grpc::protobuf::Message* message, ByteBuffer* buffer) { - TString buf; + TString buf; if (!message->SerializeToString(&buf)) { return false; } diff --git a/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc b/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc index 39f80086bd..c63f351a8f 100644 --- a/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc +++ b/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc @@ -27,8 +27,8 @@ #include <grpcpp/support/slice.h> #include <gtest/gtest.h> -#include "test/core/util/test_config.h" - +#include "test/core/util/test_config.h" + namespace grpc { static internal::GrpcLibraryInitializer g_gli_initializer; @@ -127,7 +127,7 @@ TEST_F(ByteBufferTest, SerializationMakesCopy) { } // namespace grpc int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); return ret; diff --git a/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.cc b/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.cc index 78ed41c18c..d4b4026774 100644 --- a/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.cc +++ b/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.cc @@ -26,8 +26,8 @@ #include <grpcpp/impl/codegen/config_protobuf.h> #include <gtest/gtest.h> -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/json/json.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/json/json.h" #include "src/proto/grpc/channelz/channelz.pb.h" namespace grpc { @@ -38,7 +38,7 @@ namespace { // then back to json. 
This ensures that the json string was correctly formatted // according to https://developers.google.com/protocol-buffers/docs/proto3#json template <typename Message> -void VaidateProtoJsonTranslation(const TString& json_str) { +void VaidateProtoJsonTranslation(const TString& json_str) { Message msg; grpc::protobuf::json::JsonParseOptions parse_options; // If the following line is failing, then uncomment the last line of the @@ -49,21 +49,21 @@ void VaidateProtoJsonTranslation(const TString& json_str) { grpc::protobuf::util::Status s = grpc::protobuf::json::JsonStringToMessage(json_str, &msg, parse_options); EXPECT_TRUE(s.ok()); - TString proto_json_str; + TString proto_json_str; grpc::protobuf::json::JsonPrintOptions print_options; // We usually do not want this to be true, however it can be helpful to // uncomment and see the output produced then all fields are printed. // print_options.always_print_primitive_fields = true; s = grpc::protobuf::json::MessageToJsonString(msg, &proto_json_str); EXPECT_TRUE(s.ok()); - // Parse JSON and re-dump to string, to make sure formatting is the - // same as what would be generated by our JSON library. - grpc_error* error = GRPC_ERROR_NONE; - grpc_core::Json parsed_json = - grpc_core::Json::Parse(proto_json_str.c_str(), &error); - ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); - ASSERT_EQ(parsed_json.type(), grpc_core::Json::Type::OBJECT); - proto_json_str = parsed_json.Dump(); + // Parse JSON and re-dump to string, to make sure formatting is the + // same as what would be generated by our JSON library. + grpc_error* error = GRPC_ERROR_NONE; + grpc_core::Json parsed_json = + grpc_core::Json::Parse(proto_json_str.c_str(), &error); + ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error); + ASSERT_EQ(parsed_json.type(), grpc_core::Json::Type::OBJECT); + proto_json_str = parsed_json.Dump(); // uncomment these to compare the json strings. 
// gpr_log(GPR_ERROR, "tracer json: %s", json_str.c_str()); // gpr_log(GPR_ERROR, "proto json: %s", proto_json_str.c_str()); @@ -74,39 +74,39 @@ void VaidateProtoJsonTranslation(const TString& json_str) { namespace testing { -void ValidateChannelTraceProtoJsonTranslation(const char* json_c_str) { +void ValidateChannelTraceProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::ChannelTrace>(json_c_str); } -void ValidateChannelProtoJsonTranslation(const char* json_c_str) { +void ValidateChannelProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::Channel>(json_c_str); } -void ValidateGetTopChannelsResponseProtoJsonTranslation( - const char* json_c_str) { +void ValidateGetTopChannelsResponseProtoJsonTranslation( + const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::GetTopChannelsResponse>( json_c_str); } -void ValidateGetChannelResponseProtoJsonTranslation(const char* json_c_str) { +void ValidateGetChannelResponseProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::GetChannelResponse>( json_c_str); } -void ValidateGetServerResponseProtoJsonTranslation(const char* json_c_str) { +void ValidateGetServerResponseProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::GetServerResponse>( json_c_str); } -void ValidateSubchannelProtoJsonTranslation(const char* json_c_str) { +void ValidateSubchannelProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::Subchannel>(json_c_str); } -void ValidateServerProtoJsonTranslation(const char* json_c_str) { +void ValidateServerProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::Server>(json_c_str); } -void ValidateGetServersResponseProtoJsonTranslation(const char* json_c_str) { +void ValidateGetServersResponseProtoJsonTranslation(const char* json_c_str) { VaidateProtoJsonTranslation<grpc::channelz::v1::GetServersResponse>( json_c_str); } diff --git a/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.h b/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.h index 7424182d09..664e899deb 100644 --- a/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.h +++ b/contrib/libs/grpc/test/cpp/util/channel_trace_proto_helper.h @@ -22,14 +22,14 @@ namespace grpc { namespace testing { -void ValidateChannelTraceProtoJsonTranslation(const char* json_c_str); -void ValidateChannelProtoJsonTranslation(const char* json_c_str); -void ValidateGetTopChannelsResponseProtoJsonTranslation(const char* json_c_str); -void ValidateGetChannelResponseProtoJsonTranslation(const char* json_c_str); -void ValidateGetServerResponseProtoJsonTranslation(const char* json_c_str); -void ValidateSubchannelProtoJsonTranslation(const char* json_c_str); -void ValidateServerProtoJsonTranslation(const char* json_c_str); -void ValidateGetServersResponseProtoJsonTranslation(const char* json_c_str); +void ValidateChannelTraceProtoJsonTranslation(const char* json_c_str); +void ValidateChannelProtoJsonTranslation(const char* json_c_str); +void ValidateGetTopChannelsResponseProtoJsonTranslation(const char* json_c_str); +void ValidateGetChannelResponseProtoJsonTranslation(const char* json_c_str); +void ValidateGetServerResponseProtoJsonTranslation(const char* json_c_str); +void ValidateSubchannelProtoJsonTranslation(const char* json_c_str); +void ValidateServerProtoJsonTranslation(const char* json_c_str); +void 
ValidateGetServersResponseProtoJsonTranslation(const char* json_c_str); } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/util/channelz_sampler.cc b/contrib/libs/grpc/test/cpp/util/channelz_sampler.cc index 0a7627271f..e6bde68556 100644 --- a/contrib/libs/grpc/test/cpp/util/channelz_sampler.cc +++ b/contrib/libs/grpc/test/cpp/util/channelz_sampler.cc @@ -1,588 +1,588 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -#include <unistd.h> - -#include <cstdlib> -#include <fstream> -#include <iostream> -#include <memory> -#include <ostream> -#include <queue> -#include <util/generic/string.h> - -#include "y_absl/strings/str_format.h" -#include "y_absl/strings/str_join.h" -#include "gflags/gflags.h" -#include "google/protobuf/text_format.h" -#include "grpc/grpc.h" -#include "grpc/support/port_platform.h" -#include "grpcpp/channel.h" -#include "grpcpp/client_context.h" -#include "grpcpp/create_channel.h" -#include "grpcpp/ext/channelz_service_plugin.h" -#include "grpcpp/grpcpp.h" -#include "grpcpp/security/credentials.h" -#include "grpcpp/security/server_credentials.h" -#include "grpcpp/server.h" -#include "grpcpp/server_builder.h" -#include "grpcpp/server_context.h" -#include "src/core/lib/json/json.h" -#include "src/cpp/server/channelz/channelz_service.h" -#include "src/proto/grpc/channelz/channelz.pb.h" -#include "test/core/util/test_config.h" -#include "test/cpp/util/test_config.h" -#include "test/cpp/util/test_credentials_provider.h" - -DEFINE_string(server_address, "", "channelz server address"); -DEFINE_string(custom_credentials_type, "", "custom credentials type"); -DEFINE_int64(sampling_times, 1, "number of sampling"); -DEFINE_int64(sampling_interval_seconds, 0, "sampling interval in seconds"); -DEFINE_string(output_json, "", "output filename in json format"); - -namespace { -using grpc::ClientContext; -using grpc::Status; -using grpc::StatusCode; -using grpc::channelz::v1::GetChannelRequest; -using grpc::channelz::v1::GetChannelResponse; -using grpc::channelz::v1::GetServerRequest; -using grpc::channelz::v1::GetServerResponse; -using grpc::channelz::v1::GetServerSocketsRequest; -using grpc::channelz::v1::GetServerSocketsResponse; -using grpc::channelz::v1::GetServersRequest; -using grpc::channelz::v1::GetServersResponse; -using grpc::channelz::v1::GetSocketRequest; -using grpc::channelz::v1::GetSocketResponse; -using grpc::channelz::v1::GetSubchannelRequest; -using grpc::channelz::v1::GetSubchannelResponse; -using grpc::channelz::v1::GetTopChannelsRequest; -using grpc::channelz::v1::GetTopChannelsResponse; -} // namespace - -class ChannelzSampler final { - public: - // Get server_id of a server - int64_t GetServerID(const grpc::channelz::v1::Server& server) { - return server.ref().server_id(); - } - - // Get channel_id of a channel - inline int64_t GetChannelID(const grpc::channelz::v1::Channel& channel) { - return channel.ref().channel_id(); - } - - // Get subchannel_id of a subchannel - inline 
int64_t GetSubchannelID( - const grpc::channelz::v1::Subchannel& subchannel) { - return subchannel.ref().subchannel_id(); - } - - // Get socket_id of a socket - inline int64_t GetSocketID(const grpc::channelz::v1::Socket& socket) { - return socket.ref().socket_id(); - } - - // Get name of a server - inline TString GetServerName(const grpc::channelz::v1::Server& server) { - return server.ref().name(); - } - - // Get name of a channel - inline TString GetChannelName( - const grpc::channelz::v1::Channel& channel) { - return channel.ref().name(); - } - - // Get name of a subchannel - inline TString GetSubchannelName( - const grpc::channelz::v1::Subchannel& subchannel) { - return subchannel.ref().name(); - } - - // Get name of a socket - inline TString GetSocketName(const grpc::channelz::v1::Socket& socket) { - return socket.ref().name(); - } - - // Get a channel based on channel_id - grpc::channelz::v1::Channel GetChannelRPC(int64_t channel_id) { - GetChannelRequest get_channel_request; - get_channel_request.set_channel_id(channel_id); - GetChannelResponse get_channel_response; - ClientContext get_channel_context; - get_channel_context.set_deadline( - grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); - Status status = channelz_stub_->GetChannel( - &get_channel_context, get_channel_request, &get_channel_response); - if (!status.ok()) { - gpr_log(GPR_ERROR, "GetChannelRPC failed: %s", - get_channel_context.debug_error_string().c_str()); - GPR_ASSERT(0); - } - return get_channel_response.channel(); - } - - // Get a subchannel based on subchannel_id - grpc::channelz::v1::Subchannel GetSubchannelRPC(int64_t subchannel_id) { - GetSubchannelRequest get_subchannel_request; - get_subchannel_request.set_subchannel_id(subchannel_id); - GetSubchannelResponse get_subchannel_response; - ClientContext get_subchannel_context; - get_subchannel_context.set_deadline( - grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); - Status status = channelz_stub_->GetSubchannel(&get_subchannel_context, - get_subchannel_request, - &get_subchannel_response); - if (!status.ok()) { - gpr_log(GPR_ERROR, "GetSubchannelRPC failed: %s", - get_subchannel_context.debug_error_string().c_str()); - GPR_ASSERT(0); - } - return get_subchannel_response.subchannel(); - } - - // get a socket based on socket_id - grpc::channelz::v1::Socket GetSocketRPC(int64_t socket_id) { - GetSocketRequest get_socket_request; - get_socket_request.set_socket_id(socket_id); - GetSocketResponse get_socket_response; - ClientContext get_socket_context; - get_socket_context.set_deadline( - grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); - Status status = channelz_stub_->GetSocket( - &get_socket_context, get_socket_request, &get_socket_response); - if (!status.ok()) { - gpr_log(GPR_ERROR, "GetSocketRPC failed: %s", - get_socket_context.debug_error_string().c_str()); - GPR_ASSERT(0); - } - return get_socket_response.socket(); - } - - // get the descedent channels/subchannels/sockets of a channel - // push descedent channels/subchannels to queue for layer traverse - // store descedent channels/subchannels/sockets for dumping data - void GetChannelDescedence( - const grpc::channelz::v1::Channel& channel, - std::queue<grpc::channelz::v1::Channel>& channel_queue, - std::queue<grpc::channelz::v1::Subchannel>& subchannel_queue) { - std::cout << " Channel ID" << GetChannelID(channel) << "_" - << GetChannelName(channel) << " descendence - "; - if (channel.channel_ref_size() > 0 || channel.subchannel_ref_size() > 0) { - if (channel.channel_ref_size() > 
0) { - std::cout << "channel: "; - for (const auto& _channelref : channel.channel_ref()) { - int64_t ch_id = _channelref.channel_id(); - std::cout << "ID" << ch_id << "_" << _channelref.name() << " "; - grpc::channelz::v1::Channel ch = GetChannelRPC(ch_id); - channel_queue.push(ch); - if (CheckID(ch_id)) { - all_channels_.push_back(ch); - StoreChannelInJson(ch); - } - } - if (channel.subchannel_ref_size() > 0) { - std::cout << ", "; - } - } - if (channel.subchannel_ref_size() > 0) { - std::cout << "subchannel: "; - for (const auto& _subchannelref : channel.subchannel_ref()) { - int64_t subch_id = _subchannelref.subchannel_id(); - std::cout << "ID" << subch_id << "_" << _subchannelref.name() << " "; - grpc::channelz::v1::Subchannel subch = GetSubchannelRPC(subch_id); - subchannel_queue.push(subch); - if (CheckID(subch_id)) { - all_subchannels_.push_back(subch); - StoreSubchannelInJson(subch); - } - } - } - } else if (channel.socket_ref_size() > 0) { - std::cout << "socket: "; - for (const auto& _socketref : channel.socket_ref()) { - int64_t so_id = _socketref.socket_id(); - std::cout << "ID" << so_id << "_" << _socketref.name() << " "; - grpc::channelz::v1::Socket so = GetSocketRPC(so_id); - if (CheckID(so_id)) { - all_sockets_.push_back(so); - StoreSocketInJson(so); - } - } - } - std::cout << std::endl; - } - - // get the descedent channels/subchannels/sockets of a subchannel - // push descedent channels/subchannels to queue for layer traverse - // store descedent channels/subchannels/sockets for dumping data - void GetSubchannelDescedence( - grpc::channelz::v1::Subchannel& subchannel, - std::queue<grpc::channelz::v1::Channel>& channel_queue, - std::queue<grpc::channelz::v1::Subchannel>& subchannel_queue) { - std::cout << " Subchannel ID" << GetSubchannelID(subchannel) << "_" - << GetSubchannelName(subchannel) << " descendence - "; - if (subchannel.channel_ref_size() > 0 || - subchannel.subchannel_ref_size() > 0) { - if (subchannel.channel_ref_size() > 0) { - std::cout << "channel: "; - for (const auto& _channelref : subchannel.channel_ref()) { - int64_t ch_id = _channelref.channel_id(); - std::cout << "ID" << ch_id << "_" << _channelref.name() << " "; - grpc::channelz::v1::Channel ch = GetChannelRPC(ch_id); - channel_queue.push(ch); - if (CheckID(ch_id)) { - all_channels_.push_back(ch); - StoreChannelInJson(ch); - } - } - if (subchannel.subchannel_ref_size() > 0) { - std::cout << ", "; - } - } - if (subchannel.subchannel_ref_size() > 0) { - std::cout << "subchannel: "; - for (const auto& _subchannelref : subchannel.subchannel_ref()) { - int64_t subch_id = _subchannelref.subchannel_id(); - std::cout << "ID" << subch_id << "_" << _subchannelref.name() << " "; - grpc::channelz::v1::Subchannel subch = GetSubchannelRPC(subch_id); - subchannel_queue.push(subch); - if (CheckID(subch_id)) { - all_subchannels_.push_back(subch); - StoreSubchannelInJson(subch); - } - } - } - } else if (subchannel.socket_ref_size() > 0) { - std::cout << "socket: "; - for (const auto& _socketref : subchannel.socket_ref()) { - int64_t so_id = _socketref.socket_id(); - std::cout << "ID" << so_id << "_" << _socketref.name() << " "; - grpc::channelz::v1::Socket so = GetSocketRPC(so_id); - if (CheckID(so_id)) { - all_sockets_.push_back(so); - StoreSocketInJson(so); - } - } - } - std::cout << std::endl; - } - - // Set up the channelz sampler client - // Initialize json as an array - void Setup(const TString& custom_credentials_type, - const TString& server_address) { - json_ = grpc_core::Json::Array(); - 
rpc_timeout_seconds_ = 20; - grpc::ChannelArguments channel_args; - std::shared_ptr<grpc::ChannelCredentials> channel_creds = - grpc::testing::GetCredentialsProvider()->GetChannelCredentials( - custom_credentials_type, &channel_args); - if (!channel_creds) { - gpr_log(GPR_ERROR, - "Wrong user credential type: %s. Allowed credential types: " - "INSECURE_CREDENTIALS, ssl, alts, google_default_credentials.", - custom_credentials_type.c_str()); - GPR_ASSERT(0); - } - std::shared_ptr<grpc::Channel> channel = - CreateChannel(server_address, channel_creds); - channelz_stub_ = grpc::channelz::v1::Channelz::NewStub(channel); - } - - // Get all servers, keep querying until getting all - // Store servers for dumping data - // Need to check id repeating for servers - void GetServersRPC() { - int64_t server_start_id = 0; - while (true) { - GetServersRequest get_servers_request; - GetServersResponse get_servers_response; - ClientContext get_servers_context; - get_servers_context.set_deadline( - grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); - get_servers_request.set_start_server_id(server_start_id); - Status status = channelz_stub_->GetServers( - &get_servers_context, get_servers_request, &get_servers_response); - if (!status.ok()) { - if (status.error_code() == StatusCode::UNIMPLEMENTED) { - gpr_log(GPR_ERROR, - "Error status UNIMPLEMENTED. Please check and make sure " - "channelz has been registered on the server being queried."); - } else { - gpr_log(GPR_ERROR, - "GetServers RPC with GetServersRequest.server_start_id=%d, " - "failed: %s", - int(server_start_id), - get_servers_context.debug_error_string().c_str()); - } - GPR_ASSERT(0); - } - for (const auto& _server : get_servers_response.server()) { - all_servers_.push_back(_server); - StoreServerInJson(_server); - } - if (!get_servers_response.end()) { - server_start_id = GetServerID(all_servers_.back()) + 1; - } else { - break; - } - } - std::cout << "Number of servers = " << all_servers_.size() << std::endl; - } - - // Get sockets that belongs to servers - // Store sockets for dumping data - void GetSocketsOfServers() { - for (const auto& _server : all_servers_) { - std::cout << "Server ID" << GetServerID(_server) << "_" - << GetServerName(_server) << " listen_socket - "; - for (const auto& _socket : _server.listen_socket()) { - int64_t so_id = _socket.socket_id(); - std::cout << "ID" << so_id << "_" << _socket.name() << " "; - if (CheckID(so_id)) { - grpc::channelz::v1::Socket so = GetSocketRPC(so_id); - all_sockets_.push_back(so); - StoreSocketInJson(so); - } - } - std::cout << std::endl; - } - } - - // Get all top channels, keep querying until getting all - // Store channels for dumping data - // No need to check id repeating for top channels - void GetTopChannelsRPC() { - int64_t channel_start_id = 0; - while (true) { - GetTopChannelsRequest get_top_channels_request; - GetTopChannelsResponse get_top_channels_response; - ClientContext get_top_channels_context; - get_top_channels_context.set_deadline( - grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); - get_top_channels_request.set_start_channel_id(channel_start_id); - Status status = channelz_stub_->GetTopChannels( - &get_top_channels_context, get_top_channels_request, - &get_top_channels_response); - if (!status.ok()) { - gpr_log(GPR_ERROR, - "GetTopChannels RPC with " - "GetTopChannelsRequest.channel_start_id=%d failed: %s", - int(channel_start_id), - get_top_channels_context.debug_error_string().c_str()); - GPR_ASSERT(0); - } - for (const auto& _topchannel : 
get_top_channels_response.channel()) { - top_channels_.push_back(_topchannel); - all_channels_.push_back(_topchannel); - StoreChannelInJson(_topchannel); - } - if (!get_top_channels_response.end()) { - channel_start_id = GetChannelID(top_channels_.back()) + 1; - } else { - break; - } - } - std::cout << std::endl - << "Number of top channels = " << top_channels_.size() - << std::endl; - } - - // layer traverse for each top channel - void TraverseTopChannels() { - for (const auto& _topchannel : top_channels_) { - int tree_depth = 0; - std::queue<grpc::channelz::v1::Channel> channel_queue; - std::queue<grpc::channelz::v1::Subchannel> subchannel_queue; - std::cout << "Tree depth = " << tree_depth << std::endl; - GetChannelDescedence(_topchannel, channel_queue, subchannel_queue); - while (!channel_queue.empty() || !subchannel_queue.empty()) { - ++tree_depth; - std::cout << "Tree depth = " << tree_depth << std::endl; - int ch_q_size = channel_queue.size(); - int subch_q_size = subchannel_queue.size(); - for (int i = 0; i < ch_q_size; ++i) { - grpc::channelz::v1::Channel ch = channel_queue.front(); - channel_queue.pop(); - GetChannelDescedence(ch, channel_queue, subchannel_queue); - } - for (int i = 0; i < subch_q_size; ++i) { - grpc::channelz::v1::Subchannel subch = subchannel_queue.front(); - subchannel_queue.pop(); - GetSubchannelDescedence(subch, channel_queue, subchannel_queue); - } - } - std::cout << std::endl; - } - } - - // dump data of all entities to stdout - void DumpStdout() { - TString data_str; - for (const auto& _channel : all_channels_) { - std::cout << "channel ID" << GetChannelID(_channel) << "_" - << GetChannelName(_channel) << " data:" << std::endl; - // TODO(mohanli): TextFormat::PrintToString records time as seconds and - // nanos. Need a more human readable way. 
- ::google::protobuf::TextFormat::PrintToString(_channel.data(), &data_str); - printf("%s\n", data_str.c_str()); - } - for (const auto& _subchannel : all_subchannels_) { - std::cout << "subchannel ID" << GetSubchannelID(_subchannel) << "_" - << GetSubchannelName(_subchannel) << " data:" << std::endl; - ::google::protobuf::TextFormat::PrintToString(_subchannel.data(), - &data_str); - printf("%s\n", data_str.c_str()); - } - for (const auto& _server : all_servers_) { - std::cout << "server ID" << GetServerID(_server) << "_" - << GetServerName(_server) << " data:" << std::endl; - ::google::protobuf::TextFormat::PrintToString(_server.data(), &data_str); - printf("%s\n", data_str.c_str()); - } - for (const auto& _socket : all_sockets_) { - std::cout << "socket ID" << GetSocketID(_socket) << "_" - << GetSocketName(_socket) << " data:" << std::endl; - ::google::protobuf::TextFormat::PrintToString(_socket.data(), &data_str); - printf("%s\n", data_str.c_str()); - } - } - - // Store a channel in Json - void StoreChannelInJson(const grpc::channelz::v1::Channel& channel) { - TString id = grpc::to_string(GetChannelID(channel)); - TString type = "Channel"; - TString description; - ::google::protobuf::TextFormat::PrintToString(channel.data(), &description); - grpc_core::Json description_json = grpc_core::Json(description); - StoreEntityInJson(id, type, description_json); - } - - // Store a subchannel in Json - void StoreSubchannelInJson(const grpc::channelz::v1::Subchannel& subchannel) { - TString id = grpc::to_string(GetSubchannelID(subchannel)); - TString type = "Subchannel"; - TString description; - ::google::protobuf::TextFormat::PrintToString(subchannel.data(), - &description); - grpc_core::Json description_json = grpc_core::Json(description); - StoreEntityInJson(id, type, description_json); - } - - // Store a server in Json - void StoreServerInJson(const grpc::channelz::v1::Server& server) { - TString id = grpc::to_string(GetServerID(server)); - TString type = "Server"; - TString description; - ::google::protobuf::TextFormat::PrintToString(server.data(), &description); - grpc_core::Json description_json = grpc_core::Json(description); - StoreEntityInJson(id, type, description_json); - } - - // Store a socket in Json - void StoreSocketInJson(const grpc::channelz::v1::Socket& socket) { - TString id = grpc::to_string(GetSocketID(socket)); - TString type = "Socket"; - TString description; - ::google::protobuf::TextFormat::PrintToString(socket.data(), &description); - grpc_core::Json description_json = grpc_core::Json(description); - StoreEntityInJson(id, type, description_json); - } - - // Store an entity in Json - void StoreEntityInJson(TString& id, TString& type, - const grpc_core::Json& description) { - TString start, finish; - gpr_timespec ago = gpr_time_sub( - now_, - gpr_time_from_seconds(FLAGS_sampling_interval_seconds, GPR_TIMESPAN)); - std::stringstream ss; - const time_t time_now = now_.tv_sec; - ss << std::put_time(std::localtime(&time_now), "%F %T"); - finish = ss.str(); // example: "2019-02-01 12:12:18" - ss.str(""); - const time_t time_ago = ago.tv_sec; - ss << std::put_time(std::localtime(&time_ago), "%F %T"); - start = ss.str(); - grpc_core::Json obj = - grpc_core::Json::Object{{"Task", y_absl::StrFormat("%s_ID%s", type, id)}, - {"Start", start}, - {"Finish", finish}, - {"ID", id}, - {"Type", type}, - {"Description", description}}; - json_.mutable_array()->push_back(obj); - } - - // Dump data in json - TString DumpJson() { return json_.Dump(); } - - // Check if one entity has been 
recorded - bool CheckID(int64_t id) { - if (id_set_.count(id) == 0) { - id_set_.insert(id); - return true; - } else { - return false; - } - } - - // Record current time - void RecordNow() { now_ = gpr_now(GPR_CLOCK_REALTIME); } - - private: - std::unique_ptr<grpc::channelz::v1::Channelz::Stub> channelz_stub_; - std::vector<grpc::channelz::v1::Channel> top_channels_; - std::vector<grpc::channelz::v1::Server> all_servers_; - std::vector<grpc::channelz::v1::Channel> all_channels_; - std::vector<grpc::channelz::v1::Subchannel> all_subchannels_; - std::vector<grpc::channelz::v1::Socket> all_sockets_; - std::unordered_set<int64_t> id_set_; - grpc_core::Json json_; - int64_t rpc_timeout_seconds_; - gpr_timespec now_; -}; - -int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); - grpc::testing::InitTest(&argc, &argv, true); - std::ofstream output_file(FLAGS_output_json); - for (int i = 0; i < FLAGS_sampling_times; ++i) { - ChannelzSampler channelz_sampler; - channelz_sampler.Setup(FLAGS_custom_credentials_type, FLAGS_server_address); - std::cout << "Wait for sampling interval " - << FLAGS_sampling_interval_seconds << "s..." << std::endl; - const gpr_timespec kDelay = gpr_time_add( - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(FLAGS_sampling_interval_seconds, GPR_TIMESPAN)); - gpr_sleep_until(kDelay); - std::cout << "##### " << i << "th sampling #####" << std::endl; - channelz_sampler.RecordNow(); - channelz_sampler.GetServersRPC(); - channelz_sampler.GetSocketsOfServers(); - channelz_sampler.GetTopChannelsRPC(); - channelz_sampler.TraverseTopChannels(); - channelz_sampler.DumpStdout(); - if (!FLAGS_output_json.empty()) { - output_file << channelz_sampler.DumpJson() << "\n" << std::flush; - } - } - output_file.close(); - return 0; -} +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +#include <unistd.h> + +#include <cstdlib> +#include <fstream> +#include <iostream> +#include <memory> +#include <ostream> +#include <queue> +#include <util/generic/string.h> + +#include "y_absl/strings/str_format.h" +#include "y_absl/strings/str_join.h" +#include "gflags/gflags.h" +#include "google/protobuf/text_format.h" +#include "grpc/grpc.h" +#include "grpc/support/port_platform.h" +#include "grpcpp/channel.h" +#include "grpcpp/client_context.h" +#include "grpcpp/create_channel.h" +#include "grpcpp/ext/channelz_service_plugin.h" +#include "grpcpp/grpcpp.h" +#include "grpcpp/security/credentials.h" +#include "grpcpp/security/server_credentials.h" +#include "grpcpp/server.h" +#include "grpcpp/server_builder.h" +#include "grpcpp/server_context.h" +#include "src/core/lib/json/json.h" +#include "src/cpp/server/channelz/channelz_service.h" +#include "src/proto/grpc/channelz/channelz.pb.h" +#include "test/core/util/test_config.h" +#include "test/cpp/util/test_config.h" +#include "test/cpp/util/test_credentials_provider.h" + +DEFINE_string(server_address, "", "channelz server address"); +DEFINE_string(custom_credentials_type, "", "custom credentials type"); +DEFINE_int64(sampling_times, 1, "number of sampling"); +DEFINE_int64(sampling_interval_seconds, 0, "sampling interval in seconds"); +DEFINE_string(output_json, "", "output filename in json format"); + +namespace { +using grpc::ClientContext; +using grpc::Status; +using grpc::StatusCode; +using grpc::channelz::v1::GetChannelRequest; +using grpc::channelz::v1::GetChannelResponse; +using grpc::channelz::v1::GetServerRequest; +using grpc::channelz::v1::GetServerResponse; +using grpc::channelz::v1::GetServerSocketsRequest; +using grpc::channelz::v1::GetServerSocketsResponse; +using grpc::channelz::v1::GetServersRequest; +using grpc::channelz::v1::GetServersResponse; +using grpc::channelz::v1::GetSocketRequest; +using grpc::channelz::v1::GetSocketResponse; +using grpc::channelz::v1::GetSubchannelRequest; +using grpc::channelz::v1::GetSubchannelResponse; +using grpc::channelz::v1::GetTopChannelsRequest; +using grpc::channelz::v1::GetTopChannelsResponse; +} // namespace + +class ChannelzSampler final { + public: + // Get server_id of a server + int64_t GetServerID(const grpc::channelz::v1::Server& server) { + return server.ref().server_id(); + } + + // Get channel_id of a channel + inline int64_t GetChannelID(const grpc::channelz::v1::Channel& channel) { + return channel.ref().channel_id(); + } + + // Get subchannel_id of a subchannel + inline int64_t GetSubchannelID( + const grpc::channelz::v1::Subchannel& subchannel) { + return subchannel.ref().subchannel_id(); + } + + // Get socket_id of a socket + inline int64_t GetSocketID(const grpc::channelz::v1::Socket& socket) { + return socket.ref().socket_id(); + } + + // Get name of a server + inline TString GetServerName(const grpc::channelz::v1::Server& server) { + return server.ref().name(); + } + + // Get name of a channel + inline TString GetChannelName( + const grpc::channelz::v1::Channel& channel) { + return channel.ref().name(); + } + + // Get name of a subchannel + inline TString GetSubchannelName( + const grpc::channelz::v1::Subchannel& subchannel) { + return subchannel.ref().name(); + } + + // Get name of a socket + inline TString GetSocketName(const grpc::channelz::v1::Socket& socket) { + return socket.ref().name(); + } + + // Get a channel based on channel_id + grpc::channelz::v1::Channel GetChannelRPC(int64_t channel_id) { + GetChannelRequest get_channel_request; + 
get_channel_request.set_channel_id(channel_id); + GetChannelResponse get_channel_response; + ClientContext get_channel_context; + get_channel_context.set_deadline( + grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); + Status status = channelz_stub_->GetChannel( + &get_channel_context, get_channel_request, &get_channel_response); + if (!status.ok()) { + gpr_log(GPR_ERROR, "GetChannelRPC failed: %s", + get_channel_context.debug_error_string().c_str()); + GPR_ASSERT(0); + } + return get_channel_response.channel(); + } + + // Get a subchannel based on subchannel_id + grpc::channelz::v1::Subchannel GetSubchannelRPC(int64_t subchannel_id) { + GetSubchannelRequest get_subchannel_request; + get_subchannel_request.set_subchannel_id(subchannel_id); + GetSubchannelResponse get_subchannel_response; + ClientContext get_subchannel_context; + get_subchannel_context.set_deadline( + grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); + Status status = channelz_stub_->GetSubchannel(&get_subchannel_context, + get_subchannel_request, + &get_subchannel_response); + if (!status.ok()) { + gpr_log(GPR_ERROR, "GetSubchannelRPC failed: %s", + get_subchannel_context.debug_error_string().c_str()); + GPR_ASSERT(0); + } + return get_subchannel_response.subchannel(); + } + + // get a socket based on socket_id + grpc::channelz::v1::Socket GetSocketRPC(int64_t socket_id) { + GetSocketRequest get_socket_request; + get_socket_request.set_socket_id(socket_id); + GetSocketResponse get_socket_response; + ClientContext get_socket_context; + get_socket_context.set_deadline( + grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); + Status status = channelz_stub_->GetSocket( + &get_socket_context, get_socket_request, &get_socket_response); + if (!status.ok()) { + gpr_log(GPR_ERROR, "GetSocketRPC failed: %s", + get_socket_context.debug_error_string().c_str()); + GPR_ASSERT(0); + } + return get_socket_response.socket(); + } + + // get the descedent channels/subchannels/sockets of a channel + // push descedent channels/subchannels to queue for layer traverse + // store descedent channels/subchannels/sockets for dumping data + void GetChannelDescedence( + const grpc::channelz::v1::Channel& channel, + std::queue<grpc::channelz::v1::Channel>& channel_queue, + std::queue<grpc::channelz::v1::Subchannel>& subchannel_queue) { + std::cout << " Channel ID" << GetChannelID(channel) << "_" + << GetChannelName(channel) << " descendence - "; + if (channel.channel_ref_size() > 0 || channel.subchannel_ref_size() > 0) { + if (channel.channel_ref_size() > 0) { + std::cout << "channel: "; + for (const auto& _channelref : channel.channel_ref()) { + int64_t ch_id = _channelref.channel_id(); + std::cout << "ID" << ch_id << "_" << _channelref.name() << " "; + grpc::channelz::v1::Channel ch = GetChannelRPC(ch_id); + channel_queue.push(ch); + if (CheckID(ch_id)) { + all_channels_.push_back(ch); + StoreChannelInJson(ch); + } + } + if (channel.subchannel_ref_size() > 0) { + std::cout << ", "; + } + } + if (channel.subchannel_ref_size() > 0) { + std::cout << "subchannel: "; + for (const auto& _subchannelref : channel.subchannel_ref()) { + int64_t subch_id = _subchannelref.subchannel_id(); + std::cout << "ID" << subch_id << "_" << _subchannelref.name() << " "; + grpc::channelz::v1::Subchannel subch = GetSubchannelRPC(subch_id); + subchannel_queue.push(subch); + if (CheckID(subch_id)) { + all_subchannels_.push_back(subch); + StoreSubchannelInJson(subch); + } + } + } + } else if (channel.socket_ref_size() > 0) { + std::cout << "socket: "; + for 
(const auto& _socketref : channel.socket_ref()) { + int64_t so_id = _socketref.socket_id(); + std::cout << "ID" << so_id << "_" << _socketref.name() << " "; + grpc::channelz::v1::Socket so = GetSocketRPC(so_id); + if (CheckID(so_id)) { + all_sockets_.push_back(so); + StoreSocketInJson(so); + } + } + } + std::cout << std::endl; + } + + // get the descedent channels/subchannels/sockets of a subchannel + // push descedent channels/subchannels to queue for layer traverse + // store descedent channels/subchannels/sockets for dumping data + void GetSubchannelDescedence( + grpc::channelz::v1::Subchannel& subchannel, + std::queue<grpc::channelz::v1::Channel>& channel_queue, + std::queue<grpc::channelz::v1::Subchannel>& subchannel_queue) { + std::cout << " Subchannel ID" << GetSubchannelID(subchannel) << "_" + << GetSubchannelName(subchannel) << " descendence - "; + if (subchannel.channel_ref_size() > 0 || + subchannel.subchannel_ref_size() > 0) { + if (subchannel.channel_ref_size() > 0) { + std::cout << "channel: "; + for (const auto& _channelref : subchannel.channel_ref()) { + int64_t ch_id = _channelref.channel_id(); + std::cout << "ID" << ch_id << "_" << _channelref.name() << " "; + grpc::channelz::v1::Channel ch = GetChannelRPC(ch_id); + channel_queue.push(ch); + if (CheckID(ch_id)) { + all_channels_.push_back(ch); + StoreChannelInJson(ch); + } + } + if (subchannel.subchannel_ref_size() > 0) { + std::cout << ", "; + } + } + if (subchannel.subchannel_ref_size() > 0) { + std::cout << "subchannel: "; + for (const auto& _subchannelref : subchannel.subchannel_ref()) { + int64_t subch_id = _subchannelref.subchannel_id(); + std::cout << "ID" << subch_id << "_" << _subchannelref.name() << " "; + grpc::channelz::v1::Subchannel subch = GetSubchannelRPC(subch_id); + subchannel_queue.push(subch); + if (CheckID(subch_id)) { + all_subchannels_.push_back(subch); + StoreSubchannelInJson(subch); + } + } + } + } else if (subchannel.socket_ref_size() > 0) { + std::cout << "socket: "; + for (const auto& _socketref : subchannel.socket_ref()) { + int64_t so_id = _socketref.socket_id(); + std::cout << "ID" << so_id << "_" << _socketref.name() << " "; + grpc::channelz::v1::Socket so = GetSocketRPC(so_id); + if (CheckID(so_id)) { + all_sockets_.push_back(so); + StoreSocketInJson(so); + } + } + } + std::cout << std::endl; + } + + // Set up the channelz sampler client + // Initialize json as an array + void Setup(const TString& custom_credentials_type, + const TString& server_address) { + json_ = grpc_core::Json::Array(); + rpc_timeout_seconds_ = 20; + grpc::ChannelArguments channel_args; + std::shared_ptr<grpc::ChannelCredentials> channel_creds = + grpc::testing::GetCredentialsProvider()->GetChannelCredentials( + custom_credentials_type, &channel_args); + if (!channel_creds) { + gpr_log(GPR_ERROR, + "Wrong user credential type: %s. 
Allowed credential types: " + "INSECURE_CREDENTIALS, ssl, alts, google_default_credentials.", + custom_credentials_type.c_str()); + GPR_ASSERT(0); + } + std::shared_ptr<grpc::Channel> channel = + CreateChannel(server_address, channel_creds); + channelz_stub_ = grpc::channelz::v1::Channelz::NewStub(channel); + } + + // Get all servers, keep querying until getting all + // Store servers for dumping data + // Need to check id repeating for servers + void GetServersRPC() { + int64_t server_start_id = 0; + while (true) { + GetServersRequest get_servers_request; + GetServersResponse get_servers_response; + ClientContext get_servers_context; + get_servers_context.set_deadline( + grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); + get_servers_request.set_start_server_id(server_start_id); + Status status = channelz_stub_->GetServers( + &get_servers_context, get_servers_request, &get_servers_response); + if (!status.ok()) { + if (status.error_code() == StatusCode::UNIMPLEMENTED) { + gpr_log(GPR_ERROR, + "Error status UNIMPLEMENTED. Please check and make sure " + "channelz has been registered on the server being queried."); + } else { + gpr_log(GPR_ERROR, + "GetServers RPC with GetServersRequest.server_start_id=%d, " + "failed: %s", + int(server_start_id), + get_servers_context.debug_error_string().c_str()); + } + GPR_ASSERT(0); + } + for (const auto& _server : get_servers_response.server()) { + all_servers_.push_back(_server); + StoreServerInJson(_server); + } + if (!get_servers_response.end()) { + server_start_id = GetServerID(all_servers_.back()) + 1; + } else { + break; + } + } + std::cout << "Number of servers = " << all_servers_.size() << std::endl; + } + + // Get sockets that belongs to servers + // Store sockets for dumping data + void GetSocketsOfServers() { + for (const auto& _server : all_servers_) { + std::cout << "Server ID" << GetServerID(_server) << "_" + << GetServerName(_server) << " listen_socket - "; + for (const auto& _socket : _server.listen_socket()) { + int64_t so_id = _socket.socket_id(); + std::cout << "ID" << so_id << "_" << _socket.name() << " "; + if (CheckID(so_id)) { + grpc::channelz::v1::Socket so = GetSocketRPC(so_id); + all_sockets_.push_back(so); + StoreSocketInJson(so); + } + } + std::cout << std::endl; + } + } + + // Get all top channels, keep querying until getting all + // Store channels for dumping data + // No need to check id repeating for top channels + void GetTopChannelsRPC() { + int64_t channel_start_id = 0; + while (true) { + GetTopChannelsRequest get_top_channels_request; + GetTopChannelsResponse get_top_channels_response; + ClientContext get_top_channels_context; + get_top_channels_context.set_deadline( + grpc_timeout_seconds_to_deadline(rpc_timeout_seconds_)); + get_top_channels_request.set_start_channel_id(channel_start_id); + Status status = channelz_stub_->GetTopChannels( + &get_top_channels_context, get_top_channels_request, + &get_top_channels_response); + if (!status.ok()) { + gpr_log(GPR_ERROR, + "GetTopChannels RPC with " + "GetTopChannelsRequest.channel_start_id=%d failed: %s", + int(channel_start_id), + get_top_channels_context.debug_error_string().c_str()); + GPR_ASSERT(0); + } + for (const auto& _topchannel : get_top_channels_response.channel()) { + top_channels_.push_back(_topchannel); + all_channels_.push_back(_topchannel); + StoreChannelInJson(_topchannel); + } + if (!get_top_channels_response.end()) { + channel_start_id = GetChannelID(top_channels_.back()) + 1; + } else { + break; + } + } + std::cout << std::endl + << "Number of 
top channels = " << top_channels_.size() + << std::endl; + } + + // layer traverse for each top channel + void TraverseTopChannels() { + for (const auto& _topchannel : top_channels_) { + int tree_depth = 0; + std::queue<grpc::channelz::v1::Channel> channel_queue; + std::queue<grpc::channelz::v1::Subchannel> subchannel_queue; + std::cout << "Tree depth = " << tree_depth << std::endl; + GetChannelDescedence(_topchannel, channel_queue, subchannel_queue); + while (!channel_queue.empty() || !subchannel_queue.empty()) { + ++tree_depth; + std::cout << "Tree depth = " << tree_depth << std::endl; + int ch_q_size = channel_queue.size(); + int subch_q_size = subchannel_queue.size(); + for (int i = 0; i < ch_q_size; ++i) { + grpc::channelz::v1::Channel ch = channel_queue.front(); + channel_queue.pop(); + GetChannelDescedence(ch, channel_queue, subchannel_queue); + } + for (int i = 0; i < subch_q_size; ++i) { + grpc::channelz::v1::Subchannel subch = subchannel_queue.front(); + subchannel_queue.pop(); + GetSubchannelDescedence(subch, channel_queue, subchannel_queue); + } + } + std::cout << std::endl; + } + } + + // dump data of all entities to stdout + void DumpStdout() { + TString data_str; + for (const auto& _channel : all_channels_) { + std::cout << "channel ID" << GetChannelID(_channel) << "_" + << GetChannelName(_channel) << " data:" << std::endl; + // TODO(mohanli): TextFormat::PrintToString records time as seconds and + // nanos. Need a more human readable way. + ::google::protobuf::TextFormat::PrintToString(_channel.data(), &data_str); + printf("%s\n", data_str.c_str()); + } + for (const auto& _subchannel : all_subchannels_) { + std::cout << "subchannel ID" << GetSubchannelID(_subchannel) << "_" + << GetSubchannelName(_subchannel) << " data:" << std::endl; + ::google::protobuf::TextFormat::PrintToString(_subchannel.data(), + &data_str); + printf("%s\n", data_str.c_str()); + } + for (const auto& _server : all_servers_) { + std::cout << "server ID" << GetServerID(_server) << "_" + << GetServerName(_server) << " data:" << std::endl; + ::google::protobuf::TextFormat::PrintToString(_server.data(), &data_str); + printf("%s\n", data_str.c_str()); + } + for (const auto& _socket : all_sockets_) { + std::cout << "socket ID" << GetSocketID(_socket) << "_" + << GetSocketName(_socket) << " data:" << std::endl; + ::google::protobuf::TextFormat::PrintToString(_socket.data(), &data_str); + printf("%s\n", data_str.c_str()); + } + } + + // Store a channel in Json + void StoreChannelInJson(const grpc::channelz::v1::Channel& channel) { + TString id = grpc::to_string(GetChannelID(channel)); + TString type = "Channel"; + TString description; + ::google::protobuf::TextFormat::PrintToString(channel.data(), &description); + grpc_core::Json description_json = grpc_core::Json(description); + StoreEntityInJson(id, type, description_json); + } + + // Store a subchannel in Json + void StoreSubchannelInJson(const grpc::channelz::v1::Subchannel& subchannel) { + TString id = grpc::to_string(GetSubchannelID(subchannel)); + TString type = "Subchannel"; + TString description; + ::google::protobuf::TextFormat::PrintToString(subchannel.data(), + &description); + grpc_core::Json description_json = grpc_core::Json(description); + StoreEntityInJson(id, type, description_json); + } + + // Store a server in Json + void StoreServerInJson(const grpc::channelz::v1::Server& server) { + TString id = grpc::to_string(GetServerID(server)); + TString type = "Server"; + TString description; + 
::google::protobuf::TextFormat::PrintToString(server.data(), &description); + grpc_core::Json description_json = grpc_core::Json(description); + StoreEntityInJson(id, type, description_json); + } + + // Store a socket in Json + void StoreSocketInJson(const grpc::channelz::v1::Socket& socket) { + TString id = grpc::to_string(GetSocketID(socket)); + TString type = "Socket"; + TString description; + ::google::protobuf::TextFormat::PrintToString(socket.data(), &description); + grpc_core::Json description_json = grpc_core::Json(description); + StoreEntityInJson(id, type, description_json); + } + + // Store an entity in Json + void StoreEntityInJson(TString& id, TString& type, + const grpc_core::Json& description) { + TString start, finish; + gpr_timespec ago = gpr_time_sub( + now_, + gpr_time_from_seconds(FLAGS_sampling_interval_seconds, GPR_TIMESPAN)); + std::stringstream ss; + const time_t time_now = now_.tv_sec; + ss << std::put_time(std::localtime(&time_now), "%F %T"); + finish = ss.str(); // example: "2019-02-01 12:12:18" + ss.str(""); + const time_t time_ago = ago.tv_sec; + ss << std::put_time(std::localtime(&time_ago), "%F %T"); + start = ss.str(); + grpc_core::Json obj = + grpc_core::Json::Object{{"Task", y_absl::StrFormat("%s_ID%s", type, id)}, + {"Start", start}, + {"Finish", finish}, + {"ID", id}, + {"Type", type}, + {"Description", description}}; + json_.mutable_array()->push_back(obj); + } + + // Dump data in json + TString DumpJson() { return json_.Dump(); } + + // Check if one entity has been recorded + bool CheckID(int64_t id) { + if (id_set_.count(id) == 0) { + id_set_.insert(id); + return true; + } else { + return false; + } + } + + // Record current time + void RecordNow() { now_ = gpr_now(GPR_CLOCK_REALTIME); } + + private: + std::unique_ptr<grpc::channelz::v1::Channelz::Stub> channelz_stub_; + std::vector<grpc::channelz::v1::Channel> top_channels_; + std::vector<grpc::channelz::v1::Server> all_servers_; + std::vector<grpc::channelz::v1::Channel> all_channels_; + std::vector<grpc::channelz::v1::Subchannel> all_subchannels_; + std::vector<grpc::channelz::v1::Socket> all_sockets_; + std::unordered_set<int64_t> id_set_; + grpc_core::Json json_; + int64_t rpc_timeout_seconds_; + gpr_timespec now_; +}; + +int main(int argc, char** argv) { + grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::InitTest(&argc, &argv, true); + std::ofstream output_file(FLAGS_output_json); + for (int i = 0; i < FLAGS_sampling_times; ++i) { + ChannelzSampler channelz_sampler; + channelz_sampler.Setup(FLAGS_custom_credentials_type, FLAGS_server_address); + std::cout << "Wait for sampling interval " + << FLAGS_sampling_interval_seconds << "s..." 
<< std::endl; + const gpr_timespec kDelay = gpr_time_add( + gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_seconds(FLAGS_sampling_interval_seconds, GPR_TIMESPAN)); + gpr_sleep_until(kDelay); + std::cout << "##### " << i << "th sampling #####" << std::endl; + channelz_sampler.RecordNow(); + channelz_sampler.GetServersRPC(); + channelz_sampler.GetSocketsOfServers(); + channelz_sampler.GetTopChannelsRPC(); + channelz_sampler.TraverseTopChannels(); + channelz_sampler.DumpStdout(); + if (!FLAGS_output_json.empty()) { + output_file << channelz_sampler.DumpJson() << "\n" << std::flush; + } + } + output_file.close(); + return 0; +} diff --git a/contrib/libs/grpc/test/cpp/util/channelz_sampler_test.cc b/contrib/libs/grpc/test/cpp/util/channelz_sampler_test.cc index 02e44f7931..d81dbb0d05 100644 --- a/contrib/libs/grpc/test/cpp/util/channelz_sampler_test.cc +++ b/contrib/libs/grpc/test/cpp/util/channelz_sampler_test.cc @@ -1,176 +1,176 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -#include <stdlib.h> -#include <unistd.h> - -#include <cstdlib> -#include <iostream> -#include <memory> -#include <util/generic/string.h> -#include <thread> - -#include "grpc/grpc.h" -#include "grpc/support/alloc.h" -#include "grpc/support/port_platform.h" -#include "grpcpp/channel.h" -#include "grpcpp/client_context.h" -#include "grpcpp/create_channel.h" -#include "grpcpp/ext/channelz_service_plugin.h" -#include "grpcpp/grpcpp.h" -#include "grpcpp/security/credentials.h" -#include "grpcpp/security/server_credentials.h" -#include "grpcpp/server.h" -#include "grpcpp/server_builder.h" -#include "grpcpp/server_context.h" -#include "gtest/gtest.h" -#include "src/core/lib/gpr/env.h" -#include "src/cpp/server/channelz/channelz_service.h" -#include "src/proto/grpc/testing/test.grpc.pb.h" -#include "test/core/util/test_config.h" -#include "test/cpp/util/subprocess.h" -#include "test/cpp/util/test_credentials_provider.h" - -static TString g_root; - -namespace { -using grpc::ClientContext; -using grpc::Server; -using grpc::ServerBuilder; -using grpc::ServerContext; -using grpc::Status; -} // namespace - -// Test variables -TString server_address("0.0.0.0:10000"); -TString custom_credentials_type("INSECURE_CREDENTIALS"); -TString sampling_times = "2"; -TString sampling_interval_seconds = "3"; -TString output_json("output.json"); - -// Creata an echo server -class EchoServerImpl final : public grpc::testing::TestService::Service { - Status EmptyCall(::grpc::ServerContext* context, - const grpc::testing::Empty* request, - grpc::testing::Empty* response) { - return Status::OK; - } -}; - -// Run client in a thread -void RunClient(const TString& client_id, gpr_event* done_ev) { - grpc::ChannelArguments channel_args; - std::shared_ptr<grpc::ChannelCredentials> channel_creds = - grpc::testing::GetCredentialsProvider()->GetChannelCredentials( - custom_credentials_type, &channel_args); - std::unique_ptr<grpc::testing::TestService::Stub> stub = - grpc::testing::TestService::NewStub( 
- grpc::CreateChannel(server_address, channel_creds)); - gpr_log(GPR_INFO, "Client %s is echoing!", client_id.c_str()); - while (true) { - if (gpr_event_wait(done_ev, grpc_timeout_seconds_to_deadline(1)) != - nullptr) { - return; - } - grpc::testing::Empty request; - grpc::testing::Empty response; - ClientContext context; - Status status = stub->EmptyCall(&context, request, &response); - if (!status.ok()) { - gpr_log(GPR_ERROR, "Client echo failed."); - GPR_ASSERT(0); - } - } -} - -// Create the channelz to test the connection to the server -bool WaitForConnection(int wait_server_seconds) { - grpc::ChannelArguments channel_args; - std::shared_ptr<grpc::ChannelCredentials> channel_creds = - grpc::testing::GetCredentialsProvider()->GetChannelCredentials( - custom_credentials_type, &channel_args); - auto channel = grpc::CreateChannel(server_address, channel_creds); - return channel->WaitForConnected( - grpc_timeout_seconds_to_deadline(wait_server_seconds)); -} - -// Test the channelz sampler -TEST(ChannelzSamplerTest, SimpleTest) { - // start server - ::grpc::channelz::experimental::InitChannelzService(); - EchoServerImpl service; - grpc::ServerBuilder builder; - auto server_creds = - grpc::testing::GetCredentialsProvider()->GetServerCredentials( - custom_credentials_type); - builder.AddListeningPort(server_address, server_creds); - builder.RegisterService(&service); - std::unique_ptr<Server> server(builder.BuildAndStart()); - gpr_log(GPR_INFO, "Server listening on %s", server_address.c_str()); - const int kWaitForServerSeconds = 10; - ASSERT_TRUE(WaitForConnection(kWaitForServerSeconds)); - // client threads - gpr_event done_ev1, done_ev2; - gpr_event_init(&done_ev1); - gpr_event_init(&done_ev2); - std::thread client_thread_1(RunClient, "1", &done_ev1); - std::thread client_thread_2(RunClient, "2", &done_ev2); - // Run the channelz sampler - grpc::SubProcess* test_driver = new grpc::SubProcess( - {g_root + "/channelz_sampler", "--server_address=" + server_address, - "--custom_credentials_type=" + custom_credentials_type, - "--sampling_times=" + sampling_times, - "--sampling_interval_seconds=" + sampling_interval_seconds, - "--output_json=" + output_json}); - int status = test_driver->Join(); - if (WIFEXITED(status)) { - if (WEXITSTATUS(status)) { - gpr_log(GPR_ERROR, - "Channelz sampler test test-runner exited with code %d", - WEXITSTATUS(status)); - GPR_ASSERT(0); // log the line number of the assertion failure - } - } else if (WIFSIGNALED(status)) { - gpr_log(GPR_ERROR, "Channelz sampler test test-runner ended from signal %d", - WTERMSIG(status)); - GPR_ASSERT(0); - } else { - gpr_log(GPR_ERROR, - "Channelz sampler test test-runner ended with unknown status %d", - status); - GPR_ASSERT(0); - } - delete test_driver; - gpr_event_set(&done_ev1, (void*)1); - gpr_event_set(&done_ev2, (void*)1); - client_thread_1.join(); - client_thread_2.join(); -} - -int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); - ::testing::InitGoogleTest(&argc, argv); - TString me = argv[0]; - auto lslash = me.rfind('/'); - if (lslash != TString::npos) { - g_root = me.substr(0, lslash); - } else { - g_root = "."; - } - int ret = RUN_ALL_TESTS(); - return ret; -} +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include <stdlib.h> +#include <unistd.h> + +#include <cstdlib> +#include <iostream> +#include <memory> +#include <util/generic/string.h> +#include <thread> + +#include "grpc/grpc.h" +#include "grpc/support/alloc.h" +#include "grpc/support/port_platform.h" +#include "grpcpp/channel.h" +#include "grpcpp/client_context.h" +#include "grpcpp/create_channel.h" +#include "grpcpp/ext/channelz_service_plugin.h" +#include "grpcpp/grpcpp.h" +#include "grpcpp/security/credentials.h" +#include "grpcpp/security/server_credentials.h" +#include "grpcpp/server.h" +#include "grpcpp/server_builder.h" +#include "grpcpp/server_context.h" +#include "gtest/gtest.h" +#include "src/core/lib/gpr/env.h" +#include "src/cpp/server/channelz/channelz_service.h" +#include "src/proto/grpc/testing/test.grpc.pb.h" +#include "test/core/util/test_config.h" +#include "test/cpp/util/subprocess.h" +#include "test/cpp/util/test_credentials_provider.h" + +static TString g_root; + +namespace { +using grpc::ClientContext; +using grpc::Server; +using grpc::ServerBuilder; +using grpc::ServerContext; +using grpc::Status; +} // namespace + +// Test variables +TString server_address("0.0.0.0:10000"); +TString custom_credentials_type("INSECURE_CREDENTIALS"); +TString sampling_times = "2"; +TString sampling_interval_seconds = "3"; +TString output_json("output.json"); + +// Creata an echo server +class EchoServerImpl final : public grpc::testing::TestService::Service { + Status EmptyCall(::grpc::ServerContext* context, + const grpc::testing::Empty* request, + grpc::testing::Empty* response) { + return Status::OK; + } +}; + +// Run client in a thread +void RunClient(const TString& client_id, gpr_event* done_ev) { + grpc::ChannelArguments channel_args; + std::shared_ptr<grpc::ChannelCredentials> channel_creds = + grpc::testing::GetCredentialsProvider()->GetChannelCredentials( + custom_credentials_type, &channel_args); + std::unique_ptr<grpc::testing::TestService::Stub> stub = + grpc::testing::TestService::NewStub( + grpc::CreateChannel(server_address, channel_creds)); + gpr_log(GPR_INFO, "Client %s is echoing!", client_id.c_str()); + while (true) { + if (gpr_event_wait(done_ev, grpc_timeout_seconds_to_deadline(1)) != + nullptr) { + return; + } + grpc::testing::Empty request; + grpc::testing::Empty response; + ClientContext context; + Status status = stub->EmptyCall(&context, request, &response); + if (!status.ok()) { + gpr_log(GPR_ERROR, "Client echo failed."); + GPR_ASSERT(0); + } + } +} + +// Create the channelz to test the connection to the server +bool WaitForConnection(int wait_server_seconds) { + grpc::ChannelArguments channel_args; + std::shared_ptr<grpc::ChannelCredentials> channel_creds = + grpc::testing::GetCredentialsProvider()->GetChannelCredentials( + custom_credentials_type, &channel_args); + auto channel = grpc::CreateChannel(server_address, channel_creds); + return channel->WaitForConnected( + grpc_timeout_seconds_to_deadline(wait_server_seconds)); +} + +// Test the channelz sampler +TEST(ChannelzSamplerTest, SimpleTest) { + // start server + ::grpc::channelz::experimental::InitChannelzService(); + 
EchoServerImpl service; + grpc::ServerBuilder builder; + auto server_creds = + grpc::testing::GetCredentialsProvider()->GetServerCredentials( + custom_credentials_type); + builder.AddListeningPort(server_address, server_creds); + builder.RegisterService(&service); + std::unique_ptr<Server> server(builder.BuildAndStart()); + gpr_log(GPR_INFO, "Server listening on %s", server_address.c_str()); + const int kWaitForServerSeconds = 10; + ASSERT_TRUE(WaitForConnection(kWaitForServerSeconds)); + // client threads + gpr_event done_ev1, done_ev2; + gpr_event_init(&done_ev1); + gpr_event_init(&done_ev2); + std::thread client_thread_1(RunClient, "1", &done_ev1); + std::thread client_thread_2(RunClient, "2", &done_ev2); + // Run the channelz sampler + grpc::SubProcess* test_driver = new grpc::SubProcess( + {g_root + "/channelz_sampler", "--server_address=" + server_address, + "--custom_credentials_type=" + custom_credentials_type, + "--sampling_times=" + sampling_times, + "--sampling_interval_seconds=" + sampling_interval_seconds, + "--output_json=" + output_json}); + int status = test_driver->Join(); + if (WIFEXITED(status)) { + if (WEXITSTATUS(status)) { + gpr_log(GPR_ERROR, + "Channelz sampler test test-runner exited with code %d", + WEXITSTATUS(status)); + GPR_ASSERT(0); // log the line number of the assertion failure + } + } else if (WIFSIGNALED(status)) { + gpr_log(GPR_ERROR, "Channelz sampler test test-runner ended from signal %d", + WTERMSIG(status)); + GPR_ASSERT(0); + } else { + gpr_log(GPR_ERROR, + "Channelz sampler test test-runner ended with unknown status %d", + status); + GPR_ASSERT(0); + } + delete test_driver; + gpr_event_set(&done_ev1, (void*)1); + gpr_event_set(&done_ev2, (void*)1); + client_thread_1.join(); + client_thread_2.join(); +} + +int main(int argc, char** argv) { + grpc::testing::TestEnvironment env(argc, argv); + ::testing::InitGoogleTest(&argc, argv); + TString me = argv[0]; + auto lslash = me.rfind('/'); + if (lslash != TString::npos) { + g_root = me.substr(0, lslash); + } else { + g_root = "."; + } + int ret = RUN_ALL_TESTS(); + return ret; +} diff --git a/contrib/libs/grpc/test/cpp/util/cli_call.cc b/contrib/libs/grpc/test/cpp/util/cli_call.cc index 722ad88970..5b3631667f 100644 --- a/contrib/libs/grpc/test/cpp/util/cli_call.cc +++ b/contrib/libs/grpc/test/cpp/util/cli_call.cc @@ -25,23 +25,23 @@ #include <grpcpp/client_context.h> #include <grpcpp/support/byte_buffer.h> -#include <cmath> -#include <iostream> -#include <utility> - +#include <cmath> +#include <iostream> +#include <utility> + namespace grpc { namespace testing { namespace { void* tag(int i) { return (void*)static_cast<intptr_t>(i); } } // namespace -Status CliCall::Call(const std::shared_ptr<grpc::Channel>& channel, - const TString& method, const TString& request, - TString* response, +Status CliCall::Call(const std::shared_ptr<grpc::Channel>& channel, + const TString& method, const TString& request, + TString* response, const OutgoingMetadataContainer& metadata, IncomingMetadataContainer* server_initial_metadata, IncomingMetadataContainer* server_trailing_metadata) { - CliCall call(channel, method, metadata); + CliCall call(channel, method, metadata); call.Write(request); call.WritesDone(); if (!call.Read(response, server_initial_metadata)) { @@ -51,8 +51,8 @@ Status CliCall::Call(const std::shared_ptr<grpc::Channel>& channel, } CliCall::CliCall(const std::shared_ptr<grpc::Channel>& channel, - const TString& method, - const OutgoingMetadataContainer& metadata, CliArgs args) + const TString& method, + 
const OutgoingMetadataContainer& metadata, CliArgs args) : stub_(new grpc::GenericStub(channel)) { gpr_mu_init(&write_mu_); gpr_cv_init(&write_cv_); @@ -62,22 +62,22 @@ CliCall::CliCall(const std::shared_ptr<grpc::Channel>& channel, ctx_.AddMetadata(iter->first, iter->second); } } - - // Set deadline if timeout > 0 (default value -1 if no timeout specified) - if (args.timeout > 0) { - int64_t timeout_in_ns = ceil(args.timeout * 1e9); - - // Convert timeout (in nanoseconds) to a deadline - auto deadline = - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_nanos(timeout_in_ns, GPR_TIMESPAN)); - ctx_.set_deadline(deadline); - } else if (args.timeout != -1) { - fprintf( - stderr, - "WARNING: Non-positive timeout value, skipping setting deadline.\n"); - } - + + // Set deadline if timeout > 0 (default value -1 if no timeout specified) + if (args.timeout > 0) { + int64_t timeout_in_ns = ceil(args.timeout * 1e9); + + // Convert timeout (in nanoseconds) to a deadline + auto deadline = + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_nanos(timeout_in_ns, GPR_TIMESPAN)); + ctx_.set_deadline(deadline); + } else if (args.timeout != -1) { + fprintf( + stderr, + "WARNING: Non-positive timeout value, skipping setting deadline.\n"); + } + call_ = stub_->PrepareCall(&ctx_, method, &cq_); call_->StartCall(tag(1)); void* got_tag; @@ -91,7 +91,7 @@ CliCall::~CliCall() { gpr_mu_destroy(&write_mu_); } -void CliCall::Write(const TString& request) { +void CliCall::Write(const TString& request) { void* got_tag; bool ok; @@ -103,7 +103,7 @@ void CliCall::Write(const TString& request) { GPR_ASSERT(ok); } -bool CliCall::Read(TString* response, +bool CliCall::Read(TString* response, IncomingMetadataContainer* server_initial_metadata) { void* got_tag; bool ok; @@ -137,7 +137,7 @@ void CliCall::WritesDone() { GPR_ASSERT(ok); } -void CliCall::WriteAndWait(const TString& request) { +void CliCall::WriteAndWait(const TString& request) { grpc::Slice req_slice(request); grpc::ByteBuffer send_buffer(&req_slice, 1); @@ -161,7 +161,7 @@ void CliCall::WritesDoneAndWait() { } bool CliCall::ReadAndMaybeNotifyWrite( - TString* response, IncomingMetadataContainer* server_initial_metadata) { + TString* response, IncomingMetadataContainer* server_initial_metadata) { void* got_tag; bool ok; grpc::ByteBuffer recv_buffer; diff --git a/contrib/libs/grpc/test/cpp/util/cli_call.h b/contrib/libs/grpc/test/cpp/util/cli_call.h index a01279cb74..79d00d99f4 100644 --- a/contrib/libs/grpc/test/cpp/util/cli_call.h +++ b/contrib/libs/grpc/test/cpp/util/cli_call.h @@ -25,16 +25,16 @@ #include <grpcpp/support/status.h> #include <grpcpp/support/string_ref.h> -#include <map> +#include <map> namespace grpc { -class ClientContext; - -struct CliArgs { - double timeout = -1; -}; - +class ClientContext; + +struct CliArgs { + double timeout = -1; +}; + namespace testing { // CliCall handles the sending and receiving of generic messages given the name @@ -42,40 +42,40 @@ namespace testing { // and thread-unsafe methods should not be used together. 
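The cli_call.cc hunk above converts the optional CliArgs timeout (seconds; the default of -1 means no deadline) into an absolute deadline on the ClientContext before the call starts. A minimal standalone sketch of that conversion using the same gpr time helpers; the free function and its name are illustrative, not part of the library:

#include <cmath>
#include <cstdint>

#include <grpc/support/time.h>
#include <grpcpp/client_context.h>

// Apply a fractional timeout in seconds as an absolute deadline. Non-positive
// values are skipped, matching the constructor's "no deadline" behaviour.
void ApplyTimeout(grpc::ClientContext* ctx, double timeout) {
  if (timeout <= 0) return;
  const int64_t timeout_in_ns = static_cast<int64_t>(std::ceil(timeout * 1e9));
  const gpr_timespec deadline =
      gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                   gpr_time_from_nanos(timeout_in_ns, GPR_TIMESPAN));
  ctx->set_deadline(deadline);
}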
class CliCall final { public: - typedef std::multimap<TString, TString> OutgoingMetadataContainer; + typedef std::multimap<TString, TString> OutgoingMetadataContainer; typedef std::multimap<grpc::string_ref, grpc::string_ref> IncomingMetadataContainer; CliCall(const std::shared_ptr<grpc::Channel>& channel, - const TString& method, const OutgoingMetadataContainer& metadata, - CliArgs args); - CliCall(const std::shared_ptr<grpc::Channel>& channel, - const TString& method, const OutgoingMetadataContainer& metadata) - : CliCall(channel, method, metadata, CliArgs{}) {} - + const TString& method, const OutgoingMetadataContainer& metadata, + CliArgs args); + CliCall(const std::shared_ptr<grpc::Channel>& channel, + const TString& method, const OutgoingMetadataContainer& metadata) + : CliCall(channel, method, metadata, CliArgs{}) {} + ~CliCall(); // Perform an unary generic RPC. - static Status Call(const std::shared_ptr<grpc::Channel>& channel, - const TString& method, const TString& request, - TString* response, + static Status Call(const std::shared_ptr<grpc::Channel>& channel, + const TString& method, const TString& request, + TString* response, const OutgoingMetadataContainer& metadata, IncomingMetadataContainer* server_initial_metadata, IncomingMetadataContainer* server_trailing_metadata); // Send a generic request message in a synchronous manner. NOT thread-safe. - void Write(const TString& request); + void Write(const TString& request); // Send a generic request message in a synchronous manner. NOT thread-safe. void WritesDone(); // Receive a generic response message in a synchronous manner.NOT thread-safe. - bool Read(TString* response, + bool Read(TString* response, IncomingMetadataContainer* server_initial_metadata); // Thread-safe write. Must be used with ReadAndMaybeNotifyWrite. Send out a // generic request message and wait for ReadAndMaybeNotifyWrite to finish it. - void WriteAndWait(const TString& request); + void WriteAndWait(const TString& request); // Thread-safe WritesDone. Must be used with ReadAndMaybeNotifyWrite. Send out // WritesDone for gereneric request messages and wait for @@ -85,17 +85,17 @@ class CliCall final { // Thread-safe Read. Blockingly receive a generic response message. Notify // writes if they are finished when this read is waiting for a resposne. bool ReadAndMaybeNotifyWrite( - TString* response, + TString* response, IncomingMetadataContainer* server_initial_metadata); // Finish the RPC. 
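For reference, the thread-unsafe synchronous methods declared in this header are driven in the order Write, WritesDone, Read, Finish for a unary call, which is essentially what the static CliCall::Call() helper in cli_call.cc above does. A condensed sketch; the wrapper function is illustrative and error handling is trimmed:

#include <cstdio>
#include <memory>

#include <util/generic/string.h>

#include "test/cpp/util/cli_call.h"

grpc::Status UnaryGenericCall(const std::shared_ptr<grpc::Channel>& channel,
                              const TString& method, const TString& request,
                              TString* response) {
  grpc::testing::CliCall::IncomingMetadataContainer initial_md, trailing_md;
  grpc::testing::CliCall call(
      channel, method, grpc::testing::CliCall::OutgoingMetadataContainer());
  call.Write(request);   // send the serialized request message
  call.WritesDone();     // half-close: no further requests follow
  if (!call.Read(response, &initial_md)) {
    fprintf(stderr, "Failed to read response.\n");
  }
  return call.Finish(&trailing_md);  // carries the server's final status
}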
Status Finish(IncomingMetadataContainer* server_trailing_metadata); - TString peer() const { return ctx_.peer(); } - + TString peer() const { return ctx_.peer(); } + private: std::unique_ptr<grpc::GenericStub> stub_; - grpc::ClientContext ctx_; + grpc::ClientContext ctx_; std::unique_ptr<grpc::GenericClientAsyncReaderWriter> call_; grpc::CompletionQueue cq_; gpr_mu write_mu_; diff --git a/contrib/libs/grpc/test/cpp/util/cli_call_test.cc b/contrib/libs/grpc/test/cpp/util/cli_call_test.cc index 4bde83facd..4f0544b2e5 100644 --- a/contrib/libs/grpc/test/cpp/util/cli_call_test.cc +++ b/contrib/libs/grpc/test/cpp/util/cli_call_test.cc @@ -100,14 +100,14 @@ TEST_F(CliCallTest, SimpleRpc) { EXPECT_EQ(response.message(), request.message()); EXPECT_TRUE(s.ok()); - const TString kMethod("/grpc.testing.EchoTestService/Echo"); - TString request_bin, response_bin, expected_response_bin; + const TString kMethod("/grpc.testing.EchoTestService/Echo"); + TString request_bin, response_bin, expected_response_bin; EXPECT_TRUE(request.SerializeToString(&request_bin)); EXPECT_TRUE(response.SerializeToString(&expected_response_bin)); - std::multimap<TString, TString> client_metadata; + std::multimap<TString, TString> client_metadata; std::multimap<grpc::string_ref, grpc::string_ref> server_initial_metadata, server_trailing_metadata; - client_metadata.insert(std::pair<TString, TString>("key1", "val1")); + client_metadata.insert(std::pair<TString, TString>("key1", "val1")); Status s2 = CliCall::Call(channel_, kMethod, request_bin, &response_bin, client_metadata, &server_initial_metadata, &server_trailing_metadata); diff --git a/contrib/libs/grpc/test/cpp/util/cli_credentials.cc b/contrib/libs/grpc/test/cpp/util/cli_credentials.cc index c377cb6b80..efd548eb9b 100644 --- a/contrib/libs/grpc/test/cpp/util/cli_credentials.cc +++ b/contrib/libs/grpc/test/cpp/util/cli_credentials.cc @@ -48,13 +48,13 @@ DEFINE_string( "If not empty, load this PEM formatted private key. Requires use of " "--ssl_client_cert"); DEFINE_string( - local_connect_type, "local_tcp", - "The type of local connections for which local channel credentials will " - "be applied. Should be local_tcp or uds."); -DEFINE_string( + local_connect_type, "local_tcp", + "The type of local connections for which local channel credentials will " + "be applied. Should be local_tcp or uds."); +DEFINE_string( channel_creds_type, "", - "The channel creds type: insecure, ssl, gdc (Google Default Credentials), " - "alts, or local."); + "The channel creds type: insecure, ssl, gdc (Google Default Credentials), " + "alts, or local."); DEFINE_string( call_creds, "", "Call credentials to use: none (default), or access_token=<token>. If " @@ -69,21 +69,21 @@ const char ACCESS_TOKEN_PREFIX[] = "access_token="; constexpr int ACCESS_TOKEN_PREFIX_LEN = sizeof(ACCESS_TOKEN_PREFIX) / sizeof(*ACCESS_TOKEN_PREFIX) - 1; -bool IsAccessToken(const TString& auth) { +bool IsAccessToken(const TString& auth) { return auth.length() > ACCESS_TOKEN_PREFIX_LEN && auth.compare(0, ACCESS_TOKEN_PREFIX_LEN, ACCESS_TOKEN_PREFIX) == 0; } -TString AccessToken(const TString& auth) { +TString AccessToken(const TString& auth) { if (!IsAccessToken(auth)) { return ""; } - return TString(auth.c_str(), ACCESS_TOKEN_PREFIX_LEN); + return TString(auth.c_str(), ACCESS_TOKEN_PREFIX_LEN); } } // namespace -TString CliCredentials::GetDefaultChannelCredsType() const { +TString CliCredentials::GetDefaultChannelCredsType() const { // Compatibility logic for --enable_ssl. 
if (FLAGS_enable_ssl) { fprintf(stderr, @@ -101,12 +101,12 @@ TString CliCredentials::GetDefaultChannelCredsType() const { return "insecure"; } -TString CliCredentials::GetDefaultCallCreds() const { +TString CliCredentials::GetDefaultCallCreds() const { if (!FLAGS_access_token.empty()) { fprintf(stderr, "warning: --access_token is deprecated. Use " "--call_creds=access_token=<token>.\n"); - return TString("access_token=") + FLAGS_access_token; + return TString("access_token=") + FLAGS_access_token; } return "none"; } @@ -142,28 +142,28 @@ CliCredentials::GetChannelCredentials() const { } else if (FLAGS_channel_creds_type.compare("alts") == 0) { return grpc::experimental::AltsCredentials( grpc::experimental::AltsCredentialsOptions()); - } else if (FLAGS_channel_creds_type.compare("local") == 0) { - if (FLAGS_local_connect_type.compare("local_tcp") == 0) { - return grpc::experimental::LocalCredentials(LOCAL_TCP); - } else if (FLAGS_local_connect_type.compare("uds") == 0) { - return grpc::experimental::LocalCredentials(UDS); - } else { - fprintf(stderr, - "--local_connect_type=%s invalid; must be local_tcp or uds.\n", - FLAGS_local_connect_type.c_str()); - } + } else if (FLAGS_channel_creds_type.compare("local") == 0) { + if (FLAGS_local_connect_type.compare("local_tcp") == 0) { + return grpc::experimental::LocalCredentials(LOCAL_TCP); + } else if (FLAGS_local_connect_type.compare("uds") == 0) { + return grpc::experimental::LocalCredentials(UDS); + } else { + fprintf(stderr, + "--local_connect_type=%s invalid; must be local_tcp or uds.\n", + FLAGS_local_connect_type.c_str()); + } } fprintf(stderr, - "--channel_creds_type=%s invalid; must be insecure, ssl, gdc, " - "alts, or local.\n", + "--channel_creds_type=%s invalid; must be insecure, ssl, gdc, " + "alts, or local.\n", FLAGS_channel_creds_type.c_str()); return std::shared_ptr<grpc::ChannelCredentials>(); } std::shared_ptr<grpc::CallCredentials> CliCredentials::GetCallCredentials() const { - if (IsAccessToken(FLAGS_call_creds.c_str())) { - return grpc::AccessTokenCredentials(AccessToken(FLAGS_call_creds.c_str())); + if (IsAccessToken(FLAGS_call_creds.c_str())) { + return grpc::AccessTokenCredentials(AccessToken(FLAGS_call_creds.c_str())); } if (FLAGS_call_creds.compare("none") == 0) { // Nothing to do; creds, if any, are baked into the channel. @@ -180,7 +180,7 @@ std::shared_ptr<grpc::ChannelCredentials> CliCredentials::GetCredentials() const { if (FLAGS_call_creds.empty()) { FLAGS_call_creds = GetDefaultCallCreds(); - } else if (!FLAGS_access_token.empty() && !IsAccessToken(FLAGS_call_creds.c_str())) { + } else if (!FLAGS_access_token.empty() && !IsAccessToken(FLAGS_call_creds.c_str())) { fprintf(stderr, "warning: ignoring --access_token because --call_creds " "already set to %s.\n", @@ -200,7 +200,7 @@ std::shared_ptr<grpc::ChannelCredentials> CliCredentials::GetCredentials() FLAGS_channel_creds_type.c_str()); } // Legacy transport upgrade logic for insecure requests. 
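The dispatch above maps --channel_creds_type to a concrete grpc::ChannelCredentials object, and the newly added "local" branch further switches on --local_connect_type. Reduced to just that branch; the helper name is illustrative:

#include <cstdio>
#include <memory>

#include <grpc/grpc_security_constants.h>
#include <grpcpp/security/credentials.h>
#include <util/generic/string.h>

std::shared_ptr<grpc::ChannelCredentials> MakeLocalCreds(
    const TString& local_connect_type) {
  if (local_connect_type == "local_tcp") {
    return grpc::experimental::LocalCredentials(LOCAL_TCP);  // loopback TCP
  }
  if (local_connect_type == "uds") {
    return grpc::experimental::LocalCredentials(UDS);  // Unix domain socket
  }
  fprintf(stderr,
          "--local_connect_type=%s invalid; must be local_tcp or uds.\n",
          local_connect_type.c_str());
  return nullptr;
}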
- if (IsAccessToken(FLAGS_call_creds.c_str()) && + if (IsAccessToken(FLAGS_call_creds.c_str()) && FLAGS_channel_creds_type.compare("insecure") == 0) { fprintf(stderr, "warning: --channel_creds_type=insecure upgraded to ssl because " @@ -216,9 +216,9 @@ std::shared_ptr<grpc::ChannelCredentials> CliCredentials::GetCredentials() : grpc::CompositeChannelCredentials(channel_creds, call_creds); } -const TString CliCredentials::GetCredentialUsage() const { - return " --enable_ssl ; Set whether to use ssl " - "(deprecated)\n" +const TString CliCredentials::GetCredentialUsage() const { + return " --enable_ssl ; Set whether to use ssl " + "(deprecated)\n" " --use_auth ; Set whether to create default google" " credentials\n" " ; (deprecated)\n" @@ -228,14 +228,14 @@ const TString CliCredentials::GetCredentialUsage() const { " --ssl_target ; Set server host for ssl validation\n" " --ssl_client_cert ; Client cert for ssl\n" " --ssl_client_key ; Client private key for ssl\n" - " --local_connect_type ; Set to local_tcp or uds\n" - " --channel_creds_type ; Set to insecure, ssl, gdc, alts, or " - "local\n" + " --local_connect_type ; Set to local_tcp or uds\n" + " --channel_creds_type ; Set to insecure, ssl, gdc, alts, or " + "local\n" " --call_creds ; Set to none, or" " access_token=<token>\n"; } -const TString CliCredentials::GetSslTargetNameOverride() const { +const TString CliCredentials::GetSslTargetNameOverride() const { bool use_ssl = FLAGS_channel_creds_type.compare("ssl") == 0 || FLAGS_channel_creds_type.compare("gdc") == 0; return use_ssl ? FLAGS_ssl_target : ""; diff --git a/contrib/libs/grpc/test/cpp/util/cli_credentials.h b/contrib/libs/grpc/test/cpp/util/cli_credentials.h index 43f098f245..3e695692fa 100644 --- a/contrib/libs/grpc/test/cpp/util/cli_credentials.h +++ b/contrib/libs/grpc/test/cpp/util/cli_credentials.h @@ -29,16 +29,16 @@ class CliCredentials { public: virtual ~CliCredentials() {} std::shared_ptr<grpc::ChannelCredentials> GetCredentials() const; - virtual const TString GetCredentialUsage() const; - virtual const TString GetSslTargetNameOverride() const; + virtual const TString GetCredentialUsage() const; + virtual const TString GetSslTargetNameOverride() const; protected: // Returns the appropriate channel_creds_type value for the set of legacy // flag arguments. - virtual TString GetDefaultChannelCredsType() const; + virtual TString GetDefaultChannelCredsType() const; // Returns the appropriate call_creds value for the set of legacy flag // arguments. - virtual TString GetDefaultCallCreds() const; + virtual TString GetDefaultCallCreds() const; // Returns the base transport channel credentials. Child classes can override // to support additional channel_creds_types unknown to this base class. 
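GetCredentials() above ends by pairing the transport credentials with any per-call credentials through grpc::CompositeChannelCredentials. A minimal sketch of that composition for the ssl plus access-token case; the helper name and taking the token as a parameter are illustrative:

#include <memory>

#include <grpcpp/security/credentials.h>
#include <util/generic/string.h>

std::shared_ptr<grpc::ChannelCredentials> SslWithAccessToken(
    const TString& access_token) {
  std::shared_ptr<grpc::ChannelCredentials> channel_creds =
      grpc::SslCredentials(grpc::SslCredentialsOptions());
  std::shared_ptr<grpc::CallCredentials> call_creds =
      grpc::AccessTokenCredentials(access_token);
  // With no call credentials the channel credentials are used as-is,
  // mirroring the final return statement of GetCredentials().
  return call_creds == nullptr
             ? channel_creds
             : grpc::CompositeChannelCredentials(channel_creds, call_creds);
}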
virtual std::shared_ptr<grpc::ChannelCredentials> GetChannelCredentials() diff --git a/contrib/libs/grpc/test/cpp/util/create_test_channel.cc b/contrib/libs/grpc/test/cpp/util/create_test_channel.cc index faeb6d3171..86d8e22af1 100644 --- a/contrib/libs/grpc/test/cpp/util/create_test_channel.cc +++ b/contrib/libs/grpc/test/cpp/util/create_test_channel.cc @@ -18,21 +18,21 @@ #include "test/cpp/util/create_test_channel.h" -#include <gflags/gflags.h> - +#include <gflags/gflags.h> + #include <grpc/support/log.h> #include <grpcpp/create_channel.h> #include <grpcpp/security/credentials.h> #include "test/cpp/util/test_credentials_provider.h" -DEFINE_string( - grpc_test_use_grpclb_with_child_policy, "", - "If non-empty, set a static service config on channels created by " - "grpc::CreateTestChannel, that configures the grpclb LB policy " - "with a child policy being the value of this flag (e.g. round_robin " - "or pick_first)."); - +DEFINE_string( + grpc_test_use_grpclb_with_child_policy, "", + "If non-empty, set a static service config on channels created by " + "grpc::CreateTestChannel, that configures the grpclb LB policy " + "with a child policy being the value of this flag (e.g. round_robin " + "or pick_first)."); + namespace grpc { namespace { @@ -58,16 +58,16 @@ void AddProdSslType() { new SslCredentialProvider)); } -void MaybeSetCustomChannelArgs(grpc::ChannelArguments* args) { - if (FLAGS_grpc_test_use_grpclb_with_child_policy.size() > 0) { - args->SetString("grpc.service_config", - "{\"loadBalancingConfig\":[{\"grpclb\":{\"childPolicy\":[{" - "\"" + - FLAGS_grpc_test_use_grpclb_with_child_policy + - "\":{}}]}}]}"); - } -} - +void MaybeSetCustomChannelArgs(grpc::ChannelArguments* args) { + if (FLAGS_grpc_test_use_grpclb_with_child_policy.size() > 0) { + args->SetString("grpc.service_config", + "{\"loadBalancingConfig\":[{\"grpclb\":{\"childPolicy\":[{" + "\"" + + FLAGS_grpc_test_use_grpclb_with_child_policy + + "\":{}}]}}]}"); + } +} + } // namespace // When cred_type is 'ssl', if server is empty, override_hostname is used to @@ -86,8 +86,8 @@ void MaybeSetCustomChannelArgs(grpc::ChannelArguments* args) { // same as above // CreateTestChannel("", "ssl", "test.google.com:443", true, creds); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& cred_type, - const TString& override_hostname, bool use_prod_roots, + const TString& server, const TString& cred_type, + const TString& override_hostname, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args) { return CreateTestChannel(server, cred_type, override_hostname, use_prod_roots, @@ -96,7 +96,7 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args) { @@ -106,7 +106,7 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds) { return CreateTestChannel(server, override_hostname, security_type, @@ -114,7 +114,7 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& 
server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots) { return CreateTestChannel(server, override_hostname, security_type, use_prod_roots, std::shared_ptr<CallCredentials>()); @@ -122,15 +122,15 @@ std::shared_ptr<Channel> CreateTestChannel( // Shortcut for end2end and interop tests. std::shared_ptr<Channel> CreateTestChannel( - const TString& server, testing::transport_security security_type) { + const TString& server, testing::transport_security security_type) { return CreateTestChannel(server, "foo.test.google.fr", security_type, false); } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& credential_type, + const TString& server, const TString& credential_type, const std::shared_ptr<CallCredentials>& creds) { ChannelArguments channel_args; - MaybeSetCustomChannelArgs(&channel_args); + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr<ChannelCredentials> channel_creds = testing::GetCredentialsProvider()->GetChannelCredentials(credential_type, &channel_args); @@ -142,22 +142,22 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& cred_type, - const TString& override_hostname, bool use_prod_roots, + const TString& server, const TString& cred_type, + const TString& override_hostname, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args, std::vector< std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> interceptor_creators) { ChannelArguments channel_args(args); - MaybeSetCustomChannelArgs(&channel_args); + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr<ChannelCredentials> channel_creds; if (cred_type.empty()) { if (interceptor_creators.empty()) { return ::grpc::CreateCustomChannel(server, InsecureChannelCredentials(), - channel_args); + channel_args); } else { return experimental::CreateCustomChannelWithInterceptors( - server, InsecureChannelCredentials(), channel_args, + server, InsecureChannelCredentials(), channel_args, std::move(interceptor_creators)); } } else if (cred_type == testing::kTlsCredentialsType) { // cred_type == "ssl" @@ -175,7 +175,7 @@ std::shared_ptr<Channel> CreateTestChannel( } GPR_ASSERT(channel_creds != nullptr); - const TString& connect_to = server.empty() ? override_hostname : server; + const TString& connect_to = server.empty() ? 
override_hostname : server; if (creds.get()) { channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds); } @@ -193,22 +193,22 @@ std::shared_ptr<Channel> CreateTestChannel( GPR_ASSERT(channel_creds != nullptr); if (interceptor_creators.empty()) { - return ::grpc::CreateCustomChannel(server, channel_creds, channel_args); + return ::grpc::CreateCustomChannel(server, channel_creds, channel_args); } else { return experimental::CreateCustomChannelWithInterceptors( - server, channel_creds, channel_args, std::move(interceptor_creators)); + server, channel_creds, channel_args, std::move(interceptor_creators)); } } } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args, std::vector< std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> interceptor_creators) { - TString credential_type = + TString credential_type = security_type == testing::ALTS ? testing::kAltsCredentialsType : (security_type == testing::TLS ? testing::kTlsCredentialsType @@ -219,7 +219,7 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, std::vector< @@ -231,13 +231,13 @@ std::shared_ptr<Channel> CreateTestChannel( } std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& credential_type, + const TString& server, const TString& credential_type, const std::shared_ptr<CallCredentials>& creds, std::vector< std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> interceptor_creators) { ChannelArguments channel_args; - MaybeSetCustomChannelArgs(&channel_args); + MaybeSetCustomChannelArgs(&channel_args); std::shared_ptr<ChannelCredentials> channel_creds = testing::GetCredentialsProvider()->GetChannelCredentials(credential_type, &channel_args); diff --git a/contrib/libs/grpc/test/cpp/util/create_test_channel.h b/contrib/libs/grpc/test/cpp/util/create_test_channel.h index a4ea9d999d..ed4ce6c11b 100644 --- a/contrib/libs/grpc/test/cpp/util/create_test_channel.h +++ b/contrib/libs/grpc/test/cpp/util/create_test_channel.h @@ -26,7 +26,7 @@ #include <grpcpp/security/credentials.h> #include <grpcpp/support/channel_arguments.h> -namespace grpc { +namespace grpc { class Channel; namespace testing { @@ -36,35 +36,35 @@ typedef enum { INSECURE = 0, TLS, ALTS } transport_security; } // namespace testing std::shared_ptr<Channel> CreateTestChannel( - const TString& server, testing::transport_security security_type); + const TString& server, testing::transport_security security_type); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, 
+ const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& cred_type, - const TString& override_hostname, bool use_prod_roots, + const TString& server, const TString& cred_type, + const TString& override_hostname, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& credential_type, + const TString& server, const TString& credential_type, const std::shared_ptr<CallCredentials>& creds); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, std::vector< @@ -72,7 +72,7 @@ std::shared_ptr<Channel> CreateTestChannel( interceptor_creators); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& override_hostname, + const TString& server, const TString& override_hostname, testing::transport_security security_type, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args, std::vector< @@ -80,15 +80,15 @@ std::shared_ptr<Channel> CreateTestChannel( interceptor_creators); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& cred_type, - const TString& override_hostname, bool use_prod_roots, + const TString& server, const TString& cred_type, + const TString& override_hostname, bool use_prod_roots, const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args, std::vector< std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> interceptor_creators); std::shared_ptr<Channel> CreateTestChannel( - const TString& server, const TString& credential_type, + const TString& server, const TString& credential_type, const std::shared_ptr<CallCredentials>& creds, std::vector< std::unique_ptr<experimental::ClientInterceptorFactoryInterface>> diff --git a/contrib/libs/grpc/test/cpp/util/error_details_test.cc b/contrib/libs/grpc/test/cpp/util/error_details_test.cc index 2482a6bf2e..630ab1d98f 100644 --- a/contrib/libs/grpc/test/cpp/util/error_details_test.cc +++ b/contrib/libs/grpc/test/cpp/util/error_details_test.cc @@ -21,7 +21,7 @@ #include "src/proto/grpc/status/status.pb.h" #include "src/proto/grpc/testing/echo_messages.pb.h" -#include "test/core/util/test_config.h" +#include "test/core/util/test_config.h" namespace grpc { namespace { @@ -31,11 +31,11 @@ TEST(ExtractTest, Success) { expected.set_code(13); // INTERNAL expected.set_message("I am an error message"); testing::EchoRequest expected_details; - expected_details.set_message(TString(100, '\0')); + expected_details.set_message(TString(100, '\0')); expected.add_details()->PackFrom(expected_details); google::rpc::Status to; - TString error_details = expected.SerializeAsString(); + TString error_details = expected.SerializeAsString(); Status from(static_cast<StatusCode>(expected.code()), expected.message(), error_details); EXPECT_TRUE(ExtractErrorDetails(from, &to).ok()); @@ -53,7 +53,7 @@ TEST(ExtractTest, NullInput) { } TEST(ExtractTest, Unparsable) { - TString error_details("I am not a status object"); + TString error_details("I am not a status object"); Status 
from(StatusCode::INTERNAL, "", error_details); google::rpc::Status to; EXPECT_EQ(StatusCode::INVALID_ARGUMENT, @@ -65,7 +65,7 @@ TEST(SetTest, Success) { expected.set_code(13); // INTERNAL expected.set_message("I am an error message"); testing::EchoRequest expected_details; - expected_details.set_message(TString(100, '\0')); + expected_details.set_message(TString(100, '\0')); expected.add_details()->PackFrom(expected_details); Status to; @@ -86,7 +86,7 @@ TEST(SetTest, OutOfScopeErrorCode) { expected.set_code(17); // Out of scope (UNAUTHENTICATED is 16). expected.set_message("I am an error message"); testing::EchoRequest expected_details; - expected_details.set_message(TString(100, '\0')); + expected_details.set_message(TString(100, '\0')); expected.add_details()->PackFrom(expected_details); Status to; @@ -103,7 +103,7 @@ TEST(SetTest, ValidScopeErrorCode) { expected.set_code(c); expected.set_message("I am an error message"); testing::EchoRequest expected_details; - expected_details.set_message(TString(100, '\0')); + expected_details.set_message(TString(100, '\0')); expected.add_details()->PackFrom(expected_details); Status to; @@ -119,7 +119,7 @@ TEST(SetTest, ValidScopeErrorCode) { } // namespace grpc int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/contrib/libs/grpc/test/cpp/util/grpc_cli.cc b/contrib/libs/grpc/test/cpp/util/grpc_cli.cc index 45e509658e..45c6b94f84 100644 --- a/contrib/libs/grpc/test/cpp/util/grpc_cli.cc +++ b/contrib/libs/grpc/test/cpp/util/grpc_cli.cc @@ -51,11 +51,11 @@ --decode=grpc.testing.SimpleResponse \ src/proto/grpc/testing/messages.proto \ < output.bin > output.txt - 10. --default_service_config, optional default service config to use - on the channel. Note that this may be ignored if the name resolver - returns a service config. - 11. --display_peer_address, on CallMethod commands, log the peer socket - address of the connection that each RPC is made on to stderr. + 10. --default_service_config, optional default service config to use + on the channel. Note that this may be ignored if the name resolver + returns a service config. + 11. --display_peer_address, on CallMethod commands, log the peer socket + address of the connection that each RPC is made on to stderr. 
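Item 10 above boils down to a single channel argument: the tool installs the flag value as GRPC_ARG_SERVICE_CONFIG when it builds the channel (see CreateCliChannel in grpc_tool.cc further down). A minimal sketch, with an illustrative load-balancing config as the JSON value:

#include <memory>

#include <grpcpp/grpcpp.h>
#include <util/generic/string.h>

std::shared_ptr<grpc::Channel> ChannelWithDefaultServiceConfig(
    const TString& target) {
  grpc::ChannelArguments args;
  // Note: this default is ignored if the name resolver returns its own
  // service config, as the flag description above points out.
  args.SetString(GRPC_ARG_SERVICE_CONFIG,
                 "{\"loadBalancingConfig\":[{\"round_robin\":{}}]}");
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}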
*/ #include <fstream> @@ -70,12 +70,12 @@ DEFINE_string(outfile, "", "Output file (default is stdout)"); -static bool SimplePrint(const TString& outfile, const TString& output) { +static bool SimplePrint(const TString& outfile, const TString& output) { if (outfile.empty()) { - std::cout << output << std::flush; + std::cout << output << std::flush; } else { std::ofstream output_file(outfile, std::ios::app | std::ios::binary); - output_file << output << std::flush; + output_file << output << std::flush; output_file.close(); } return true; @@ -86,5 +86,5 @@ int main(int argc, char** argv) { return grpc::testing::GrpcToolMainLib( argc, (const char**)argv, grpc::testing::CliCredentials(), - std::bind(SimplePrint, TString(FLAGS_outfile.c_str()), std::placeholders::_1)); + std::bind(SimplePrint, TString(FLAGS_outfile.c_str()), std::placeholders::_1)); } diff --git a/contrib/libs/grpc/test/cpp/util/grpc_tool.cc b/contrib/libs/grpc/test/cpp/util/grpc_tool.cc index aaa5ef3f5d..30f3024e25 100644 --- a/contrib/libs/grpc/test/cpp/util/grpc_tool.cc +++ b/contrib/libs/grpc/test/cpp/util/grpc_tool.cc @@ -27,14 +27,14 @@ #include <grpcpp/security/credentials.h> #include <grpcpp/support/string_ref.h> -#include <cstdio> -#include <fstream> -#include <iostream> -#include <memory> -#include <sstream> -#include <util/generic/string.h> -#include <thread> - +#include <cstdio> +#include <fstream> +#include <iostream> +#include <memory> +#include <sstream> +#include <util/generic/string.h> +#include <thread> + #include "test/cpp/util/cli_call.h" #include "test/cpp/util/proto_file_parser.h" #include "test/cpp/util/proto_reflection_descriptor_database.h" @@ -57,15 +57,15 @@ DEFINE_string(proto_path, ".", "Path to look for the proto file."); DEFINE_string(protofiles, "", "Name of the proto file."); DEFINE_bool(binary_input, false, "Input in binary format"); DEFINE_bool(binary_output, false, "Output in binary format"); -DEFINE_string( - default_service_config, "", - "Default service config to use on the channel, if non-empty. Note " - "that this will be ignored if the name resolver returns a service " - "config."); -DEFINE_bool( - display_peer_address, false, - "Log the peer socket address of the connection that each RPC is made " - "on to stderr."); +DEFINE_string( + default_service_config, "", + "Default service config to use on the channel, if non-empty. Note " + "that this will be ignored if the name resolver returns a service " + "config."); +DEFINE_bool( + display_peer_address, false, + "Log the peer socket address of the connection that each RPC is made " + "on to stderr."); DEFINE_bool(json_input, false, "Input in json format"); DEFINE_bool(json_output, false, "Output in json format"); DEFINE_string(infile, "", "Input file (default is stdin)"); @@ -74,9 +74,9 @@ DEFINE_bool(batch, false, "more than a few RPCs. gRPC CLI has very different performance " "characteristics compared with normal RPC calls which make it " "unsuitable for loadtesting or significant production traffic."); -DEFINE_double(timeout, -1, - "Specify timeout in seconds, used to set the deadline for all " - "RPCs. The default value of -1 means no deadline has been set."); +DEFINE_double(timeout, -1, + "Specify timeout in seconds, used to set the deadline for all " + "RPCs. 
The default value of -1 means no deadline has been set."); namespace { @@ -113,10 +113,10 @@ class GrpcTool { } private: - void CommandUsage(const TString& usage) const; + void CommandUsage(const TString& usage) const; bool print_command_usage_; int usage_exit_status_; - const TString cred_usage_; + const TString cred_usage_; }; template <typename T> @@ -135,11 +135,11 @@ size_t ArraySize(T& a) { } void ParseMetadataFlag( - std::multimap<TString, TString>* client_metadata) { + std::multimap<TString, TString>* client_metadata) { if (FLAGS_metadata.empty()) { return; } - std::vector<TString> fields; + std::vector<TString> fields; const char delim = ':'; const char escape = '\\'; size_t cur = -1; @@ -172,17 +172,17 @@ void ParseMetadataFlag( } for (size_t i = 0; i < fields.size(); i += 2) { client_metadata->insert( - std::pair<TString, TString>(fields[i], fields[i + 1])); + std::pair<TString, TString>(fields[i], fields[i + 1])); } } template <typename T> -void PrintMetadata(const T& m, const TString& message) { +void PrintMetadata(const T& m, const TString& message) { if (m.empty()) { return; } fprintf(stderr, "%s\n", message.c_str()); - TString pair; + TString pair; for (typename T::const_iterator iter = m.begin(); iter != m.end(); ++iter) { pair.clear(); pair.append(iter->first.data(), iter->first.size()); @@ -192,10 +192,10 @@ void PrintMetadata(const T& m, const TString& message) { } } -void ReadResponse(CliCall* call, const TString& method_name, +void ReadResponse(CliCall* call, const TString& method_name, GrpcToolOutputCallback callback, ProtoFileParser* parser, gpr_mu* parser_mu, bool print_mode) { - TString serialized_response_proto; + TString serialized_response_proto; std::multimap<grpc::string_ref, grpc::string_ref> server_initial_metadata; for (bool receive_initial_metadata = true; call->ReadAndMaybeNotifyWrite( @@ -224,15 +224,15 @@ void ReadResponse(CliCall* call, const TString& method_name, } std::shared_ptr<grpc::Channel> CreateCliChannel( - const TString& server_address, const CliCredentials& cred) { + const TString& server_address, const CliCredentials& cred) { grpc::ChannelArguments args; if (!cred.GetSslTargetNameOverride().empty()) { args.SetSslTargetNameOverride(cred.GetSslTargetNameOverride()); } - if (!FLAGS_default_service_config.empty()) { - args.SetString(GRPC_ARG_SERVICE_CONFIG, - FLAGS_default_service_config.c_str()); - } + if (!FLAGS_default_service_config.empty()) { + args.SetString(GRPC_ARG_SERVICE_CONFIG, + FLAGS_default_service_config.c_str()); + } return ::grpc::CreateCustomChannel(server_address, cred.GetCredentials(), args); } @@ -258,7 +258,7 @@ const Command ops[] = { {"tojson", BindWith5Args(&GrpcTool::ToJson), 2, 3}, }; -void Usage(const TString& msg) { +void Usage(const TString& msg) { fprintf( stderr, "%s\n" @@ -276,7 +276,7 @@ void Usage(const TString& msg) { exit(1); } -const Command* FindCommand(const TString& name) { +const Command* FindCommand(const TString& name) { for (int i = 0; i < (int)ArraySize(ops); i++) { if (name == ops[i].command) { return &ops[i]; @@ -292,7 +292,7 @@ int GrpcToolMainLib(int argc, const char** argv, const CliCredentials& cred, Usage("No command specified"); } - TString command = argv[1]; + TString command = argv[1]; argc -= 2; argv += 2; @@ -308,14 +308,14 @@ int GrpcToolMainLib(int argc, const char** argv, const CliCredentials& cred, const bool ok = cmd->function(&grpc_tool, argc, argv, cred, callback); return ok ? 
0 : 1; } else { - Usage("Invalid command '" + TString(command.c_str()) + "'"); + Usage("Invalid command '" + TString(command.c_str()) + "'"); } return 1; } GrpcTool::GrpcTool() : print_command_usage_(false), usage_exit_status_(0) {} -void GrpcTool::CommandUsage(const TString& usage) const { +void GrpcTool::CommandUsage(const TString& usage) const { if (print_command_usage_) { fprintf(stderr, "\n%s%s\n", usage.c_str(), (usage.empty() || usage[usage.size() - 1] != '\n') ? "\n" : ""); @@ -334,7 +334,7 @@ bool GrpcTool::Help(int argc, const char** argv, const CliCredentials& cred, } else { const Command* cmd = FindCommand(argv[0]); if (cmd == nullptr) { - Usage("Unknown command '" + TString(argv[0]) + "'"); + Usage("Unknown command '" + TString(argv[0]) + "'"); } SetPrintCommandMode(0); cmd->function(this, -1, nullptr, cred, callback); @@ -355,20 +355,20 @@ bool GrpcTool::ListServices(int argc, const char** argv, " --outfile ; Output filename (defaults to stdout)\n" + cred.GetCredentialUsage()); - TString server_address(argv[0]); + TString server_address(argv[0]); std::shared_ptr<grpc::Channel> channel = CreateCliChannel(server_address, cred); grpc::ProtoReflectionDescriptorDatabase desc_db(channel); grpc::protobuf::DescriptorPool desc_pool(&desc_db); - std::vector<TString> service_list; + std::vector<TString> service_list; if (!desc_db.GetServices(&service_list)) { fprintf(stderr, "Received an error when querying services endpoint.\n"); return false; } // If no service is specified, dump the list of services. - TString output; + TString output; if (argc < 2) { // List all services, if --l is passed, then include full description, // otherwise include a summarized list only. @@ -382,8 +382,8 @@ bool GrpcTool::ListServices(int argc, const char** argv, } } } else { - std::string service_name; - std::string method_name; + std::string service_name; + std::string method_name; std::stringstream ss(argv[1]); // Remove leading slashes. @@ -453,13 +453,13 @@ bool GrpcTool::PrintType(int /*argc*/, const char** argv, " <type> ; Protocol buffer type name\n" + cred.GetCredentialUsage()); - TString server_address(argv[0]); + TString server_address(argv[0]); std::shared_ptr<grpc::Channel> channel = CreateCliChannel(server_address, cred); grpc::ProtoReflectionDescriptorDatabase desc_db(channel); grpc::protobuf::DescriptorPool desc_pool(&desc_db); - TString output; + TString output; const grpc::protobuf::Descriptor* descriptor = desc_pool.FindMessageTypeByName(argv[1]); if (descriptor != nullptr) { @@ -493,21 +493,21 @@ bool GrpcTool::CallMethod(int argc, const char** argv, " --binary_input ; Input in binary format\n" " --binary_output ; Output in binary format\n" " --json_input ; Input in json format\n" - " --json_output ; Output in json format\n" - " --timeout ; Specify timeout (in seconds), used to " - "set the deadline for RPCs. The default value of -1 means no " - "deadline has been set.\n" + + " --json_output ; Output in json format\n" + " --timeout ; Specify timeout (in seconds), used to " + "set the deadline for RPCs. 
The default value of -1 means no " + "deadline has been set.\n" + cred.GetCredentialUsage()); std::stringstream output_ss; - TString request_text; - TString server_address(argv[0]); - TString method_name(argv[1]); - TString formatted_method_name; + TString request_text; + TString server_address(argv[0]); + TString method_name(argv[1]); + TString formatted_method_name; std::unique_ptr<ProtoFileParser> parser; - TString serialized_request_proto; - CliArgs cli_args; - cli_args.timeout = FLAGS_timeout; + TString serialized_request_proto; + CliArgs cli_args; + cli_args.timeout = FLAGS_timeout; bool print_mode = false; std::shared_ptr<grpc::Channel> channel = @@ -516,7 +516,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, if (!FLAGS_binary_input || !FLAGS_binary_output) { parser.reset( new grpc::testing::ProtoFileParser(FLAGS_remotedb ? channel : nullptr, - FLAGS_proto_path.c_str(), FLAGS_protofiles.c_str())); + FLAGS_proto_path.c_str(), FLAGS_protofiles.c_str())); if (parser->HasError()) { fprintf( stderr, @@ -548,15 +548,15 @@ bool GrpcTool::CallMethod(int argc, const char** argv, return false; } - std::multimap<TString, TString> client_metadata; + std::multimap<TString, TString> client_metadata; ParseMetadataFlag(&client_metadata); PrintMetadata(client_metadata, "Sending client initial metadata:"); - CliCall call(channel, formatted_method_name, client_metadata, cli_args); - if (FLAGS_display_peer_address) { - fprintf(stderr, "New call for method_name:%s has peer address:|%s|\n", - formatted_method_name.c_str(), call.peer().c_str()); - } + CliCall call(channel, formatted_method_name, client_metadata, cli_args); + if (FLAGS_display_peer_address) { + fprintf(stderr, "New call for method_name:%s has peer address:|%s|\n", + formatted_method_name.c_str(), call.peer().c_str()); + } if (FLAGS_infile.empty()) { if (isatty(fileno(stdin))) { @@ -575,7 +575,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, parser.get(), &parser_mu, print_mode); std::stringstream request_ss; - std::string line; + std::string line; while (!request_text.empty() || (!input_stream->eof() && getline(*input_stream, line))) { if (!request_text.empty()) { @@ -605,7 +605,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, } else { if (line.length() == 0) { request_text = request_ss.str(); - request_ss.str(TString()); + request_ss.str(TString()); request_ss.clear(); } else { request_ss << line << ' '; @@ -655,14 +655,14 @@ bool GrpcTool::CallMethod(int argc, const char** argv, input_stream = &input_file; } - std::multimap<TString, TString> client_metadata; + std::multimap<TString, TString> client_metadata; ParseMetadataFlag(&client_metadata); if (print_mode) { PrintMetadata(client_metadata, "Sending client initial metadata:"); } std::stringstream request_ss; - std::string line; + std::string line; while (!request_text.empty() || (!input_stream->eof() && getline(*input_stream, line))) { if (!request_text.empty()) { @@ -682,16 +682,16 @@ bool GrpcTool::CallMethod(int argc, const char** argv, } } - TString serialized_response_proto; + TString serialized_response_proto; std::multimap<grpc::string_ref, grpc::string_ref> server_initial_metadata, server_trailing_metadata; - CliCall call(channel, formatted_method_name, client_metadata, - cli_args); - if (FLAGS_display_peer_address) { - fprintf(stderr, - "New call for method_name:%s has peer address:|%s|\n", - formatted_method_name.c_str(), call.peer().c_str()); - } + CliCall call(channel, formatted_method_name, client_metadata, + cli_args); + if 
(FLAGS_display_peer_address) { + fprintf(stderr, + "New call for method_name:%s has peer address:|%s|\n", + formatted_method_name.c_str(), call.peer().c_str()); + } call.Write(serialized_request_proto); call.WritesDone(); if (!call.Read(&serialized_response_proto, @@ -714,7 +714,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, break; } } else { - TString response_text = parser->GetFormattedStringFromMethod( + TString response_text = parser->GetFormattedStringFromMethod( method_name, serialized_response_proto, false /* is_request */, FLAGS_json_output); @@ -736,7 +736,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, } else { if (line.length() == 0) { request_text = request_ss.str(); - request_ss.str(TString()); + request_ss.str(TString()); request_ss.clear(); } else { request_ss << line << ' '; @@ -782,18 +782,18 @@ bool GrpcTool::CallMethod(int argc, const char** argv, } fprintf(stderr, "connecting to %s\n", server_address.c_str()); - TString serialized_response_proto; - std::multimap<TString, TString> client_metadata; + TString serialized_response_proto; + std::multimap<TString, TString> client_metadata; std::multimap<grpc::string_ref, grpc::string_ref> server_initial_metadata, server_trailing_metadata; ParseMetadataFlag(&client_metadata); PrintMetadata(client_metadata, "Sending client initial metadata:"); - CliCall call(channel, formatted_method_name, client_metadata, cli_args); - if (FLAGS_display_peer_address) { - fprintf(stderr, "New call for method_name:%s has peer address:|%s|\n", - formatted_method_name.c_str(), call.peer().c_str()); - } + CliCall call(channel, formatted_method_name, client_metadata, cli_args); + if (FLAGS_display_peer_address) { + fprintf(stderr, "New call for method_name:%s has peer address:|%s|\n", + formatted_method_name.c_str(), call.peer().c_str()); + } call.Write(serialized_request_proto); call.WritesDone(); @@ -858,11 +858,11 @@ bool GrpcTool::ParseMessage(int argc, const char** argv, cred.GetCredentialUsage()); std::stringstream output_ss; - TString message_text; - TString server_address(argv[0]); - TString type_name(argv[1]); + TString message_text; + TString server_address(argv[0]); + TString type_name(argv[1]); std::unique_ptr<grpc::testing::ProtoFileParser> parser; - TString serialized_request_proto; + TString serialized_request_proto; if (argc == 3) { message_text = argv[2]; @@ -889,7 +889,7 @@ bool GrpcTool::ParseMessage(int argc, const char** argv, CreateCliChannel(server_address, cred); parser.reset( new grpc::testing::ProtoFileParser(FLAGS_remotedb ? 
channel : nullptr, - FLAGS_proto_path.c_str(), FLAGS_protofiles.c_str())); + FLAGS_proto_path.c_str(), FLAGS_protofiles.c_str())); if (parser->HasError()) { fprintf( stderr, @@ -912,7 +912,7 @@ bool GrpcTool::ParseMessage(int argc, const char** argv, if (FLAGS_binary_output) { output_ss << serialized_request_proto; } else { - TString output_text; + TString output_text; output_text = parser->GetFormattedStringFromMessageType( type_name, serialized_request_proto, FLAGS_json_output); if (parser->HasError()) { diff --git a/contrib/libs/grpc/test/cpp/util/grpc_tool.h b/contrib/libs/grpc/test/cpp/util/grpc_tool.h index 7ee0bc9dbe..5bb43430d3 100644 --- a/contrib/libs/grpc/test/cpp/util/grpc_tool.h +++ b/contrib/libs/grpc/test/cpp/util/grpc_tool.h @@ -28,7 +28,7 @@ namespace grpc { namespace testing { -typedef std::function<bool(const TString&)> GrpcToolOutputCallback; +typedef std::function<bool(const TString&)> GrpcToolOutputCallback; int GrpcToolMainLib(int argc, const char** argv, const CliCredentials& cred, GrpcToolOutputCallback callback); diff --git a/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc b/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc index 95d78fa778..ff610daadd 100644 --- a/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc +++ b/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc @@ -30,11 +30,11 @@ #include <grpcpp/server_context.h> #include <gtest/gtest.h> -#include <chrono> -#include <sstream> - +#include <chrono> +#include <sstream> + #include "src/core/lib/gpr/env.h" -#include "src/core/lib/iomgr/load_file.h" +#include "src/core/lib/iomgr/load_file.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/echo.pb.h" #include "test/core/util/port.h" @@ -42,10 +42,10 @@ #include "test/cpp/util/cli_credentials.h" #include "test/cpp/util/string_ref_helper.h" -#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem" -#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem" -#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key" - +#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem" +#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem" +#define SERVER_KEY_PATH "src/core/tsi/test_creds/server1.key" + using grpc::testing::EchoRequest; using grpc::testing::EchoResponse; @@ -53,41 +53,41 @@ using grpc::testing::EchoResponse; #define ECHO_TEST_SERVICE_SUMMARY \ "Echo\n" \ - "Echo1\n" \ - "Echo2\n" \ - "CheckDeadlineUpperBound\n" \ - "CheckDeadlineSet\n" \ + "Echo1\n" \ + "Echo2\n" \ + "CheckDeadlineUpperBound\n" \ + "CheckDeadlineSet\n" \ "CheckClientInitialMetadata\n" \ "RequestStream\n" \ "ResponseStream\n" \ "BidiStream\n" \ "Unimplemented\n" -#define ECHO_TEST_SERVICE_DESCRIPTION \ - "filename: src/proto/grpc/testing/echo.proto\n" \ - "package: grpc.testing;\n" \ - "service EchoTestService {\n" \ - " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ - " rpc Echo1(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ - " rpc Echo2(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ - " rpc CheckDeadlineUpperBound(grpc.testing.SimpleRequest) returns " \ - "(grpc.testing.StringValue) {}\n" \ - " rpc CheckDeadlineSet(grpc.testing.SimpleRequest) returns " \ - "(grpc.testing.StringValue) {}\n" \ - " rpc CheckClientInitialMetadata(grpc.testing.SimpleRequest) returns " \ - "(grpc.testing.SimpleResponse) {}\n" \ - " rpc RequestStream(stream grpc.testing.EchoRequest) returns " \ - "(grpc.testing.EchoResponse) {}\n" \ - " rpc 
ResponseStream(grpc.testing.EchoRequest) returns (stream " \ - "grpc.testing.EchoResponse) {}\n" \ - " rpc BidiStream(stream grpc.testing.EchoRequest) returns (stream " \ - "grpc.testing.EchoResponse) {}\n" \ - " rpc Unimplemented(grpc.testing.EchoRequest) returns " \ - "(grpc.testing.EchoResponse) {}\n" \ - "}\n" \ +#define ECHO_TEST_SERVICE_DESCRIPTION \ + "filename: src/proto/grpc/testing/echo.proto\n" \ + "package: grpc.testing;\n" \ + "service EchoTestService {\n" \ + " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ + "{}\n" \ + " rpc Echo1(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ + "{}\n" \ + " rpc Echo2(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ + "{}\n" \ + " rpc CheckDeadlineUpperBound(grpc.testing.SimpleRequest) returns " \ + "(grpc.testing.StringValue) {}\n" \ + " rpc CheckDeadlineSet(grpc.testing.SimpleRequest) returns " \ + "(grpc.testing.StringValue) {}\n" \ + " rpc CheckClientInitialMetadata(grpc.testing.SimpleRequest) returns " \ + "(grpc.testing.SimpleResponse) {}\n" \ + " rpc RequestStream(stream grpc.testing.EchoRequest) returns " \ + "(grpc.testing.EchoResponse) {}\n" \ + " rpc ResponseStream(grpc.testing.EchoRequest) returns (stream " \ + "grpc.testing.EchoResponse) {}\n" \ + " rpc BidiStream(stream grpc.testing.EchoRequest) returns (stream " \ + "grpc.testing.EchoResponse) {}\n" \ + " rpc Unimplemented(grpc.testing.EchoRequest) returns " \ + "(grpc.testing.EchoResponse) {}\n" \ + "}\n" \ "\n" #define ECHO_METHOD_DESCRIPTION \ @@ -125,8 +125,8 @@ DECLARE_bool(batch); DECLARE_string(metadata); DECLARE_string(protofiles); DECLARE_string(proto_path); -DECLARE_string(default_service_config); -DECLARE_double(timeout); +DECLARE_string(default_service_config); +DECLARE_double(timeout); namespace { @@ -140,24 +140,24 @@ class TestCliCredentials final : public grpc::testing::CliCredentials { if (!secure_) { return InsecureChannelCredentials(); } - grpc_slice ca_slice; - GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", - grpc_load_file(CA_CERT_PATH, 1, &ca_slice))); - const char* test_root_cert = - reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice); + grpc_slice ca_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(CA_CERT_PATH, 1, &ca_slice))); + const char* test_root_cert = + reinterpret_cast<const char*> GRPC_SLICE_START_PTR(ca_slice); SslCredentialsOptions ssl_opts = {test_root_cert, "", ""}; - std::shared_ptr<grpc::ChannelCredentials> credential_ptr = - grpc::SslCredentials(grpc::SslCredentialsOptions(ssl_opts)); - grpc_slice_unref(ca_slice); - return credential_ptr; + std::shared_ptr<grpc::ChannelCredentials> credential_ptr = + grpc::SslCredentials(grpc::SslCredentialsOptions(ssl_opts)); + grpc_slice_unref(ca_slice); + return credential_ptr; } - const TString GetCredentialUsage() const override { return ""; } + const TString GetCredentialUsage() const override { return ""; } private: const bool secure_; }; -bool PrintStream(std::stringstream* ss, const TString& output) { +bool PrintStream(std::stringstream* ss, const TString& output) { (*ss) << output; return true; } @@ -185,29 +185,29 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service { return Status::OK; } - Status CheckDeadlineSet(ServerContext* context, const SimpleRequest* request, - StringValue* response) override { - response->set_message(context->deadline() != - std::chrono::system_clock::time_point::max() - ? 
"true" - : "false"); - return Status::OK; - } - - // Check if deadline - current time <= timeout - // If deadline set, timeout + current time should be an upper bound for it - Status CheckDeadlineUpperBound(ServerContext* context, - const SimpleRequest* request, - StringValue* response) override { - auto seconds = std::chrono::duration_cast<std::chrono::seconds>( - context->deadline() - std::chrono::system_clock::now()); - - // Returning string instead of bool to avoid using embedded messages in - // proto3 - response->set_message(seconds.count() <= FLAGS_timeout ? "true" : "false"); - return Status::OK; - } - + Status CheckDeadlineSet(ServerContext* context, const SimpleRequest* request, + StringValue* response) override { + response->set_message(context->deadline() != + std::chrono::system_clock::time_point::max() + ? "true" + : "false"); + return Status::OK; + } + + // Check if deadline - current time <= timeout + // If deadline set, timeout + current time should be an upper bound for it + Status CheckDeadlineUpperBound(ServerContext* context, + const SimpleRequest* request, + StringValue* response) override { + auto seconds = std::chrono::duration_cast<std::chrono::seconds>( + context->deadline() - std::chrono::system_clock::now()); + + // Returning string instead of bool to avoid using embedded messages in + // proto3 + response->set_message(seconds.count() <= FLAGS_timeout ? "true" : "false"); + return Status::OK; + } + Status RequestStream(ServerContext* context, ServerReader<EchoRequest>* reader, EchoResponse* response) override { @@ -243,7 +243,7 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service { EchoResponse response; for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - response.set_message(request->message() + ToString(i)); + response.set_message(request->message() + ToString(i)); writer->Write(response); } @@ -283,24 +283,24 @@ class GrpcToolTest : public ::testing::Test { // SetUpServer cannot be used with EXPECT_EXIT. grpc_pick_unused_port_or_die() // uses atexit() to free chosen ports, and it will spawn a new thread in // resolve_address_posix.c:192 at exit time. 
- const TString SetUpServer(bool secure = false) { + const TString SetUpServer(bool secure = false) { std::ostringstream server_address; int port = grpc_pick_unused_port_or_die(); server_address << "localhost:" << port; // Setup server ServerBuilder builder; std::shared_ptr<grpc::ServerCredentials> creds; - grpc_slice cert_slice, key_slice; - GPR_ASSERT(GRPC_LOG_IF_ERROR( - "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice))); - GPR_ASSERT(GRPC_LOG_IF_ERROR( - "load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice))); - const char* server_cert = - reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice); - const char* server_key = - reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice); - SslServerCredentialsOptions::PemKeyCertPair pkcp = {server_key, - server_cert}; + grpc_slice cert_slice, key_slice; + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(SERVER_CERT_PATH, 1, &cert_slice))); + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "load_file", grpc_load_file(SERVER_KEY_PATH, 1, &key_slice))); + const char* server_cert = + reinterpret_cast<const char*> GRPC_SLICE_START_PTR(cert_slice); + const char* server_key = + reinterpret_cast<const char*> GRPC_SLICE_START_PTR(key_slice); + SslServerCredentialsOptions::PemKeyCertPair pkcp = {server_key, + server_cert}; if (secure) { SslServerCredentialsOptions ssl_opts; ssl_opts.pem_root_certs = ""; @@ -312,8 +312,8 @@ class GrpcToolTest : public ::testing::Test { builder.AddListeningPort(server_address.str(), creds); builder.RegisterService(&service_); server_ = builder.BuildAndStart(); - grpc_slice_unref(cert_slice); - grpc_slice_unref(key_slice); + grpc_slice_unref(cert_slice); + grpc_slice_unref(key_slice); return server_address.str(); } @@ -369,7 +369,7 @@ TEST_F(GrpcToolTest, ListCommand) { // Test input "grpc_cli list localhost:<port>" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "ls", server_address.c_str()}; FLAGS_l = false; @@ -387,7 +387,7 @@ TEST_F(GrpcToolTest, ListOneService) { // Test input "grpc_cli list localhost:<port> grpc.testing.EchoTestService" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "ls", server_address.c_str(), "grpc.testing.EchoTestService"}; // without -l flag @@ -400,7 +400,7 @@ TEST_F(GrpcToolTest, ListOneService) { strcmp(output_stream.str().c_str(), ECHO_TEST_SERVICE_SUMMARY)); // with -l flag - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_l = true; EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), @@ -417,7 +417,7 @@ TEST_F(GrpcToolTest, TypeCommand) { // Test input "grpc_cli type localhost:<port> grpc.testing.EchoRequest" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "type", server_address.c_str(), "grpc.testing.EchoRequest"}; @@ -438,7 +438,7 @@ TEST_F(GrpcToolTest, ListOneMethod) { // Test input "grpc_cli list localhost:<port> grpc.testing.EchoTestService" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "ls", server_address.c_str(), "grpc.testing.EchoTestService.Echo"}; // without -l flag @@ -450,7 +450,7 @@ TEST_F(GrpcToolTest, ListOneMethod) { 
EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), "Echo\n")); // with -l flag - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_l = true; EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), @@ -467,7 +467,7 @@ TEST_F(GrpcToolTest, TypeNotFound) { // Test input "grpc_cli type localhost:<port> grpc.testing.DummyRequest" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "type", server_address.c_str(), "grpc.testing.DummyRequest"}; @@ -481,7 +481,7 @@ TEST_F(GrpcToolTest, CallCommand) { // Test input "grpc_cli call localhost:<port> Echo "message: 'Hello'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "message: 'Hello'"}; @@ -493,7 +493,7 @@ TEST_F(GrpcToolTest, CallCommand) { strstr(output_stream.str().c_str(), "message: \"Hello\"")); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_json_output = true; @@ -516,7 +516,7 @@ TEST_F(GrpcToolTest, CallCommandJsonInput) { // Test input "grpc_cli call localhost:<port> Echo "{ \"message\": \"Hello\"}" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "{ \"message\": \"Hello\"}"}; @@ -529,7 +529,7 @@ TEST_F(GrpcToolTest, CallCommandJsonInput) { strstr(output_stream.str().c_str(), "message: \"Hello\"")); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_json_output = true; @@ -553,7 +553,7 @@ TEST_F(GrpcToolTest, CallCommandBatch) { // Test input "grpc_cli call Echo" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "message: 'Hello0'"}; @@ -574,7 +574,7 @@ TEST_F(GrpcToolTest, CallCommandBatch) { "message: \"Hello0\"\nmessage: " "\"Hello1\"\nmessage: \"Hello2\"\n")); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); ss.clear(); ss.seekg(0); @@ -613,7 +613,7 @@ TEST_F(GrpcToolTest, CallCommandBatchJsonInput) { // Test input "grpc_cli call Echo" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "{\"message\": \"Hello0\"}"}; @@ -636,7 +636,7 @@ TEST_F(GrpcToolTest, CallCommandBatchJsonInput) { "message: \"Hello0\"\nmessage: " "\"Hello1\"\nmessage: \"Hello2\"\n")); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); ss.clear(); ss.seekg(0); @@ -676,7 +676,7 @@ TEST_F(GrpcToolTest, CallCommandBatchWithBadRequest) { // Test input "grpc_cli call Echo" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "message: 'Hello0'"}; @@ -696,7 +696,7 @@ TEST_F(GrpcToolTest, CallCommandBatchWithBadRequest) { "message: \"Hello0\"\nmessage: \"Hello2\"\n")); // with json_output - 
output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); ss.clear(); ss.seekg(0); @@ -731,7 +731,7 @@ TEST_F(GrpcToolTest, CallCommandBatchJsonInputWithBadRequest) { // Test input "grpc_cli call Echo" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "{ \"message\": \"Hello0\"}"}; @@ -754,7 +754,7 @@ TEST_F(GrpcToolTest, CallCommandBatchJsonInputWithBadRequest) { "message: \"Hello0\"\nmessage: \"Hello2\"\n")); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); ss.clear(); ss.seekg(0); @@ -792,7 +792,7 @@ TEST_F(GrpcToolTest, CallCommandRequestStream) { // 'Hello0'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "RequestStream", "message: 'Hello0'"}; @@ -817,7 +817,7 @@ TEST_F(GrpcToolTest, CallCommandRequestStreamJsonInput) { // \"Hello0\"}" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "RequestStream", "{ \"message\": \"Hello0\" }"}; @@ -845,7 +845,7 @@ TEST_F(GrpcToolTest, CallCommandRequestStreamWithBadRequest) { // 'Hello0'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "RequestStream", "message: 'Hello0'"}; @@ -870,7 +870,7 @@ TEST_F(GrpcToolTest, CallCommandRequestStreamWithBadRequestJsonInput) { // 'Hello0'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "RequestStream", "{ \"message\": \"Hello0\" }"}; @@ -893,99 +893,99 @@ TEST_F(GrpcToolTest, CallCommandRequestStreamWithBadRequestJsonInput) { ShutdownServer(); } -TEST_F(GrpcToolTest, CallCommandWithTimeoutDeadlineSet) { - // Test input "grpc_cli call CheckDeadlineSet --timeout=5000.25" - std::stringstream output_stream; - - const TString server_address = SetUpServer(); - const char* argv[] = {"grpc_cli", "call", server_address.c_str(), - "CheckDeadlineSet"}; - - // Set timeout to 5000.25 seconds - FLAGS_timeout = 5000.25; - - EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1))); - - // Expected output: "message: "true"", deadline set - EXPECT_TRUE(nullptr != - strstr(output_stream.str().c_str(), "message: \"true\"")); - ShutdownServer(); -} - -TEST_F(GrpcToolTest, CallCommandWithTimeoutDeadlineUpperBound) { - // Test input "grpc_cli call CheckDeadlineUpperBound --timeout=900" - std::stringstream output_stream; - - const TString server_address = SetUpServer(); - const char* argv[] = {"grpc_cli", "call", server_address.c_str(), - "CheckDeadlineUpperBound"}; - - // Set timeout to 900 seconds - FLAGS_timeout = 900; - - EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1))); - - // Expected output: "message: "true"" - // deadline not greater than timeout + current time - EXPECT_TRUE(nullptr != - strstr(output_stream.str().c_str(), "message: 
\"true\"")); - ShutdownServer(); -} - -TEST_F(GrpcToolTest, CallCommandWithNegativeTimeoutValue) { - // Test input "grpc_cli call CheckDeadlineSet --timeout=-5" - std::stringstream output_stream; - - const TString server_address = SetUpServer(); - const char* argv[] = {"grpc_cli", "call", server_address.c_str(), - "CheckDeadlineSet"}; - - // Set timeout to -5 (deadline not set) - FLAGS_timeout = -5; - - EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1))); - - // Expected output: "message: "false"", deadline not set - EXPECT_TRUE(nullptr != - strstr(output_stream.str().c_str(), "message: \"false\"")); - - ShutdownServer(); -} - -TEST_F(GrpcToolTest, CallCommandWithDefaultTimeoutValue) { - // Test input "grpc_cli call CheckDeadlineSet --timeout=-1" - std::stringstream output_stream; - - const TString server_address = SetUpServer(); - const char* argv[] = {"grpc_cli", "call", server_address.c_str(), - "CheckDeadlineSet"}; - - // Set timeout to -1 (default value, deadline not set) - FLAGS_timeout = -1; - - EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1))); - - // Expected output: "message: "false"", deadline not set - EXPECT_TRUE(nullptr != - strstr(output_stream.str().c_str(), "message: \"false\"")); - - ShutdownServer(); -} - +TEST_F(GrpcToolTest, CallCommandWithTimeoutDeadlineSet) { + // Test input "grpc_cli call CheckDeadlineSet --timeout=5000.25" + std::stringstream output_stream; + + const TString server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), + "CheckDeadlineSet"}; + + // Set timeout to 5000.25 seconds + FLAGS_timeout = 5000.25; + + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: "message: "true"", deadline set + EXPECT_TRUE(nullptr != + strstr(output_stream.str().c_str(), "message: \"true\"")); + ShutdownServer(); +} + +TEST_F(GrpcToolTest, CallCommandWithTimeoutDeadlineUpperBound) { + // Test input "grpc_cli call CheckDeadlineUpperBound --timeout=900" + std::stringstream output_stream; + + const TString server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), + "CheckDeadlineUpperBound"}; + + // Set timeout to 900 seconds + FLAGS_timeout = 900; + + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: "message: "true"" + // deadline not greater than timeout + current time + EXPECT_TRUE(nullptr != + strstr(output_stream.str().c_str(), "message: \"true\"")); + ShutdownServer(); +} + +TEST_F(GrpcToolTest, CallCommandWithNegativeTimeoutValue) { + // Test input "grpc_cli call CheckDeadlineSet --timeout=-5" + std::stringstream output_stream; + + const TString server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), + "CheckDeadlineSet"}; + + // Set timeout to -5 (deadline not set) + FLAGS_timeout = -5; + + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: "message: "false"", deadline not set + EXPECT_TRUE(nullptr != + strstr(output_stream.str().c_str(), "message: \"false\"")); + + ShutdownServer(); +} + 
+TEST_F(GrpcToolTest, CallCommandWithDefaultTimeoutValue) { + // Test input "grpc_cli call CheckDeadlineSet --timeout=-1" + std::stringstream output_stream; + + const TString server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), + "CheckDeadlineSet"}; + + // Set timeout to -1 (default value, deadline not set) + FLAGS_timeout = -1; + + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: "message: "false"", deadline not set + EXPECT_TRUE(nullptr != + strstr(output_stream.str().c_str(), "message: \"false\"")); + + ShutdownServer(); +} + TEST_F(GrpcToolTest, CallCommandResponseStream) { // Test input: grpc_cli call localhost:<port> ResponseStream "message: // 'Hello'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "ResponseStream", "message: 'Hello'"}; @@ -995,14 +995,14 @@ TEST_F(GrpcToolTest, CallCommandResponseStream) { // Expected output: "message: \"Hello{n}\"" for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - TString expected_response_text = - "message: \"Hello" + ToString(i) + "\"\n"; + TString expected_response_text = + "message: \"Hello" + ToString(i) + "\"\n"; EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(), expected_response_text.c_str())); } // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_json_output = true; @@ -1013,8 +1013,8 @@ TEST_F(GrpcToolTest, CallCommandResponseStream) { // Expected output: "{\n \"message\": \"Hello{n}\"\n}\n" for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) { - TString expected_response_text = - "{\n \"message\": \"Hello" + ToString(i) + "\"\n}\n"; + TString expected_response_text = + "{\n \"message\": \"Hello" + ToString(i) + "\"\n}\n"; EXPECT_TRUE(nullptr != strstr(output_stream.str().c_str(), expected_response_text.c_str())); } @@ -1026,7 +1026,7 @@ TEST_F(GrpcToolTest, CallCommandBidiStream) { // Test input: grpc_cli call localhost:<port> BidiStream "message: 'Hello0'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "BidiStream", "message: 'Hello0'"}; @@ -1052,7 +1052,7 @@ TEST_F(GrpcToolTest, CallCommandBidiStreamWithBadRequest) { // Test input: grpc_cli call localhost:<port> BidiStream "message: 'Hello0'" std::stringstream output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "BidiStream", "message: 'Hello0'"}; @@ -1080,7 +1080,7 @@ TEST_F(GrpcToolTest, ParseCommand) { std::stringstream output_stream; std::stringstream binary_output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "parse", server_address.c_str(), "grpc.testing.EchoResponse", ECHO_RESPONSE_MESSAGE_TEXT_FORMAT}; @@ -1095,7 +1095,7 @@ TEST_F(GrpcToolTest, ParseCommand) { ECHO_RESPONSE_MESSAGE_TEXT_FORMAT)); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_json_output = true; @@ -1109,14 +1109,14 @@ TEST_F(GrpcToolTest, ParseCommand) { 
ECHO_RESPONSE_MESSAGE_JSON_FORMAT)); // Parse text message to binary message and then parse it back to text message - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_binary_output = true; EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), std::bind(PrintStream, &output_stream, std::placeholders::_1))); - TString binary_data = output_stream.str(); - output_stream.str(TString()); + TString binary_data = output_stream.str(); + output_stream.str(TString()); output_stream.clear(); argv[4] = binary_data.c_str(); FLAGS_binary_input = true; @@ -1140,7 +1140,7 @@ TEST_F(GrpcToolTest, ParseCommandJsonFormat) { std::stringstream output_stream; std::stringstream binary_output_stream; - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "parse", server_address.c_str(), "grpc.testing.EchoResponse", ECHO_RESPONSE_MESSAGE_JSON_FORMAT}; @@ -1155,7 +1155,7 @@ TEST_F(GrpcToolTest, ParseCommandJsonFormat) { ECHO_RESPONSE_MESSAGE_TEXT_FORMAT)); // with json_output - output_stream.str(TString()); + output_stream.str(TString()); output_stream.clear(); FLAGS_json_output = true; @@ -1205,7 +1205,7 @@ TEST_F(GrpcToolTest, TooManyArguments) { TEST_F(GrpcToolTest, CallCommandWithMetadata) { // Test input "grpc_cli call localhost:<port> Echo "message: 'Hello'" - const TString server_address = SetUpServer(); + const TString server_address = SetUpServer(); const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", "message: 'Hello'"}; @@ -1251,13 +1251,13 @@ TEST_F(GrpcToolTest, CallCommandWithMetadata) { TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { // Test input "grpc_cli call localhost:10000 Echo "message: 'Hello'" - const char* argv[] = {"grpc_cli", "call", "localhost:10000", - "grpc.testing.EchoTestService.Echo", + const char* argv[] = {"grpc_cli", "call", "localhost:10000", + "grpc.testing.EchoTestService.Echo", "message: 'Hello'"}; FLAGS_protofiles = "src/proto/grpc/testing/echo.proto"; char* test_srcdir = gpr_getenv("TEST_SRCDIR"); if (test_srcdir != nullptr) { - FLAGS_proto_path = test_srcdir + TString("/com_github_grpc_grpc"); + FLAGS_proto_path = test_srcdir + TString("/com_github_grpc_grpc"); } { @@ -1289,7 +1289,7 @@ TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { } TEST_F(GrpcToolTest, ListCommand_OverrideSslHostName) { - const TString server_address = SetUpServer(true); + const TString server_address = SetUpServer(true); // Test input "grpc_cli ls localhost:<port> --channel_creds_type=ssl // --ssl_target=z.test.google.fr" @@ -1311,28 +1311,28 @@ TEST_F(GrpcToolTest, ListCommand_OverrideSslHostName) { ShutdownServer(); } -TEST_F(GrpcToolTest, ConfiguringDefaultServiceConfig) { - // Test input "grpc_cli list localhost:<port> - // --default_service_config={\"loadBalancingConfig\":[{\"pick_first\":{}}]}" - std::stringstream output_stream; - const TString server_address = SetUpServer(); - const char* argv[] = {"grpc_cli", "ls", server_address.c_str()}; - // Just check that the tool is still operational when --default_service_config - // is configured. This particular service config is in reality redundant with - // the channel's default configuration. 
- FLAGS_l = false; - FLAGS_default_service_config = - "{\"loadBalancingConfig\":[{\"pick_first\":{}}]}"; - EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1))); - FLAGS_default_service_config = ""; - EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), - "grpc.testing.EchoTestService\n" - "grpc.reflection.v1alpha.ServerReflection\n")); - ShutdownServer(); -} - +TEST_F(GrpcToolTest, ConfiguringDefaultServiceConfig) { + // Test input "grpc_cli list localhost:<port> + // --default_service_config={\"loadBalancingConfig\":[{\"pick_first\":{}}]}" + std::stringstream output_stream; + const TString server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "ls", server_address.c_str()}; + // Just check that the tool is still operational when --default_service_config + // is configured. This particular service config is in reality redundant with + // the channel's default configuration. + FLAGS_l = false; + FLAGS_default_service_config = + "{\"loadBalancingConfig\":[{\"pick_first\":{}}]}"; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + FLAGS_default_service_config = ""; + EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), + "grpc.testing.EchoTestService\n" + "grpc.reflection.v1alpha.ServerReflection\n")); + ShutdownServer(); +} + } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/util/metrics_server.cc b/contrib/libs/grpc/test/cpp/util/metrics_server.cc index aa212fd4f1..0493da053e 100644 --- a/contrib/libs/grpc/test/cpp/util/metrics_server.cc +++ b/contrib/libs/grpc/test/cpp/util/metrics_server.cc @@ -81,7 +81,7 @@ grpc::Status MetricsServiceImpl::GetGauge(ServerContext* /*context*/, } std::shared_ptr<QpsGauge> MetricsServiceImpl::CreateQpsGauge( - const TString& name, bool* already_present) { + const TString& name, bool* already_present) { std::lock_guard<std::mutex> lock(mu_); std::shared_ptr<QpsGauge> qps_gauge(new QpsGauge()); @@ -100,7 +100,7 @@ std::shared_ptr<QpsGauge> MetricsServiceImpl::CreateQpsGauge( std::unique_ptr<grpc::Server> MetricsServiceImpl::StartServer(int port) { gpr_log(GPR_INFO, "Building metrics server.."); - const TString address = "0.0.0.0:" + ToString(port); + const TString address = "0.0.0.0:" + ToString(port); ServerBuilder builder; builder.AddListeningPort(address, grpc::InsecureServerCredentials()); diff --git a/contrib/libs/grpc/test/cpp/util/metrics_server.h b/contrib/libs/grpc/test/cpp/util/metrics_server.h index 98e727597d..10ffa7b4dd 100644 --- a/contrib/libs/grpc/test/cpp/util/metrics_server.h +++ b/contrib/libs/grpc/test/cpp/util/metrics_server.h @@ -82,7 +82,7 @@ class MetricsServiceImpl final : public MetricsService::Service { // is already present in the map. // NOTE: CreateQpsGauge can be called anytime (i.e before or after calling // StartServer). 
- std::shared_ptr<QpsGauge> CreateQpsGauge(const TString& name, + std::shared_ptr<QpsGauge> CreateQpsGauge(const TString& name, bool* already_present); std::unique_ptr<grpc::Server> StartServer(int port); diff --git a/contrib/libs/grpc/test/cpp/util/proto_file_parser.cc b/contrib/libs/grpc/test/cpp/util/proto_file_parser.cc index f4be2477f6..b0912a712c 100644 --- a/contrib/libs/grpc/test/cpp/util/proto_file_parser.cc +++ b/contrib/libs/grpc/test/cpp/util/proto_file_parser.cc @@ -30,9 +30,9 @@ namespace testing { namespace { // Match the user input method string to the full_name from method descriptor. -bool MethodNameMatch(const TString& full_name, const TString& input) { - TString clean_input = input; - std::replace(clean_input.begin(), clean_input.vend(), '/', '.'); +bool MethodNameMatch(const TString& full_name, const TString& input) { + TString clean_input = input; + std::replace(clean_input.begin(), clean_input.vend(), '/', '.'); if (clean_input.size() > full_name.size()) { return false; } @@ -64,27 +64,27 @@ class ErrorPrinter : public protobuf::compiler::MultiFileErrorCollector { }; ProtoFileParser::ProtoFileParser(const std::shared_ptr<grpc::Channel>& channel, - const TString& proto_path, - const TString& protofiles) + const TString& proto_path, + const TString& protofiles) : has_error_(false), dynamic_factory_(new protobuf::DynamicMessageFactory()) { - std::vector<TString> service_list; + std::vector<TString> service_list; if (channel) { reflection_db_.reset(new grpc::ProtoReflectionDescriptorDatabase(channel)); reflection_db_->GetServices(&service_list); } - std::unordered_set<TString> known_services; + std::unordered_set<TString> known_services; if (!protofiles.empty()) { source_tree_.MapPath("", google::protobuf::string(proto_path)); error_printer_.reset(new ErrorPrinter(this)); importer_.reset( new protobuf::compiler::Importer(&source_tree_, error_printer_.get())); - std::string file_name; + std::string file_name; std::stringstream ss(protofiles); while (std::getline(ss, file_name, ',')) { - const auto* file_desc = importer_->Import(google::protobuf::string(file_name.c_str())); + const auto* file_desc = importer_->Import(google::protobuf::string(file_name.c_str())); if (file_desc) { for (int i = 0; i < file_desc->service_count(); i++) { service_desc_list_.push_back(file_desc->service(i)); @@ -127,7 +127,7 @@ ProtoFileParser::ProtoFileParser(const std::shared_ptr<grpc::Channel>& channel, ProtoFileParser::~ProtoFileParser() {} -TString ProtoFileParser::GetFullMethodName(const TString& method) { +TString ProtoFileParser::GetFullMethodName(const TString& method) { has_error_ = false; if (known_methods_.find(method) != known_methods_.end()) { @@ -164,24 +164,24 @@ TString ProtoFileParser::GetFullMethodName(const TString& method) { return method_descriptor->full_name(); } -TString ProtoFileParser::GetFormattedMethodName(const TString& method) { +TString ProtoFileParser::GetFormattedMethodName(const TString& method) { has_error_ = false; - TString formatted_method_name = GetFullMethodName(method); + TString formatted_method_name = GetFullMethodName(method); if (has_error_) { return ""; } size_t last_dot = formatted_method_name.find_last_of('.'); - if (last_dot != TString::npos) { + if (last_dot != TString::npos) { formatted_method_name[last_dot] = '/'; } formatted_method_name.insert(formatted_method_name.begin(), '/'); return formatted_method_name; } -TString ProtoFileParser::GetMessageTypeFromMethod(const TString& method, - bool is_request) { +TString 
ProtoFileParser::GetMessageTypeFromMethod(const TString& method, + bool is_request) { has_error_ = false; - TString full_method_name = GetFullMethodName(method); + TString full_method_name = GetFullMethodName(method); if (has_error_) { return ""; } @@ -196,10 +196,10 @@ TString ProtoFileParser::GetMessageTypeFromMethod(const TString& method, : method_desc->output_type()->full_name(); } -bool ProtoFileParser::IsStreaming(const TString& method, bool is_request) { +bool ProtoFileParser::IsStreaming(const TString& method, bool is_request) { has_error_ = false; - TString full_method_name = GetFullMethodName(method); + TString full_method_name = GetFullMethodName(method); if (has_error_) { return false; } @@ -215,11 +215,11 @@ bool ProtoFileParser::IsStreaming(const TString& method, bool is_request) { : method_desc->server_streaming(); } -TString ProtoFileParser::GetSerializedProtoFromMethod( - const TString& method, const TString& formatted_proto, +TString ProtoFileParser::GetSerializedProtoFromMethod( + const TString& method, const TString& formatted_proto, bool is_request, bool is_json_format) { has_error_ = false; - TString message_type_name = GetMessageTypeFromMethod(method, is_request); + TString message_type_name = GetMessageTypeFromMethod(method, is_request); if (has_error_) { return ""; } @@ -227,11 +227,11 @@ TString ProtoFileParser::GetSerializedProtoFromMethod( is_json_format); } -TString ProtoFileParser::GetFormattedStringFromMethod( - const TString& method, const TString& serialized_proto, +TString ProtoFileParser::GetFormattedStringFromMethod( + const TString& method, const TString& serialized_proto, bool is_request, bool is_json_format) { has_error_ = false; - TString message_type_name = GetMessageTypeFromMethod(method, is_request); + TString message_type_name = GetMessageTypeFromMethod(method, is_request); if (has_error_) { return ""; } @@ -239,8 +239,8 @@ TString ProtoFileParser::GetFormattedStringFromMethod( is_json_format); } -TString ProtoFileParser::GetSerializedProtoFromMessageType( - const TString& message_type_name, const TString& formatted_proto, +TString ProtoFileParser::GetSerializedProtoFromMessageType( + const TString& message_type_name, const TString& formatted_proto, bool is_json_format) { has_error_ = false; google::protobuf::string serialized; @@ -276,8 +276,8 @@ TString ProtoFileParser::GetSerializedProtoFromMessageType( return serialized; } -TString ProtoFileParser::GetFormattedStringFromMessageType( - const TString& message_type_name, const TString& serialized_proto, +TString ProtoFileParser::GetFormattedStringFromMessageType( + const TString& message_type_name, const TString& serialized_proto, bool is_json_format) { has_error_ = false; const protobuf::Descriptor* desc = @@ -312,7 +312,7 @@ TString ProtoFileParser::GetFormattedStringFromMessageType( return formatted_string; } -void ProtoFileParser::LogError(const TString& error_msg) { +void ProtoFileParser::LogError(const TString& error_msg) { if (!error_msg.empty()) { std::cerr << error_msg << std::endl; } diff --git a/contrib/libs/grpc/test/cpp/util/proto_file_parser.h b/contrib/libs/grpc/test/cpp/util/proto_file_parser.h index 51df2c5d76..c0445641c7 100644 --- a/contrib/libs/grpc/test/cpp/util/proto_file_parser.h +++ b/contrib/libs/grpc/test/cpp/util/proto_file_parser.h @@ -37,7 +37,7 @@ class ProtoFileParser { // provided on the given channel. The given protofiles in a source tree rooted // from proto_path will also be searched. 
ProtoFileParser(const std::shared_ptr<grpc::Channel>& channel, - const TString& proto_path, const TString& protofiles); + const TString& proto_path, const TString& protofiles); ~ProtoFileParser(); @@ -46,11 +46,11 @@ class ProtoFileParser { // there is ambiguity. // Full method name is in the form of Service.Method, it's good to be used in // descriptor database queries. - TString GetFullMethodName(const TString& method); + TString GetFullMethodName(const TString& method); // Formatted method name is in the form of /Service/Method, it's good to be // used as the argument of Stub::Call() - TString GetFormattedMethodName(const TString& method); + TString GetFormattedMethodName(const TString& method); /// Converts a text or json string to its binary proto representation for the /// given method's input or return type. @@ -63,18 +63,18 @@ class ProtoFileParser { /// json-formatted proto, otherwise it is treated as a text-formatted /// proto /// \return the serialised binary proto representation of \c formatted_proto - TString GetSerializedProtoFromMethod(const TString& method, - const TString& formatted_proto, - bool is_request, - bool is_json_format); + TString GetSerializedProtoFromMethod(const TString& method, + const TString& formatted_proto, + bool is_request, + bool is_json_format); /// Converts a text or json string to its proto representation for the given /// message type. /// \param formatted_proto the text- or json-formatted proto string /// \return the serialised binary proto representation of \c formatted_proto - TString GetSerializedProtoFromMessageType( - const TString& message_type_name, const TString& formatted_proto, - bool is_json_format); + TString GetSerializedProtoFromMessageType( + const TString& message_type_name, const TString& formatted_proto, + bool is_json_format); /// Converts a binary proto string to its text or json string representation /// for the given method's input or return type. @@ -83,32 +83,32 @@ class ProtoFileParser { /// \param the serialised binary proto representation of type /// \c message_type_name /// \return the text- or json-formatted proto string of \c serialized_proto - TString GetFormattedStringFromMethod(const TString& method, - const TString& serialized_proto, - bool is_request, - bool is_json_format); + TString GetFormattedStringFromMethod(const TString& method, + const TString& serialized_proto, + bool is_request, + bool is_json_format); /// Converts a binary proto string to its text or json string representation /// for the given message type. 
/// \param the serialised binary proto representation of type /// \c message_type_name /// \return the text- or json-formatted proto string of \c serialized_proto - TString GetFormattedStringFromMessageType( - const TString& message_type_name, const TString& serialized_proto, - bool is_json_format); + TString GetFormattedStringFromMessageType( + const TString& message_type_name, const TString& serialized_proto, + bool is_json_format); - bool IsStreaming(const TString& method, bool is_request); + bool IsStreaming(const TString& method, bool is_request); bool HasError() const { return has_error_; } - void LogError(const TString& error_msg); + void LogError(const TString& error_msg); private: - TString GetMessageTypeFromMethod(const TString& method, - bool is_request); + TString GetMessageTypeFromMethod(const TString& method, + bool is_request); bool has_error_; - TString request_text_; + TString request_text_; protobuf::compiler::DiskSourceTree source_tree_; std::unique_ptr<ErrorPrinter> error_printer_; std::unique_ptr<protobuf::compiler::Importer> importer_; @@ -119,7 +119,7 @@ class ProtoFileParser { std::unique_ptr<protobuf::DynamicMessageFactory> dynamic_factory_; std::unique_ptr<grpc::protobuf::Message> request_prototype_; std::unique_ptr<grpc::protobuf::Message> response_prototype_; - std::unordered_map<TString, TString> known_methods_; + std::unordered_map<TString, TString> known_methods_; std::vector<const protobuf::ServiceDescriptor*> service_desc_list_; }; diff --git a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc index dbc687e1ab..27a4c1e4cf 100644 --- a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc +++ b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc @@ -257,7 +257,7 @@ bool ProtoReflectionDescriptorDatabase::FindAllExtensionNumbers( } bool ProtoReflectionDescriptorDatabase::GetServices( - std::vector<TString>* output) { + std::vector<TString>* output) { ServerReflectionRequest request; request.set_list_services(""); ServerReflectionResponse response; @@ -292,7 +292,7 @@ bool ProtoReflectionDescriptorDatabase::GetServices( const protobuf::FileDescriptorProto ProtoReflectionDescriptorDatabase::ParseFileDescriptorProtoResponse( - const TString& byte_fd_proto) { + const TString& byte_fd_proto) { protobuf::FileDescriptorProto file_desc_proto; file_desc_proto.ParseFromString(google::protobuf::string(byte_fd_proto)); return file_desc_proto; diff --git a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h index 428044b1d6..cdd6f0cccd 100644 --- a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h +++ b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h @@ -74,7 +74,7 @@ class ProtoReflectionDescriptorDatabase : public protobuf::DescriptorDatabase { std::vector<int>* output) override; // Provide a list of full names of registered services - bool GetServices(std::vector<TString>* output); + bool GetServices(std::vector<TString>* output); private: typedef ClientReaderWriter< @@ -83,7 +83,7 @@ class ProtoReflectionDescriptorDatabase : public protobuf::DescriptorDatabase { ClientStream; const protobuf::FileDescriptorProto ParseFileDescriptorProtoResponse( - const TString& byte_fd_proto); + const TString& byte_fd_proto); void AddFileFromResponse( const grpc::reflection::v1alpha::FileDescriptorResponse& response); 
diff --git a/contrib/libs/grpc/test/cpp/util/service_describer.cc b/contrib/libs/grpc/test/cpp/util/service_describer.cc index 399784751c..2af1104b97 100644 --- a/contrib/libs/grpc/test/cpp/util/service_describer.cc +++ b/contrib/libs/grpc/test/cpp/util/service_describer.cc @@ -20,14 +20,14 @@ #include <iostream> #include <sstream> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <vector> namespace grpc { namespace testing { -TString DescribeServiceList(std::vector<TString> service_list, - grpc::protobuf::DescriptorPool& desc_pool) { +TString DescribeServiceList(std::vector<TString> service_list, + grpc::protobuf::DescriptorPool& desc_pool) { std::stringstream result; for (auto it = service_list.begin(); it != service_list.end(); it++) { auto const& service = *it; @@ -40,16 +40,16 @@ TString DescribeServiceList(std::vector<TString> service_list, return result.str(); } -TString DescribeService(const grpc::protobuf::ServiceDescriptor* service) { - TString result; +TString DescribeService(const grpc::protobuf::ServiceDescriptor* service) { + TString result; if (service->options().deprecated()) { result.append("DEPRECATED\n"); } result.append("filename: " + service->file()->name() + "\n"); - TString package = service->full_name(); + TString package = service->full_name(); size_t pos = package.rfind("." + service->name()); - if (pos != TString::npos) { + if (pos != TString::npos) { package.erase(pos); result.append("package: " + package + ";\n"); } @@ -61,7 +61,7 @@ TString DescribeService(const grpc::protobuf::ServiceDescriptor* service) { return result; } -TString DescribeMethod(const grpc::protobuf::MethodDescriptor* method) { +TString DescribeMethod(const grpc::protobuf::MethodDescriptor* method) { std::stringstream result; result << " rpc " << method->name() << (method->client_streaming() ? 
"(stream " : "(") @@ -74,16 +74,16 @@ TString DescribeMethod(const grpc::protobuf::MethodDescriptor* method) { return result.str(); } -TString SummarizeService(const grpc::protobuf::ServiceDescriptor* service) { - TString result; +TString SummarizeService(const grpc::protobuf::ServiceDescriptor* service) { + TString result; for (int i = 0; i < service->method_count(); ++i) { result.append(SummarizeMethod(service->method(i))); } return result; } -TString SummarizeMethod(const grpc::protobuf::MethodDescriptor* method) { - TString result = method->name(); +TString SummarizeMethod(const grpc::protobuf::MethodDescriptor* method) { + TString result = method->name(); result.append("\n"); return result; } diff --git a/contrib/libs/grpc/test/cpp/util/service_describer.h b/contrib/libs/grpc/test/cpp/util/service_describer.h index 352fe0f38c..a473f03744 100644 --- a/contrib/libs/grpc/test/cpp/util/service_describer.h +++ b/contrib/libs/grpc/test/cpp/util/service_describer.h @@ -25,16 +25,16 @@ namespace grpc { namespace testing { -TString DescribeServiceList(std::vector<TString> service_list, - grpc::protobuf::DescriptorPool& desc_pool); +TString DescribeServiceList(std::vector<TString> service_list, + grpc::protobuf::DescriptorPool& desc_pool); -TString DescribeService(const grpc::protobuf::ServiceDescriptor* service); +TString DescribeService(const grpc::protobuf::ServiceDescriptor* service); -TString DescribeMethod(const grpc::protobuf::MethodDescriptor* method); +TString DescribeMethod(const grpc::protobuf::MethodDescriptor* method); -TString SummarizeService(const grpc::protobuf::ServiceDescriptor* service); +TString SummarizeService(const grpc::protobuf::ServiceDescriptor* service); -TString SummarizeMethod(const grpc::protobuf::MethodDescriptor* method); +TString SummarizeMethod(const grpc::protobuf::MethodDescriptor* method); } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/util/slice_test.cc b/contrib/libs/grpc/test/cpp/util/slice_test.cc index ec370aa799..d7e945ae38 100644 --- a/contrib/libs/grpc/test/cpp/util/slice_test.cc +++ b/contrib/libs/grpc/test/cpp/util/slice_test.cc @@ -23,8 +23,8 @@ #include <grpc/slice.h> #include <gtest/gtest.h> -#include "test/core/util/test_config.h" - +#include "test/core/util/test_config.h" + namespace grpc { static internal::GrpcLibraryInitializer g_gli_initializer; @@ -39,13 +39,13 @@ class SliceTest : public ::testing::Test { static void TearDownTestCase() { grpc_shutdown(); } - void CheckSliceSize(const Slice& s, const TString& content) { + void CheckSliceSize(const Slice& s, const TString& content) { EXPECT_EQ(content.size(), s.size()); } - void CheckSlice(const Slice& s, const TString& content) { + void CheckSlice(const Slice& s, const TString& content) { EXPECT_EQ(content.size(), s.size()); EXPECT_EQ(content, - TString(reinterpret_cast<const char*>(s.begin()), s.size())); + TString(reinterpret_cast<const char*>(s.begin()), s.size())); } }; @@ -137,7 +137,7 @@ TEST_F(SliceTest, Cslice) { } // namespace grpc int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); return ret; diff --git a/contrib/libs/grpc/test/cpp/util/string_ref_helper.cc b/contrib/libs/grpc/test/cpp/util/string_ref_helper.cc index 4da437abe9..e573f5d33a 100644 --- a/contrib/libs/grpc/test/cpp/util/string_ref_helper.cc +++ b/contrib/libs/grpc/test/cpp/util/string_ref_helper.cc @@ -21,8 +21,8 @@ namespace grpc 
{ namespace testing { -TString ToString(const grpc::string_ref& r) { - return TString(r.data(), r.size()); +TString ToString(const grpc::string_ref& r) { + return TString(r.data(), r.size()); } } // namespace testing diff --git a/contrib/libs/grpc/test/cpp/util/string_ref_helper.h b/contrib/libs/grpc/test/cpp/util/string_ref_helper.h index eab876aea8..e9e941f319 100644 --- a/contrib/libs/grpc/test/cpp/util/string_ref_helper.h +++ b/contrib/libs/grpc/test/cpp/util/string_ref_helper.h @@ -24,7 +24,7 @@ namespace grpc { namespace testing { -TString ToString(const grpc::string_ref& r); +TString ToString(const grpc::string_ref& r); } // namespace testing } // namespace grpc diff --git a/contrib/libs/grpc/test/cpp/util/string_ref_test.cc b/contrib/libs/grpc/test/cpp/util/string_ref_test.cc index 3cad17f0b4..8e3259b764 100644 --- a/contrib/libs/grpc/test/cpp/util/string_ref_test.cc +++ b/contrib/libs/grpc/test/cpp/util/string_ref_test.cc @@ -22,8 +22,8 @@ #include <gtest/gtest.h> -#include "test/core/util/test_config.h" - +#include "test/core/util/test_config.h" + namespace grpc { namespace { @@ -199,7 +199,7 @@ TEST_F(StringRefTest, ComparisonOperators) { } // namespace grpc int main(int argc, char** argv) { - grpc::testing::TestEnvironment env(argc, argv); + grpc::testing::TestEnvironment env(argc, argv); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/contrib/libs/grpc/test/cpp/util/subprocess.cc b/contrib/libs/grpc/test/cpp/util/subprocess.cc index a90802b5af..648bd50274 100644 --- a/contrib/libs/grpc/test/cpp/util/subprocess.cc +++ b/contrib/libs/grpc/test/cpp/util/subprocess.cc @@ -24,7 +24,7 @@ namespace grpc { -static gpr_subprocess* MakeProcess(const std::vector<TString>& args) { +static gpr_subprocess* MakeProcess(const std::vector<TString>& args) { std::vector<const char*> vargs; for (auto it = args.begin(); it != args.end(); ++it) { vargs.push_back(it->c_str()); @@ -32,7 +32,7 @@ static gpr_subprocess* MakeProcess(const std::vector<TString>& args) { return gpr_subprocess_create(vargs.size(), &vargs[0]); } -SubProcess::SubProcess(const std::vector<TString>& args) +SubProcess::SubProcess(const std::vector<TString>& args) : subprocess_(MakeProcess(args)) {} SubProcess::~SubProcess() { gpr_subprocess_destroy(subprocess_); } diff --git a/contrib/libs/grpc/test/cpp/util/subprocess.h b/contrib/libs/grpc/test/cpp/util/subprocess.h index 763c6eb185..84dda31dd1 100644 --- a/contrib/libs/grpc/test/cpp/util/subprocess.h +++ b/contrib/libs/grpc/test/cpp/util/subprocess.h @@ -20,7 +20,7 @@ #define GRPC_TEST_CPP_UTIL_SUBPROCESS_H #include <initializer_list> -#include <util/generic/string.h> +#include <util/generic/string.h> #include <vector> struct gpr_subprocess; @@ -29,7 +29,7 @@ namespace grpc { class SubProcess { public: - SubProcess(const std::vector<TString>& args); + SubProcess(const std::vector<TString>& args); ~SubProcess(); int Join(); diff --git a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc index 63e5b95ada..f7134b773f 100644 --- a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc +++ b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc @@ -40,11 +40,11 @@ namespace grpc { namespace testing { namespace { -TString ReadFile(const TString& src_path) { +TString ReadFile(const TString& src_path) { std::ifstream src; src.open(src_path, std::ifstream::in | std::ifstream::binary); - TString contents; + TString contents; src.seekg(0, std::ios::end); 
contents.reserve(src.tellg()); src.seekg(0, std::ios::beg); @@ -66,7 +66,7 @@ class DefaultCredentialsProvider : public CredentialsProvider { ~DefaultCredentialsProvider() override {} void AddSecureType( - const TString& type, + const TString& type, std::unique_ptr<CredentialTypeProvider> type_provider) override { // This clobbers any existing entry for type, except the defaults, which // can't be clobbered. @@ -83,7 +83,7 @@ class DefaultCredentialsProvider : public CredentialsProvider { } std::shared_ptr<ChannelCredentials> GetChannelCredentials( - const TString& type, ChannelArguments* args) override { + const TString& type, ChannelArguments* args) override { if (type == grpc::testing::kInsecureCredentialsType) { return InsecureChannelCredentials(); } else if (type == grpc::testing::kAltsCredentialsType) { @@ -109,7 +109,7 @@ class DefaultCredentialsProvider : public CredentialsProvider { } std::shared_ptr<ServerCredentials> GetServerCredentials( - const TString& type) override { + const TString& type) override { if (type == grpc::testing::kInsecureCredentialsType) { return InsecureServerCredentials(); } else if (type == grpc::testing::kAltsCredentialsType) { @@ -140,8 +140,8 @@ class DefaultCredentialsProvider : public CredentialsProvider { ->GetServerCredentials(); } } - std::vector<TString> GetSecureCredentialsTypeList() override { - std::vector<TString> types; + std::vector<TString> GetSecureCredentialsTypeList() override { + std::vector<TString> types; types.push_back(grpc::testing::kTlsCredentialsType); std::unique_lock<std::mutex> lock(mu_); for (auto it = added_secure_type_names_.begin(); @@ -153,11 +153,11 @@ class DefaultCredentialsProvider : public CredentialsProvider { private: std::mutex mu_; - std::vector<TString> added_secure_type_names_; + std::vector<TString> added_secure_type_names_; std::vector<std::unique_ptr<CredentialTypeProvider>> added_secure_type_providers_; - TString custom_server_key_; - TString custom_server_cert_; + TString custom_server_key_; + TString custom_server_cert_; }; CredentialsProvider* g_provider = nullptr; diff --git a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.h b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.h index 6b5e7f4b99..acba277ada 100644 --- a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.h +++ b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.h @@ -53,21 +53,21 @@ class CredentialsProvider { // Add a secure type in addition to the defaults. The default provider has // (kInsecureCredentialsType, kTlsCredentialsType). virtual void AddSecureType( - const TString& type, + const TString& type, std::unique_ptr<CredentialTypeProvider> type_provider) = 0; // Provide channel credentials according to the given type. Alter the channel // arguments if needed. Return nullptr if type is not registered. virtual std::shared_ptr<ChannelCredentials> GetChannelCredentials( - const TString& type, ChannelArguments* args) = 0; + const TString& type, ChannelArguments* args) = 0; // Provide server credentials according to the given type. // Return nullptr if type is not registered. virtual std::shared_ptr<ServerCredentials> GetServerCredentials( - const TString& type) = 0; + const TString& type) = 0; // Provide a list of secure credentials type. - virtual std::vector<TString> GetSecureCredentialsTypeList() = 0; + virtual std::vector<TString> GetSecureCredentialsTypeList() = 0; }; // Get the current provider. Create a default one if not set. 
diff --git a/contrib/libs/grpc/test/cpp/util/time_test.cc b/contrib/libs/grpc/test/cpp/util/time_test.cc
index 474458789f..bcbfa14f94 100644
--- a/contrib/libs/grpc/test/cpp/util/time_test.cc
+++ b/contrib/libs/grpc/test/cpp/util/time_test.cc
@@ -20,8 +20,8 @@
 #include <grpcpp/support/time.h>
 #include <gtest/gtest.h>
 
-#include "test/core/util/test_config.h"
-
+#include "test/core/util/test_config.h"
+
 using std::chrono::duration_cast;
 using std::chrono::microseconds;
 using std::chrono::system_clock;
@@ -66,7 +66,7 @@ TEST_F(TimeTest, InfFuture) {
 }  // namespace grpc
 
 int main(int argc, char** argv) {
-  grpc::testing::TestEnvironment env(argc, argv);
+  grpc::testing::TestEnvironment env(argc, argv);
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/contrib/libs/grpc/test/cpp/util/ya.make b/contrib/libs/grpc/test/cpp/util/ya.make
index 12d102d8a1..f043cc5b14 100644
--- a/contrib/libs/grpc/test/cpp/util/ya.make
+++ b/contrib/libs/grpc/test/cpp/util/ya.make
@@ -2,10 +2,10 @@ LIBRARY()
 
 LICENSE(Apache-2.0)
 
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+OWNER(orivej)
 
-OWNER(orivej)
-
 PEERDIR(
     contrib/libs/gflags
     contrib/libs/protoc
@@ -14,10 +14,10 @@ PEERDIR(
     contrib/restricted/googletest/googletest
 )
 
-ADDINCL(
-    ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
-    contrib/libs/grpc
-)
+ADDINCL(
+    ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+    contrib/libs/grpc
+)
 
 NO_COMPILER_WARNINGS()